From 3218445630dada0b917df8fa54d123831740cf45 Mon Sep 17 00:00:00 2001 From: flywukong <2229306838@qq.com> Date: Mon, 28 Mar 2022 11:59:41 +0800 Subject: [PATCH 01/18] [PATCH] [R4R]add sharedStorage for prefetching to L1 (#792) * add sharedStorage for prefetching to L1 * remote originStorage in stateObjects * fix core * fix bug of sync map * remove read lock when get & set keys * statedb copy use CopyWithSharedStorage * reduce lock access * fix comment * avoid sharedPool effects on other modules * remove tryPreload * fix comment * fix var name * fix lint * fix L1 miss data && data condition * fix comment --- core/blockchain.go | 2 +- core/state/shared_pool.go | 39 ++++++++++++++ core/state/state_object.go | 56 ++++++++++++++++---- core/state/statedb.go | 104 +++++++++---------------------------- core/state_prefetcher.go | 1 + core/state_processor.go | 1 - 6 files changed, 111 insertions(+), 92 deletions(-) create mode 100644 core/state/shared_pool.go diff --git a/core/blockchain.go b/core/blockchain.go index 96c607a0ef..9ed4317b13 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -2102,7 +2102,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er if parent == nil { parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1) } - statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps) + statedb, err := state.NewWithSharedPool(parent.Root, bc.stateCache, bc.snaps) if err != nil { return it.index, err } diff --git a/core/state/shared_pool.go b/core/state/shared_pool.go new file mode 100644 index 0000000000..ba96c2c27d --- /dev/null +++ b/core/state/shared_pool.go @@ -0,0 +1,39 @@ +package state + +import ( + "sync" + + "github.com/ethereum/go-ethereum/common" +) + +// sharedPool is used to store maps of originStorage of stateObjects +type StoragePool struct { + sync.RWMutex + sharedMap map[common.Address]*sync.Map +} + +func NewStoragePool() *StoragePool { + sharedMap := make(map[common.Address]*sync.Map) + return 
&StoragePool{ + sync.RWMutex{}, + sharedMap, + } +} + +// getStorage Check whether the storage exist in pool, +// new one if not exist, the content of storage will be fetched in stateObjects.GetCommittedState() +func (s *StoragePool) getStorage(address common.Address) *sync.Map { + s.RLock() + storageMap, ok := s.sharedMap[address] + s.RUnlock() + if !ok { + s.Lock() + defer s.Unlock() + if storageMap, ok = s.sharedMap[address]; !ok { + m := new(sync.Map) + s.sharedMap[address] = m + return m + } + } + return storageMap +} diff --git a/core/state/state_object.go b/core/state/state_object.go index 36adf786d6..8830a9d0ad 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -21,6 +21,7 @@ import ( "fmt" "io" "math/big" + "sync" "time" "github.com/ethereum/go-ethereum/common" @@ -79,7 +80,9 @@ type StateObject struct { trie Trie // storage trie, which becomes non-nil on first access code Code // contract bytecode, which gets set when code is loaded - originStorage Storage // Storage cache of original entries to dedup rewrites, reset for every transaction + sharedOriginStorage *sync.Map // Storage cache of original entries to dedup rewrites, reset for every transaction + originStorage Storage + pendingStorage Storage // Storage entries that need to be flushed to disk, at the end of an entire block dirtyStorage Storage // Storage entries that have been modified in the current transaction execution fakeStorage Storage // Fake storage which constructed by caller for debugging purpose. 
@@ -120,14 +123,21 @@ func newObject(db *StateDB, address common.Address, data Account) *StateObject { if data.Root == (common.Hash{}) { data.Root = emptyRoot } + var storageMap *sync.Map + // Check whether the storage exist in pool, new originStorage if not exist + if db != nil && db.storagePool != nil { + storageMap = db.GetStorage(address) + } + return &StateObject{ - db: db, - address: address, - addrHash: crypto.Keccak256Hash(address[:]), - data: data, - originStorage: make(Storage), - pendingStorage: make(Storage), - dirtyStorage: make(Storage), + db: db, + address: address, + addrHash: crypto.Keccak256Hash(address[:]), + data: data, + sharedOriginStorage: storageMap, + originStorage: make(Storage), + pendingStorage: make(Storage), + dirtyStorage: make(Storage), } } @@ -194,6 +204,29 @@ func (s *StateObject) GetState(db Database, key common.Hash) common.Hash { return s.GetCommittedState(db, key) } +func (s *StateObject) getOriginStorage(key common.Hash) (common.Hash, bool) { + if value, cached := s.originStorage[key]; cached { + return value, true + } + // if L1 cache miss, try to get it from shared pool + if s.sharedOriginStorage != nil { + val, ok := s.sharedOriginStorage.Load(key) + if !ok { + return common.Hash{}, false + } + s.originStorage[key] = val.(common.Hash) + return val.(common.Hash), true + } + return common.Hash{}, false +} + +func (s *StateObject) setOriginStorage(key common.Hash, value common.Hash) { + if s.db.writeOnSharedStorage && s.sharedOriginStorage != nil { + s.sharedOriginStorage.Store(key, value) + } + s.originStorage[key] = value +} + // GetCommittedState retrieves a value from the committed account storage trie. 
func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Hash { // If the fake storage is set, only lookup the state here(in the debugging mode) @@ -204,7 +237,8 @@ func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Has if value, pending := s.pendingStorage[key]; pending { return value } - if value, cached := s.originStorage[key]; cached { + + if value, cached := s.getOriginStorage(key); cached { return value } // If no live objects are available, attempt to use snapshots @@ -263,7 +297,7 @@ func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Has } value.SetBytes(content) } - s.originStorage[key] = value + s.setOriginStorage(key, value) return value } @@ -320,6 +354,7 @@ func (s *StateObject) finalise(prefetch bool) { slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure } } + if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != emptyRoot { s.db.prefetcher.prefetch(s.data.Root, slotsToPrefetch, s.addrHash) } @@ -356,7 +391,6 @@ func (s *StateObject) updateTrie(db Database) Trie { continue } s.originStorage[key] = value - var v []byte if (value == common.Hash{}) { s.setError(tr.TryDelete(key[:])) diff --git a/core/state/statedb.go b/core/state/statedb.go index 3a4297ea2f..debf0c65dd 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -39,10 +39,7 @@ import ( "github.com/ethereum/go-ethereum/trie" ) -const ( - preLoadLimit = 128 - defaultNumOfSlots = 100 -) +const defaultNumOfSlots = 100 type revision struct { id int @@ -188,9 +185,11 @@ type StateDB struct { snapStorage map[common.Address]map[string][]byte // This map holds 'live' objects, which will get modified while processing a state transition. 
- stateObjects map[common.Address]*StateObject - stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie - stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution + stateObjects map[common.Address]*StateObject + stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie + stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution + storagePool *StoragePool // sharedPool to store L1 originStorage of stateObjects + writeOnSharedStorage bool // Write to the shared origin storage of a stateObject while reading from the underlying storage layer. isParallel bool parallel ParallelState // to keep all the parallel execution elements @@ -264,6 +263,16 @@ func NewSlotDB(db *StateDB, systemAddr common.Address, baseTxIndex int, keepSyst return slotDB } +// NewWithSharedPool creates a new state with sharedStorge on layer 1.5 +func NewWithSharedPool(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) { + statedb, err := newStateDB(root, db, snaps) + if err != nil { + return nil, err + } + statedb.storagePool = NewStoragePool() + return statedb, nil +} + func newStateDB(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) { sdb := &StateDB{ db: db, @@ -480,6 +489,10 @@ func (s *StateDB) MergeSlotDB(slotDb *StateDB, slotReceipt *types.Receipt, txInd return changeList } +func (s *StateDB) EnableWriteOnSharedStorage() { + s.writeOnSharedStorage = true +} + // StartPrefetcher initializes a new trie prefetcher to pull in nodes from the // state trie concurrently while the state is mutated so that when we reach the // commit phase, most of the needed data is already hot. 
@@ -1080,78 +1093,6 @@ func (s *StateDB) getStateObject(addr common.Address) *StateObject { return nil } -func (s *StateDB) TryPreload(block *types.Block, signer types.Signer) { - accounts := make(map[common.Address]bool, block.Transactions().Len()) - accountsSlice := make([]common.Address, 0, block.Transactions().Len()) - for _, tx := range block.Transactions() { - from, err := types.Sender(signer, tx) - if err != nil { - break - } - accounts[from] = true - if tx.To() != nil { - accounts[*tx.To()] = true - } - } - for account := range accounts { - accountsSlice = append(accountsSlice, account) - } - if len(accountsSlice) >= preLoadLimit && len(accountsSlice) > runtime.NumCPU() { - objsChan := make(chan []*StateObject, runtime.NumCPU()) - for i := 0; i < runtime.NumCPU(); i++ { - start := i * len(accountsSlice) / runtime.NumCPU() - end := (i + 1) * len(accountsSlice) / runtime.NumCPU() - if i+1 == runtime.NumCPU() { - end = len(accountsSlice) - } - go func(start, end int) { - objs := s.preloadStateObject(accountsSlice[start:end]) - objsChan <- objs - }(start, end) - } - for i := 0; i < runtime.NumCPU(); i++ { - objs := <-objsChan - for _, obj := range objs { - s.SetStateObject(obj) - } - } - } -} - -func (s *StateDB) preloadStateObject(address []common.Address) []*StateObject { - // Prefer live objects if any is available - if s.snap == nil { - return nil - } - hasher := crypto.NewKeccakState() - objs := make([]*StateObject, 0, len(address)) - for _, addr := range address { - // If no live objects are available, attempt to use snapshots - if acc, err := s.snap.Account(crypto.HashData(hasher, addr.Bytes())); err == nil { - if acc == nil { - continue - } - data := &Account{ - Nonce: acc.Nonce, - Balance: acc.Balance, - CodeHash: acc.CodeHash, - Root: common.BytesToHash(acc.Root), - } - if len(data.CodeHash) == 0 { - data.CodeHash = emptyCodeHash - } - if data.Root == (common.Hash{}) { - data.Root = emptyRoot - } - // Insert into the live set - obj := newObject(s, 
addr, *data) - objs = append(objs, obj) - } - // Do not enable this feature when snapshot is not enabled. - } - return objs -} - // getDeletedStateObject is similar to getStateObject, but instead of returning // nil for a deleted state object, it returns the actual object with the deleted // flag set. This is needed by the state journal to revert to the correct s- @@ -1332,6 +1273,7 @@ func (s *StateDB) Copy() *StateDB { stateObjects: make(map[common.Address]*StateObject, len(s.journal.dirties)), stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)), stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)), + storagePool: s.storagePool, refund: s.refund, logs: make(map[common.Hash][]*types.Log, len(s.logs)), logSize: s.logSize, @@ -2221,3 +2163,7 @@ func (s *StateDB) GetDirtyAccounts() []common.Address { } return accounts } + +func (s *StateDB) GetStorage(address common.Address) *sync.Map { + return s.storagePool.getStorage(address) +} diff --git a/core/state_prefetcher.go b/core/state_prefetcher.go index d559a03a0f..2832d1433e 100644 --- a/core/state_prefetcher.go +++ b/core/state_prefetcher.go @@ -67,6 +67,7 @@ func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, c for i := 0; i < prefetchThread; i++ { go func(idx int) { newStatedb := statedb.Copy() + newStatedb.EnableWriteOnSharedStorage() gaspool := new(GasPool).AddGas(block.GasLimit()) blockContext := NewEVMBlockContext(header, p.bc, nil) evm := vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg) diff --git a/core/state_processor.go b/core/state_processor.go index 38fe88ef99..b3aebcab89 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -949,7 +949,6 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat // Before transactions are executed, do shared preparation for Process() & ProcessParallel() func (p *StateProcessor) preExecute(block *types.Block, statedb *state.StateDB, cfg 
vm.Config, parallel bool) (types.Signer, *vm.EVM, *AsyncReceiptBloomGenerator) { signer := types.MakeSigner(p.bc.chainConfig, block.Number()) - statedb.TryPreload(block, signer) // Mutate the block and state according to any hard-fork specs if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 { misc.ApplyDAOHardFork(statedb) From dbf78a3072bac386044d158e50d2784f8d4c68cb Mon Sep 17 00:00:00 2001 From: lunarblock <84233204+lunarblock@users.noreply.github.com> Date: Tue, 12 Apr 2022 11:02:02 +0800 Subject: [PATCH 02/18] [R4R] add sync pool for slotdb and remove deepCopy for mergeSlotDB (#1) * add sync pool for slotdb and remove deepCopy for mergeSlotDB * don't panic if there is anything wrong reading state * use map in sequential mode and sync.map in parallel mode * fix the comments --- core/state/dump.go | 2 +- core/state/state_object.go | 189 ++++++++++++++++++++++++++--------- core/state/state_test.go | 53 ++++++---- core/state/statedb.go | 197 +++++++++++++++++++++++++++++++------ core/state_processor.go | 15 +++ 5 files changed, 362 insertions(+), 94 deletions(-) diff --git a/core/state/dump.go b/core/state/dump.go index b25da714fd..55f4c7754d 100644 --- a/core/state/dump.go +++ b/core/state/dump.go @@ -138,7 +138,7 @@ func (s *StateDB) DumpToCollector(c DumpCollector, excludeCode, excludeStorage, account.SecureKey = it.Key } addr := common.BytesToAddress(addrBytes) - obj := newObject(s, addr, data) + obj := newObject(s, s.isParallel, addr, data) if !excludeCode { account.Code = common.Bytes2Hex(obj.Code(s.db)) } diff --git a/core/state/state_object.go b/core/state/state_object.go index 8830a9d0ad..ce8926609a 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -38,9 +38,18 @@ func (c Code) String() string { return string(c) //strings.Join(Disassemble(c), " ") } -type Storage map[common.Hash]common.Hash +type Storage interface { + String() string + GetValue(hash common.Hash) 
(common.Hash, bool) + StoreValue(hash common.Hash, value common.Hash) + Length() (length int) + Copy() Storage + Range(func(key, value interface{}) bool) +} + +type StorageMap map[common.Hash]common.Hash -func (s Storage) String() (str string) { +func (s StorageMap) String() (str string) { for key, value := range s { str += fmt.Sprintf("%X : %X\n", key, value) } @@ -48,8 +57,8 @@ func (s Storage) String() (str string) { return } -func (s Storage) Copy() Storage { - cpy := make(Storage) +func (s StorageMap) Copy() Storage { + cpy := make(StorageMap) for key, value := range s { cpy[key] = value } @@ -57,6 +66,79 @@ func (s Storage) Copy() Storage { return cpy } +func (s StorageMap) GetValue(hash common.Hash) (common.Hash, bool) { + value, ok := s[hash] + return value, ok +} + +func (s StorageMap) StoreValue(hash common.Hash, value common.Hash) { + s[hash] = value +} + +func (s StorageMap) Length() int { + return len(s) +} + +func (s StorageMap) Range(f func(hash, value interface{}) bool) { + for k, v := range s { + result := f(k, v) + if !result { + return + } + } +} + +type StorageSyncMap struct { + sync.Map +} + +func (s *StorageSyncMap) String() (str string) { + s.Range(func(key, value interface{}) bool { + str += fmt.Sprintf("%X : %X\n", key, value) + return true + }) + + return +} + +func (s *StorageSyncMap) GetValue(hash common.Hash) (common.Hash, bool) { + value, ok := s.Load(hash) + if !ok { + return common.Hash{}, ok + } + + return value.(common.Hash), ok +} + +func (s *StorageSyncMap) StoreValue(hash common.Hash, value common.Hash) { + s.Store(hash, value) +} + +func (s *StorageSyncMap) Length() (length int) { + s.Range(func(key, value interface{}) bool { + length++ + return true + }) + return length +} + +func (s *StorageSyncMap) Copy() Storage { + cpy := StorageSyncMap{} + s.Range(func(key, value interface{}) bool { + cpy.Store(key, value) + return true + }) + + return &cpy +} + +func newStorage(isParallel bool) Storage { + if isParallel { + return 
&StorageSyncMap{} + } + return make(StorageMap) +} + // StateObject represents an Ethereum account which is being modified. // // The usage pattern is as follows: @@ -80,12 +162,12 @@ type StateObject struct { trie Trie // storage trie, which becomes non-nil on first access code Code // contract bytecode, which gets set when code is loaded + isParallel bool // isParallel indicates this state object is used in parallel mode sharedOriginStorage *sync.Map // Storage cache of original entries to dedup rewrites, reset for every transaction - originStorage Storage - - pendingStorage Storage // Storage entries that need to be flushed to disk, at the end of an entire block - dirtyStorage Storage // Storage entries that have been modified in the current transaction execution - fakeStorage Storage // Fake storage which constructed by caller for debugging purpose. + originStorage Storage // Storage cache of original entries to dedup rewrites, reset for every transaction + pendingStorage Storage // Storage entries that need to be flushed to disk, at the end of an entire block + dirtyStorage Storage // Storage entries that have been modified in the current transaction execution + fakeStorage Storage // Fake storage which constructed by caller for debugging purpose. // Cache flags. // When an object is marked suicided it will be delete from the trie @@ -113,7 +195,7 @@ type Account struct { } // newObject creates a state object. 
-func newObject(db *StateDB, address common.Address, data Account) *StateObject { +func newObject(db *StateDB, isParallel bool, address common.Address, data Account) *StateObject { if data.Balance == nil { data.Balance = new(big.Int) } @@ -134,10 +216,11 @@ func newObject(db *StateDB, address common.Address, data Account) *StateObject { address: address, addrHash: crypto.Keccak256Hash(address[:]), data: data, + isParallel: isParallel, sharedOriginStorage: storageMap, - originStorage: make(Storage), - pendingStorage: make(Storage), - dirtyStorage: make(Storage), + originStorage: newStorage(isParallel), + dirtyStorage: newStorage(isParallel), + pendingStorage: newStorage(isParallel), } } @@ -193,10 +276,11 @@ func (s *StateObject) getTrie(db Database) Trie { func (s *StateObject) GetState(db Database, key common.Hash) common.Hash { // If the fake storage is set, only lookup the state here(in the debugging mode) if s.fakeStorage != nil { - return s.fakeStorage[key] + fakeValue, _ := s.fakeStorage.GetValue(key) + return fakeValue } // If we have a dirty value for this state entry, return it - value, dirty := s.dirtyStorage[key] + value, dirty := s.dirtyStorage.GetValue(key) if dirty { return value } @@ -205,7 +289,7 @@ func (s *StateObject) GetState(db Database, key common.Hash) common.Hash { } func (s *StateObject) getOriginStorage(key common.Hash) (common.Hash, bool) { - if value, cached := s.originStorage[key]; cached { + if value, cached := s.originStorage.GetValue(key); cached { return value, true } // if L1 cache miss, try to get it from shared pool @@ -214,7 +298,7 @@ func (s *StateObject) getOriginStorage(key common.Hash) (common.Hash, bool) { if !ok { return common.Hash{}, false } - s.originStorage[key] = val.(common.Hash) + s.originStorage.StoreValue(key, val.(common.Hash)) return val.(common.Hash), true } return common.Hash{}, false @@ -224,17 +308,18 @@ func (s *StateObject) setOriginStorage(key common.Hash, value common.Hash) { if s.db.writeOnSharedStorage 
&& s.sharedOriginStorage != nil { s.sharedOriginStorage.Store(key, value) } - s.originStorage[key] = value + s.originStorage.StoreValue(key, value) } // GetCommittedState retrieves a value from the committed account storage trie. func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Hash { // If the fake storage is set, only lookup the state here(in the debugging mode) if s.fakeStorage != nil { - return s.fakeStorage[key] + fakeValue, _ := s.fakeStorage.GetValue(key) + return fakeValue } // If we have a pending write or clean cached, return that - if value, pending := s.pendingStorage[key]; pending { + if value, pending := s.pendingStorage.GetValue(key); pending { return value } @@ -305,7 +390,7 @@ func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Has func (s *StateObject) SetState(db Database, key, value common.Hash) { // If the fake storage is set, put the temporary state update here. if s.fakeStorage != nil { - s.fakeStorage[key] = value + s.fakeStorage.StoreValue(key, value) return } // If the new value is the same as old, don't set @@ -331,35 +416,39 @@ func (s *StateObject) SetState(db Database, key, value common.Hash) { func (s *StateObject) SetStorage(storage map[common.Hash]common.Hash) { // Allocate fake storage if it's nil. if s.fakeStorage == nil { - s.fakeStorage = make(Storage) + s.fakeStorage = newStorage(s.isParallel) } for key, value := range storage { - s.fakeStorage[key] = value + s.fakeStorage.StoreValue(key, value) } // Don't bother journal since this function should only be used for // debugging and the `fake` storage won't be committed to database. } func (s *StateObject) setState(key, value common.Hash) { - s.dirtyStorage[key] = value + s.dirtyStorage.StoreValue(key, value) } // finalise moves all dirty storage slots into the pending area to be hashed or // committed later. It is invoked at the end of every transaction. 
func (s *StateObject) finalise(prefetch bool) { - slotsToPrefetch := make([][]byte, 0, len(s.dirtyStorage)) - for key, value := range s.dirtyStorage { - s.pendingStorage[key] = value - if value != s.originStorage[key] { - slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure + slotsToPrefetch := make([][]byte, 0, s.dirtyStorage.Length()) + s.dirtyStorage.Range(func(key, value interface{}) bool { + s.pendingStorage.StoreValue(key.(common.Hash), value.(common.Hash)) + + originalValue, _ := s.originStorage.GetValue(key.(common.Hash)) + if value.(common.Hash) != originalValue { + originalKey := key.(common.Hash) + slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(originalKey[:])) // Copy needed for closure } - } + return true + }) if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != emptyRoot { s.db.prefetcher.prefetch(s.data.Root, slotsToPrefetch, s.addrHash) } - if len(s.dirtyStorage) > 0 { - s.dirtyStorage = make(Storage) + if s.dirtyStorage.Length() > 0 { + s.dirtyStorage = newStorage(s.isParallel) } } @@ -368,7 +457,7 @@ func (s *StateObject) finalise(prefetch bool) { func (s *StateObject) updateTrie(db Database) Trie { // Make sure all dirty slots are finalized into the pending storage area s.finalise(false) // Don't prefetch any more, pull directly if need be - if len(s.pendingStorage) == 0 { + if s.pendingStorage.Length() == 0 { return s.trie } // Track the amount of time wasted on updating the storage trie @@ -384,20 +473,26 @@ func (s *StateObject) updateTrie(db Database) Trie { // Insert all the pending updates into the trie tr := s.getTrie(db) - usedStorage := make([][]byte, 0, len(s.pendingStorage)) - for key, value := range s.pendingStorage { + usedStorage := make([][]byte, 0, s.pendingStorage.Length()) + s.pendingStorage.Range(func(k, v interface{}) bool { + key := k.(common.Hash) + value := v.(common.Hash) + // Skip noop changes, persist actual changes - if value == 
s.originStorage[key] { - continue + originalValue, _ := s.originStorage.GetValue(k.(common.Hash)) + if v.(common.Hash) == originalValue { + return true } - s.originStorage[key] = value - var v []byte + + s.originStorage.StoreValue(k.(common.Hash), v.(common.Hash)) + + var vs []byte if (value == common.Hash{}) { s.setError(tr.TryDelete(key[:])) } else { // Encoding []byte cannot fail, ok to ignore the error. - v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:])) - s.setError(tr.TryUpdate(key[:], v)) + vs, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:])) + s.setError(tr.TryUpdate(key[:], vs)) } // If state snapshotting is active, cache the data til commit if s.db.snap != nil { @@ -409,16 +504,18 @@ func (s *StateObject) updateTrie(db Database) Trie { s.db.snapStorage[s.address] = storage } } - storage[string(key[:])] = v // v will be nil if value is 0x00 + storage[string(key[:])] = vs // v will be nil if value is 0x00 s.db.snapMux.Unlock() } usedStorage = append(usedStorage, common.CopyBytes(key[:])) // Copy needed for closure - } + return true + }) + if s.db.prefetcher != nil { s.db.prefetcher.used(s.data.Root, usedStorage) } - if len(s.pendingStorage) > 0 { - s.pendingStorage = make(Storage) + if s.pendingStorage.Length() > 0 { + s.pendingStorage = newStorage(s.isParallel) } return tr } @@ -506,7 +603,7 @@ func (s *StateObject) setBalance(amount *big.Int) { func (s *StateObject) ReturnGas(gas *big.Int) {} func (s *StateObject) deepCopy(db *StateDB) *StateObject { - stateObject := newObject(db, s.address, s.data) + stateObject := newObject(db, s.isParallel, s.address, s.data) if s.trie != nil { stateObject.trie = db.db.CopyTrie(s.trie) } diff --git a/core/state/state_test.go b/core/state/state_test.go index 4be9ae8ce3..a8417b13e7 100644 --- a/core/state/state_test.go +++ b/core/state/state_test.go @@ -227,30 +227,47 @@ func compareStateObjects(so0, so1 *StateObject, t *testing.T) { t.Fatalf("Code mismatch: have %v, want %v", so0.code, so1.code) } - if 
len(so1.dirtyStorage) != len(so0.dirtyStorage) { - t.Errorf("Dirty storage size mismatch: have %d, want %d", len(so1.dirtyStorage), len(so0.dirtyStorage)) + if so1.dirtyStorage.Length() != so0.dirtyStorage.Length() { + t.Errorf("Dirty storage size mismatch: have %d, want %d", so1.dirtyStorage.Length(), so0.dirtyStorage.Length()) } - for k, v := range so1.dirtyStorage { - if so0.dirtyStorage[k] != v { - t.Errorf("Dirty storage key %x mismatch: have %v, want %v", k, so0.dirtyStorage[k], v) + + so1.dirtyStorage.Range(func(key, value interface{}) bool { + k, v := key.(common.Hash), value.(common.Hash) + + if tmpV, _ := so0.dirtyStorage.GetValue(k); tmpV != v { + t.Errorf("Dirty storage key %x mismatch: have %v, want %v", k, tmpV.String(), v) } - } - for k, v := range so0.dirtyStorage { - if so1.dirtyStorage[k] != v { + return true + }) + + so0.dirtyStorage.Range(func(key, value interface{}) bool { + k, v := key.(common.Hash), value.(common.Hash) + + if tmpV, _ := so1.dirtyStorage.GetValue(k); tmpV != v { t.Errorf("Dirty storage key %x mismatch: have %v, want none.", k, v) } + return true + }) + + if so1.originStorage.Length() != so0.originStorage.Length() { + t.Errorf("Origin storage size mismatch: have %d, want %d", so1.originStorage.Length(), so0.originStorage.Length()) } - if len(so1.originStorage) != len(so0.originStorage) { - t.Errorf("Origin storage size mismatch: have %d, want %d", len(so1.originStorage), len(so0.originStorage)) - } - for k, v := range so1.originStorage { - if so0.originStorage[k] != v { - t.Errorf("Origin storage key %x mismatch: have %v, want %v", k, so0.originStorage[k], v) + + so1.originStorage.Range(func(key, value interface{}) bool { + k, v := key.(common.Hash), value.(common.Hash) + + if tmpV, _ := so0.originStorage.GetValue(k); tmpV != v { + t.Errorf("Origin storage key %x mismatch: have %v, want %v", k, tmpV, v) } - } - for k, v := range so0.originStorage { - if so1.originStorage[k] != v { + return true + }) + + 
so0.originStorage.Range(func(key, value interface{}) bool { + k, v := key.(common.Hash), value.(common.Hash) + + if tmpV, _ := so1.originStorage.GetValue(k); tmpV != v { t.Errorf("Origin storage key %x mismatch: have %v, want none.", k, v) } - } + return true + }) } diff --git a/core/state/statedb.go b/core/state/statedb.go index debf0c65dd..4c67b6e531 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -153,6 +153,9 @@ type ParallelState struct { systemAddress common.Address systemAddressOpsCount int keepSystemAddressBalance bool + + // we may need to redo for some specific reasons, like we read the wrong state and need to panic in sequential mode in SubRefund + needsRedo bool } // StateDB structs within the ethereum protocol are used to store anything @@ -379,9 +382,9 @@ func (s *StateDB) MergeSlotDB(slotDb *StateDB, slotReceipt *types.Receipt, txInd } else { // addr already in main DB, do merge: balance, KV, code, State(create, suicide) // can not do copy or ownership transfer directly, since dirtyObj could have outdated - // data(may be update within the conflict window) + // data(may be updated within the conflict window) - var newMainObj *StateObject + var newMainObj = mainObj // we don't need to copy the object since the storages are thread safe if _, created := slotDb.parallel.addrStateChangesInSlot[addr]; created { // there are 3 kinds of state change: // 1.Suicide @@ -405,7 +408,6 @@ func (s *StateDB) MergeSlotDB(slotDb *StateDB, slotReceipt *types.Receipt, txInd // deepCopy a temporary *StateObject for safety, since slot could read the address, // dispatch should avoid overwrite the StateObject directly otherwise, it could // crash for: concurrent map iteration and map write - newMainObj = mainObj.deepCopy(s) if _, balanced := slotDb.parallel.balanceChangesInSlot[addr]; balanced { log.Debug("merge state object: Balance", "newMainObj.Balance()", newMainObj.Balance(), @@ -630,6 +632,12 @@ func (s *StateDB) AddRefund(gas uint64) { func (s 
*StateDB) SubRefund(gas uint64) { s.journal.append(refundChange{prev: s.refund}) if gas > s.refund { + if s.isParallel { + // we don't need to panic here if we read the wrong state, we just need to redo this transaction + log.Info(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, s.refund), "tx", s.thash.String()) + s.parallel.needsRedo = true + return + } panic(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, s.refund)) } s.refund -= gas @@ -711,6 +719,11 @@ func (s *StateDB) SystemAddressRedo() bool { return s.parallel.systemAddressOpsCount > 2 } +// NeedsRedo returns true if there is any clear reason that we need to redo this transaction +func (s *StateDB) NeedsRedo() bool { + return s.parallel.needsRedo +} + func (s *StateDB) GetCode(addr common.Address) []byte { if s.parallel.isSlotDB { s.parallel.codeReadsInSlot[addr] = struct{}{} @@ -1158,7 +1171,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject { } } // Insert into the live set - obj := newObject(s, addr, *data) + obj := newObject(s, s.isParallel, addr, *data) s.SetStateObject(obj) return obj } @@ -1199,7 +1212,7 @@ func (s *StateDB) createObject(addr common.Address) (newobj, prev *StateObject) s.snapDestructs[prev.address] = struct{}{} } } - newobj = newObject(s, addr, Account{}) + newobj = newObject(s, s.isParallel, addr, Account{}) newobj.setNonce(0) // sets the object to dirty if prev == nil { s.journal.append(createObjectChange{account: &addr}) @@ -1243,7 +1256,7 @@ func (s *StateDB) ForEachStorage(addr common.Address, cb func(key, value common. 
for it.Next() { key := common.BytesToHash(s.trie.GetKey(it.Key)) - if value, dirty := so.dirtyStorage[key]; dirty { + if value, dirty := so.dirtyStorage.GetValue(key); dirty { if !cb(key, value) { return nil } @@ -1369,40 +1382,166 @@ func (s *StateDB) Copy() *StateDB { return state } -// Copy all the basic fields, initialize the memory ones +var addressStructPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]struct{}, defaultNumOfSlots) }, +} + +var journalPool = sync.Pool{ + New: func() interface{} { + return &journal{ + dirties: make(map[common.Address]int, defaultNumOfSlots), + entries: make([]journalEntry, 0, defaultNumOfSlots), + } + }, +} + +var stateKeysPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]StateKeys, defaultNumOfSlots) }, +} + +var stateObjectsPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]*StateObject, defaultNumOfSlots) }, +} + +var snapAccountPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address][]byte, defaultNumOfSlots) }, +} + +var snapStoragePool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]map[string][]byte, defaultNumOfSlots) }, +} + +var snapStorageValuePool = sync.Pool{ + New: func() interface{} { return make(map[string][]byte, defaultNumOfSlots) }, +} + +var logsPool = sync.Pool{ + New: func() interface{} { return make(map[common.Hash][]*types.Log, defaultNumOfSlots) }, +} + +func (s *StateDB) SlotDBPutSyncPool() { + for key := range s.parallel.stateObjectsSuicidedInSlot { + delete(s.parallel.stateObjectsSuicidedInSlot, key) + } + addressStructPool.Put(s.parallel.stateObjectsSuicidedInSlot) + + for key := range s.parallel.codeReadsInSlot { + delete(s.parallel.codeReadsInSlot, key) + } + addressStructPool.Put(s.parallel.codeReadsInSlot) + + for key := range s.parallel.codeChangesInSlot { + delete(s.parallel.codeChangesInSlot, key) + } + addressStructPool.Put(s.parallel.codeChangesInSlot) + + for key := 
range s.parallel.balanceChangesInSlot { + delete(s.parallel.balanceChangesInSlot, key) + } + addressStructPool.Put(s.parallel.balanceChangesInSlot) + + for key := range s.parallel.balanceReadsInSlot { + delete(s.parallel.balanceReadsInSlot, key) + } + addressStructPool.Put(s.parallel.balanceReadsInSlot) + + for key := range s.parallel.addrStateReadsInSlot { + delete(s.parallel.addrStateReadsInSlot, key) + } + addressStructPool.Put(s.parallel.addrStateReadsInSlot) + + for key := range s.parallel.nonceChangesInSlot { + delete(s.parallel.nonceChangesInSlot, key) + } + addressStructPool.Put(s.parallel.nonceChangesInSlot) + + for key := range s.stateObjectsPending { + delete(s.stateObjectsPending, key) + } + addressStructPool.Put(s.stateObjectsPending) + + for key := range s.stateObjectsDirty { + delete(s.stateObjectsDirty, key) + } + addressStructPool.Put(s.stateObjectsDirty) + + for key := range s.journal.dirties { + delete(s.journal.dirties, key) + } + s.journal.entries = s.journal.entries[:0] + journalPool.Put(s.journal) + + for key := range s.parallel.stateChangesInSlot { + delete(s.parallel.stateChangesInSlot, key) + } + stateKeysPool.Put(s.parallel.stateChangesInSlot) + + for key := range s.parallel.stateReadsInSlot { + delete(s.parallel.stateReadsInSlot, key) + } + stateKeysPool.Put(s.parallel.stateReadsInSlot) + + for key := range s.parallel.dirtiedStateObjectsInSlot { + delete(s.parallel.dirtiedStateObjectsInSlot, key) + } + stateObjectsPool.Put(s.parallel.dirtiedStateObjectsInSlot) + + for key := range s.snapDestructs { + delete(s.snapDestructs, key) + } + addressStructPool.Put(s.snapDestructs) + + for key := range s.snapAccounts { + delete(s.snapAccounts, key) + } + snapAccountPool.Put(s.snapAccounts) + + for key, storage := range s.snapStorage { + for key := range storage { + delete(storage, key) + } + snapStorageValuePool.Put(storage) + delete(s.snapStorage, key) + } + snapStoragePool.Put(s.snapStorage) + + for key := range s.logs { + delete(s.logs, key) + 
} + logsPool.Put(s.logs) +} + +// CopyForSlot copy all the basic fields, initialize the memory ones func (s *StateDB) CopyForSlot() *StateDB { parallel := ParallelState{ // use base(dispatcher) slot db's stateObjects. // It is a SyncMap, only readable to slot, not writable stateObjects: s.parallel.stateObjects, - stateObjectsSuicidedInSlot: make(map[common.Address]struct{}, 10), - codeReadsInSlot: make(map[common.Address]struct{}, defaultNumOfSlots), - codeChangesInSlot: make(map[common.Address]struct{}, 10), - stateChangesInSlot: make(map[common.Address]StateKeys, defaultNumOfSlots), - stateReadsInSlot: make(map[common.Address]StateKeys, defaultNumOfSlots), - balanceChangesInSlot: make(map[common.Address]struct{}, defaultNumOfSlots), - balanceReadsInSlot: make(map[common.Address]struct{}, defaultNumOfSlots), - addrStateReadsInSlot: make(map[common.Address]struct{}, defaultNumOfSlots), - addrStateChangesInSlot: make(map[common.Address]struct{}, 10), - nonceChangesInSlot: make(map[common.Address]struct{}, 10), + stateObjectsSuicidedInSlot: addressStructPool.Get().(map[common.Address]struct{}), + codeReadsInSlot: addressStructPool.Get().(map[common.Address]struct{}), + codeChangesInSlot: addressStructPool.Get().(map[common.Address]struct{}), + stateChangesInSlot: stateKeysPool.Get().(map[common.Address]StateKeys), + stateReadsInSlot: stateKeysPool.Get().(map[common.Address]StateKeys), + balanceChangesInSlot: addressStructPool.Get().(map[common.Address]struct{}), + balanceReadsInSlot: addressStructPool.Get().(map[common.Address]struct{}), + addrStateReadsInSlot: addressStructPool.Get().(map[common.Address]struct{}), + addrStateChangesInSlot: addressStructPool.Get().(map[common.Address]struct{}), + nonceChangesInSlot: addressStructPool.Get().(map[common.Address]struct{}), isSlotDB: true, - dirtiedStateObjectsInSlot: make(map[common.Address]*StateObject, defaultNumOfSlots), + dirtiedStateObjectsInSlot: stateObjectsPool.Get().(map[common.Address]*StateObject), } state := 
&StateDB{ db: s.db, trie: s.db.CopyTrie(s.trie), stateObjects: make(map[common.Address]*StateObject), // replaced by parallel.stateObjects in parallel mode - stateObjectsPending: make(map[common.Address]struct{}, defaultNumOfSlots), - stateObjectsDirty: make(map[common.Address]struct{}, defaultNumOfSlots), + stateObjectsPending: addressStructPool.Get().(map[common.Address]struct{}), + stateObjectsDirty: addressStructPool.Get().(map[common.Address]struct{}), refund: s.refund, // should be 0 - logs: make(map[common.Hash][]*types.Log, defaultNumOfSlots), + logs: logsPool.Get().(map[common.Hash][]*types.Log), logSize: 0, preimages: make(map[common.Hash][]byte, len(s.preimages)), - journal: newJournal(), + journal: journalPool.Get().(*journal), hasher: crypto.NewKeccakState(), - snapDestructs: make(map[common.Address]struct{}), - snapAccounts: make(map[common.Address][]byte), - snapStorage: make(map[common.Address]map[string][]byte), isParallel: true, parallel: parallel, } @@ -1419,18 +1558,18 @@ func (s *StateDB) CopyForSlot() *StateDB { state.snaps = s.snaps state.snap = s.snap // deep copy needed - state.snapDestructs = make(map[common.Address]struct{}) + state.snapDestructs = addressStructPool.Get().(map[common.Address]struct{}) for k, v := range s.snapDestructs { state.snapDestructs[k] = v } // - state.snapAccounts = make(map[common.Address][]byte) + state.snapAccounts = snapAccountPool.Get().(map[common.Address][]byte) for k, v := range s.snapAccounts { state.snapAccounts[k] = v } - state.snapStorage = make(map[common.Address]map[string][]byte) + state.snapStorage = snapStoragePool.Get().(map[common.Address]map[string][]byte) for k, v := range s.snapStorage { - temp := make(map[string][]byte) + temp := snapStorageValuePool.Get().(map[string][]byte) for kk, vv := range v { temp[kk] = vv } diff --git a/core/state_processor.go b/core/state_processor.go index b3aebcab89..4af8b39262 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -75,6 +75,7 @@ 
type ParallelStateProcessor struct { txResultChan chan *ParallelTxResult // to notify dispatcher that a tx is done slotState []*SlotState // idle, or pending messages mergedTxIndex int // the latest finalized tx index + slotDBsToRelease []*state.StateDB debugErrorRedoNum int debugConflictRedoNum int } @@ -591,6 +592,7 @@ func (p *ParallelStateProcessor) dispatchToIdleSlot(statedb *state.StateDB, txRe if len(slot.mergedChangeList) == 0 { // first transaction of a slot, there is no usable SlotDB, have to create one for it. txReq.slotDB = state.NewSlotDB(statedb, consensus.SystemAddress, p.mergedTxIndex, false) + p.slotDBsToRelease = append(p.slotDBsToRelease, txReq.slotDB) } log.Debug("dispatchToIdleSlot", "Slot", i, "txIndex", txReq.txIndex) slot.tailTxReq = txReq @@ -616,6 +618,7 @@ func (p *ParallelStateProcessor) waitUntilNextTxDone(statedb *state.StateDB, gp // the target slot is waiting for new slotDB slotState := p.slotState[result.slotIndex] slotDB := state.NewSlotDB(statedb, consensus.SystemAddress, p.mergedTxIndex, result.keepSystem) + p.slotDBsToRelease = append(p.slotDBsToRelease, slotDB) slotState.slotdbChan <- slotDB continue } @@ -718,6 +721,9 @@ func (p *ParallelStateProcessor) execInSlot(slotIndex int, txReq *ParallelTxRequ if slotDB.SystemAddressRedo() { hasConflict = true systemAddrConflict = true + } else if slotDB.NeedsRedo() { + // if this is any reason that indicates this transaction needs to redo, skip the conflict check + hasConflict = true } else { for index := 0; index < p.parallelNum; index++ { if index == slotIndex { @@ -831,6 +837,15 @@ func (p *ParallelStateProcessor) resetState(txNum int, statedb *state.StateDB) { statedb.PrepareForParallel() + stateDBsToRelease := p.slotDBsToRelease + p.slotDBsToRelease = make([]*state.StateDB, 0, txNum) + + go func() { + for _, slotDB := range stateDBsToRelease { + slotDB.SlotDBPutSyncPool() + } + }() + for _, slot := range p.slotState { slot.tailTxReq = nil slot.mergedChangeList = 
make([]state.SlotChangeList, 0) From 24c53f571f7c219db74ae00d4b83805910645472 Mon Sep 17 00:00:00 2001 From: setunapo Date: Wed, 23 Mar 2022 16:38:45 +0800 Subject: [PATCH 03/18] WIP: Parallel 2.0 streaming pipeline ** dispatch hungry slot ** remove idle dispatch ** static Dispatch --- core/state/statedb.go | 10 + core/state_processor.go | 446 ++++++++++++++++++++++++-------------- core/types/transaction.go | 8 + core/vm/evm.go | 2 +- core/vm/interface.go | 2 + 5 files changed, 308 insertions(+), 160 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index 4c67b6e531..a38dca5aef 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -152,6 +152,7 @@ type ParallelState struct { // needs real system address's balance, the transaction will be marked redo with keepSystemAddressBalance = true systemAddress common.Address systemAddressOpsCount int + nonceIncreased uint64 // create contract keepSystemAddressBalance bool // we may need to redo for some specific reasons, like we read the wrong state and need to panic in sequential mode in SubRefund @@ -689,6 +690,10 @@ func (s *StateDB) BlockHash() common.Hash { return s.bhash } +func (s *StateDB) IsSlotDB() bool { + return s.parallel.isSlotDB +} + // BaseTxIndex returns the tx index that slot db based. 
func (s *StateDB) BaseTxIndex() int { return s.parallel.baseTxIndex @@ -724,6 +729,10 @@ func (s *StateDB) NeedsRedo() bool { return s.parallel.needsRedo } +func (s *StateDB) NonceIncreased() uint64 { + return s.parallel.nonceIncreased +} + func (s *StateDB) GetCode(addr common.Address) []byte { if s.parallel.isSlotDB { s.parallel.codeReadsInSlot[addr] = struct{}{} @@ -945,6 +954,7 @@ func (s *StateDB) NonceChanged(addr common.Address) { if s.parallel.isSlotDB { log.Debug("NonceChanged", "txIndex", s.txIndex, "addr", addr) s.parallel.nonceChangesInSlot[addr] = struct{}{} + s.parallel.nonceIncreased++ } } diff --git a/core/state_processor.go b/core/state_processor.go index 4af8b39262..f98adea519 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -47,6 +47,7 @@ const ( recentTime = 1024 * 3 recentDiffLayerTimeout = 5 farDiffLayerTimeout = 2 + maxUnitSize = 10 ) // StateProcessor is a basic Processor, which takes care of transitioning @@ -70,11 +71,12 @@ func NewStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consen // add for parallel executions type ParallelStateProcessor struct { StateProcessor - parallelNum int // leave a CPU to dispatcher - queueSize int // parallel slot's maximum number of pending Txs - txResultChan chan *ParallelTxResult // to notify dispatcher that a tx is done - slotState []*SlotState // idle, or pending messages - mergedTxIndex int // the latest finalized tx index + parallelNum int // leave a CPU to dispatcher + queueSize int // parallel slot's maximum number of pending Txs + txResultChan chan *ParallelTxResult // to notify dispatcher that a tx is done + // txReqAccountSorted map[common.Address][]*ParallelTxRequest // fixme: *ParallelTxRequest => ParallelTxRequest? 
+ slotState []*SlotState // idle, or pending messages + mergedTxIndex int // the latest finalized tx index slotDBsToRelease []*state.StateDB debugErrorRedoNum int debugConflictRedoNum int @@ -394,11 +396,12 @@ func (p *LightStateProcessor) LightProcess(diffLayer *types.DiffLayer, block *ty } type SlotState struct { - tailTxReq *ParallelTxRequest // tail pending Tx of the slot, should be accessed on dispatcher only. - pendingTxReqChan chan *ParallelTxRequest - pendingTxReqList []*ParallelTxRequest // maintained by dispatcher for dispatch policy - mergedChangeList []state.SlotChangeList - slotdbChan chan *state.StateDB // dispatch will create and send this slotDB to slot + pendingTxReqChan chan struct{} + pendingConfirmChan chan *ParallelTxResult + pendingTxReqList []*ParallelTxRequest // maintained by dispatcher for dispatch policy + mergedChangeList []state.SlotChangeList + slotdbChan chan *state.StateDB // dispatch will create and send this slotDB to slot + // txReqUnits []*ParallelDispatchUnit // only dispatch can access } type ParallelTxResult struct { @@ -409,6 +412,9 @@ type ParallelTxResult struct { txReq *ParallelTxRequest receipt *types.Receipt slotDB *state.StateDB // if updated, it is not equal to txReq.slotDB + gpSlot *GasPool + evm *vm.EVM + result *ExecutionResult } type ParallelTxRequest struct { @@ -435,9 +441,15 @@ func (p *ParallelStateProcessor) init() { for i := 0; i < p.parallelNum; i++ { p.slotState[i] = &SlotState{ - slotdbChan: make(chan *state.StateDB, 1), - pendingTxReqChan: make(chan *ParallelTxRequest, p.queueSize), + slotdbChan: make(chan *state.StateDB, 1), + pendingTxReqChan: make(chan struct{}, 1), + pendingConfirmChan: make(chan *ParallelTxResult, p.queueSize), } + // start the shadow slot first + go func(slotIndex int) { + p.runShadowSlotLoop(slotIndex) // this loop will be permanent live + }(i) + // start the slot's goroutine go func(slotIndex int) { p.runSlotLoop(slotIndex) // this loop will be permanent live @@ -525,6 +537,7 @@ 
func (p *ParallelStateProcessor) hasStateConflict(readDb *state.StateDB, changeL // for parallel execute, we put contracts of same address in a slot, // since these txs probably would have conflicts +/* func (p *ParallelStateProcessor) queueSameToAddress(txReq *ParallelTxRequest) bool { txToAddr := txReq.tx.To() // To() == nil means contract creation, no same To address @@ -532,9 +545,6 @@ func (p *ParallelStateProcessor) queueSameToAddress(txReq *ParallelTxRequest) bo return false } for i, slot := range p.slotState { - if slot.tailTxReq == nil { // this slot is idle - continue - } for _, pending := range slot.pendingTxReqList { // To() == nil means contract creation, skip it. if pending.tx.To() == nil { @@ -544,7 +554,6 @@ func (p *ParallelStateProcessor) queueSameToAddress(txReq *ParallelTxRequest) bo if *txToAddr == *pending.tx.To() { select { case slot.pendingTxReqChan <- txReq: - slot.tailTxReq = txReq slot.pendingTxReqList = append(slot.pendingTxReqList, txReq) log.Debug("queue same To address", "Slot", i, "txIndex", txReq.txIndex) return true @@ -557,21 +566,18 @@ func (p *ParallelStateProcessor) queueSameToAddress(txReq *ParallelTxRequest) bo } return false } - +*/ // for parallel execute, we put contracts of same address in a slot, // since these txs probably would have conflicts +/* func (p *ParallelStateProcessor) queueSameFromAddress(txReq *ParallelTxRequest) bool { txFromAddr := txReq.msg.From() for i, slot := range p.slotState { - if slot.tailTxReq == nil { // this slot is idle - continue - } for _, pending := range slot.pendingTxReqList { // same from address, put it on slot's pending list. 
if txFromAddr == pending.msg.From() { select { case slot.pendingTxReqChan <- txReq: - slot.tailTxReq = txReq slot.pendingTxReqList = append(slot.pendingTxReqList, txReq) log.Debug("queue same From address", "Slot", i, "txIndex", txReq.txIndex) return true @@ -584,26 +590,159 @@ func (p *ParallelStateProcessor) queueSameFromAddress(txReq *ParallelTxRequest) } return false } +*/ +/* +func (p *ParallelStateProcessor) dispatchToHungrySlot(statedb *state.StateDB, txReq *ParallelTxRequest) bool { + var workload int = len(p.slotState[0].pendingTxReqList) + var slotIndex int = 0 + for i, slot := range p.slotState { // can start from index 1 + if len(slot.pendingTxReqList) < workload { + slotIndex = i + workload = len(slot.pendingTxReqList) + } + } + if workload >= p.queueSize { + log.Debug("dispatch no Hungry Slot, all slots are full of task", "queueSize", p.queueSize) + return false + } -// if there is idle slot, dispatch the msg to the first idle slot -func (p *ParallelStateProcessor) dispatchToIdleSlot(statedb *state.StateDB, txReq *ParallelTxRequest) bool { - for i, slot := range p.slotState { - if slot.tailTxReq == nil { - if len(slot.mergedChangeList) == 0 { - // first transaction of a slot, there is no usable SlotDB, have to create one for it. 
- txReq.slotDB = state.NewSlotDB(statedb, consensus.SystemAddress, p.mergedTxIndex, false) - p.slotDBsToRelease = append(p.slotDBsToRelease, txReq.slotDB) + if workload == 0 && txReq.slotDB == nil { + // Create a SlotDB for idle slot to save an IPC channel cost for updateSlotDB + txReq.slotDB = state.NewSlotDB(statedb, consensus.SystemAddress, p.mergedTxIndex, false) + } + + log.Debug("dispatch To Hungry Slot", "slot", slotIndex, "workload", workload, "txIndex", txReq.txIndex) + slot := p.slotState[slotIndex] + select { + case slot.pendingTxReqChan <- txReq: + slot.pendingTxReqList = append(slot.pendingTxReqList, txReq) + return true + default: + log.Error("dispatch To Hungry Slot, but chan <- txReq failed??", "Slot", slotIndex, "txIndex", txReq.txIndex) + break + } + + return false +} +*/ + +// 1.Sliding Window: + +// txReqAccountSorted +// Unit: a slice of *TxReq, with len <= maxParallelUnitSize +// Units should be ordered by TxIndex +// TxReq's TxIndex of a Unit should be within a certain range: ParallelNum * maxParallelUnitSize? + +// Dispatch an Unit once for each slot? +// Unit make policy: +// 1.From +// 2.To... +/* +type ParallelDispatchUnit struct { + startTxIndex int + endTxIndex int + txsSize int + txReqs []*ParallelTxRequest +} +*/ + +// Try best to make the unit full, it is full when: +// ** maxUnitSize reached +// ** tx index range reached +// Avoid to make it full immediately, switch to next unit when: +// ** full +// ** not full, but the Tx of the same address has exhausted + +// New Unit will be created by batch +// ** first + +// Benefit of StaticDispatch: +// ** try best to make Txs with same From() in same slot +// ** reduce IPC cost by dispatch in Unit + +// 2022.03.25: too complicated, apply simple method first... 
+// ** make sure same From in same slot +// ** try to make it balanced, queue to the most hungry slot for new Address +func (p *ParallelStateProcessor) doStaticDispatch(mainStatedb *state.StateDB, txReqs []*ParallelTxRequest) { + + fromSlotMap := make(map[common.Address]int, 100) + toSlotMap := make(map[common.Address]int, 100) + for _, txReq := range txReqs { + var slotIndex int = -1 + if i, ok := fromSlotMap[txReq.msg.From()]; ok { + // first: same From are all in same slot + slotIndex = i + } else if txReq.msg.To() != nil { + // To Address, with txIndex sorted, could be in different slot. + // fixme: Create will move to hungry slot + if i, ok := toSlotMap[*txReq.msg.To()]; ok { + slotIndex = i } - log.Debug("dispatchToIdleSlot", "Slot", i, "txIndex", txReq.txIndex) - slot.tailTxReq = txReq - slot.pendingTxReqList = append(slot.pendingTxReqList, txReq) - slot.pendingTxReqChan <- txReq - return true } + + // not found, dispatch to most hungry slot + if slotIndex == -1 { + var workload int = len(p.slotState[0].pendingTxReqList) + slotIndex = 0 + for i, slot := range p.slotState { // can start from index 1 + if len(slot.pendingTxReqList) < workload { + slotIndex = i + workload = len(slot.pendingTxReqList) + } + } + } + // update + fromSlotMap[txReq.msg.From()] = slotIndex + if txReq.msg.To() != nil { + toSlotMap[*txReq.msg.To()] = slotIndex + } + + slot := p.slotState[slotIndex] + slot.pendingTxReqList = append(slot.pendingTxReqList, txReq) } - return false } +// get the most hungry slot + +/* + // + unitsInBatch := make([]*ParallelDispatchUnit, p.parallelNum ) + + slotIndex :=0 + for _, txReqs := range p.txReqAccountSorted { + currentUnit := unitsInBatch[slotIndex] + slotIndex := (slotIndex+1) % p.parallelNum + if currentUnit.txsSize >= maxUnitSize { + // current slot's unit is full, try next slot's unit + continue + } + var unit *ParallelDispatchUnit + for _, txReq := range txReqs { + numUnit := len(p.slotState[slotIndex].txReqUnits) + // create a unit for the 
first one + if numUnit == 0 { + unit = &ParallelDispatchUnit{ + startTxIndex: txReq.txIndex, + endTxIndex: txReq.txIndex + txIndexSize, + txsSize: 0, + } + unit.txReqs = append(unit.txReqs, txReq) + continue + } + // + unit = p.slotState[slotIndex].txReqUnits[numUnit-1] + // unit is already full + if unit.txsSize >= maxParallelUnitSize { + + } + } + } + // first: move From() to unit + + allUnit = append(allUnit) + } +*/ + // wait until the next Tx is executed and its result is merged to the main stateDB func (p *ParallelStateProcessor) waitUntilNextTxDone(statedb *state.StateDB, gp *GasPool) *ParallelTxResult { var result *ParallelTxResult @@ -625,7 +764,6 @@ func (p *ParallelStateProcessor) waitUntilNextTxDone(statedb *state.StateDB, gp // ok, the tx result is valid and can be merged break } - if err := gp.SubGas(result.receipt.GasUsed); err != nil { log.Error("gas limit reached", "block", result.txReq.block.Number(), "txIndex", result.txReq.txIndex, "GasUsed", result.receipt.GasUsed, "gp.Gas", gp.Gas()) @@ -635,10 +773,6 @@ func (p *ParallelStateProcessor) waitUntilNextTxDone(statedb *state.StateDB, gp resultTxIndex := result.txReq.txIndex resultSlotState := p.slotState[resultSlotIndex] resultSlotState.pendingTxReqList = resultSlotState.pendingTxReqList[1:] - if resultSlotState.tailTxReq.txIndex == resultTxIndex { - log.Debug("ProcessParallel slot is idle", "Slot", resultSlotIndex) - resultSlotState.tailTxReq = nil - } // Slot's mergedChangeList is produced by dispatcher, while consumed by slot. 
// It is safe, since write and read is in sequential, do write -> notify -> read @@ -657,31 +791,36 @@ func (p *ParallelStateProcessor) waitUntilNextTxDone(statedb *state.StateDB, gp return result } -func (p *ParallelStateProcessor) execInSlot(slotIndex int, txReq *ParallelTxRequest) *ParallelTxResult { - txIndex := txReq.txIndex - tx := txReq.tx +func (p *ParallelStateProcessor) executeInSlot(slotIndex int, txReq *ParallelTxRequest) *ParallelTxResult { slotDB := txReq.slotDB - slotGasLimit := txReq.gasLimit // not accurate, but it is ok for block import. - msg := txReq.msg - block := txReq.block - header := block.Header() - cfg := txReq.vmConfig - bloomProcessor := txReq.bloomProcessor - - blockContext := NewEVMBlockContext(header, p.bc, nil) // can share blockContext within a block for efficiency - vmenv := vm.NewEVM(blockContext, vm.TxContext{}, slotDB, p.config, cfg) + slotDB.Prepare(txReq.tx.Hash(), txReq.block.Hash(), txReq.txIndex) + blockContext := NewEVMBlockContext(txReq.block.Header(), p.bc, nil) // can share blockContext within a block for efficiency + vmenv := vm.NewEVM(blockContext, vm.TxContext{}, slotDB, p.config, txReq.vmConfig) + // gasLimit not accurate, but it is ok for block import. 
+ // each slot would use its own gas pool, and will do gaslimit check later + gpSlot := new(GasPool).AddGas(txReq.gasLimit) - var receipt *types.Receipt - var result *ExecutionResult - var err error - var evm *vm.EVM + evm, result, err := applyTransactionStageExecution(txReq.msg, gpSlot, slotDB, vmenv) + log.Debug("In Slot, Stage Execution done", "Slot", slotIndex, "txIndex", txReq.txIndex, "slotDB.baseTxIndex", slotDB.BaseTxIndex()) - slotDB.Prepare(tx.Hash(), block.Hash(), txIndex) - log.Debug("exec In Slot", "Slot", slotIndex, "txIndex", txIndex, "slotDB.baseTxIndex", slotDB.BaseTxIndex()) + return &ParallelTxResult{ + updateSlotDB: false, + slotIndex: slotIndex, + txReq: txReq, + receipt: nil, // receipt is generated in finalize stage + slotDB: slotDB, + err: err, + gpSlot: gpSlot, + evm: evm, + result: result, + } +} - gpSlot := new(GasPool).AddGas(slotGasLimit) // each slot would use its own gas pool, and will do gaslimit check later - evm, result, err = applyTransactionStageExecution(msg, gpSlot, slotDB, vmenv) - log.Debug("Stage Execution done", "Slot", slotIndex, "txIndex", txIndex, "slotDB.baseTxIndex", slotDB.BaseTxIndex()) +func (p *ParallelStateProcessor) executeInShadowSlot(slotIndex int, txResult *ParallelTxResult) *ParallelTxResult { + txReq := txResult.txReq + txIndex := txReq.txIndex + slotDB := txReq.slotDB + header := txReq.block.Header() // wait until the previous tx is finalized. if txReq.waitTxChan != nil { @@ -689,35 +828,14 @@ func (p *ParallelStateProcessor) execInSlot(slotIndex int, txReq *ParallelTxRequ <-txReq.waitTxChan // close the channel } - // in parallel mode, tx can run into trouble, for example: err="nonce too high" - // in these cases, we will wait and re-run. 
- if err != nil { - p.debugErrorRedoNum++ - log.Debug("Stage Execution err", "Slot", slotIndex, "txIndex", txIndex, - "current slotDB.baseTxIndex", slotDB.BaseTxIndex(), "err", err) - redoResult := &ParallelTxResult{ - updateSlotDB: true, - slotIndex: slotIndex, - txReq: txReq, - receipt: receipt, - err: err, - } - p.txResultChan <- redoResult - slotDB = <-p.slotState[slotIndex].slotdbChan - slotDB.Prepare(tx.Hash(), block.Hash(), txIndex) - log.Debug("Stage Execution get new slotdb to redo", "Slot", slotIndex, - "txIndex", txIndex, "new slotDB.baseTxIndex", slotDB.BaseTxIndex()) - gpSlot = new(GasPool).AddGas(slotGasLimit) - evm, result, err = applyTransactionStageExecution(msg, gpSlot, slotDB, vmenv) - if err != nil { - log.Error("Stage Execution redo, error", err) - } + if txResult.err != nil { + log.Error("executeInShadowSlot should have no error", "err", txResult.err) } // do conflict detect hasConflict := false systemAddrConflict := false - log.Debug("Stage Execution done, do conflict check", "Slot", slotIndex, "txIndex", txIndex) + log.Debug("Shadow Stage Execution done, do conflict check", "Slot", slotIndex, "txIndex", txIndex) if slotDB.SystemAddressRedo() { hasConflict = true systemAddrConflict = true @@ -726,12 +844,11 @@ func (p *ParallelStateProcessor) execInSlot(slotIndex int, txReq *ParallelTxRequ hasConflict = true } else { for index := 0; index < p.parallelNum; index++ { - if index == slotIndex { - continue - } - + // log.Debug("Shadow conflict check", "Slot", slotIndex, "txIndex", txIndex) // check all finalizedDb from current slot's for _, changeList := range p.slotState[index].mergedChangeList { + // log.Debug("Shadow conflict check", "changeList.TxIndex", changeList.TxIndex, + // "slotDB.BaseTxIndex()", slotDB.BaseTxIndex()) if changeList.TxIndex <= slotDB.BaseTxIndex() { continue } @@ -756,73 +873,93 @@ func (p *ParallelStateProcessor) execInSlot(slotIndex int, txReq *ParallelTxRequ updateSlotDB: true, keepSystem: systemAddrConflict, 
slotIndex: slotIndex, - txReq: txReq, - receipt: receipt, - err: err, } p.txResultChan <- redoResult - slotDB = <-p.slotState[slotIndex].slotdbChan - slotDB.Prepare(tx.Hash(), block.Hash(), txIndex) - gpSlot = new(GasPool).AddGas(slotGasLimit) - evm, result, err = applyTransactionStageExecution(msg, gpSlot, slotDB, vmenv) - if err != nil { - log.Error("Stage Execution conflict redo, error", err) + updatedSlotDB := <-p.slotState[slotIndex].slotdbChan + updatedSlotDB.Prepare(txReq.tx.Hash(), txReq.block.Hash(), txIndex) + gpSlot := new(GasPool).AddGas(txReq.gasLimit) + + txResult.slotDB = updatedSlotDB + txResult.gpSlot = gpSlot + + blockContext := NewEVMBlockContext(header, p.bc, nil) // can share blockContext within a block for efficiency + vmenv := vm.NewEVM(blockContext, vm.TxContext{}, updatedSlotDB, p.config, txReq.vmConfig) + txResult.evm, txResult.result, txResult.err = applyTransactionStageExecution(txReq.msg, + gpSlot, updatedSlotDB, vmenv) + + if txResult.err != nil { + log.Error("Stage Execution conflict redo, error", txResult.err) } } // goroutine unsafe operation will be handled from here for safety - gasConsumed := slotGasLimit - gpSlot.Gas() - if gasConsumed != result.UsedGas { + gasConsumed := txReq.gasLimit - txResult.gpSlot.Gas() + if gasConsumed != txResult.result.UsedGas { log.Error("gasConsumed != result.UsedGas mismatch", - "gasConsumed", gasConsumed, "result.UsedGas", result.UsedGas) + "gasConsumed", gasConsumed, "result.UsedGas", txResult.result.UsedGas) } - log.Debug("ok to finalize this TX", - "Slot", slotIndex, "txIndex", txIndex, "result.UsedGas", result.UsedGas, "txReq.usedGas", *txReq.usedGas) + log.Debug("ok to finalize this TX", "Slot", slotIndex, "txIndex", txIndex, + "result.UsedGas", txResult.result.UsedGas, "txReq.usedGas", *txReq.usedGas) + // ok, time to do finalize, stage2 should not be parallel - receipt, err = applyTransactionStageFinalization(evm, result, msg, p.config, slotDB, header, tx, txReq.usedGas, bloomProcessor) + 
txResult.receipt, txResult.err = applyTransactionStageFinalization(txResult.evm, txResult.result, + txReq.msg, p.config, txResult.slotDB, header, + txReq.tx, txReq.usedGas, txReq.bloomProcessor) - if result.Failed() { + if txResult.result.Failed() { // if Tx is reverted, all its state change will be discarded - log.Debug("TX reverted?", "Slot", slotIndex, "txIndex", txIndex, "result.Err", result.Err) - slotDB.RevertSlotDB(msg.From()) + log.Debug("TX reverted?", "Slot", slotIndex, "txIndex", txIndex, + "result.Err", txResult.result.Err) + txResult.slotDB.RevertSlotDB(txReq.msg.From()) } - return &ParallelTxResult{ - updateSlotDB: false, - slotIndex: slotIndex, - txReq: txReq, - receipt: receipt, - slotDB: slotDB, - err: err, - } + txResult.updateSlotDB = false + return txResult } func (p *ParallelStateProcessor) runSlotLoop(slotIndex int) { curSlot := p.slotState[slotIndex] for { // wait for new TxReq - txReq := <-curSlot.pendingTxReqChan + <-curSlot.pendingTxReqChan + // receive a dispatched message - log.Debug("SlotLoop received a new TxReq", "Slot", slotIndex, "txIndex", txReq.txIndex) // SlotDB create rational: // ** for a dispatched tx, // the slot should be idle, it is better to create a new SlotDB, since new Tx is not related to previous Tx // ** for a queued tx, // it is better to create a new SlotDB, since COW is used. 
- if txReq.slotDB == nil { - result := &ParallelTxResult{ - updateSlotDB: true, - slotIndex: slotIndex, - err: nil, + for _, txReq := range curSlot.pendingTxReqList { + log.Debug("SlotLoop received a new TxReq", "Slot", slotIndex, "txIndex", txReq.txIndex) + if txReq.slotDB == nil { + result := &ParallelTxResult{ + updateSlotDB: true, + slotIndex: slotIndex, + err: nil, + } + p.txResultChan <- result + txReq.slotDB = <-curSlot.slotdbChan } - p.txResultChan <- result - txReq.slotDB = <-curSlot.slotdbChan + result := p.executeInSlot(slotIndex, txReq) + curSlot.pendingConfirmChan <- result } - result := p.execInSlot(slotIndex, txReq) - log.Debug("SlotLoop the TxReq is done", "Slot", slotIndex, "err", result.err) - p.txResultChan <- result + } +} + +func (p *ParallelStateProcessor) runShadowSlotLoop(slotIndex int) { + curSlot := p.slotState[slotIndex] + for { + log.Debug("runShadowSlotLoop wait", "slotIndex", slotIndex) + // ParallelTxResult from pendingConfirmChan is not confirmed yet + unconfirmedResult := <-curSlot.pendingConfirmChan + + log.Debug("runShadowSlotLoop to confirm the TxResult from master slot", "Slot", slotIndex, "txIndex", unconfirmedResult.txReq.txIndex) + confirmedResult := p.executeInShadowSlot(slotIndex, unconfirmedResult) + + log.Debug("runShadowSlotLoop the TxReq is done", "Slot", slotIndex, "err", confirmedResult.err) + p.txResultChan <- confirmedResult } } @@ -834,6 +971,7 @@ func (p *ParallelStateProcessor) resetState(txNum int, statedb *state.StateDB) { p.mergedTxIndex = -1 p.debugErrorRedoNum = 0 p.debugConflictRedoNum = 0 + // p.txReqAccountSorted = make(map[common.Address][]*ParallelTxRequest) // fixme: to be reused? 
statedb.PrepareForParallel() @@ -847,7 +985,6 @@ func (p *ParallelStateProcessor) resetState(txNum int, statedb *state.StateDB) { }() for _, slot := range p.slotState { - slot.tailTxReq = nil slot.mergedChangeList = make([]state.SlotChangeList, 0) slot.pendingTxReqList = make([]*ParallelTxRequest, 0) } @@ -872,6 +1009,7 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat signer, _, bloomProcessor := p.preExecute(block, statedb, cfg, true) var waitTxChan, curTxChan chan struct{} + var txReqs []*ParallelTxRequest for i, tx := range block.Transactions() { if isPoSA { if isSystemTx, err := posa.IsSystemTransaction(tx, block.Header()); err != nil { @@ -883,7 +1021,10 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat } // can be moved it into slot for efficiency, but signer is not concurrent safe - msg, err := tx.AsMessage(signer) + // Parallel Execution 1.0&2.0 is for full sync mode, Nonce PreCheck is not necessary + // And since we will do out-of-order execution, the Nonce PreCheck could fail. + // We will disable it and leave it to Parallel 3.0 which is for validator mode + msg, err := tx.AsParallelMessage(signer) if err != nil { return statedb, nil, nil, 0, err } @@ -905,45 +1046,32 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat waitTxChan: waitTxChan, curTxChan: curTxChan, } - - // to optimize the for { for {} } loop code style? it is ok right now. - for { - if p.queueSameFromAddress(txReq) { - break - } - - if p.queueSameToAddress(txReq) { - break - } - // if idle slot available, just dispatch and process next tx. - if p.dispatchToIdleSlot(statedb, txReq) { - break - } - log.Debug("ProcessParallel no slot available, wait", "txIndex", txReq.txIndex) - // no idle slot, wait until a tx is executed and merged. 
- result := p.waitUntilNextTxDone(statedb, gp) - - // update tx result - if result.err != nil { - log.Warn("ProcessParallel a failed tx", "resultSlotIndex", result.slotIndex, - "resultTxIndex", result.txReq.txIndex, "result.err", result.err) - return statedb, nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", result.txReq.txIndex, result.txReq.tx.Hash().Hex(), result.err) - } - - commonTxs = append(commonTxs, result.txReq.tx) - receipts = append(receipts, result.receipt) - } + txReqs = append(txReqs, txReq) + // from := txReq.msg.From() + // p.txReqAccountSorted[from] = append(p.txReqAccountSorted[from], txReq) + // Generate TxReqUnit every 80() transaction? + // if (i + 1) % *(p.parallelNum *10) == 0 { + // p.txReqAccountSorted = make(map[common.Address][]*ParallelTxRequest) // fixme: memory reuse? + // } } + p.doStaticDispatch(statedb, txReqs) + for _, slot := range p.slotState { + slot.pendingTxReqChan <- struct{}{} + } + for { + if len(commonTxs)+len(systemTxs) == txNum { + break + } - // wait until all tx request are done - for len(commonTxs)+len(systemTxs) < txNum { result := p.waitUntilNextTxDone(statedb, gp) + // update tx result if result.err != nil { log.Warn("ProcessParallel a failed tx", "resultSlotIndex", result.slotIndex, "resultTxIndex", result.txReq.txIndex, "result.err", result.err) return statedb, nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", result.txReq.txIndex, result.txReq.tx.Hash().Hex(), result.err) } + commonTxs = append(commonTxs, result.txReq.tx) receipts = append(receipts, result.receipt) } diff --git a/core/types/transaction.go b/core/types/transaction.go index 74c011544b..9a6bebc42e 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -535,6 +535,14 @@ func (tx *Transaction) AsMessage(s Signer) (Message, error) { return msg, err } +// Parallel 1.0&2.0 will skip nonce check, since it is not necessary for sync mode. 
+// Parallel 3.0 will reenable it, nonce check for parallel execution will be designed. +func (tx *Transaction) AsParallelMessage(s Signer) (Message, error) { + msg, err := tx.AsMessage(s) + msg.checkNonce = false + return msg, err +} + func (m Message) From() common.Address { return m.from } func (m Message) To() *common.Address { return m.to } func (m Message) GasPrice() *big.Int { return m.gasPrice } diff --git a/core/vm/evm.go b/core/vm/evm.go index c7c8e0596c..1970f97910 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -475,7 +475,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } nonce := evm.StateDB.GetNonce(caller.Address()) evm.StateDB.SetNonce(caller.Address(), nonce+1) - evm.StateDB.NonceChanged(caller.Address()) + evm.StateDB.NonceChanged(caller.Address()) // fixme: nonce double -- // We add this to the access list _before_ taking a snapshot. Even if the creation fails, // the access-list change should not be rolled back if evm.chainRules.IsBerlin { diff --git a/core/vm/interface.go b/core/vm/interface.go index c3d99aaa76..f424ed9cb9 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -75,6 +75,8 @@ type StateDB interface { AddPreimage(common.Hash, []byte) ForEachStorage(common.Address, func(common.Hash, common.Hash) bool) error + + IsSlotDB() bool } // CallContext provides a basic interface for the EVM calling conventions. 
The EVM From f02c988a5cd23134a8fea2ede847093bd554f4e0 Mon Sep 17 00:00:00 2001 From: setunapo Date: Tue, 29 Mar 2022 22:36:23 +0800 Subject: [PATCH 04/18] WIP: implement unconfirmed db reference ** to reduce conflict rate ** light copy ** conflict detect --- core/blockchain.go | 19 +- core/state/journal.go | 8 +- core/state/state_object.go | 45 +- core/state/statedb.go | 1199 ++++++++++++++++++++++++++++-------- core/state_processor.go | 213 ++----- core/types/transaction.go | 4 +- core/vm/evm.go | 1 - core/vm/interface.go | 3 - 8 files changed, 1059 insertions(+), 433 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 9ed4317b13..417f3a0037 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -2113,14 +2113,17 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er var followupInterrupt uint32 // For diff sync, it may fallback to full sync, so we still do prefetch // parallel mode has a pipeline, similar to this prefetch, to save CPU we disable this prefetch for parallel - if !bc.parallelExecution { - if len(block.Transactions()) >= prefetchTxNumber { - throwaway := statedb.Copy() - go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) { - bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt) - }(time.Now(), block, throwaway, &followupInterrupt) - } - } + /* + // disable prefetch for parallel bugfix + if !bc.parallelExecution { + if len(block.Transactions()) >= prefetchTxNumber { + throwaway := statedb.Copy() + go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) { + bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt) + }(time.Now(), block, throwaway, &followupInterrupt) + } + } + */ //Process block using the parent state as reference point substart := time.Now() if bc.pipeCommit { diff --git a/core/state/journal.go b/core/state/journal.go index b3a2956f75..5afe8886bb 100644 --- 
a/core/state/journal.go +++ b/core/state/journal.go @@ -155,7 +155,13 @@ func (ch createObjectChange) dirtied() *common.Address { } func (ch resetObjectChange) revert(s *StateDB) { - s.SetStateObject(ch.prev) + if s.parallel.isSlotDB { + // ch.prev must be from dirtiedStateObjectsInSlot, put it back + s.parallel.dirtiedStateObjectsInSlot[ch.prev.address] = ch.prev + } else { + // ch.prev was got from main DB, put it back to main DB. + s.SetStateObject(ch.prev) + } if !ch.prevdestruct && s.snap != nil { delete(s.snapDestructs, ch.prev.address) } diff --git a/core/state/state_object.go b/core/state/state_object.go index ce8926609a..7adb5bdbe6 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -197,7 +197,7 @@ type Account struct { // newObject creates a state object. func newObject(db *StateDB, isParallel bool, address common.Address, data Account) *StateObject { if data.Balance == nil { - data.Balance = new(big.Int) + data.Balance = new(big.Int) // todo: why not common.Big0? } if data.CodeHash == nil { data.CodeHash = emptyCodeHash @@ -284,6 +284,7 @@ func (s *StateObject) GetState(db Database, key common.Hash) common.Hash { if dirty { return value } + // Otherwise return the entry's original value return s.GetCommittedState(db, key) } @@ -353,9 +354,12 @@ func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Has // 1) resurrect happened, and new slot values were set -- those should // have been handles via pendingStorage above. // 2) we don't have new values, and can deliver empty response back - if _, destructed := s.db.snapDestructs[s.address]; destructed { + s.db.snapParallelLock.RLock() + if _, destructed := s.db.snapDestructs[s.address]; destructed { // fixme: use sync.Map, instead of RWMutex? 
+ s.db.snapParallelLock.RUnlock() return common.Hash{} } + s.db.snapParallelLock.RUnlock() enc, err = s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key.Bytes())) } // If snapshot unavailable or reading from it failed, load from the database @@ -394,7 +398,14 @@ func (s *StateObject) SetState(db Database, key, value common.Hash) { return } // If the new value is the same as old, don't set - prev := s.GetState(db, key) + // In parallel mode, it has to get from StateDB, in case: + // a.the Slot did not set the key before and try to set it to `val_1` + // b.Unconfirmed DB has set the key to `val_2` + // c.if we use StateObject.GetState, and the key load from the main DB is `val_1` + // this `SetState could be skipped` + // d.Finally, the key's value will be `val_2`, while it should be `val_1` + // such as: https://bscscan.com/txs?block=2491181 + prev := s.db.GetState(s.address, key) if prev == value { return } @@ -404,6 +415,10 @@ func (s *StateObject) SetState(db Database, key, value common.Hash) { key: key, prevalue: prev, }) + if s.db.parallel.isSlotDB { + s.db.parallel.kvChangesInSlot[s.address][key] = struct{}{} // should be moved to here, after `s.db.GetState()` + } + s.setState(key, value) } @@ -484,7 +499,7 @@ func (s *StateObject) updateTrie(db Database) Trie { return true } - s.originStorage.StoreValue(k.(common.Hash), v.(common.Hash)) + s.setOriginStorage(key, value) var vs []byte if (value == common.Hash{}) { @@ -602,6 +617,19 @@ func (s *StateObject) setBalance(amount *big.Int) { // Return the gas back to the origin. 
Used by the Virtual machine or Closures func (s *StateObject) ReturnGas(gas *big.Int) {} +func (s *StateObject) lightCopy(db *StateDB) *StateObject { + stateObject := newObject(db, s.isParallel, s.address, s.data) + if s.trie != nil { + // fixme: no need to copy trie for light copy, since light copied object won't access trie DB + stateObject.trie = db.db.CopyTrie(s.trie) + } + stateObject.code = s.code + stateObject.suicided = false // should be false + stateObject.dirtyCode = s.dirtyCode // it is not used in slot, but keep it is ok + stateObject.deleted = false // should be false + return stateObject +} + func (s *StateObject) deepCopy(db *StateDB) *StateObject { stateObject := newObject(db, s.isParallel, s.address, s.data) if s.trie != nil { @@ -619,9 +647,12 @@ func (s *StateObject) deepCopy(db *StateDB) *StateObject { func (s *StateObject) MergeSlotObject(db Database, dirtyObjs *StateObject, keys StateKeys) { for key := range keys { - // better to do s.GetState(db, key) to load originStorage for this key? - // since originStorage was in dirtyObjs, but it works even originStorage miss the state object. - s.SetState(db, key, dirtyObjs.GetState(db, key)) + // In parallel mode, always GetState by StateDB, not by StateObject directly, + // since it the KV could exist in unconfirmed DB. + // But here, it should be ok, since the KV should be changed and valid in the SlotDB, + // we still do GetState by StateDB, it is not an issue. 
+ val := dirtyObjs.db.GetState(s.address, key) + s.SetState(db, key, val) } } diff --git a/core/state/statedb.go b/core/state/statedb.go index a38dca5aef..f46e6b3556 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -18,6 +18,7 @@ package state import ( + "bytes" "errors" "fmt" "math/big" @@ -94,6 +95,13 @@ func (s *StateDB) loadStateObj(addr common.Address) (*StateObject, bool) { // storeStateObj is the entry for storing state object to stateObjects in StateDB or stateObjects in parallel func (s *StateDB) storeStateObj(addr common.Address, stateObject *StateObject) { if s.isParallel { + // When a state object is stored into s.parallel.stateObjects, + // it belongs to base StateDB, it is confirmed and valid. + if s.parallel.isSlotDB { + // the object could be create in SlotDB, if it got the object from DB and + // update it to the shared `s.parallel.stateObjects`` + stateObject.db = s.parallel.baseStateDB + } s.parallel.stateObjects.Store(addr, stateObject) } else { s.stateObjects[addr] = stateObject @@ -109,21 +117,10 @@ func (s *StateDB) deleteStateObj(addr common.Address) { } } -// For parallel mode only, keep the change list for later conflict detect -type SlotChangeList struct { - TxIndex int - StateObjectSuicided map[common.Address]struct{} - StateChangeSet map[common.Address]StateKeys - BalanceChangeSet map[common.Address]struct{} - CodeChangeSet map[common.Address]struct{} - AddrStateChangeSet map[common.Address]struct{} - NonceChangeSet map[common.Address]struct{} -} - // For parallel mode only type ParallelState struct { - isSlotDB bool // isSlotDB denotes StateDB is used in slot - + isSlotDB bool // isSlotDB denotes StateDB is used in slot + SlotIndex int // fixme: to be removed // stateObjects holds the state objects in the base slot db // the reason for using stateObjects instead of stateObjects on the outside is // we need a thread safe map to hold state objects since there are many slots will read @@ -131,28 +128,37 @@ type 
ParallelState struct { // And we will merge all the changes made by the concurrent slot into it. stateObjects *StateObjectSyncMap - baseTxIndex int // slotDB is created base on this tx index. + baseStateDB *StateDB // for parallel mode, there will be a base StateDB in dispatcher routine. + baseTxIndex int // slotDB is created base on this tx index. dirtiedStateObjectsInSlot map[common.Address]*StateObject - // for conflict check + unconfirmedDBInShot map[int]*StateDB // do unconfirmed reference in same slot. + + // we will record the read detail for conflict check and + // the changed addr or key for object merge, the changed detail can be acheived from the dirty object + nonceChangesInSlot map[common.Address]struct{} + nonceReadsInSlot map[common.Address]uint64 balanceChangesInSlot map[common.Address]struct{} // the address's balance has been changed - balanceReadsInSlot map[common.Address]struct{} // the address's balance has been read and used. - codeReadsInSlot map[common.Address]struct{} - codeChangesInSlot map[common.Address]struct{} - stateReadsInSlot map[common.Address]StateKeys - stateChangesInSlot map[common.Address]StateKeys // no need record value + balanceReadsInSlot map[common.Address]*big.Int // the address's balance has been read and used. 
+ // codeSize can be derived based on code, but codeHash can not directly derived based on code + // - codeSize is 0 for address not exist or empty code + // - codeHash is `common.Hash{}` for address not exist, emptyCodeHash(`Keccak256Hash(nil)`) for empty code + // so we use codeReadsInSlot & codeHashReadsInSlot to keep code and codeHash, codeSize is derived from code + codeReadsInSlot map[common.Address][]byte // empty if address not exist or no code in this address + codeHashReadsInSlot map[common.Address]common.Hash + codeChangesInSlot map[common.Address]struct{} + kvReadsInSlot map[common.Address]Storage + kvChangesInSlot map[common.Address]StateKeys // value will be kept in dirtiedStateObjectsInSlot // Actions such as SetCode, Suicide will change address's state. // Later call like Exist(), Empty(), HasSuicided() depend on the address's state. - addrStateReadsInSlot map[common.Address]struct{} - addrStateChangesInSlot map[common.Address]struct{} - stateObjectsSuicidedInSlot map[common.Address]struct{} - nonceChangesInSlot map[common.Address]struct{} + addrStateReadsInSlot map[common.Address]bool // true: exist, false: not exist or deleted + addrStateChangesInSlot map[common.Address]bool // true: created, false: deleted + // Transaction will pay gas fee to system address. // Parallel execution will clear system address's balance at first, in order to maintain transaction's // gas fee value. 
Normal transaction will access system address twice, otherwise it means the transaction // needs real system address's balance, the transaction will be marked redo with keepSystemAddressBalance = true systemAddress common.Address systemAddressOpsCount int - nonceIncreased uint64 // create contract keepSystemAddressBalance bool // we may need to redo for some specific reasons, like we read the wrong state and need to panic in sequential mode in SubRefund @@ -181,12 +187,13 @@ type StateDB struct { fullProcessed bool pipeCommit bool - snapMux sync.Mutex - snaps *snapshot.Tree - snap snapshot.Snapshot - snapDestructs map[common.Address]struct{} - snapAccounts map[common.Address][]byte - snapStorage map[common.Address]map[string][]byte + snapMux sync.Mutex + snaps *snapshot.Tree + snap snapshot.Snapshot + snapParallelLock sync.RWMutex // for parallel mode, for main StateDB, slot will read snapshot, while processor will write. + snapDestructs map[common.Address]struct{} + snapAccounts map[common.Address][]byte + snapStorage map[common.Address]map[string][]byte // This map holds 'live' objects, which will get modified while processing a state transition. stateObjects map[common.Address]*StateObject @@ -246,13 +253,24 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) // NewSlotDB creates a new State DB based on the provided StateDB. // With parallel, each execution slot would have its own StateDB. 
-func NewSlotDB(db *StateDB, systemAddr common.Address, baseTxIndex int, keepSystem bool) *StateDB { +func NewSlotDB(db *StateDB, systemAddr common.Address, txIndex int, baseTxIndex int, keepSystem bool, + unconfirmedDBs *sync.Map /*map[int]*StateDB*/) *StateDB { slotDB := db.CopyForSlot() + slotDB.txIndex = txIndex slotDB.originalRoot = db.originalRoot + slotDB.parallel.baseStateDB = db slotDB.parallel.baseTxIndex = baseTxIndex slotDB.parallel.systemAddress = systemAddr slotDB.parallel.systemAddressOpsCount = 0 slotDB.parallel.keepSystemAddressBalance = keepSystem + slotDB.storagePool = NewStoragePool() + slotDB.EnableWriteOnSharedStorage() + for index := baseTxIndex + 1; index < slotDB.txIndex; index++ { // txIndex + unconfirmedDB, ok := unconfirmedDBs.Load(index) + if ok { + slotDB.parallel.unconfirmedDBInShot[index] = unconfirmedDB.(*StateDB) + } + } // All transactions will pay gas fee to the systemAddr at the end, this address is // deemed to conflict, we handle it specially, clear it now and set it back to the main @@ -279,13 +297,16 @@ func NewWithSharedPool(root common.Hash, db Database, snaps *snapshot.Tree) (*St func newStateDB(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) { sdb := &StateDB{ - db: db, - originalRoot: root, - snaps: snaps, - stateObjects: make(map[common.Address]*StateObject, defaultNumOfSlots), - parallel: ParallelState{}, + db: db, + originalRoot: root, + snaps: snaps, + stateObjects: make(map[common.Address]*StateObject, defaultNumOfSlots), + parallel: ParallelState{ + SlotIndex: -1, + }, stateObjectsPending: make(map[common.Address]struct{}, defaultNumOfSlots), stateObjectsDirty: make(map[common.Address]struct{}, defaultNumOfSlots), + txIndex: -1, logs: make(map[common.Hash][]*types.Log, defaultNumOfSlots), preimages: make(map[common.Hash][]byte), journal: newJournal(), @@ -306,28 +327,36 @@ func newStateDB(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, return nil, err } sdb.trie = tr + 
sdb.EnableWriteOnSharedStorage() // fixme:remove when s.originStorage[key] is enabled return sdb, nil } func (s *StateDB) getStateObjectFromStateObjects(addr common.Address) (*StateObject, bool) { - if s.parallel.isSlotDB { - obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr] - if ok { - return obj, ok - } - } return s.loadStateObj(addr) } -// RevertSlotDB keep its read list for conflict detect and discard its state changes except its own balance change, -// if the transaction execution is reverted, +// RevertSlotDB keep the Read list for conflict detect, +// discard all state changes except: +// - nonce and balance of from address +// - balance of system address: will be used on merge to update SystemAddress's balance func (s *StateDB) RevertSlotDB(from common.Address) { - s.parallel.stateObjectsSuicidedInSlot = make(map[common.Address]struct{}) - s.parallel.stateChangesInSlot = make(map[common.Address]StateKeys) + s.parallel.kvChangesInSlot = make(map[common.Address]StateKeys) + + // balance := s.parallel.balanceChangesInSlot[from] + s.parallel.nonceChangesInSlot = make(map[common.Address]struct{}) s.parallel.balanceChangesInSlot = make(map[common.Address]struct{}, 1) + s.parallel.addrStateChangesInSlot = make(map[common.Address]bool) // 0: created, 1: deleted + + selfStateObject := s.parallel.dirtiedStateObjectsInSlot[from] + systemAddress := s.parallel.systemAddress + systemStateObject := s.parallel.dirtiedStateObjectsInSlot[systemAddress] + s.parallel.dirtiedStateObjectsInSlot = make(map[common.Address]*StateObject, 2) + // keep these elements + s.parallel.dirtiedStateObjectsInSlot[from] = selfStateObject + s.parallel.dirtiedStateObjectsInSlot[systemAddress] = systemStateObject s.parallel.balanceChangesInSlot[from] = struct{}{} - s.parallel.addrStateChangesInSlot = make(map[common.Address]struct{}) - s.parallel.nonceChangesInSlot = make(map[common.Address]struct{}) + s.parallel.balanceChangesInSlot[systemAddress] = struct{}{} + 
s.parallel.nonceChangesInSlot[from] = struct{}{} } // PrepareForParallel prepares for state db to be used in parallel execution mode. @@ -340,7 +369,7 @@ func (s *StateDB) PrepareForParallel() { // finalized(dirty -> pending) on execution slot, the execution results should be // merged back to the main StateDB. // And it will return and keep the slot's change list for later conflict detect. -func (s *StateDB) MergeSlotDB(slotDb *StateDB, slotReceipt *types.Receipt, txIndex int) SlotChangeList { +func (s *StateDB) MergeSlotDB(slotDb *StateDB, slotReceipt *types.Receipt, txIndex int) { // receipt.Logs use unified log index within a block // align slotDB's log index to the block stateDB's logSize for _, l := range slotReceipt.Logs { @@ -368,25 +397,35 @@ func (s *StateDB) MergeSlotDB(slotDb *StateDB, slotReceipt *types.Receipt, txInd } // stateObjects: KV, balance, nonce... - dirtyObj, ok := slotDb.getStateObjectFromStateObjects(addr) + dirtyObj, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr] if !ok { - log.Error("parallel merge, but dirty object not exist!", "txIndex:", slotDb.txIndex, "addr", addr) + log.Error("parallel merge, but dirty object not exist!", "SlotIndex", slotDb.parallel.SlotIndex, "txIndex:", slotDb.txIndex, "addr", addr) continue } mainObj, exist := s.loadStateObj(addr) - if !exist { + if !exist { // fixme: it is also state change // addr not exist on main DB, do ownership transfer - dirtyObj.db = s - dirtyObj.finalise(true) // true: prefetch on dispatcher - s.storeStateObj(addr, dirtyObj) - delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership + // dirtyObj.db = s + // dirtyObj.finalise(true) // true: prefetch on dispatcher + mainObj = dirtyObj.deepCopy(s) + mainObj.finalise(true) + s.storeStateObj(addr, mainObj) + // fixme: should not delete, would cause unconfirmed DB incorrect? + // delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership, fixme: shared read? 
+ if dirtyObj.deleted { + // remove the addr from snapAccounts&snapStorage only when object is deleted. + // "deleted" is not equal to "snapDestructs", since createObject() will add an addr for + // snapDestructs to destroy previous object, while it will keep the addr in snapAccounts & snapAccounts + delete(s.snapAccounts, addr) + delete(s.snapStorage, addr) + } } else { // addr already in main DB, do merge: balance, KV, code, State(create, suicide) // can not do copy or ownership transfer directly, since dirtyObj could have outdated // data(may be updated within the conflict window) var newMainObj = mainObj // we don't need to copy the object since the storages are thread safe - if _, created := slotDb.parallel.addrStateChangesInSlot[addr]; created { + if _, ok := slotDb.parallel.addrStateChangesInSlot[addr]; ok { // there are 3 kinds of state change: // 1.Suicide // 2.Empty Delete @@ -394,10 +433,11 @@ func (s *StateDB) MergeSlotDB(slotDb *StateDB, slotReceipt *types.Receipt, txInd // a.AddBalance,SetState to an unexist or deleted(suicide, empty delete) address. // b.CreateAccount: like DAO the fork, regenerate a account carry its balance without KV // For these state change, do ownership transafer for efficiency: - log.Debug("MergeSlotDB state object merge: addr state change") - dirtyObj.db = s - newMainObj = dirtyObj - delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership + // dirtyObj.db = s + // newMainObj = dirtyObj + newMainObj = dirtyObj.deepCopy(s) + // should not delete, would cause unconfirmed DB incorrect. + // delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership, fixme: shared read? if dirtyObj.deleted { // remove the addr from snapAccounts&snapStorage only when object is deleted. 
// "deleted" is not equal to "snapDestructs", since createObject() will add an addr for @@ -409,24 +449,22 @@ func (s *StateDB) MergeSlotDB(slotDb *StateDB, slotReceipt *types.Receipt, txInd // deepCopy a temporary *StateObject for safety, since slot could read the address, // dispatch should avoid overwrite the StateObject directly otherwise, it could // crash for: concurrent map iteration and map write + if _, balanced := slotDb.parallel.balanceChangesInSlot[addr]; balanced { - log.Debug("merge state object: Balance", - "newMainObj.Balance()", newMainObj.Balance(), - "dirtyObj.Balance()", dirtyObj.Balance()) newMainObj.SetBalance(dirtyObj.Balance()) } if _, coded := slotDb.parallel.codeChangesInSlot[addr]; coded { - log.Debug("merge state object: Code") newMainObj.code = dirtyObj.code newMainObj.data.CodeHash = dirtyObj.data.CodeHash newMainObj.dirtyCode = true } - if keys, stated := slotDb.parallel.stateChangesInSlot[addr]; stated { - log.Debug("merge state object: KV") + if keys, stated := slotDb.parallel.kvChangesInSlot[addr]; stated { newMainObj.MergeSlotObject(s.db, dirtyObj, keys) } - // dirtyObj.Nonce() should not be less than newMainObj - newMainObj.setNonce(dirtyObj.Nonce()) + if _, nonced := slotDb.parallel.nonceChangesInSlot[addr]; nonced { + // dirtyObj.Nonce() should not be less than newMainObj + newMainObj.setNonce(dirtyObj.Nonce()) + } } newMainObj.finalise(true) // true: prefetch on dispatcher // update the object @@ -461,7 +499,9 @@ func (s *StateDB) MergeSlotDB(slotDb *StateDB, slotReceipt *types.Receipt, txInd // One transaction add balance 0 to an empty address, will delete it(delete empty is enabled). 
// While another concurrent transaction could add a none-zero balance to it, make it not empty // We fixed it by add a addr state read record for add balance 0 + s.snapParallelLock.Lock() s.snapDestructs[k] = struct{}{} + s.snapParallelLock.Unlock() } // slotDb.snapAccounts should be empty, comment out and to be deleted later @@ -477,24 +517,14 @@ func (s *StateDB) MergeSlotDB(slotDb *StateDB, slotReceipt *types.Receipt, txInd // s.snapStorage[k] = temp // } } - - // to create a new object to store change list for conflict detect, - // since slot db reuse is disabled, we do not need to do copy. - changeList := SlotChangeList{ - TxIndex: txIndex, - StateObjectSuicided: slotDb.parallel.stateObjectsSuicidedInSlot, - StateChangeSet: slotDb.parallel.stateChangesInSlot, - BalanceChangeSet: slotDb.parallel.balanceChangesInSlot, - CodeChangeSet: slotDb.parallel.codeChangesInSlot, - AddrStateChangeSet: slotDb.parallel.addrStateChangesInSlot, - NonceChangeSet: slotDb.parallel.nonceChangesInSlot, - } - return changeList } func (s *StateDB) EnableWriteOnSharedStorage() { s.writeOnSharedStorage = true } +func (s *StateDB) SetSlotIndex(index int) { + s.parallel.SlotIndex = index +} // StartPrefetcher initializes a new trie prefetcher to pull in nodes from the // state trie concurrently while the state is mutated so that when we reach the @@ -644,40 +674,367 @@ func (s *StateDB) SubRefund(gas uint64) { s.refund -= gas } +// For Parallel Execution Mode, it can be seen as Penetrated Access: +// ------------------------------------------------------- +// | BaseTxIndex | Unconfirmed Txs... 
| Current TxIndex | +// ------------------------------------------------------- +// Access from the unconfirmed DB with range&priority: txIndex -1(previous tx) -> baseTxIndex + 1 +func (s *StateDB) getBalanceFromUnconfirmedDB(addr common.Address) *big.Int { + if addr == s.parallel.systemAddress { + // never get systemaddress from unconfirmed DB + return nil + } + + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + // 1.Refer the state of address, exist or not in dirtiedStateObjectsInSlot + if obj, exist := db.parallel.dirtiedStateObjectsInSlot[addr]; exist { + balanceHit := false + if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { + balanceHit = true + } + if _, exist := db.parallel.balanceChangesInSlot[addr]; exist { // only changed balance is reliable + balanceHit = true + } + if !balanceHit { + continue + } + balance := obj.Balance() + if obj.deleted { + balance = common.Big0 + } + return balance + } + } + } + return nil +} + +// Similar to getBalanceFromUnconfirmedDB +func (s *StateDB) getNonceFromUnconfirmedDB(addr common.Address) (uint64, bool) { + if addr == s.parallel.systemAddress { + // never get systemaddress from unconfirmed DB + return 0, false + } + + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if unconfirmedDb, ok := s.parallel.unconfirmedDBInShot[i]; ok { + nonceHit := false + if _, ok := unconfirmedDb.parallel.addrStateChangesInSlot[addr]; ok { + nonceHit = true + } else if _, ok := unconfirmedDb.parallel.nonceChangesInSlot[addr]; ok { + nonceHit = true + } + if !nonceHit { + // nonce refer not hit, try next unconfirmedDb + continue + } + // nonce hit, return the nonce + obj := unconfirmedDb.parallel.dirtiedStateObjectsInSlot[addr] + if obj == nil { + // could not exist, if it is changed but reverted + // fixme: revert should remove the change record + log.Debug("Get nonce from UnconfirmedDB, changed but object not exist, ", + "txIndex", s.txIndex, 
"referred txIndex", i, "addr", addr) + continue + } + nonce := obj.Nonce() + // deleted object with nonce == 0 + if obj.deleted { + nonce = 0 + } + return nonce, true + } + } + return 0, false +} + +// Similar to getBalanceFromUnconfirmedDB +// It is not only for code, but also codeHash and codeSize, we return the *StateObject for convienence. +func (s *StateDB) getCodeFromUnconfirmedDB(addr common.Address) ([]byte, bool) { + if addr == s.parallel.systemAddress { + // never get systemaddress from unconfirmed DB + return nil, false + } + + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + codeHit := false + if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { + codeHit = true + } + if _, exist := db.parallel.codeChangesInSlot[addr]; exist { + codeHit = true + } + if !codeHit { + // try next unconfirmedDb + continue + } + obj := db.parallel.dirtiedStateObjectsInSlot[addr] + if obj == nil { + // could not exist, if it is changed but reverted + // fixme: revert should remove the change record + log.Debug("Get code from UnconfirmedDB, changed but object not exist, ", + "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) + continue + } + code := obj.Code(s.db) + if obj.deleted { + code = nil + } + return code, true + } + } + return nil, false +} + +// Similar to getCodeFromUnconfirmedDB +// but differ when address is deleted or not exist +func (s *StateDB) getCodeHashFromUnconfirmedDB(addr common.Address) (common.Hash, bool) { + if addr == s.parallel.systemAddress { + // never get systemaddress from unconfirmed DB + return common.Hash{}, false + } + + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + hashHit := false + if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { + hashHit = true + } + if _, exist := db.parallel.codeChangesInSlot[addr]; exist { + hashHit = true + } + if !hashHit { + // try next 
unconfirmedDb + continue + } + + obj := db.parallel.dirtiedStateObjectsInSlot[addr] + if obj == nil { + // could not exist, if it is changed but reverted + // fixme: revert should remove the change record + log.Debug("Get codeHash from UnconfirmedDB, changed but object not exist, ", + "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) + continue + } + codeHash := common.Hash{} + if !obj.deleted { + codeHash = common.BytesToHash(obj.CodeHash()) + } + return codeHash, true + } + } + return common.Hash{}, false +} + +// Similar to getCodeFromUnconfirmedDB +// It is for address state check of: Exist(), Empty() and HasSuicided() +// Since the unconfirmed DB should have done Finalise() with `deleteEmptyObjects = true` +// If the dirty address is empty or suicided, it will be marked as deleted, so we only need to return `deleted` or not. +func (s *StateDB) getAddrStateFromUnconfirmedDB(addr common.Address) (deleted bool, exist bool) { + if addr == s.parallel.systemAddress { + // never get systemaddress from unconfirmed DB + return false, false + } + + // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + if exist, ok := db.parallel.addrStateChangesInSlot[addr]; ok { + if _, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + // could not exist, if it is changed but reverted + // fixme: revert should remove the change record + log.Debug("Get addr State from UnconfirmedDB, changed but object not exist, ", + "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) + continue + } + + return exist, true + } + } + } + return false, false +} + +func (s *StateDB) getKVFromUnconfirmedDB(addr common.Address, key common.Hash) (common.Hash, bool) { + // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := 
s.parallel.unconfirmedDBInShot[i]; ok { + if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { // if deleted on merge, can get from main StateDB, ok but fixme: concurrent safe + if obj.deleted { + return common.Hash{}, true + } + if _, ok := db.parallel.kvChangesInSlot[addr]; ok { + if val, exist := obj.dirtyStorage.GetValue(key); exist { + return val, true + } + if val, exist := obj.pendingStorage.GetValue(key); exist { // fixme: can be removed + log.Error("Get KV from Unconfirmed StateDB, in pending", + "my txIndex", s.txIndex, "DB's txIndex", i, "addr", addr, + "key", key, "val", val) + return val, true + } + } + } + } + } + return common.Hash{}, false +} + +func (s *StateDB) getStateObjectFromUnconfirmedDB(addr common.Address) (*StateObject, bool) { + // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { // if deleted on merge, can get from main StateDB, ok but fixme: concurrent safe + return obj, true + } + } + } + return nil, false +} + // Exist reports whether the given account address exists in the state. // Notably this also returns true for suicided accounts. 
func (s *StateDB) Exist(addr common.Address) bool { - return s.getStateObject(addr) != nil + if s.parallel.isSlotDB { + // 1.Try to get from dirty + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // dirty object should not be deleted, since deleted is only flagged on finalise + // and if it is suicided in contract call, suicide is taken as exist until it is finalised + // todo: add a check here, to be removed later + if obj.deleted || obj.suicided { + log.Error("Exist in dirty, but marked as deleted or suicided", + "txIndex", s.txIndex, "baseTxIndex:", s.parallel.baseTxIndex) + } + return true + } + // 2.Try to get from uncomfirmed & main DB + // 2.1 Already read before + if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok { + return exist + } + // 2.2 Try to get from unconfirmed DB if exist + if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { + s.parallel.addrStateReadsInSlot[addr] = exist // update and cache + return exist + } + } + // 3.Try to get from main StateDB + exist := s.getStateObjectNoSlot(addr) != nil + if s.parallel.isSlotDB { + s.parallel.addrStateReadsInSlot[addr] = exist // update and cache + } + return exist } // Empty returns whether the state object is either non-existent // or empty according to the EIP161 specification (balance = nonce = code = 0) func (s *StateDB) Empty(addr common.Address) bool { - so := s.getStateObject(addr) - return so == nil || so.empty() + if s.parallel.isSlotDB { + // 1.Try to get from dirty + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // dirty object is light copied and fixup on need, + // empty could be wrong, except it is created with this TX + if _, ok := s.parallel.addrStateChangesInSlot[addr]; ok { + return obj.empty() + } + // so we have to check it manually + // empty means: Nonce == 0 && Balance == 0 && CodeHash == emptyCodeHash + if s.GetNonce(addr) != 0 { + return false + } + if s.GetBalance(addr).Sign() != 0 { + return false + } + codeHash := 
s.GetCodeHash(addr) + return bytes.Equal(codeHash.Bytes(), emptyCodeHash) // code is empty, the object is empty + } + // 2.Try to get from uncomfirmed & main DB + // 2.1 Already read before + if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok { + // exist means not empty + return !exist + } + // 2.2 Try to get from unconfirmed DB if exist + if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { + s.parallel.addrStateReadsInSlot[addr] = exist // update and cache + return !exist + } + } + + so := s.getStateObjectNoSlot(addr) + empty := (so == nil || so.empty()) + if s.parallel.isSlotDB { + s.parallel.addrStateReadsInSlot[addr] = !empty // update and cache + } + return empty } // GetBalance retrieves the balance from the given address or 0 if object not found +// GetFrom the dirty list => from unconfirmed DB => get from main stateDB func (s *StateDB) GetBalance(addr common.Address) *big.Int { if s.parallel.isSlotDB { - s.parallel.balanceReadsInSlot[addr] = struct{}{} if addr == s.parallel.systemAddress { s.parallel.systemAddressOpsCount++ } + // 1.Try to get from dirty + if _, ok := s.parallel.balanceChangesInSlot[addr]; ok { + obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot + return obj.Balance() + } + // 2.Try to get from uncomfirmed DB or main DB + // 2.1 Already read before + if balance, ok := s.parallel.balanceReadsInSlot[addr]; ok { + return balance + } + // 2.2 Try to get from unconfirmed DB if exist + if balance := s.getBalanceFromUnconfirmedDB(addr); balance != nil { + s.parallel.balanceReadsInSlot[addr] = balance + return balance + } } - stateObject := s.getStateObject(addr) + // 3. 
Try to get from main StateObejct + balance := common.Big0 + stateObject := s.getStateObjectNoSlot(addr) if stateObject != nil { - return stateObject.Balance() + balance = stateObject.Balance() } - return common.Big0 + if s.parallel.isSlotDB { + s.parallel.balanceReadsInSlot[addr] = balance + } + return balance } func (s *StateDB) GetNonce(addr common.Address) uint64 { - stateObject := s.getStateObject(addr) + if s.parallel.isSlotDB { + // 1.Try to get from dirty + if _, ok := s.parallel.nonceChangesInSlot[addr]; ok { + obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist + return obj.Nonce() + } + // 2.Try to get from uncomfirmed DB or main DB + // 2.1 Already read before + if nonce, ok := s.parallel.nonceReadsInSlot[addr]; ok { + return nonce + } + // 2.2 Try to get from unconfirmed DB if exist + if nonce, ok := s.getNonceFromUnconfirmedDB(addr); ok { + s.parallel.nonceReadsInSlot[addr] = nonce + return nonce + } + } + // 3.Try to get from main StateDB + var nonce uint64 = 0 + stateObject := s.getStateObjectNoSlot(addr) if stateObject != nil { - return stateObject.Nonce() + nonce = stateObject.Nonce() } - return 0 + if s.parallel.isSlotDB { + s.parallel.nonceReadsInSlot[addr] = nonce + } + + return nonce } // TxIndex returns the current transaction index set by Prepare. @@ -690,29 +1047,95 @@ func (s *StateDB) BlockHash() common.Hash { return s.bhash } -func (s *StateDB) IsSlotDB() bool { - return s.parallel.isSlotDB -} - // BaseTxIndex returns the tx index that slot db based. 
func (s *StateDB) BaseTxIndex() int { return s.parallel.baseTxIndex } -func (s *StateDB) CodeReadsInSlot() map[common.Address]struct{} { - return s.parallel.codeReadsInSlot -} - -func (s *StateDB) AddressReadsInSlot() map[common.Address]struct{} { - return s.parallel.addrStateReadsInSlot -} - -func (s *StateDB) StateReadsInSlot() map[common.Address]StateKeys { - return s.parallel.stateReadsInSlot -} +func (s *StateDB) IsParallelReadsValid() bool { + slotDB := s + if !slotDB.parallel.isSlotDB { + log.Error("IsSlotDBReadsValid slotDB should be slot DB", "txIndex", slotDB.txIndex) + return false + } -func (s *StateDB) BalanceReadsInSlot() map[common.Address]struct{} { - return s.parallel.balanceReadsInSlot + mainDB := slotDB.parallel.baseStateDB + if mainDB.parallel.isSlotDB { + log.Error("IsSlotDBReadsValid s should be main DB", "txIndex", slotDB.txIndex) + return false + } + // for nonce + for addr, nonceSlot := range slotDB.parallel.nonceReadsInSlot { + nonceMain := mainDB.GetNonce(addr) + if nonceSlot != nonceMain { + log.Debug("IsSlotDBReadsValid nonce read is invalid", "addr", addr, + "nonceSlot", nonceSlot, "nonceMain", nonceMain, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + // balance + for addr, balanceSlot := range slotDB.parallel.balanceReadsInSlot { + balanceMain := mainDB.GetBalance(addr) + if balanceSlot.Cmp(balanceMain) != 0 { + log.Debug("IsSlotDBReadsValid balance read is invalid", "addr", addr, + "balanceSlot", balanceSlot, "balanceMain", balanceMain, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + // check code + for addr, codeSlot := range slotDB.parallel.codeReadsInSlot { + codeMain := mainDB.GetCode(addr) + if !bytes.Equal(codeSlot, codeMain) { + log.Debug("IsSlotDBReadsValid code read is invalid", "addr", addr, + "len codeSlot", len(codeSlot), "len codeMain", len(codeMain), + "txIndex", slotDB.txIndex, "baseTxIndex", 
slotDB.parallel.baseTxIndex) + return false + } + } + // check codeHash + for addr, codeHashSlot := range slotDB.parallel.codeHashReadsInSlot { + codeHashMain := mainDB.GetCodeHash(addr) + if !bytes.Equal(codeHashSlot.Bytes(), codeHashMain.Bytes()) { + log.Debug("IsSlotDBReadsValid codehash read is invalid", "addr", addr, + "codeHashSlot", codeHashSlot, "codeHashMain", codeHashMain, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + // check KV + for addr, slotStorage := range slotDB.parallel.kvReadsInSlot { + conflict := false + slotStorage.Range(func(keySlot, valSlot interface{}) bool { + valMain := mainDB.GetState(addr, keySlot.(common.Hash)) + if !bytes.Equal(valSlot.(common.Hash).Bytes(), valMain.Bytes()) { + log.Debug("IsSlotDBReadsValid KV read is invalid", "addr", addr, + "key", keySlot.(common.Hash), "valSlot", valSlot.(common.Hash), "valMain", valMain, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + conflict = true + return false // return false, Range will be terminated. 
+ } + return true // return true, Range will try next KV + }) + if conflict { + return false + } + } + // addr state check + for addr, stateSlot := range slotDB.parallel.addrStateReadsInSlot { + stateMain := false // addr not exist + if mainDB.getStateObjectNoSlot(addr) != nil { + stateMain = true // addr exist in main DB + } + if stateSlot != stateMain { + log.Debug("IsSlotDBReadsValid addrState read invalid(true: exist, false: not exist)", + "addr", addr, "stateSlot", stateSlot, "stateMain", stateMain, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + return true } // For most of the transactions, systemAddressOpsCount should be 2: @@ -729,60 +1152,161 @@ func (s *StateDB) NeedsRedo() bool { return s.parallel.needsRedo } -func (s *StateDB) NonceIncreased() uint64 { - return s.parallel.nonceIncreased -} - func (s *StateDB) GetCode(addr common.Address) []byte { if s.parallel.isSlotDB { - s.parallel.codeReadsInSlot[addr] = struct{}{} + // 1.Try to get from dirty + if _, ok := s.parallel.codeChangesInSlot[addr]; ok { + obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot + code := obj.Code(s.db) + return code + } + // 2.Try to get from uncomfirmed DB or main DB + // 2.1 Already read before + if code, ok := s.parallel.codeReadsInSlot[addr]; ok { + return code + } + // 2.2 Try to get from unconfirmed DB if exist + if code, ok := s.getCodeFromUnconfirmedDB(addr); ok { + s.parallel.codeReadsInSlot[addr] = code + return code + } } - stateObject := s.getStateObject(addr) + // 3. 
Try to get from main StateObejct + stateObject := s.getStateObjectNoSlot(addr) + var code []byte if stateObject != nil { - return stateObject.Code(s.db) + code = stateObject.Code(s.db) } - return nil + if s.parallel.isSlotDB { + s.parallel.codeReadsInSlot[addr] = code + } + return code } func (s *StateDB) GetCodeSize(addr common.Address) int { if s.parallel.isSlotDB { - s.parallel.codeReadsInSlot[addr] = struct{}{} // code size is part of code + // 1.Try to get from dirty + if _, ok := s.parallel.codeChangesInSlot[addr]; ok { + obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot + return obj.CodeSize(s.db) + } + // 2.Try to get from uncomfirmed DB or main DB + // 2.1 Already read before + if code, ok := s.parallel.codeReadsInSlot[addr]; ok { + return len(code) // len(nil) is 0 too + } + // 2.2 Try to get from unconfirmed DB if exist + if code, ok := s.getCodeFromUnconfirmedDB(addr); ok { + s.parallel.codeReadsInSlot[addr] = code + return len(code) // len(nil) is 0 too + } } - stateObject := s.getStateObject(addr) + // 3. 
Try to get from main StateObejct + var codeSize int = 0 + var code []byte + stateObject := s.getStateObjectNoSlot(addr) + if stateObject != nil { - return stateObject.CodeSize(s.db) + code = stateObject.Code(s.db) + codeSize = stateObject.CodeSize(s.db) + } + if s.parallel.isSlotDB { + s.parallel.codeReadsInSlot[addr] = code } - return 0 + return codeSize } +// return value of GetCodeHash: +// - common.Hash{}: the address does not exist +// - emptyCodeHash: the address exist, but code is empty +// - others: the address exist, and code is not empty func (s *StateDB) GetCodeHash(addr common.Address) common.Hash { if s.parallel.isSlotDB { - s.parallel.codeReadsInSlot[addr] = struct{}{} // code hash is part of code + // 1.Try to get from dirty + if _, ok := s.parallel.codeChangesInSlot[addr]; ok { + obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot + return common.BytesToHash(obj.CodeHash()) + } + // 2.Try to get from uncomfirmed DB or main DB + // 2.1 Already read before + if codeHash, ok := s.parallel.codeHashReadsInSlot[addr]; ok { + return codeHash + } + // 2.2 Try to get from unconfirmed DB if exist + if codeHash, ok := s.getCodeHashFromUnconfirmedDB(addr); ok { + s.parallel.codeHashReadsInSlot[addr] = codeHash + return codeHash + } } - stateObject := s.getStateObject(addr) - if stateObject == nil { - return common.Hash{} + // 3. Try to get from main StateObejct + stateObject := s.getStateObjectNoSlot(addr) + codeHash := common.Hash{} + if stateObject != nil { + codeHash = common.BytesToHash(stateObject.CodeHash()) + } + if s.parallel.isSlotDB { + s.parallel.codeHashReadsInSlot[addr] = codeHash } - return common.BytesToHash(stateObject.CodeHash()) + return codeHash } // GetState retrieves a value from the given account's storage trie. 
+// For parallel mode wih, get from the state in order: +// -> self dirty, both Slot & MainProcessor +// -> pending of self: Slot on merge +// -> pending of unconfirmed DB +// -> pending of main StateDB +// -> origin func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { if s.parallel.isSlotDB { - if s.parallel.stateReadsInSlot[addr] == nil { - s.parallel.stateReadsInSlot[addr] = make(map[common.Hash]struct{}, defaultNumOfSlots) + + // 1.Try to get from dirty + if exist, ok := s.parallel.addrStateChangesInSlot[addr]; ok { + if !exist { + return common.Hash{} + } + obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot + return obj.GetState(s.db, hash) + } + if keys, ok := s.parallel.kvChangesInSlot[addr]; ok { + if _, ok := keys[hash]; ok { + obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot + return obj.GetState(s.db, hash) + } + } + // 2.Try to get from uncomfirmed DB or main DB + // 2.1 Already read before + if storage, ok := s.parallel.kvReadsInSlot[addr]; ok { + if val, ok := storage.GetValue(hash); ok { + return val + } + } + // 2.2 Try to get from unconfirmed DB if exist + if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok { + if s.parallel.kvReadsInSlot[addr] == nil { + s.parallel.kvReadsInSlot[addr] = newStorage(false) + } + s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache + return val } - s.parallel.stateReadsInSlot[addr][hash] = struct{}{} } - stateObject := s.getStateObject(addr) + // 3.Get from main StateDB + stateObject := s.getStateObjectNoSlot(addr) + val := common.Hash{} if stateObject != nil { - return stateObject.GetState(s.db, hash) + val = stateObject.GetState(s.db, hash) + } + if s.parallel.isSlotDB { + if s.parallel.kvReadsInSlot[addr] == nil { + s.parallel.kvReadsInSlot[addr] = newStorage(false) + } + s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache } - return common.Hash{} + return 
val } // GetProof returns the Merkle proof for a given account. @@ -825,17 +1349,38 @@ func (s *StateDB) GetStorageProofByHash(a common.Address, key common.Hash) ([][] // GetCommittedState retrieves a value from the given account's committed storage trie. func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash { if s.parallel.isSlotDB { - if s.parallel.stateReadsInSlot[addr] == nil { - s.parallel.stateReadsInSlot[addr] = make(map[common.Hash]struct{}, defaultNumOfSlots) + // 1.No need to get from pending of itself even on merge, since stateobject in SlotDB won't do finalise + // 2.Try to get from uncomfirmed DB or main DB + // KVs in unconfirmed DB can be seen as pending storage + // KVs in main DB are merged from SlotDB and has done finalise() on merge, can be seen as pending storage too. + // 2.1 Already read before + if storage, ok := s.parallel.kvReadsInSlot[addr]; ok { + if val, ok := storage.GetValue(hash); ok { + return val + } + } + // 2.2 Try to get from unconfirmed DB if exist + if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok { + if s.parallel.kvReadsInSlot[addr] == nil { + s.parallel.kvReadsInSlot[addr] = newStorage(false) + } + s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache + return val } - s.parallel.stateReadsInSlot[addr][hash] = struct{}{} } - - stateObject := s.getStateObject(addr) + // 3. Try to get from main DB + stateObject := s.getStateObjectNoSlot(addr) + val := common.Hash{} if stateObject != nil { - return stateObject.GetCommittedState(s.db, hash) + val = stateObject.GetCommittedState(s.db, hash) + } + if s.parallel.isSlotDB { + if s.parallel.kvReadsInSlot[addr] == nil { + s.parallel.kvReadsInSlot[addr] = newStorage(false) + } + s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache } - return common.Hash{} + return val } // Database retrieves the low level database supporting the lower level trie ops. 
@@ -856,7 +1401,20 @@ func (s *StateDB) StorageTrie(addr common.Address) Trie { } func (s *StateDB) HasSuicided(addr common.Address) bool { - stateObject := s.getStateObject(addr) + if s.parallel.isSlotDB { + // 1.Try to get from dirty + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + return obj.suicided + } + // 2.Try to get from uncomfirmed + if deleted, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { + if deleted { + return false + } + return false + } + } + stateObject := s.getStateObjectNoSlot(addr) if stateObject != nil { return stateObject.suicided } @@ -867,30 +1425,62 @@ func (s *StateDB) HasSuicided(addr common.Address) bool { * SETTERS */ +// the source mainObj should be got from the main StateDB +// we have to update its nonce, balance, code if they have updated in the unconfirmed DBs +/* +func (s *StateDB) unconfirmedLightCopy(mainObj *StateObject) *StateObject { + newObj := mainObj.lightCopy(s) // copied nonce, balance, code from base DB + + // do balance fixup only when it exist in unconfirmed DB + if nonce, ok := s.getNonceFromUnconfirmedDB(mainObj.address); ok { + // code got from unconfirmed DB + newObj.setNonce(nonce) + } + + // do balance fixup + if balance := s.getBalanceFromUnconfirmedDB(mainObj.address); balance != nil { + // balance got from unconfirmed DB + newObj.setBalance(balance) + } + // do code fixup + if codeObj, ok := s.getCodeFromUnconfirmedDB(mainObj.address); ok { + newObj.setCode(crypto.Keccak256Hash(codeObj), codeObj) // fixme: to confirm if we should use "codeObj.Code(db)" + newObj.dirtyCode = false // copy does not make the code dirty, + } + return newObj +} +*/ + // AddBalance adds amount to the account associated with addr. 
func (s *StateDB) AddBalance(addr common.Address, amount *big.Int) { - if s.parallel.isSlotDB { - if amount.Sign() != 0 { - s.parallel.balanceChangesInSlot[addr] = struct{}{} - // add balance will perform a read operation first - s.parallel.balanceReadsInSlot[addr] = struct{}{} - } else { - // if amount == 0, no balance change, but there is still an empty check. - // take this empty check as addr state read(create, suicide, empty delete) - s.parallel.addrStateReadsInSlot[addr] = struct{}{} - } - if addr == s.parallel.systemAddress { - s.parallel.systemAddressOpsCount++ - } - } + // if s.parallel.isSlotDB { + // add balance will perform a read operation first + // s.parallel.balanceReadsInSlot[addr] = struct{}{} // fixme: to make the the balance valid, since unconfirmed would refer it. + // if amount.Sign() == 0 { + // if amount == 0, no balance change, but there is still an empty check. + // take this empty check as addr state read(create, suicide, empty delete) + // s.parallel.addrStateReadsInSlot[addr] = struct{}{} + // } + // } stateObject := s.GetOrNewStateObject(addr) if stateObject != nil { if s.parallel.isSlotDB { + if addr == s.parallel.systemAddress { + s.parallel.systemAddressOpsCount++ + } if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.deepCopy(s) + newStateObject := stateObject.lightCopy(s) // light copy from main DB + // do balance fixup from the confirmed DB, it could be more reliable than main DB + if balance := s.getBalanceFromUnconfirmedDB(addr); balance != nil { + newStateObject.setBalance(balance) + } + s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance() // could read from main DB or unconfirmed DB + newStateObject.AddBalance(amount) s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + // if amount.Sign() != 0 { // todo: to reenable it + s.parallel.balanceChangesInSlot[addr] = struct{}{} return } } @@ -900,24 +1490,30 @@ func (s *StateDB) AddBalance(addr common.Address, amount 
*big.Int) { // SubBalance subtracts amount from the account associated with addr. func (s *StateDB) SubBalance(addr common.Address, amount *big.Int) { - if s.parallel.isSlotDB { - if amount.Sign() != 0 { - s.parallel.balanceChangesInSlot[addr] = struct{}{} - // unlike add, sub 0 balance will not touch empty object - s.parallel.balanceReadsInSlot[addr] = struct{}{} - } - if addr == s.parallel.systemAddress { - s.parallel.systemAddressOpsCount++ - } - } - + // if s.parallel.isSlotDB { + // if amount.Sign() != 0 { + // unlike add, sub 0 balance will not touch empty object + // s.parallel.balanceReadsInSlot[addr] = struct{}{} + // } + // } stateObject := s.GetOrNewStateObject(addr) if stateObject != nil { if s.parallel.isSlotDB { + if addr == s.parallel.systemAddress { + s.parallel.systemAddressOpsCount++ + } if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.deepCopy(s) + newStateObject := stateObject.lightCopy(s) // light copy from main DB + // do balance fixup from the confirmed DB, it could be more reliable than main DB + if balance := s.getBalanceFromUnconfirmedDB(addr); balance != nil { + newStateObject.setBalance(balance) + } + s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance() newStateObject.SubBalance(amount) s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + + // if amount.Sign() != 0 { // todo: to reenable it + s.parallel.balanceChangesInSlot[addr] = struct{}{} return } } @@ -929,13 +1525,12 @@ func (s *StateDB) SetBalance(addr common.Address, amount *big.Int) { stateObject := s.GetOrNewStateObject(addr) if stateObject != nil { if s.parallel.isSlotDB { - s.parallel.balanceChangesInSlot[addr] = struct{}{} if addr == s.parallel.systemAddress { s.parallel.systemAddressOpsCount++ } - + s.parallel.balanceChangesInSlot[addr] = struct{}{} if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.deepCopy(s) + newStateObject := stateObject.lightCopy(s) 
newStateObject.SetBalance(amount) s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject return @@ -945,25 +1540,13 @@ func (s *StateDB) SetBalance(addr common.Address, amount *big.Int) { } } -// Generally sender's nonce will be increased by 1 for each transaction -// But if the contract tries to create a new contract, its nonce will be advanced -// for each opCreate or opCreate2. Nonce is key to transaction execution, once it is -// changed for contract created, the concurrent transaction will be marked invalid if -// they accessed the address. -func (s *StateDB) NonceChanged(addr common.Address) { - if s.parallel.isSlotDB { - log.Debug("NonceChanged", "txIndex", s.txIndex, "addr", addr) - s.parallel.nonceChangesInSlot[addr] = struct{}{} - s.parallel.nonceIncreased++ - } -} - func (s *StateDB) SetNonce(addr common.Address, nonce uint64) { stateObject := s.GetOrNewStateObject(addr) if stateObject != nil { if s.parallel.isSlotDB { + s.parallel.nonceChangesInSlot[addr] = struct{}{} if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.deepCopy(s) + newStateObject := stateObject.lightCopy(s) newStateObject.SetNonce(nonce) s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject return @@ -976,41 +1559,44 @@ func (s *StateDB) SetNonce(addr common.Address, nonce uint64) { func (s *StateDB) SetCode(addr common.Address, code []byte) { stateObject := s.GetOrNewStateObject(addr) if stateObject != nil { + codeHash := crypto.Keccak256Hash(code) if s.parallel.isSlotDB { s.parallel.codeChangesInSlot[addr] = struct{}{} - if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.deepCopy(s) - newStateObject.SetCode(crypto.Keccak256Hash(code), code) + newStateObject := stateObject.lightCopy(s) + newStateObject.SetCode(codeHash, code) s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject return } } - stateObject.SetCode(crypto.Keccak256Hash(code), code) + stateObject.SetCode(codeHash, code) } 
} func (s *StateDB) SetState(addr common.Address, key, value common.Hash) { - stateObject := s.GetOrNewStateObject(addr) + stateObject := s.GetOrNewStateObject(addr) // attention: if StateObject's lightCopy, its storage is only a part of the full storage, if stateObject != nil { if s.parallel.isSlotDB { if s.parallel.baseTxIndex+1 == s.txIndex { // we check if state is unchanged // only when current transaction is the next transaction to be committed - if stateObject.GetState(s.db, key) == value { + // fixme: there is a bug, block: 14,962,284, + // stateObject is in dirty (light copy), but the key is in mainStateDB + // stateObject dirty -> committed, will skip mainStateDB dirty + if s.GetState(addr, key) == value { log.Debug("Skip set same state", "baseTxIndex", s.parallel.baseTxIndex, - "txIndex", s.txIndex) + "txIndex", s.txIndex, "addr", addr, + "key", key, "value", value) return } } - if s.parallel.stateChangesInSlot[addr] == nil { - s.parallel.stateChangesInSlot[addr] = make(StateKeys, defaultNumOfSlots) + if s.parallel.kvChangesInSlot[addr] == nil { + s.parallel.kvChangesInSlot[addr] = make(StateKeys) // make(Storage, defaultNumOfSlots) } - s.parallel.stateChangesInSlot[addr][key] = struct{}{} if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.deepCopy(s) + newStateObject := stateObject.lightCopy(s) newStateObject.SetState(s.db, key, value) s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject return @@ -1023,7 +1609,7 @@ func (s *StateDB) SetState(addr common.Address, key, value common.Hash) { // SetStorage replaces the entire storage for the specified account with given // storage. This function should only be used for debugging. func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) { - stateObject := s.GetOrNewStateObject(addr) + stateObject := s.GetOrNewStateObject(addr) // fixme: parallel mode? 
if stateObject != nil { stateObject.SetStorage(storage) } @@ -1035,26 +1621,45 @@ func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common // The account's state object is still available until the state is committed, // getStateObject will return a non-nil account after Suicide. func (s *StateDB) Suicide(addr common.Address) bool { - stateObject := s.getStateObject(addr) + var stateObject *StateObject + if s.parallel.isSlotDB { + // 1.Try to get from dirty, it could be suicided inside of contract call + stateObject = s.parallel.dirtiedStateObjectsInSlot[addr] + // 2.Try to get from uncomfirmed, if deleted return false, since the address does not exist + if stateObject == nil { + if deleted, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { + if deleted { + return false + } + } + } + } + // 3.Try to get from main StateDB + if stateObject == nil { + stateObject = s.getStateObjectNoSlot(addr) + } if stateObject == nil { return false } s.journal.append(suicideChange{ account: &addr, - prev: stateObject.suicided, + prev: stateObject.suicided, // todo: must be false? 
prevbalance: new(big.Int).Set(stateObject.Balance()), }) if s.parallel.isSlotDB { - s.parallel.stateObjectsSuicidedInSlot[addr] = struct{}{} - s.parallel.addrStateChangesInSlot[addr] = struct{}{} if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { // do copy-on-write for suicide "write" - newStateObject := stateObject.deepCopy(s) + newStateObject := stateObject.lightCopy(s) newStateObject.markSuicided() newStateObject.data.Balance = new(big.Int) s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist any more, + s.parallel.nonceChangesInSlot[addr] = struct{}{} + s.parallel.balanceChangesInSlot[addr] = struct{}{} + s.parallel.codeChangesInSlot[addr] = struct{}{} + // s.parallel.kvChangesInSlot[addr] = make(StateKeys) // all key changes are discarded return true } } @@ -1105,9 +1710,20 @@ func (s *StateDB) deleteStateObject(obj *StateObject) { // getStateObject retrieves a state object given by the address, returning nil if // the object is not found or was deleted in this execution context. If you need // to differentiate between non-existent/just-deleted, use getDeletedStateObject. +// fixme: avoid getStateObjectNoSlot, may be we define a new struct SlotDB which inherit StateDB +func (s *StateDB) getStateObjectNoSlot(addr common.Address) *StateObject { + if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted { + return obj + } + return nil +} + +// for parallel execution mode, try to get dirty StateObject in slot first. 
func (s *StateDB) getStateObject(addr common.Address) *StateObject { if s.parallel.isSlotDB { - s.parallel.addrStateReadsInSlot[addr] = struct{}{} + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + return obj + } } if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted { @@ -1187,38 +1803,66 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject { } func (s *StateDB) SetStateObject(object *StateObject) { - if s.parallel.isSlotDB { - s.parallel.dirtiedStateObjectsInSlot[object.Address()] = object - } else { - s.storeStateObj(object.Address(), object) - } + s.storeStateObj(object.Address(), object) } // GetOrNewStateObject retrieves a state object or create a new state object if nil. +// dirtyInSlot -> Unconfirmed DB -> main DB -> snapshot, no? create one func (s *StateDB) GetOrNewStateObject(addr common.Address) *StateObject { - stateObject := s.getStateObject(addr) + var stateObject *StateObject = nil + exist := true + if s.parallel.isSlotDB { + if stateObject, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + return stateObject + } + stateObject, _ = s.getStateObjectFromUnconfirmedDB(addr) + } + if stateObject == nil { - stateObject, _ = s.createObject(addr) + stateObject = s.getStateObjectNoSlot(addr) + } + if stateObject == nil || stateObject.deleted { + stateObject = s.createObject(addr) + exist = false + } + + if s.parallel.isSlotDB { + s.parallel.addrStateReadsInSlot[addr] = exist // true: exist, false: not exist } return stateObject } // createObject creates a new state object. If there is an existing account with // the given address, it is overwritten and returned as the second return value. 
-func (s *StateDB) createObject(addr common.Address) (newobj, prev *StateObject) { + +// prev is used for CreateAccount to get its balance +// Parallel mode: +// if prev in dirty: revert is ok +// if prev in unconfirmed DB: addr state read record, revert should not put it back +// if prev in main DB: addr state read record, revert should not put it back +// if pre no exist: addr state read record, + +// `prev` is used to handle revert, to recover with the `prev` object +// In Parallel mode, we only need to recover to `prev` in SlotDB, +// a.if it is not in SlotDB, `revert` will remove it from the SlotDB +// b.if it is exist in SlotDB, `revert` will recover to the `prev` in SlotDB +// c.as `snapDestructs` it is the same +func (s *StateDB) createObject(addr common.Address) (newobj *StateObject) { + var prev *StateObject = nil if s.parallel.isSlotDB { - s.parallel.addrStateReadsInSlot[addr] = struct{}{} // will try to get the previous object. - s.parallel.addrStateChangesInSlot[addr] = struct{}{} + // do not get from unconfirmed DB, since it will has problem on revert + prev = s.parallel.dirtiedStateObjectsInSlot[addr] + } else { + prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that! } - prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that! - var prevdestruct bool + if s.snap != nil && prev != nil { _, prevdestruct = s.snapDestructs[prev.address] if !prevdestruct { - // createObject for deleted object will destroy the previous trie node first - // and update the trie tree with the new object on block commit. + // To destroy the previous trie node first and update the trie tree + // with the new object on block commit. 
s.snapDestructs[prev.address] = struct{}{} } } @@ -1229,11 +1873,19 @@ func (s *StateDB) createObject(addr common.Address) (newobj, prev *StateObject) } else { s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct}) } - s.SetStateObject(newobj) - if prev != nil && !prev.deleted { - return newobj, prev + + if s.parallel.isSlotDB { + s.parallel.dirtiedStateObjectsInSlot[addr] = newobj + s.parallel.addrStateChangesInSlot[addr] = true // the object is created + s.parallel.nonceChangesInSlot[addr] = struct{}{} + s.parallel.balanceChangesInSlot[addr] = struct{}{} + s.parallel.codeChangesInSlot[addr] = struct{}{} + // notice: all the KVs are cleared if any + s.parallel.kvChangesInSlot[addr] = make(StateKeys) + } else { + s.SetStateObject(newobj) } - return newobj, nil + return newobj } // CreateAccount explicitly creates a state object. If a state object with the address @@ -1247,14 +1899,12 @@ func (s *StateDB) createObject(addr common.Address) (newobj, prev *StateObject) // // Carrying over the balance ensures that Ether doesn't disappear. 
func (s *StateDB) CreateAccount(addr common.Address) { - newObj, prev := s.createObject(addr) - if prev != nil { - newObj.setBalance(prev.data.Balance) - } - if s.parallel.isSlotDB { - s.parallel.balanceReadsInSlot[addr] = struct{}{} // read the balance of previous object - s.parallel.dirtiedStateObjectsInSlot[addr] = newObj - } + // no matter it is got from dirty, unconfirmed or main DB + // if addr not exist, preBalance will be common.Big0, it is same as new(big.Int) which + // is the value newObject(), + preBalance := s.GetBalance(addr) // parallel balance read will be recorded inside of GetBalance + newObj := s.createObject(addr) + newObj.setBalance(new(big.Int).Set(preBalance)) // new big.Int for newObj } func (s *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error { @@ -1413,6 +2063,10 @@ var stateObjectsPool = sync.Pool{ New: func() interface{} { return make(map[common.Address]*StateObject, defaultNumOfSlots) }, } +var balancePool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]*big.Int, defaultNumOfSlots) }, +} + var snapAccountPool = sync.Pool{ New: func() interface{} { return make(map[common.Address][]byte, defaultNumOfSlots) }, } @@ -1430,25 +2084,20 @@ var logsPool = sync.Pool{ } func (s *StateDB) SlotDBPutSyncPool() { - for key := range s.parallel.stateObjectsSuicidedInSlot { - delete(s.parallel.stateObjectsSuicidedInSlot, key) - } - addressStructPool.Put(s.parallel.stateObjectsSuicidedInSlot) - for key := range s.parallel.codeReadsInSlot { delete(s.parallel.codeReadsInSlot, key) } addressStructPool.Put(s.parallel.codeReadsInSlot) - for key := range s.parallel.codeChangesInSlot { - delete(s.parallel.codeChangesInSlot, key) - } - addressStructPool.Put(s.parallel.codeChangesInSlot) + // for key := range s.parallel.codeChangesInSlot { + // delete(s.parallel.codeChangesInSlot, key) + // } + // addressStructPool.Put(s.parallel.codeChangesInSlot) for key := range s.parallel.balanceChangesInSlot { 
delete(s.parallel.balanceChangesInSlot, key) } - addressStructPool.Put(s.parallel.balanceChangesInSlot) + balancePool.Put(s.parallel.balanceChangesInSlot) for key := range s.parallel.balanceReadsInSlot { delete(s.parallel.balanceReadsInSlot, key) @@ -1481,15 +2130,15 @@ func (s *StateDB) SlotDBPutSyncPool() { s.journal.entries = s.journal.entries[:0] journalPool.Put(s.journal) - for key := range s.parallel.stateChangesInSlot { - delete(s.parallel.stateChangesInSlot, key) - } - stateKeysPool.Put(s.parallel.stateChangesInSlot) + // for key := range s.parallel.kvChangesInSlot { + // delete(s.parallel.kvChangesInSlot, key) + //} + //stateKeysPool.Put(s.parallel.kvChangesInSlot) - for key := range s.parallel.stateReadsInSlot { - delete(s.parallel.stateReadsInSlot, key) + for key := range s.parallel.kvReadsInSlot { + delete(s.parallel.kvReadsInSlot, key) } - stateKeysPool.Put(s.parallel.stateReadsInSlot) + stateKeysPool.Put(s.parallel.kvReadsInSlot) for key := range s.parallel.dirtiedStateObjectsInSlot { delete(s.parallel.dirtiedStateObjectsInSlot, key) @@ -1526,27 +2175,31 @@ func (s *StateDB) CopyForSlot() *StateDB { parallel := ParallelState{ // use base(dispatcher) slot db's stateObjects. 
// It is a SyncMap, only readable to slot, not writable - stateObjects: s.parallel.stateObjects, - stateObjectsSuicidedInSlot: addressStructPool.Get().(map[common.Address]struct{}), - codeReadsInSlot: addressStructPool.Get().(map[common.Address]struct{}), - codeChangesInSlot: addressStructPool.Get().(map[common.Address]struct{}), - stateChangesInSlot: stateKeysPool.Get().(map[common.Address]StateKeys), - stateReadsInSlot: stateKeysPool.Get().(map[common.Address]StateKeys), - balanceChangesInSlot: addressStructPool.Get().(map[common.Address]struct{}), - balanceReadsInSlot: addressStructPool.Get().(map[common.Address]struct{}), - addrStateReadsInSlot: addressStructPool.Get().(map[common.Address]struct{}), - addrStateChangesInSlot: addressStructPool.Get().(map[common.Address]struct{}), - nonceChangesInSlot: addressStructPool.Get().(map[common.Address]struct{}), - isSlotDB: true, - dirtiedStateObjectsInSlot: stateObjectsPool.Get().(map[common.Address]*StateObject), + stateObjects: s.parallel.stateObjects, + unconfirmedDBInShot: make(map[int]*StateDB, 100), + + codeReadsInSlot: make(map[common.Address][]byte, 10), // addressStructPool.Get().(map[common.Address]struct{}), + codeHashReadsInSlot: make(map[common.Address]common.Hash), + codeChangesInSlot: make(map[common.Address]struct{}), + kvChangesInSlot: make(map[common.Address]StateKeys), // stateKeysPool.Get().(map[common.Address]StateKeys), + kvReadsInSlot: make(map[common.Address]Storage, 100), // stateKeysPool.Get().(map[common.Address]Storage), + balanceChangesInSlot: make(map[common.Address]struct{}, 10), // balancePool.Get().(map[common.Address]struct{}, 10), + balanceReadsInSlot: make(map[common.Address]*big.Int), // addressStructPool.Get().(map[common.Address]struct{}), + addrStateReadsInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), + addrStateChangesInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), + 
nonceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + nonceReadsInSlot: make(map[common.Address]uint64), + + isSlotDB: true, + dirtiedStateObjectsInSlot: stateObjectsPool.Get().(map[common.Address]*StateObject), } state := &StateDB{ db: s.db, trie: s.db.CopyTrie(s.trie), stateObjects: make(map[common.Address]*StateObject), // replaced by parallel.stateObjects in parallel mode - stateObjectsPending: addressStructPool.Get().(map[common.Address]struct{}), - stateObjectsDirty: addressStructPool.Get().(map[common.Address]struct{}), - refund: s.refund, // should be 0 + stateObjectsPending: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + stateObjectsDirty: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + refund: s.refund, // should be 0 logs: logsPool.Get().(map[common.Hash][]*types.Log), logSize: 0, preimages: make(map[common.Hash][]byte, len(s.preimages)), @@ -1568,10 +2221,12 @@ func (s *StateDB) CopyForSlot() *StateDB { state.snaps = s.snaps state.snap = s.snap // deep copy needed - state.snapDestructs = addressStructPool.Get().(map[common.Address]struct{}) + state.snapDestructs = make(map[common.Address]struct{}) // addressStructPool.Get().(map[common.Address]struct{}) + s.snapParallelLock.RLock() for k, v := range s.snapDestructs { state.snapDestructs[k] = v } + s.snapParallelLock.RUnlock() // state.snapAccounts = snapAccountPool.Get().(map[common.Address][]byte) for k, v := range s.snapAccounts { @@ -1635,10 +2290,22 @@ func (s *StateDB) WaitPipeVerification() error { // Finalise finalises the state by removing the s destructed objects and clears // the journal as well as the refunds. Finalise, however, will not push any updates // into the tries just yet. Only IntermediateRoot or Commit will do that. 
-func (s *StateDB) Finalise(deleteEmptyObjects bool) { +func (s *StateDB) Finalise(deleteEmptyObjects bool) { // fixme: concurrent safe... addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties)) for addr := range s.journal.dirties { - obj, exist := s.getStateObjectFromStateObjects(addr) + var obj *StateObject + var exist bool + if s.parallel.isSlotDB { + obj = s.parallel.dirtiedStateObjectsInSlot[addr] + if obj != nil { + exist = true + } else { + log.Error("StateDB Finalise dirty addr not in dirtiedStateObjectsInSlot", + "addr", addr) + } + } else { + obj, exist = s.getStateObjectFromStateObjects(addr) + } if !exist { // ripeMD is 'touched' at block 1714175, in tx 0x1237f737031e40bcde4a8b7e717b2d15e3ecadfe49bb1bbc71ee9deb09c6fcf2 // That tx goes out of gas, and although the notion of 'touched' does not exist there, the @@ -1650,7 +2317,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { } if obj.suicided || (deleteEmptyObjects && obj.empty()) { if s.parallel.isSlotDB { - s.parallel.addrStateChangesInSlot[addr] = struct{}{} // empty an StateObject is a state change + s.parallel.addrStateChangesInSlot[addr] = false // false: deleted } obj.deleted = true diff --git a/core/state_processor.go b/core/state_processor.go index f98adea519..9e6da7fee1 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -399,9 +399,9 @@ type SlotState struct { pendingTxReqChan chan struct{} pendingConfirmChan chan *ParallelTxResult pendingTxReqList []*ParallelTxRequest // maintained by dispatcher for dispatch policy - mergedChangeList []state.SlotChangeList - slotdbChan chan *state.StateDB // dispatch will create and send this slotDB to slot - // txReqUnits []*ParallelDispatchUnit // only dispatch can access + slotdbChan chan *state.StateDB // dispatch will create and send this slotDB to slot + // txReqUnits []*ParallelDispatchUnit // only dispatch can accesssd + unconfirmedStateDBs *sync.Map // [int]*state.StateDB // fixme: concurrent safe, not use 
sync.Map? } type ParallelTxResult struct { @@ -457,84 +457,6 @@ func (p *ParallelStateProcessor) init() { } } -// conflict check uses conflict window, it will check all state changes from (cfWindowStart + 1) -// to the previous Tx, if any state in readDb is updated in changeList, then it is conflicted -func (p *ParallelStateProcessor) hasStateConflict(readDb *state.StateDB, changeList state.SlotChangeList) bool { - // check KV change - reads := readDb.StateReadsInSlot() - writes := changeList.StateChangeSet - for readAddr, readKeys := range reads { - if _, exist := changeList.AddrStateChangeSet[readAddr]; exist { - log.Debug("conflict: read addr changed state", "addr", readAddr) - return true - } - if writeKeys, ok := writes[readAddr]; ok { - // readAddr exist - for writeKey := range writeKeys { - // same addr and same key, mark conflicted - if _, ok := readKeys[writeKey]; ok { - log.Debug("conflict: state conflict", "addr", readAddr, "key", writeKey) - return true - } - } - } - } - // check balance change - balanceReads := readDb.BalanceReadsInSlot() - balanceWrite := changeList.BalanceChangeSet - for readAddr := range balanceReads { - if _, exist := changeList.AddrStateChangeSet[readAddr]; exist { - // SystemAddress is special, SystemAddressRedo() is prepared for it. 
- // Since txIndex = 0 will create StateObject for SystemAddress, skip its state change check - if readAddr != consensus.SystemAddress { - log.Debug("conflict: read addr changed balance", "addr", readAddr) - return true - } - } - if _, ok := balanceWrite[readAddr]; ok { - if readAddr != consensus.SystemAddress { - log.Debug("conflict: balance conflict", "addr", readAddr) - return true - } - } - } - - // check code change - codeReads := readDb.CodeReadsInSlot() - codeWrite := changeList.CodeChangeSet - for readAddr := range codeReads { - if _, exist := changeList.AddrStateChangeSet[readAddr]; exist { - log.Debug("conflict: read addr changed code", "addr", readAddr) - return true - } - if _, ok := codeWrite[readAddr]; ok { - log.Debug("conflict: code conflict", "addr", readAddr) - return true - } - } - - // check address state change: create, suicide... - addrReads := readDb.AddressReadsInSlot() - addrWrite := changeList.AddrStateChangeSet - nonceWrite := changeList.NonceChangeSet - for readAddr := range addrReads { - if _, ok := addrWrite[readAddr]; ok { - // SystemAddress is special, SystemAddressRedo() is prepared for it. 
- // Since txIndex = 0 will create StateObject for SystemAddress, skip its state change check - if readAddr != consensus.SystemAddress { - log.Debug("conflict: address state conflict", "addr", readAddr) - return true - } - } - if _, ok := nonceWrite[readAddr]; ok { - log.Debug("conflict: address nonce conflict", "addr", readAddr) - return true - } - } - - return false -} - // for parallel execute, we put contracts of same address in a slot, // since these txs probably would have conflicts /* @@ -756,7 +678,9 @@ func (p *ParallelStateProcessor) waitUntilNextTxDone(statedb *state.StateDB, gp if result.updateSlotDB { // the target slot is waiting for new slotDB slotState := p.slotState[result.slotIndex] - slotDB := state.NewSlotDB(statedb, consensus.SystemAddress, p.mergedTxIndex, result.keepSystem) + slotDB := state.NewSlotDB(statedb, consensus.SystemAddress, result.txReq.txIndex, + p.mergedTxIndex, result.keepSystem, slotState.unconfirmedStateDBs) + slotDB.SetSlotIndex(result.slotIndex) p.slotDBsToRelease = append(p.slotDBsToRelease, slotDB) slotState.slotdbChan <- slotDB continue @@ -773,12 +697,7 @@ func (p *ParallelStateProcessor) waitUntilNextTxDone(statedb *state.StateDB, gp resultTxIndex := result.txReq.txIndex resultSlotState := p.slotState[resultSlotIndex] resultSlotState.pendingTxReqList = resultSlotState.pendingTxReqList[1:] - - // Slot's mergedChangeList is produced by dispatcher, while consumed by slot. - // It is safe, since write and read is in sequential, do write -> notify -> read - // It is not good, but work right now. 
- changeList := statedb.MergeSlotDB(result.slotDB, result.receipt, resultTxIndex) - resultSlotState.mergedChangeList = append(resultSlotState.mergedChangeList, changeList) + statedb.MergeSlotDB(result.slotDB, result.receipt, resultTxIndex) if resultTxIndex != p.mergedTxIndex+1 { log.Error("ProcessParallel tx result out of order", "resultTxIndex", resultTxIndex, @@ -798,10 +717,33 @@ func (p *ParallelStateProcessor) executeInSlot(slotIndex int, txReq *ParallelTxR vmenv := vm.NewEVM(blockContext, vm.TxContext{}, slotDB, p.config, txReq.vmConfig) // gasLimit not accurate, but it is ok for block import. // each slot would use its own gas pool, and will do gaslimit check later - gpSlot := new(GasPool).AddGas(txReq.gasLimit) + gpSlot := new(GasPool).AddGas(txReq.gasLimit) // block.GasLimit() evm, result, err := applyTransactionStageExecution(txReq.msg, gpSlot, slotDB, vmenv) - log.Debug("In Slot, Stage Execution done", "Slot", slotIndex, "txIndex", txReq.txIndex, "slotDB.baseTxIndex", slotDB.BaseTxIndex()) + + if err != nil { + // the error could be caused by unconfirmed balance reference, + // the balance could insufficient to pay its gas limit, which cause it preCheck.buyGas() failed + // redo could solve it. + log.Warn("In slot execution error", "error", err) + return &ParallelTxResult{ + updateSlotDB: false, + slotIndex: slotIndex, + txReq: txReq, + receipt: nil, // receipt is generated in finalize stage + slotDB: slotDB, + err: err, + gpSlot: gpSlot, + evm: evm, + result: result, + } + } + + if result.Failed() { + // if Tx is reverted, all its state change will be discarded + slotDB.RevertSlotDB(txReq.msg.From()) + } + slotDB.Finalise(true) // Finalise could write s.parallel.addrStateChangesInSlot[addr], keep Read and Write in same routine to avoid crash return &ParallelTxResult{ updateSlotDB: false, @@ -824,45 +766,28 @@ func (p *ParallelStateProcessor) executeInShadowSlot(slotIndex int, txResult *Pa // wait until the previous tx is finalized. 
if txReq.waitTxChan != nil { - log.Debug("Stage wait previous Tx done", "Slot", slotIndex, "txIndex", txIndex) <-txReq.waitTxChan // close the channel } - if txResult.err != nil { - log.Error("executeInShadowSlot should have no error", "err", txResult.err) - } - // do conflict detect hasConflict := false systemAddrConflict := false - log.Debug("Shadow Stage Execution done, do conflict check", "Slot", slotIndex, "txIndex", txIndex) - if slotDB.SystemAddressRedo() { + if txResult.err != nil { + log.Debug("redo, since in slot execute failed", "err", txResult.err) + hasConflict = true + } else if slotDB.SystemAddressRedo() { + log.Debug("Stage Execution conflict for SystemAddressRedo", "Slot", slotIndex, + "txIndex", txIndex) hasConflict = true systemAddrConflict = true } else if slotDB.NeedsRedo() { // if this is any reason that indicates this transaction needs to redo, skip the conflict check hasConflict = true } else { - for index := 0; index < p.parallelNum; index++ { - // log.Debug("Shadow conflict check", "Slot", slotIndex, "txIndex", txIndex) - // check all finalizedDb from current slot's - for _, changeList := range p.slotState[index].mergedChangeList { - // log.Debug("Shadow conflict check", "changeList.TxIndex", changeList.TxIndex, - // "slotDB.BaseTxIndex()", slotDB.BaseTxIndex()) - if changeList.TxIndex <= slotDB.BaseTxIndex() { - continue - } - if p.hasStateConflict(slotDB, changeList) { - log.Debug("Stage Execution conflict", "Slot", slotIndex, - "txIndex", txIndex, " conflict slot", index, "slotDB.baseTxIndex", slotDB.BaseTxIndex(), - "conflict txIndex", changeList.TxIndex) - hasConflict = true - break - } - } - if hasConflict { - break - } + // to check if what the slot db read is correct. 
+ // refDetail := slotDB.UnconfirmedRefList() + if !slotDB.IsParallelReadsValid() { + hasConflict = true } } @@ -873,6 +798,7 @@ func (p *ParallelStateProcessor) executeInShadowSlot(slotIndex int, txResult *Pa updateSlotDB: true, keepSystem: systemAddrConflict, slotIndex: slotIndex, + txReq: txReq, } p.txResultChan <- redoResult updatedSlotDB := <-p.slotState[slotIndex].slotdbChan @@ -890,6 +816,13 @@ func (p *ParallelStateProcessor) executeInShadowSlot(slotIndex int, txResult *Pa if txResult.err != nil { log.Error("Stage Execution conflict redo, error", txResult.err) } + + if txResult.result.Failed() { + // if Tx is reverted, all its state change will be discarded + log.Debug("TX reverted?", "Slot", slotIndex, "txIndex", txIndex, + "result.Err", txResult.result.Err) + txResult.slotDB.RevertSlotDB(txReq.msg.From()) + } } // goroutine unsafe operation will be handled from here for safety @@ -899,21 +832,11 @@ func (p *ParallelStateProcessor) executeInShadowSlot(slotIndex int, txResult *Pa "gasConsumed", gasConsumed, "result.UsedGas", txResult.result.UsedGas) } - log.Debug("ok to finalize this TX", "Slot", slotIndex, "txIndex", txIndex, - "result.UsedGas", txResult.result.UsedGas, "txReq.usedGas", *txReq.usedGas) - // ok, time to do finalize, stage2 should not be parallel txResult.receipt, txResult.err = applyTransactionStageFinalization(txResult.evm, txResult.result, txReq.msg, p.config, txResult.slotDB, header, txReq.tx, txReq.usedGas, txReq.bloomProcessor) - if txResult.result.Failed() { - // if Tx is reverted, all its state change will be discarded - log.Debug("TX reverted?", "Slot", slotIndex, "txIndex", txIndex, - "result.Err", txResult.result.Err) - txResult.slotDB.RevertSlotDB(txReq.msg.From()) - } - txResult.updateSlotDB = false return txResult } @@ -932,17 +855,18 @@ func (p *ParallelStateProcessor) runSlotLoop(slotIndex int) { // ** for a queued tx, // it is better to create a new SlotDB, since COW is used. 
for _, txReq := range curSlot.pendingTxReqList { - log.Debug("SlotLoop received a new TxReq", "Slot", slotIndex, "txIndex", txReq.txIndex) if txReq.slotDB == nil { result := &ParallelTxResult{ updateSlotDB: true, slotIndex: slotIndex, err: nil, + txReq: txReq, } p.txResultChan <- result txReq.slotDB = <-curSlot.slotdbChan } result := p.executeInSlot(slotIndex, txReq) + curSlot.unconfirmedStateDBs.Store(txReq.txIndex, txReq.slotDB) curSlot.pendingConfirmChan <- result } } @@ -951,14 +875,9 @@ func (p *ParallelStateProcessor) runSlotLoop(slotIndex int) { func (p *ParallelStateProcessor) runShadowSlotLoop(slotIndex int) { curSlot := p.slotState[slotIndex] for { - log.Debug("runShadowSlotLoop wait", "slotIndex", slotIndex) // ParallelTxResult from pendingConfirmChan is not confirmed yet unconfirmedResult := <-curSlot.pendingConfirmChan - - log.Debug("runShadowSlotLoop to confirm the TxResult from master slot", "Slot", slotIndex, "txIndex", unconfirmedResult.txReq.txIndex) confirmedResult := p.executeInShadowSlot(slotIndex, unconfirmedResult) - - log.Debug("runShadowSlotLoop the TxReq is done", "Slot", slotIndex, "err", confirmedResult.err) p.txResultChan <- confirmedResult } } @@ -975,18 +894,18 @@ func (p *ParallelStateProcessor) resetState(txNum int, statedb *state.StateDB) { statedb.PrepareForParallel() - stateDBsToRelease := p.slotDBsToRelease p.slotDBsToRelease = make([]*state.StateDB, 0, txNum) - - go func() { - for _, slotDB := range stateDBsToRelease { - slotDB.SlotDBPutSyncPool() - } - }() - + /* + stateDBsToRelease := p.slotDBsToRelease + go func() { + for _, slotDB := range stateDBsToRelease { + slotDB.SlotDBPutSyncPool() + } + }() + */ for _, slot := range p.slotState { - slot.mergedChangeList = make([]state.SlotChangeList, 0) slot.pendingTxReqList = make([]*ParallelTxRequest, 0) + slot.unconfirmedStateDBs = new(sync.Map) // make(map[int]*state.StateDB), fixme: resue not new? 
} } @@ -997,6 +916,7 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat header = block.Header() gp = new(GasPool).AddGas(block.GasLimit()) ) + log.Info("ProcessParallel", "block", header.Number) var receipts = make([]*types.Receipt, 0) txNum := len(block.Transactions()) p.resetState(txNum, statedb) @@ -1024,7 +944,7 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat // Parallel Execution 1.0&2.0 is for full sync mode, Nonce PreCheck is not necessary // And since we will do out-of-order execution, the Nonce PreCheck could fail. // We will disable it and leave it to Parallel 3.0 which is for validator mode - msg, err := tx.AsParallelMessage(signer) + msg, err := tx.AsMessageNoNonceCheck(signer) if err != nil { return statedb, nil, nil, 0, err } @@ -1037,7 +957,7 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat txIndex: i, tx: tx, slotDB: nil, - gasLimit: gp.Gas(), + gasLimit: block.GasLimit(), // gp.Gas(). 
msg: msg, block: block, vmConfig: cfg, @@ -1067,7 +987,7 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat // update tx result if result.err != nil { - log.Warn("ProcessParallel a failed tx", "resultSlotIndex", result.slotIndex, + log.Error("ProcessParallel a failed tx", "resultSlotIndex", result.slotIndex, "resultTxIndex", result.txReq.txIndex, "result.err", result.err) return statedb, nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", result.txReq.txIndex, result.txReq.tx.Hash().Hex(), result.err) } @@ -1145,6 +1065,9 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg ) var receipts = make([]*types.Receipt, 0) txNum := len(block.Transactions()) + if txNum > 0 { + log.Info("Process", "block", header.Number, "txNum", txNum) + } commonTxs := make([]*types.Transaction, 0, txNum) // Iterate over and process the individual transactions posa, isPoSA := p.engine.(consensus.PoSA) diff --git a/core/types/transaction.go b/core/types/transaction.go index 9a6bebc42e..821c43a157 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -536,8 +536,8 @@ func (tx *Transaction) AsMessage(s Signer) (Message, error) { } // Parallel 1.0&2.0 will skip nonce check, since it is not necessary for sync mode. -// Parallel 3.0 will reenable it, nonce check for parallel execution will be designed. -func (tx *Transaction) AsParallelMessage(s Signer) (Message, error) { +// Parallel 3.0 will reenable it, nonce check for parallel execution will be designed then. 
+func (tx *Transaction) AsMessageNoNonceCheck(s Signer) (Message, error) { msg, err := tx.AsMessage(s) msg.checkNonce = false return msg, err diff --git a/core/vm/evm.go b/core/vm/evm.go index 1970f97910..53e2e8797b 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -475,7 +475,6 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } nonce := evm.StateDB.GetNonce(caller.Address()) evm.StateDB.SetNonce(caller.Address(), nonce+1) - evm.StateDB.NonceChanged(caller.Address()) // fixme: nonce double -- // We add this to the access list _before_ taking a snapshot. Even if the creation fails, // the access-list change should not be rolled back if evm.chainRules.IsBerlin { diff --git a/core/vm/interface.go b/core/vm/interface.go index f424ed9cb9..ad9b05d666 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -31,7 +31,6 @@ type StateDB interface { AddBalance(common.Address, *big.Int) GetBalance(common.Address) *big.Int - NonceChanged(common.Address) GetNonce(common.Address) uint64 SetNonce(common.Address, uint64) @@ -75,8 +74,6 @@ type StateDB interface { AddPreimage(common.Hash, []byte) ForEachStorage(common.Address, func(common.Hash, common.Hash) bool) error - - IsSlotDB() bool } // CallContext provides a basic interface for the EVM calling conventions. 
The EVM From d3c8d8bc685e803a954f7daa3cd4cb50745c9cf6 Mon Sep 17 00:00:00 2001 From: setunapo Date: Wed, 20 Apr 2022 15:30:03 +0800 Subject: [PATCH 05/18] reenable sequential prefetch & disable sync.pool --- core/blockchain.go | 19 +++++------ core/state/statedb.go | 73 ++++++++++++++++++++++--------------------- 2 files changed, 45 insertions(+), 47 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 417f3a0037..9ed4317b13 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -2113,17 +2113,14 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er var followupInterrupt uint32 // For diff sync, it may fallback to full sync, so we still do prefetch // parallel mode has a pipeline, similar to this prefetch, to save CPU we disable this prefetch for parallel - /* - // disable prefetch for parallel bugfix - if !bc.parallelExecution { - if len(block.Transactions()) >= prefetchTxNumber { - throwaway := statedb.Copy() - go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) { - bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt) - }(time.Now(), block, throwaway, &followupInterrupt) - } - } - */ + if !bc.parallelExecution { + if len(block.Transactions()) >= prefetchTxNumber { + throwaway := statedb.Copy() + go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) { + bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt) + }(time.Now(), block, throwaway, &followupInterrupt) + } + } //Process block using the parent state as reference point substart := time.Now() if bc.pipeCommit { diff --git a/core/state/statedb.go b/core/state/statedb.go index f46e6b3556..9263e46fd8 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -2042,6 +2042,7 @@ func (s *StateDB) Copy() *StateDB { return state } +/* var addressStructPool = sync.Pool{ New: func() interface{} { return 
make(map[common.Address]struct{}, defaultNumOfSlots) }, } @@ -2084,30 +2085,30 @@ var logsPool = sync.Pool{ } func (s *StateDB) SlotDBPutSyncPool() { - for key := range s.parallel.codeReadsInSlot { - delete(s.parallel.codeReadsInSlot, key) - } - addressStructPool.Put(s.parallel.codeReadsInSlot) + // for key := range s.parallel.codeReadsInSlot { + // delete(s.parallel.codeReadsInSlot, key) + //} + //addressStructPool.Put(s.parallel.codeReadsInSlot) - // for key := range s.parallel.codeChangesInSlot { - // delete(s.parallel.codeChangesInSlot, key) - // } - // addressStructPool.Put(s.parallel.codeChangesInSlot) + for key := range s.parallel.codeChangesInSlot { + delete(s.parallel.codeChangesInSlot, key) + } + addressStructPool.Put(s.parallel.codeChangesInSlot) for key := range s.parallel.balanceChangesInSlot { delete(s.parallel.balanceChangesInSlot, key) } - balancePool.Put(s.parallel.balanceChangesInSlot) + addressStructPool.Put(s.parallel.balanceChangesInSlot) for key := range s.parallel.balanceReadsInSlot { delete(s.parallel.balanceReadsInSlot, key) } - addressStructPool.Put(s.parallel.balanceReadsInSlot) + balancePool.Put(s.parallel.balanceReadsInSlot) - for key := range s.parallel.addrStateReadsInSlot { - delete(s.parallel.addrStateReadsInSlot, key) - } - addressStructPool.Put(s.parallel.addrStateReadsInSlot) + // for key := range s.parallel.addrStateReadsInSlot { + // delete(s.parallel.addrStateReadsInSlot, key) + // } + // addressStructPool.Put(s.parallel.addrStateReadsInSlot) for key := range s.parallel.nonceChangesInSlot { delete(s.parallel.nonceChangesInSlot, key) @@ -2130,15 +2131,15 @@ func (s *StateDB) SlotDBPutSyncPool() { s.journal.entries = s.journal.entries[:0] journalPool.Put(s.journal) - // for key := range s.parallel.kvChangesInSlot { - // delete(s.parallel.kvChangesInSlot, key) - //} - //stateKeysPool.Put(s.parallel.kvChangesInSlot) - - for key := range s.parallel.kvReadsInSlot { - delete(s.parallel.kvReadsInSlot, key) + for key := range 
s.parallel.kvChangesInSlot { + delete(s.parallel.kvChangesInSlot, key) } - stateKeysPool.Put(s.parallel.kvReadsInSlot) + stateKeysPool.Put(s.parallel.kvChangesInSlot) + + // for key := range s.parallel.kvReadsInSlot { + // delete(s.parallel.kvReadsInSlot, key) + // } + // stateKeysPool.Put(s.parallel.kvReadsInSlot) for key := range s.parallel.dirtiedStateObjectsInSlot { delete(s.parallel.dirtiedStateObjectsInSlot, key) @@ -2169,7 +2170,7 @@ func (s *StateDB) SlotDBPutSyncPool() { } logsPool.Put(s.logs) } - +*/ // CopyForSlot copy all the basic fields, initialize the memory ones func (s *StateDB) CopyForSlot() *StateDB { parallel := ParallelState{ @@ -2180,10 +2181,10 @@ func (s *StateDB) CopyForSlot() *StateDB { codeReadsInSlot: make(map[common.Address][]byte, 10), // addressStructPool.Get().(map[common.Address]struct{}), codeHashReadsInSlot: make(map[common.Address]common.Hash), - codeChangesInSlot: make(map[common.Address]struct{}), + codeChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), kvChangesInSlot: make(map[common.Address]StateKeys), // stateKeysPool.Get().(map[common.Address]StateKeys), kvReadsInSlot: make(map[common.Address]Storage, 100), // stateKeysPool.Get().(map[common.Address]Storage), - balanceChangesInSlot: make(map[common.Address]struct{}, 10), // balancePool.Get().(map[common.Address]struct{}, 10), + balanceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), balanceReadsInSlot: make(map[common.Address]*big.Int), // addressStructPool.Get().(map[common.Address]struct{}), addrStateReadsInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), addrStateChangesInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), @@ -2191,19 +2192,19 @@ func (s *StateDB) CopyForSlot() *StateDB { nonceReadsInSlot: make(map[common.Address]uint64), isSlotDB: true, - 
dirtiedStateObjectsInSlot: stateObjectsPool.Get().(map[common.Address]*StateObject), + dirtiedStateObjectsInSlot: make(map[common.Address]*StateObject), // stateObjectsPool.Get().(map[common.Address]*StateObject), } state := &StateDB{ db: s.db, trie: s.db.CopyTrie(s.trie), - stateObjects: make(map[common.Address]*StateObject), // replaced by parallel.stateObjects in parallel mode - stateObjectsPending: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), - stateObjectsDirty: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), - refund: s.refund, // should be 0 - logs: logsPool.Get().(map[common.Hash][]*types.Log), + stateObjects: make(map[common.Address]*StateObject), // replaced by parallel.stateObjects in parallel mode + stateObjectsPending: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + stateObjectsDirty: make(map[common.Address]struct{}), //addressStructPool.Get().(map[common.Address]struct{}), + refund: s.refund, // should be 0 + logs: make(map[common.Hash][]*types.Log, defaultNumOfSlots), // logsPool.Get().(map[common.Hash][]*types.Log), logSize: 0, preimages: make(map[common.Hash][]byte, len(s.preimages)), - journal: journalPool.Get().(*journal), + journal: newJournal(), // journalPool.Get().(*journal), hasher: crypto.NewKeccakState(), isParallel: true, parallel: parallel, @@ -2221,20 +2222,20 @@ func (s *StateDB) CopyForSlot() *StateDB { state.snaps = s.snaps state.snap = s.snap // deep copy needed - state.snapDestructs = make(map[common.Address]struct{}) // addressStructPool.Get().(map[common.Address]struct{}) + state.snapDestructs = make(map[common.Address]struct{}) //addressStructPool.Get().(map[common.Address]struct{}) s.snapParallelLock.RLock() for k, v := range s.snapDestructs { state.snapDestructs[k] = v } s.snapParallelLock.RUnlock() // - state.snapAccounts = snapAccountPool.Get().(map[common.Address][]byte) + 
state.snapAccounts = make(map[common.Address][]byte) // snapAccountPool.Get().(map[common.Address][]byte) for k, v := range s.snapAccounts { state.snapAccounts[k] = v } - state.snapStorage = snapStoragePool.Get().(map[common.Address]map[string][]byte) + state.snapStorage = make(map[common.Address]map[string][]byte) // snapStoragePool.Get().(map[common.Address]map[string][]byte) for k, v := range s.snapStorage { - temp := snapStorageValuePool.Get().(map[string][]byte) + temp := make(map[string][]byte) // snapStorageValuePool.Get().(map[string][]byte) for kk, vv := range v { temp[kk] = vv } From 1bf23041b5c184b7e42e5a4beb172d79d00052c4 Mon Sep 17 00:00:00 2001 From: setunapo Date: Thu, 21 Apr 2022 20:44:33 +0800 Subject: [PATCH 06/18] Fix: suicide, concurrent store, CreateAccount, revert... balance systemaddr, nonce, code, suicide bad previous balance, nonce, code... addrSnapDestructsReadsInSlot skip system address balance check --- core/state/journal.go | 5 + core/state/state_object.go | 15 +- core/state/statedb.go | 300 ++++++++++++++++++++++++++----------- 3 files changed, 229 insertions(+), 91 deletions(-) diff --git a/core/state/journal.go b/core/state/journal.go index 5afe8886bb..96655d007d 100644 --- a/core/state/journal.go +++ b/core/state/journal.go @@ -144,6 +144,11 @@ type ( func (ch createObjectChange) revert(s *StateDB) { if s.parallel.isSlotDB { delete(s.parallel.dirtiedStateObjectsInSlot, *ch.account) + delete(s.parallel.addrStateChangesInSlot, *ch.account) + delete(s.parallel.nonceChangesInSlot, *ch.account) + delete(s.parallel.balanceChangesInSlot, *ch.account) + delete(s.parallel.codeChangesInSlot, *ch.account) + delete(s.parallel.kvChangesInSlot, *ch.account) } else { s.deleteStateObj(*ch.account) } diff --git a/core/state/state_object.go b/core/state/state_object.go index 7adb5bdbe6..6a1154b06d 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -183,6 +183,10 @@ type StateObject struct { // empty returns whether the 
account is considered empty. func (s *StateObject) empty() bool { return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, emptyCodeHash) + + // in parallel mode, we should never get raw nonce, balance, codeHash any more, + // since it could be invalid, if the element was read from unconfirmed DB or base DB + // return s.db.GetNonce(s.address) == 0 && s.db.GetBalance(s.address).Sign() == 0 && bytes.Equal(s.db.GetCodeHash(s.address).Bytes(), emptyCodeHash) } // Account is the Ethereum consensus representation of accounts. @@ -591,6 +595,7 @@ func (s *StateObject) AddBalance(amount *big.Int) { return } s.SetBalance(new(big.Int).Add(s.Balance(), amount)) + // s.SetBalance(new(big.Int).Add(s.db.GetBalance(s.address), amount)) } // SubBalance removes amount from s's balance. @@ -600,12 +605,15 @@ func (s *StateObject) SubBalance(amount *big.Int) { return } s.SetBalance(new(big.Int).Sub(s.Balance(), amount)) + // s.SetBalance(new(big.Int).Sub(s.db.GetBalance(s.address), amount)) } func (s *StateObject) SetBalance(amount *big.Int) { + // prevBalance := new(big.Int).Set(s.db.GetBalance(s.address)) s.db.journal.append(balanceChange{ account: &s.address, - prev: new(big.Int).Set(s.data.Balance), + prev: new(big.Int).Set(s.data.Balance), // prevBalance, + // prev: prevBalance, }) s.setBalance(amount) } @@ -699,7 +707,7 @@ func (s *StateObject) CodeSize(db Database) int { } func (s *StateObject) SetCode(codeHash common.Hash, code []byte) { - prevcode := s.Code(s.db.db) + prevcode := s.db.GetCode(s.address) s.db.journal.append(codeChange{ account: &s.address, prevhash: s.CodeHash(), @@ -715,9 +723,10 @@ func (s *StateObject) setCode(codeHash common.Hash, code []byte) { } func (s *StateObject) SetNonce(nonce uint64) { + prevNonce := s.db.GetNonce(s.address) s.db.journal.append(nonceChange{ account: &s.address, - prev: s.data.Nonce, + prev: prevNonce, }) s.setNonce(nonce) } diff --git a/core/state/statedb.go b/core/state/statedb.go index 
9263e46fd8..dd1752bcbc 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -101,8 +101,16 @@ func (s *StateDB) storeStateObj(addr common.Address, stateObject *StateObject) { // the object could be create in SlotDB, if it got the object from DB and // update it to the shared `s.parallel.stateObjects`` stateObject.db = s.parallel.baseStateDB + stateObject.db.storeParallelLock.Lock() + if _, ok := s.parallel.stateObjects.Load(addr); !ok { + s.parallel.stateObjects.Store(addr, stateObject) + } + stateObject.db.storeParallelLock.Unlock() + } else { + stateObject.db.storeParallelLock.Lock() + s.parallel.stateObjects.Store(addr, stateObject) + stateObject.db.storeParallelLock.Unlock() } - s.parallel.stateObjects.Store(addr, stateObject) } else { s.stateObjects[addr] = stateObject } @@ -150,8 +158,10 @@ type ParallelState struct { kvChangesInSlot map[common.Address]StateKeys // value will be kept in dirtiedStateObjectsInSlot // Actions such as SetCode, Suicide will change address's state. // Later call like Exist(), Empty(), HasSuicided() depend on the address's state. - addrStateReadsInSlot map[common.Address]bool // true: exist, false: not exist or deleted - addrStateChangesInSlot map[common.Address]bool // true: created, false: deleted + addrStateReadsInSlot map[common.Address]bool // true: exist, false: not exist or deleted + addrStateChangesInSlot map[common.Address]bool // true: created, false: deleted + addrSnapDestructsReadsInSlot map[common.Address]bool + // addrSnapDestructsChangesInSlot map[common.Address]struct{} // no use to get from unconfirmed DB for efficiency // Transaction will pay gas fee to system address. 
// Parallel execution will clear system address's balance at first, in order to maintain transaction's @@ -187,13 +197,14 @@ type StateDB struct { fullProcessed bool pipeCommit bool - snapMux sync.Mutex - snaps *snapshot.Tree - snap snapshot.Snapshot - snapParallelLock sync.RWMutex // for parallel mode, for main StateDB, slot will read snapshot, while processor will write. - snapDestructs map[common.Address]struct{} - snapAccounts map[common.Address][]byte - snapStorage map[common.Address]map[string][]byte + snapMux sync.Mutex + snaps *snapshot.Tree + snap snapshot.Snapshot + storeParallelLock sync.RWMutex + snapParallelLock sync.RWMutex // for parallel mode, for main StateDB, slot will read snapshot, while processor will write. + snapDestructs map[common.Address]struct{} + snapAccounts map[common.Address][]byte + snapStorage map[common.Address]map[string][]byte // This map holds 'live' objects, which will get modified while processing a state transition. stateObjects map[common.Address]*StateObject @@ -832,7 +843,7 @@ func (s *StateDB) getCodeHashFromUnconfirmedDB(addr common.Address) (common.Hash // It is for address state check of: Exist(), Empty() and HasSuicided() // Since the unconfirmed DB should have done Finalise() with `deleteEmptyObjects = true` // If the dirty address is empty or suicided, it will be marked as deleted, so we only need to return `deleted` or not. 
-func (s *StateDB) getAddrStateFromUnconfirmedDB(addr common.Address) (deleted bool, exist bool) { +func (s *StateDB) getAddrStateFromUnconfirmedDB(addr common.Address) (bool, bool) { if addr == s.parallel.systemAddress { // never get systemaddress from unconfirmed DB return false, false @@ -980,8 +991,13 @@ func (s *StateDB) GetBalance(addr common.Address) *big.Int { } // 1.Try to get from dirty if _, ok := s.parallel.balanceChangesInSlot[addr]; ok { - obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot - return obj.Balance() + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on balance fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup balance based on unconfirmed DB or main DB + balance := obj.Balance() + log.Debug("GetBalance in dirty", "txIndex", s.txIndex, "addr", addr, "balance", balance) + return balance + } } // 2.Try to get from uncomfirmed DB or main DB // 2.1 Already read before @@ -1010,8 +1026,13 @@ func (s *StateDB) GetNonce(addr common.Address) uint64 { if s.parallel.isSlotDB { // 1.Try to get from dirty if _, ok := s.parallel.nonceChangesInSlot[addr]; ok { - obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist - return obj.Nonce() + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on nonce fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup nonce based on unconfirmed DB or main DB + nonce := obj.Nonce() + log.Debug("GetNonce in dirty", "txIndex", s.txIndex, "addr", addr, "nonce", nonce) + return nonce + } } // 2.Try to get from uncomfirmed DB or main DB // 2.1 Already read before @@ -1055,41 +1076,43 @@ func (s *StateDB) BaseTxIndex() int { func (s *StateDB) IsParallelReadsValid() bool { slotDB := s if !slotDB.parallel.isSlotDB { - log.Error("IsSlotDBReadsValid slotDB should be slot DB", "txIndex", slotDB.txIndex) + log.Error("IsSlotDBReadsValid slotDB should be slot DB", "SlotIndex", 
slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) return false } mainDB := slotDB.parallel.baseStateDB if mainDB.parallel.isSlotDB { - log.Error("IsSlotDBReadsValid s should be main DB", "txIndex", slotDB.txIndex) + log.Error("IsSlotDBReadsValid s should be main DB", "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) return false } // for nonce for addr, nonceSlot := range slotDB.parallel.nonceReadsInSlot { nonceMain := mainDB.GetNonce(addr) if nonceSlot != nonceMain { - log.Debug("IsSlotDBReadsValid nonce read is invalid", "addr", addr, - "nonceSlot", nonceSlot, "nonceMain", nonceMain, + log.Info("IsSlotDBReadsValid nonce read is invalid", "addr", addr, + "nonceSlot", nonceSlot, "nonceMain", nonceMain, "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) return false } } // balance for addr, balanceSlot := range slotDB.parallel.balanceReadsInSlot { - balanceMain := mainDB.GetBalance(addr) - if balanceSlot.Cmp(balanceMain) != 0 { - log.Debug("IsSlotDBReadsValid balance read is invalid", "addr", addr, - "balanceSlot", balanceSlot, "balanceMain", balanceMain, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) - return false + if addr != s.parallel.systemAddress { // skip balance check for system address + balanceMain := mainDB.GetBalance(addr) + if balanceSlot.Cmp(balanceMain) != 0 { + log.Info("IsSlotDBReadsValid balance read is invalid", "addr", addr, + "balanceSlot", balanceSlot, "balanceMain", balanceMain, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } } } // check code for addr, codeSlot := range slotDB.parallel.codeReadsInSlot { codeMain := mainDB.GetCode(addr) if !bytes.Equal(codeSlot, codeMain) { - log.Debug("IsSlotDBReadsValid code read is invalid", "addr", addr, - "len codeSlot", len(codeSlot), "len codeMain", len(codeMain), + log.Info("IsSlotDBReadsValid code read is 
invalid", "addr", addr, + "len codeSlot", len(codeSlot), "len codeMain", len(codeMain), "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) return false } @@ -1098,8 +1121,8 @@ func (s *StateDB) IsParallelReadsValid() bool { for addr, codeHashSlot := range slotDB.parallel.codeHashReadsInSlot { codeHashMain := mainDB.GetCodeHash(addr) if !bytes.Equal(codeHashSlot.Bytes(), codeHashMain.Bytes()) { - log.Debug("IsSlotDBReadsValid codehash read is invalid", "addr", addr, - "codeHashSlot", codeHashSlot, "codeHashMain", codeHashMain, + log.Info("IsSlotDBReadsValid codehash read is invalid", "addr", addr, + "codeHashSlot", codeHashSlot, "codeHashMain", codeHashMain, "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) return false } @@ -1110,8 +1133,9 @@ func (s *StateDB) IsParallelReadsValid() bool { slotStorage.Range(func(keySlot, valSlot interface{}) bool { valMain := mainDB.GetState(addr, keySlot.(common.Hash)) if !bytes.Equal(valSlot.(common.Hash).Bytes(), valMain.Bytes()) { - log.Debug("IsSlotDBReadsValid KV read is invalid", "addr", addr, - "key", keySlot.(common.Hash), "valSlot", valSlot.(common.Hash), "valMain", valMain, + log.Info("IsSlotDBReadsValid KV read is invalid", "addr", addr, + "key", keySlot.(common.Hash), "valSlot", valSlot.(common.Hash), + "valMain", valMain, "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) conflict = true return false // return false, Range will be terminated. 
@@ -1129,22 +1153,47 @@ func (s *StateDB) IsParallelReadsValid() bool { stateMain = true // addr exist in main DB } if stateSlot != stateMain { - log.Debug("IsSlotDBReadsValid addrState read invalid(true: exist, false: not exist)", - "addr", addr, "stateSlot", stateSlot, "stateMain", stateMain, + // skip addr state check for system address + if addr != s.parallel.systemAddress { + log.Info("IsSlotDBReadsValid addrState read invalid(true: exist, false: not exist)", + "addr", addr, "stateSlot", stateSlot, "stateMain", stateMain, + "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + } + // snapshot destructs check + + for addr, destructRead := range slotDB.parallel.addrSnapDestructsReadsInSlot { + mainObj := mainDB.getStateObjectNoSlot(addr) + if mainObj == nil { + log.Info("IsSlotDBReadsValid snapshot destructs read invalid, address should exist", + "addr", addr, "destruct", destructRead, + "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + _, destructMain := mainDB.snapDestructs[addr] // addr not exist + if destructRead != destructMain { + log.Info("IsSlotDBReadsValid snapshot destructs read invalid", + "addr", addr, "destructRead", destructRead, "destructMain", destructMain, + "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) return false } } + return true } -// For most of the transactions, systemAddressOpsCount should be 2: +// For most of the transactions, systemAddressOpsCount should be 3: // one for SetBalance(0) on NewSlotDB() // the other is for AddBalance(GasFee) at the end. 
-// (systemAddressOpsCount > 2) means the transaction tries to access systemAddress, in +// (systemAddressOpsCount > 3) means the transaction tries to access systemAddress, in // this case, we should redo and keep its balance on NewSlotDB() func (s *StateDB) SystemAddressRedo() bool { - return s.parallel.systemAddressOpsCount > 2 + return s.parallel.systemAddressOpsCount > 3 } // NeedsRedo returns true if there is any clear reason that we need to redo this transaction @@ -1156,9 +1205,12 @@ func (s *StateDB) GetCode(addr common.Address) []byte { if s.parallel.isSlotDB { // 1.Try to get from dirty if _, ok := s.parallel.codeChangesInSlot[addr]; ok { - obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot - code := obj.Code(s.db) - return code + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on code fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup code based on unconfirmed DB or main DB + code := obj.Code(s.db) + return code + } } // 2.Try to get from uncomfirmed DB or main DB // 2.1 Already read before @@ -1188,8 +1240,11 @@ func (s *StateDB) GetCodeSize(addr common.Address) int { if s.parallel.isSlotDB { // 1.Try to get from dirty if _, ok := s.parallel.codeChangesInSlot[addr]; ok { - obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot - return obj.CodeSize(s.db) + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on code fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup code based on unconfirmed DB or main DB + return obj.CodeSize(s.db) + } } // 2.Try to get from uncomfirmed DB or main DB // 2.1 Already read before @@ -1226,8 +1281,11 @@ func (s *StateDB) GetCodeHash(addr common.Address) common.Hash { if s.parallel.isSlotDB { // 1.Try to get from dirty if _, ok := s.parallel.codeChangesInSlot[addr]; ok { - obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in 
dirtiedStateObjectsInSlot - return common.BytesToHash(obj.CodeHash()) + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on code fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup balance based on unconfirmed DB or main DB + return common.BytesToHash(obj.CodeHash()) + } } // 2.Try to get from uncomfirmed DB or main DB // 2.1 Already read before @@ -1407,11 +1465,8 @@ func (s *StateDB) HasSuicided(addr common.Address) bool { return obj.suicided } // 2.Try to get from uncomfirmed - if deleted, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { - if deleted { - return false - } - return false + if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { + return !exist } } stateObject := s.getStateObjectNoSlot(addr) @@ -1469,22 +1524,32 @@ func (s *StateDB) AddBalance(addr common.Address, amount *big.Int) { if addr == s.parallel.systemAddress { s.parallel.systemAddressOpsCount++ } + // if amount.Sign() != 0 { // todo: to reenable it if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { newStateObject := stateObject.lightCopy(s) // light copy from main DB // do balance fixup from the confirmed DB, it could be more reliable than main DB - if balance := s.getBalanceFromUnconfirmedDB(addr); balance != nil { - newStateObject.setBalance(balance) - } - s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance() // could read from main DB or unconfirmed DB - + balance := s.GetBalance(addr) + newStateObject.setBalance(balance) + // s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance() // could read from main DB or unconfirmed DB newStateObject.AddBalance(amount) s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - // if amount.Sign() != 0 { // todo: to reenable it s.parallel.balanceChangesInSlot[addr] = struct{}{} return } + // already dirty, make sure the balance if fixed up + // if stateObject.Balance() + if addr != s.parallel.systemAddress { + if stateObject.Balance().Cmp(s.GetBalance(addr)) != 0 { + 
log.Warn("AddBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr, + "stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", s.GetBalance(addr)) + stateObject.setBalance(s.GetBalance(addr)) + } + } } stateObject.AddBalance(amount) + if s.parallel.isSlotDB { + s.parallel.balanceChangesInSlot[addr] = struct{}{} + } } } @@ -1502,22 +1567,34 @@ func (s *StateDB) SubBalance(addr common.Address, amount *big.Int) { if addr == s.parallel.systemAddress { s.parallel.systemAddressOpsCount++ } + + // if amount.Sign() != 0 { // todo: to reenable it if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { newStateObject := stateObject.lightCopy(s) // light copy from main DB // do balance fixup from the confirmed DB, it could be more reliable than main DB - if balance := s.getBalanceFromUnconfirmedDB(addr); balance != nil { - newStateObject.setBalance(balance) - } - s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance() + balance := s.GetBalance(addr) + newStateObject.setBalance(balance) + // s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance() newStateObject.SubBalance(amount) - s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - - // if amount.Sign() != 0 { // todo: to reenable it s.parallel.balanceChangesInSlot[addr] = struct{}{} + s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject return } + // already dirty, make sure the balance if fixed + // if stateObject.Balance() + if addr != s.parallel.systemAddress { + if stateObject.Balance().Cmp(s.GetBalance(addr)) != 0 { + log.Warn("SubBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr, + "stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", s.GetBalance(addr)) + stateObject.setBalance(s.GetBalance(addr)) + } + } } stateObject.SubBalance(amount) + if s.parallel.isSlotDB { + s.parallel.balanceChangesInSlot[addr] = struct{}{} + } + } } @@ -1528,15 +1605,23 @@ func (s *StateDB) SetBalance(addr common.Address, 
amount *big.Int) { if addr == s.parallel.systemAddress { s.parallel.systemAddressOpsCount++ } - s.parallel.balanceChangesInSlot[addr] = struct{}{} if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { newStateObject := stateObject.lightCopy(s) + // update balance for revert, in case child contract is revertted, + // it should revert to the previous balance + balance := s.GetBalance(addr) + newStateObject.setBalance(balance) newStateObject.SetBalance(amount) + s.parallel.balanceChangesInSlot[addr] = struct{}{} s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject return } + } stateObject.SetBalance(amount) + if s.parallel.isSlotDB { + s.parallel.balanceChangesInSlot[addr] = struct{}{} + } } } @@ -1544,15 +1629,22 @@ func (s *StateDB) SetNonce(addr common.Address, nonce uint64) { stateObject := s.GetOrNewStateObject(addr) if stateObject != nil { if s.parallel.isSlotDB { - s.parallel.nonceChangesInSlot[addr] = struct{}{} if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { newStateObject := stateObject.lightCopy(s) + noncePre := s.GetNonce(addr) + newStateObject.setNonce(noncePre) // nonce fixup newStateObject.SetNonce(nonce) + s.parallel.nonceChangesInSlot[addr] = struct{}{} s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject return } + noncePre := s.GetNonce(addr) + stateObject.setNonce(noncePre) // nonce fixup } stateObject.SetNonce(nonce) + if s.parallel.isSlotDB { + s.parallel.nonceChangesInSlot[addr] = struct{}{} + } } } @@ -1561,15 +1653,25 @@ func (s *StateDB) SetCode(addr common.Address, code []byte) { if stateObject != nil { codeHash := crypto.Keccak256Hash(code) if s.parallel.isSlotDB { - s.parallel.codeChangesInSlot[addr] = struct{}{} if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { newStateObject := stateObject.lightCopy(s) + codePre := s.GetCode(addr) // code fixup + codeHashPre := crypto.Keccak256Hash(codePre) + newStateObject.setCode(codeHashPre, codePre) + newStateObject.SetCode(codeHash, code) 
s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + s.parallel.codeChangesInSlot[addr] = struct{}{} return } + codePre := s.GetCode(addr) // code fixup + codeHashPre := crypto.Keccak256Hash(codePre) + stateObject.setCode(codeHashPre, codePre) } stateObject.SetCode(codeHash, code) + if s.parallel.isSlotDB { + s.parallel.codeChangesInSlot[addr] = struct{}{} + } } } @@ -1601,6 +1703,7 @@ func (s *StateDB) SetState(addr common.Address, key, value common.Hash) { s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject return } + // do State Update } stateObject.SetState(s.db, key, value) } @@ -1625,27 +1728,36 @@ func (s *StateDB) Suicide(addr common.Address) bool { if s.parallel.isSlotDB { // 1.Try to get from dirty, it could be suicided inside of contract call stateObject = s.parallel.dirtiedStateObjectsInSlot[addr] - // 2.Try to get from uncomfirmed, if deleted return false, since the address does not exist if stateObject == nil { - if deleted, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { - if deleted { + if _, ok := s.parallel.addrStateReadsInSlot[addr]; !ok { + log.Info("Suicide addr not in dirty", "txIndex", s.txIndex, "addr", addr) + } + // 2.Try to get from uncomfirmed, if deleted return false, since the address does not exist + if obj, ok := s.getStateObjectFromUnconfirmedDB(addr); ok { + stateObject = obj + s.parallel.addrStateReadsInSlot[addr] = !stateObject.deleted // true: exist, false: deleted + if stateObject.deleted { + log.Error("Suicide addr alreay deleted in confirmed DB", "txIndex", s.txIndex, "addr", addr) return false } } } } - // 3.Try to get from main StateDB if stateObject == nil { + // 3.Try to get from main StateDB stateObject = s.getStateObjectNoSlot(addr) - } - if stateObject == nil { - return false + if stateObject == nil { + s.parallel.addrStateReadsInSlot[addr] = false // true: exist, false: deleted + log.Error("Suicide addr not exist", "txIndex", s.txIndex, "addr", addr) + return false + } + 
s.parallel.addrStateReadsInSlot[addr] = true // true: exist, false: deleted } s.journal.append(suicideChange{ account: &addr, prev: stateObject.suicided, // todo: must be false? - prevbalance: new(big.Int).Set(stateObject.Balance()), + prevbalance: new(big.Int).Set(s.GetBalance(addr)), }) if s.parallel.isSlotDB { @@ -1656,12 +1768,15 @@ func (s *StateDB) Suicide(addr common.Address) bool { newStateObject.data.Balance = new(big.Int) s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist any more, - s.parallel.nonceChangesInSlot[addr] = struct{}{} + // s.parallel.nonceChangesInSlot[addr] = struct{}{} s.parallel.balanceChangesInSlot[addr] = struct{}{} s.parallel.codeChangesInSlot[addr] = struct{}{} // s.parallel.kvChangesInSlot[addr] = make(StateKeys) // all key changes are discarded return true } + s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist any more, + s.parallel.balanceChangesInSlot[addr] = struct{}{} + s.parallel.codeChangesInSlot[addr] = struct{}{} } stateObject.markSuicided() @@ -1797,6 +1912,10 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject { } } // Insert into the live set + // if obj, ok := s.loadStateObj(addr); ok { + // fixme: concurrent not safe, merge could update it... 
+ // return obj + //} obj := newObject(s, s.isParallel, addr, *data) s.SetStateObject(obj) return obj @@ -1821,7 +1940,7 @@ func (s *StateDB) GetOrNewStateObject(addr common.Address) *StateObject { if stateObject == nil { stateObject = s.getStateObjectNoSlot(addr) } - if stateObject == nil || stateObject.deleted { + if stateObject == nil || stateObject.deleted || stateObject.suicided { stateObject = s.createObject(addr) exist = false } @@ -1859,11 +1978,15 @@ func (s *StateDB) createObject(addr common.Address) (newobj *StateObject) { var prevdestruct bool if s.snap != nil && prev != nil { - _, prevdestruct = s.snapDestructs[prev.address] + _, prevdestruct = s.snapDestructs[prev.address] // fixme, record the snapshot read for create Account + if s.parallel.isSlotDB { + s.parallel.addrSnapDestructsReadsInSlot[addr] = prevdestruct + } if !prevdestruct { // To destroy the previous trie node first and update the trie tree // with the new object on block commit. s.snapDestructs[prev.address] = struct{}{} + } } newobj = newObject(s, s.isParallel, addr, Account{}) @@ -1875,8 +1998,8 @@ func (s *StateDB) createObject(addr common.Address) (newobj *StateObject) { } if s.parallel.isSlotDB { - s.parallel.dirtiedStateObjectsInSlot[addr] = newobj - s.parallel.addrStateChangesInSlot[addr] = true // the object is created + // s.parallel.dirtiedStateObjectsInSlot[addr] = newobj // would change the bahavior of AddBalance... 
+ s.parallel.addrStateChangesInSlot[addr] = true // the object sis created s.parallel.nonceChangesInSlot[addr] = struct{}{} s.parallel.balanceChangesInSlot[addr] = struct{}{} s.parallel.codeChangesInSlot[addr] = struct{}{} @@ -2179,17 +2302,18 @@ func (s *StateDB) CopyForSlot() *StateDB { stateObjects: s.parallel.stateObjects, unconfirmedDBInShot: make(map[int]*StateDB, 100), - codeReadsInSlot: make(map[common.Address][]byte, 10), // addressStructPool.Get().(map[common.Address]struct{}), - codeHashReadsInSlot: make(map[common.Address]common.Hash), - codeChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), - kvChangesInSlot: make(map[common.Address]StateKeys), // stateKeysPool.Get().(map[common.Address]StateKeys), - kvReadsInSlot: make(map[common.Address]Storage, 100), // stateKeysPool.Get().(map[common.Address]Storage), - balanceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), - balanceReadsInSlot: make(map[common.Address]*big.Int), // addressStructPool.Get().(map[common.Address]struct{}), - addrStateReadsInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), - addrStateChangesInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), - nonceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), - nonceReadsInSlot: make(map[common.Address]uint64), + codeReadsInSlot: make(map[common.Address][]byte, 10), // addressStructPool.Get().(map[common.Address]struct{}), + codeHashReadsInSlot: make(map[common.Address]common.Hash), + codeChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + kvChangesInSlot: make(map[common.Address]StateKeys), // stateKeysPool.Get().(map[common.Address]StateKeys), + kvReadsInSlot: make(map[common.Address]Storage, 100), // 
stateKeysPool.Get().(map[common.Address]Storage), + balanceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + balanceReadsInSlot: make(map[common.Address]*big.Int), // addressStructPool.Get().(map[common.Address]struct{}), + addrStateReadsInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), + addrStateChangesInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), + nonceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + nonceReadsInSlot: make(map[common.Address]uint64), + addrSnapDestructsReadsInSlot: make(map[common.Address]bool), isSlotDB: true, dirtiedStateObjectsInSlot: make(map[common.Address]*StateObject), // stateObjectsPool.Get().(map[common.Address]*StateObject), From be5e6ebc313fd329c48f9c4b3c3a3d96cf0e45d1 Mon Sep 17 00:00:00 2001 From: setunapo Date: Tue, 26 Apr 2022 13:44:37 +0800 Subject: [PATCH 07/18] 0426: code improve --- core/state/statedb.go | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index dd1752bcbc..86c951ed7b 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -994,9 +994,7 @@ func (s *StateDB) GetBalance(addr common.Address) *big.Int { if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { // on balance fixup, addr may not exist in dirtiedStateObjectsInSlot // we intend to fixup balance based on unconfirmed DB or main DB - balance := obj.Balance() - log.Debug("GetBalance in dirty", "txIndex", s.txIndex, "addr", addr, "balance", balance) - return balance + return obj.Balance() } } // 2.Try to get from uncomfirmed DB or main DB @@ -1029,9 +1027,7 @@ func (s *StateDB) GetNonce(addr common.Address) uint64 { if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { // on nonce fixup, addr may not exist in dirtiedStateObjectsInSlot // we 
intend to fixup nonce based on unconfirmed DB or main DB - nonce := obj.Nonce() - log.Debug("GetNonce in dirty", "txIndex", s.txIndex, "addr", addr, "nonce", nonce) - return nonce + return obj.Nonce() } } // 2.Try to get from uncomfirmed DB or main DB @@ -1729,9 +1725,6 @@ func (s *StateDB) Suicide(addr common.Address) bool { // 1.Try to get from dirty, it could be suicided inside of contract call stateObject = s.parallel.dirtiedStateObjectsInSlot[addr] if stateObject == nil { - if _, ok := s.parallel.addrStateReadsInSlot[addr]; !ok { - log.Info("Suicide addr not in dirty", "txIndex", s.txIndex, "addr", addr) - } // 2.Try to get from uncomfirmed, if deleted return false, since the address does not exist if obj, ok := s.getStateObjectFromUnconfirmedDB(addr); ok { stateObject = obj From bbf13718b9693a705414f4b660ae706457f0eaa1 Mon Sep 17 00:00:00 2001 From: setunapo Date: Tue, 26 Apr 2022 18:34:56 +0800 Subject: [PATCH 08/18] Empty() and log level --- core/state/state_object.go | 53 ++++++++++++++++++++++++++++++++------ core/state/statedb.go | 22 ++++++++-------- 2 files changed, 56 insertions(+), 19 deletions(-) diff --git a/core/state/state_object.go b/core/state/state_object.go index 6a1154b06d..b442954efe 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -182,11 +182,51 @@ type StateObject struct { // empty returns whether the account is considered empty. 
func (s *StateObject) empty() bool { - return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, emptyCodeHash) - - // in parallel mode, we should never get raw nonce, balance, codeHash any more, - // since it could be invalid, if the element was read from unconfirmed DB or base DB + // return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, emptyCodeHash) + + // 0426, leave some notation, empty() works so far + // empty() has 3 use cases: + // 1.StateDB.Empty(), to empty check + // A: It is ok, we have handled it in Empty(), to make sure nonce, balance, codeHash are solid + // 2:AddBalance 0, empty check for touch event + // empty() will add a touch event. + // if we misjudge it, the touch event could be lost, which make address not deleted. // fixme + // 3.Finalise(), to do empty delete + // the address should be dirtied or touched + // if it nonce dirtied, it is ok, since nonce is monotonically increasing, won't be zero + // if balance is dirtied, balance could be zero, we should refer solid nonce & codeHash // fixme + // if codeHash is dirtied, it is ok, since code will not be updated. + // if suicide, it is ok + // if object is new created, it is ok + // if CreateAccout, recreate the address, it is ok. + + // Slot 0 tx 0: AddBalance(100) to addr_1, => addr_1: balance = 100, nonce = 0, code is empty + // Slot 1 tx 1: addr_1 Transfer 99.9979 with GasFee 0.0021, => addr_1: balance = 0, nonce = 1, code is empty + // notice: balance transfer cost 21,000 gas, with gasPrice = 100Gwei, GasFee will be 0.0021 + // Slot 0 tx 2: add balance 0 to addr_1(empty check for touch event), + // the object was lightCopied from tx 0, + + // in parallel mode, we should not check empty by raw nonce, balance, codeHash any more, + // since it could be invalid. + // e.g., AddBalance() to an address, we will do lightCopy to get a new StateObject, we did balance fixup to + // make sure object's Balance is reliable. 
But we did not fixup nonce or code, we only do nonce or codehash + // fixup on need, that's when we wanna to update the nonce or codehash. + // So nonce, blance + // Before the block is processed, addr_1 account: nonce = 0, emptyCodeHash, balance = 100 + // Slot 0 tx 0: no access to addr_1 + // Slot 1 tx 1: sub balance 100, it is empty and deleted + // Slot 0 tx 2: GetNonce, lightCopy based on main DB(balance = 100) , not empty // return s.db.GetNonce(s.address) == 0 && s.db.GetBalance(s.address).Sign() == 0 && bytes.Equal(s.db.GetCodeHash(s.address).Bytes(), emptyCodeHash) + + if s.db.GetBalance(s.address).Sign() != 0 { // check balance first, since it is most likely not zero + return false + } + if s.db.GetNonce(s.address) != 0 { + return false + } + codeHash := s.db.GetCodeHash(s.address) + return bytes.Equal(codeHash.Bytes(), emptyCodeHash) // code is empty, the object is empty + } // Account is the Ethereum consensus representation of accounts. @@ -288,7 +328,6 @@ func (s *StateObject) GetState(db Database, key common.Hash) common.Hash { if dirty { return value } - // Otherwise return the entry's original value return s.GetCommittedState(db, key) } @@ -658,9 +697,7 @@ func (s *StateObject) MergeSlotObject(db Database, dirtyObjs *StateObject, keys // In parallel mode, always GetState by StateDB, not by StateObject directly, // since it the KV could exist in unconfirmed DB. // But here, it should be ok, since the KV should be changed and valid in the SlotDB, - // we still do GetState by StateDB, it is not an issue. 
- val := dirtyObjs.db.GetState(s.address, key) - s.SetState(db, key, val) + s.SetState(db, key, dirtyObjs.GetState(db, key)) } } diff --git a/core/state/statedb.go b/core/state/statedb.go index 86c951ed7b..4e852cdbd8 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -952,10 +952,10 @@ func (s *StateDB) Empty(addr common.Address) bool { } // so we have to check it manually // empty means: Nonce == 0 && Balance == 0 && CodeHash == emptyCodeHash - if s.GetNonce(addr) != 0 { + if s.GetBalance(addr).Sign() != 0 { // check balance first, since it is most likely not zero return false } - if s.GetBalance(addr).Sign() != 0 { + if s.GetNonce(addr) != 0 { return false } codeHash := s.GetCodeHash(addr) @@ -1085,7 +1085,7 @@ func (s *StateDB) IsParallelReadsValid() bool { for addr, nonceSlot := range slotDB.parallel.nonceReadsInSlot { nonceMain := mainDB.GetNonce(addr) if nonceSlot != nonceMain { - log.Info("IsSlotDBReadsValid nonce read is invalid", "addr", addr, + log.Debug("IsSlotDBReadsValid nonce read is invalid", "addr", addr, "nonceSlot", nonceSlot, "nonceMain", nonceMain, "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) return false @@ -1096,7 +1096,7 @@ func (s *StateDB) IsParallelReadsValid() bool { if addr != s.parallel.systemAddress { // skip balance check for system address balanceMain := mainDB.GetBalance(addr) if balanceSlot.Cmp(balanceMain) != 0 { - log.Info("IsSlotDBReadsValid balance read is invalid", "addr", addr, + log.Debug("IsSlotDBReadsValid balance read is invalid", "addr", addr, "balanceSlot", balanceSlot, "balanceMain", balanceMain, "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) return false @@ -1107,7 +1107,7 @@ func (s *StateDB) IsParallelReadsValid() bool { for addr, codeSlot := range slotDB.parallel.codeReadsInSlot { codeMain := mainDB.GetCode(addr) if !bytes.Equal(codeSlot, codeMain) { - 
log.Info("IsSlotDBReadsValid code read is invalid", "addr", addr, + log.Debug("IsSlotDBReadsValid code read is invalid", "addr", addr, "len codeSlot", len(codeSlot), "len codeMain", len(codeMain), "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) return false @@ -1117,7 +1117,7 @@ func (s *StateDB) IsParallelReadsValid() bool { for addr, codeHashSlot := range slotDB.parallel.codeHashReadsInSlot { codeHashMain := mainDB.GetCodeHash(addr) if !bytes.Equal(codeHashSlot.Bytes(), codeHashMain.Bytes()) { - log.Info("IsSlotDBReadsValid codehash read is invalid", "addr", addr, + log.Debug("IsSlotDBReadsValid codehash read is invalid", "addr", addr, "codeHashSlot", codeHashSlot, "codeHashMain", codeHashMain, "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) return false @@ -1129,7 +1129,7 @@ func (s *StateDB) IsParallelReadsValid() bool { slotStorage.Range(func(keySlot, valSlot interface{}) bool { valMain := mainDB.GetState(addr, keySlot.(common.Hash)) if !bytes.Equal(valSlot.(common.Hash).Bytes(), valMain.Bytes()) { - log.Info("IsSlotDBReadsValid KV read is invalid", "addr", addr, + log.Debug("IsSlotDBReadsValid KV read is invalid", "addr", addr, "key", keySlot.(common.Hash), "valSlot", valSlot.(common.Hash), "valMain", valMain, "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) @@ -1151,7 +1151,7 @@ func (s *StateDB) IsParallelReadsValid() bool { if stateSlot != stateMain { // skip addr state check for system address if addr != s.parallel.systemAddress { - log.Info("IsSlotDBReadsValid addrState read invalid(true: exist, false: not exist)", + log.Debug("IsSlotDBReadsValid addrState read invalid(true: exist, false: not exist)", "addr", addr, "stateSlot", stateSlot, "stateMain", stateMain, "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", 
slotDB.parallel.baseTxIndex) @@ -1164,7 +1164,7 @@ func (s *StateDB) IsParallelReadsValid() bool { for addr, destructRead := range slotDB.parallel.addrSnapDestructsReadsInSlot { mainObj := mainDB.getStateObjectNoSlot(addr) if mainObj == nil { - log.Info("IsSlotDBReadsValid snapshot destructs read invalid, address should exist", + log.Debug("IsSlotDBReadsValid snapshot destructs read invalid, address should exist", "addr", addr, "destruct", destructRead, "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) @@ -1172,7 +1172,7 @@ func (s *StateDB) IsParallelReadsValid() bool { } _, destructMain := mainDB.snapDestructs[addr] // addr not exist if destructRead != destructMain { - log.Info("IsSlotDBReadsValid snapshot destructs read invalid", + log.Debug("IsSlotDBReadsValid snapshot destructs read invalid", "addr", addr, "destructRead", destructRead, "destructMain", destructMain, "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) @@ -1189,7 +1189,7 @@ func (s *StateDB) IsParallelReadsValid() bool { // (systemAddressOpsCount > 3) means the transaction tries to access systemAddress, in // this case, we should redo and keep its balance on NewSlotDB() func (s *StateDB) SystemAddressRedo() bool { - return s.parallel.systemAddressOpsCount > 3 + return s.parallel.systemAddressOpsCount > 4 } // NeedsRedo returns true if there is any clear reason that we need to redo this transaction From 30fd585ef27bd7a652883fe7d3f9e48d1e5fc65a Mon Sep 17 00:00:00 2001 From: setunapo Date: Thu, 21 Apr 2022 11:54:34 +0800 Subject: [PATCH 09/18] implement ParallelStateDB ** add ParallelStateDB ** remove isSlotDB in StateDB ** state.StateDBer ** remove getStateObjectNoSlot ... 
--- core/state/interface.go | 82 + core/state/journal.go | 52 +- core/state/state_object.go | 22 +- core/state/statedb.go | 3976 +++++++++++++++++++----------------- core/state/statedb.go.bak | 3383 ++++++++++++++++++++++++++++++ core/state_processor.go | 22 +- 6 files changed, 5615 insertions(+), 1922 deletions(-) create mode 100644 core/state/interface.go create mode 100644 core/state/statedb.go.bak diff --git a/core/state/interface.go b/core/state/interface.go new file mode 100644 index 0000000000..2362ac828b --- /dev/null +++ b/core/state/interface.go @@ -0,0 +1,82 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package state + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +// StateDBer is copied from vm/interface.go +// It is used by StateObject & Journal right now, to abstract StateDB & ParallelStateDB +type StateDBer interface { + getBaseStateDB() *StateDB + getStateObject(common.Address) *StateObject // only accessible for journal + storeStateObj(common.Address, *StateObject) // only accessible for journal + + CreateAccount(common.Address) + + SubBalance(common.Address, *big.Int) + AddBalance(common.Address, *big.Int) + GetBalance(common.Address) *big.Int + + GetNonce(common.Address) uint64 + SetNonce(common.Address, uint64) + + GetCodeHash(common.Address) common.Hash + GetCode(common.Address) []byte + SetCode(common.Address, []byte) + GetCodeSize(common.Address) int + + AddRefund(uint64) + SubRefund(uint64) + GetRefund() uint64 + + GetCommittedState(common.Address, common.Hash) common.Hash + GetState(common.Address, common.Hash) common.Hash + SetState(common.Address, common.Hash, common.Hash) + + Suicide(common.Address) bool + HasSuicided(common.Address) bool + + // Exist reports whether the given account exists in state. + // Notably this should also return true for suicided accounts. + Exist(common.Address) bool + // Empty returns whether the given account is empty. Empty + // is defined according to EIP161 (balance = nonce = code = 0). + Empty(common.Address) bool + + PrepareAccessList(sender common.Address, dest *common.Address, precompiles []common.Address, txAccesses types.AccessList) + AddressInAccessList(addr common.Address) bool + SlotInAccessList(addr common.Address, slot common.Hash) (addressOk bool, slotOk bool) + // AddAddressToAccessList adds the given address to the access list. This operation is safe to perform + // even if the feature/fork is not active yet + AddAddressToAccessList(addr common.Address) + // AddSlotToAccessList adds the given (address,slot) to the access list. 
This operation is safe to perform + // even if the feature/fork is not active yet + AddSlotToAccessList(addr common.Address, slot common.Hash) + + RevertToSnapshot(int) + Snapshot() int + + AddLog(*types.Log) + AddPreimage(common.Hash, []byte) + + ForEachStorage(common.Address, func(common.Hash, common.Hash) bool) error +} diff --git a/core/state/journal.go b/core/state/journal.go index 96655d007d..e267205688 100644 --- a/core/state/journal.go +++ b/core/state/journal.go @@ -26,7 +26,7 @@ import ( // reverted on demand. type journalEntry interface { // revert undoes the changes introduced by this journal entry. - revert(*StateDB) + revert(StateDBer) // dirtied returns the Ethereum address modified by this journal entry. dirtied() *common.Address @@ -58,10 +58,10 @@ func (j *journal) append(entry journalEntry) { // revert undoes a batch of journalled modifications along with any reverted // dirty handling too. -func (j *journal) revert(statedb *StateDB, snapshot int) { +func (j *journal) revert(dber StateDBer, snapshot int) { for i := len(j.entries) - 1; i >= snapshot; i-- { // Undo the changes made by the operation - j.entries[i].revert(statedb) + j.entries[i].revert(dber) // Drop any dirty tracking induced by the change if addr := j.entries[i].dirtied(); addr != nil { @@ -141,7 +141,8 @@ type ( } ) -func (ch createObjectChange) revert(s *StateDB) { +func (ch createObjectChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() if s.parallel.isSlotDB { delete(s.parallel.dirtiedStateObjectsInSlot, *ch.account) delete(s.parallel.addrStateChangesInSlot, *ch.account) @@ -159,13 +160,14 @@ func (ch createObjectChange) dirtied() *common.Address { return ch.account } -func (ch resetObjectChange) revert(s *StateDB) { +func (ch resetObjectChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() if s.parallel.isSlotDB { // ch.prev must be from dirtiedStateObjectsInSlot, put it back s.parallel.dirtiedStateObjectsInSlot[ch.prev.address] = ch.prev } else { // ch.prev 
was got from main DB, put it back to main DB. - s.SetStateObject(ch.prev) + s.storeStateObj(ch.prev.address, ch.prev) } if !ch.prevdestruct && s.snap != nil { delete(s.snapDestructs, ch.prev.address) @@ -176,8 +178,8 @@ func (ch resetObjectChange) dirtied() *common.Address { return nil } -func (ch suicideChange) revert(s *StateDB) { - obj := s.getStateObject(*ch.account) +func (ch suicideChange) revert(dber StateDBer) { + obj := dber.getStateObject(*ch.account) if obj != nil { obj.suicided = ch.prev obj.setBalance(ch.prevbalance) @@ -190,46 +192,47 @@ func (ch suicideChange) dirtied() *common.Address { var ripemd = common.HexToAddress("0000000000000000000000000000000000000003") -func (ch touchChange) revert(s *StateDB) { +func (ch touchChange) revert(dber StateDBer) { } func (ch touchChange) dirtied() *common.Address { return ch.account } -func (ch balanceChange) revert(s *StateDB) { - s.getStateObject(*ch.account).setBalance(ch.prev) +func (ch balanceChange) revert(dber StateDBer) { + dber.getStateObject(*ch.account).setBalance(ch.prev) } func (ch balanceChange) dirtied() *common.Address { return ch.account } -func (ch nonceChange) revert(s *StateDB) { - s.getStateObject(*ch.account).setNonce(ch.prev) +func (ch nonceChange) revert(dber StateDBer) { + dber.getStateObject(*ch.account).setNonce(ch.prev) } func (ch nonceChange) dirtied() *common.Address { return ch.account } -func (ch codeChange) revert(s *StateDB) { - s.getStateObject(*ch.account).setCode(common.BytesToHash(ch.prevhash), ch.prevcode) +func (ch codeChange) revert(dber StateDBer) { + dber.getStateObject(*ch.account).setCode(common.BytesToHash(ch.prevhash), ch.prevcode) } func (ch codeChange) dirtied() *common.Address { return ch.account } -func (ch storageChange) revert(s *StateDB) { - s.getStateObject(*ch.account).setState(ch.key, ch.prevalue) +func (ch storageChange) revert(dber StateDBer) { + dber.getStateObject(*ch.account).setState(ch.key, ch.prevalue) } func (ch storageChange) dirtied() 
*common.Address { return ch.account } -func (ch refundChange) revert(s *StateDB) { +func (ch refundChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() s.refund = ch.prev } @@ -237,7 +240,9 @@ func (ch refundChange) dirtied() *common.Address { return nil } -func (ch addLogChange) revert(s *StateDB) { +func (ch addLogChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() + logs := s.logs[ch.txhash] if len(logs) == 1 { delete(s.logs, ch.txhash) @@ -251,7 +256,8 @@ func (ch addLogChange) dirtied() *common.Address { return nil } -func (ch addPreimageChange) revert(s *StateDB) { +func (ch addPreimageChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() delete(s.preimages, ch.hash) } @@ -259,7 +265,8 @@ func (ch addPreimageChange) dirtied() *common.Address { return nil } -func (ch accessListAddAccountChange) revert(s *StateDB) { +func (ch accessListAddAccountChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() /* One important invariant here, is that whenever a (addr, slot) is added, if the addr is not already present, the add causes two journal entries: @@ -278,7 +285,8 @@ func (ch accessListAddAccountChange) dirtied() *common.Address { return nil } -func (ch accessListAddSlotChange) revert(s *StateDB) { +func (ch accessListAddSlotChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() if s.accessList != nil { s.accessList.DeleteSlot(*ch.address, *ch.slot) } diff --git a/core/state/state_object.go b/core/state/state_object.go index b442954efe..b516db042a 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -150,6 +150,7 @@ type StateObject struct { addrHash common.Hash // hash of ethereum address of the account data Account db *StateDB + dbItf StateDBer // DB error. 
// State objects are used by the consensus core and VM which are @@ -218,13 +219,13 @@ func (s *StateObject) empty() bool { // Slot 0 tx 2: GetNonce, lightCopy based on main DB(balance = 100) , not empty // return s.db.GetNonce(s.address) == 0 && s.db.GetBalance(s.address).Sign() == 0 && bytes.Equal(s.db.GetCodeHash(s.address).Bytes(), emptyCodeHash) - if s.db.GetBalance(s.address).Sign() != 0 { // check balance first, since it is most likely not zero + if s.dbItf.GetBalance(s.address).Sign() != 0 { // check balance first, since it is most likely not zero return false } - if s.db.GetNonce(s.address) != 0 { + if s.dbItf.GetNonce(s.address) != 0 { return false } - codeHash := s.db.GetCodeHash(s.address) + codeHash := s.dbItf.GetCodeHash(s.address) return bytes.Equal(codeHash.Bytes(), emptyCodeHash) // code is empty, the object is empty } @@ -239,7 +240,8 @@ type Account struct { } // newObject creates a state object. -func newObject(db *StateDB, isParallel bool, address common.Address, data Account) *StateObject { +func newObject(dbItf StateDBer, isParallel bool, address common.Address, data Account) *StateObject { + db := dbItf.getBaseStateDB() if data.Balance == nil { data.Balance = new(big.Int) // todo: why not common.Big0? 
} @@ -257,6 +259,7 @@ func newObject(db *StateDB, isParallel bool, address common.Address, data Accoun return &StateObject{ db: db, + dbItf: dbItf, address: address, addrHash: crypto.Keccak256Hash(address[:]), data: data, @@ -448,7 +451,7 @@ func (s *StateObject) SetState(db Database, key, value common.Hash) { // this `SetState could be skipped` // d.Finally, the key's value will be `val_2`, while it should be `val_1` // such as: https://bscscan.com/txs?block=2491181 - prev := s.db.GetState(s.address, key) + prev := s.dbItf.GetState(s.address, key) // fixme: if it is for journal, may not necessary, we can remove this change record if prev == value { return } @@ -634,7 +637,6 @@ func (s *StateObject) AddBalance(amount *big.Int) { return } s.SetBalance(new(big.Int).Add(s.Balance(), amount)) - // s.SetBalance(new(big.Int).Add(s.db.GetBalance(s.address), amount)) } // SubBalance removes amount from s's balance. @@ -644,11 +646,9 @@ func (s *StateObject) SubBalance(amount *big.Int) { return } s.SetBalance(new(big.Int).Sub(s.Balance(), amount)) - // s.SetBalance(new(big.Int).Sub(s.db.GetBalance(s.address), amount)) } func (s *StateObject) SetBalance(amount *big.Int) { - // prevBalance := new(big.Int).Set(s.db.GetBalance(s.address)) s.db.journal.append(balanceChange{ account: &s.address, prev: new(big.Int).Set(s.data.Balance), // prevBalance, @@ -664,7 +664,7 @@ func (s *StateObject) setBalance(amount *big.Int) { // Return the gas back to the origin. 
Used by the Virtual machine or Closures func (s *StateObject) ReturnGas(gas *big.Int) {} -func (s *StateObject) lightCopy(db *StateDB) *StateObject { +func (s *StateObject) lightCopy(db *ParallelStateDB) *StateObject { stateObject := newObject(db, s.isParallel, s.address, s.data) if s.trie != nil { // fixme: no need to copy trie for light copy, since light copied object won't access trie DB @@ -744,7 +744,7 @@ func (s *StateObject) CodeSize(db Database) int { } func (s *StateObject) SetCode(codeHash common.Hash, code []byte) { - prevcode := s.db.GetCode(s.address) + prevcode := s.dbItf.GetCode(s.address) s.db.journal.append(codeChange{ account: &s.address, prevhash: s.CodeHash(), @@ -760,7 +760,7 @@ func (s *StateObject) setCode(codeHash common.Hash, code []byte) { } func (s *StateObject) SetNonce(nonce uint64) { - prevNonce := s.db.GetNonce(s.address) + prevNonce := s.dbItf.GetNonce(s.address) s.db.journal.append(nonceChange{ account: &s.address, prev: prevNonce, diff --git a/core/state/statedb.go b/core/state/statedb.go index 4e852cdbd8..8289e8cad0 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -97,20 +97,9 @@ func (s *StateDB) storeStateObj(addr common.Address, stateObject *StateObject) { if s.isParallel { // When a state object is stored into s.parallel.stateObjects, // it belongs to base StateDB, it is confirmed and valid. 
- if s.parallel.isSlotDB { - // the object could be create in SlotDB, if it got the object from DB and - // update it to the shared `s.parallel.stateObjects`` - stateObject.db = s.parallel.baseStateDB - stateObject.db.storeParallelLock.Lock() - if _, ok := s.parallel.stateObjects.Load(addr); !ok { - s.parallel.stateObjects.Store(addr, stateObject) - } - stateObject.db.storeParallelLock.Unlock() - } else { - stateObject.db.storeParallelLock.Lock() - s.parallel.stateObjects.Store(addr, stateObject) - stateObject.db.storeParallelLock.Unlock() - } + stateObject.db.storeParallelLock.Lock() + s.parallel.stateObjects.Store(addr, stateObject) + stateObject.db.storeParallelLock.Unlock() } else { s.stateObjects[addr] = stateObject } @@ -127,7 +116,7 @@ func (s *StateDB) deleteStateObj(addr common.Address) { // For parallel mode only type ParallelState struct { - isSlotDB bool // isSlotDB denotes StateDB is used in slot + isSlotDB bool // denotes StateDB is used in slot, we will try to remove it SlotIndex int // fixme: to be removed // stateObjects holds the state objects in the base slot db // the reason for using stateObjects instead of stateObjects on the outside is @@ -139,7 +128,7 @@ type ParallelState struct { baseStateDB *StateDB // for parallel mode, there will be a base StateDB in dispatcher routine. baseTxIndex int // slotDB is created base on this tx index. dirtiedStateObjectsInSlot map[common.Address]*StateObject - unconfirmedDBInShot map[int]*StateDB // do unconfirmed reference in same slot. + unconfirmedDBInShot map[int]*ParallelStateDB // do unconfirmed reference in same slot. // we will record the read detail for conflict check and // the changed addr or key for object merge, the changed detail can be acheived from the dirty object @@ -262,40 +251,6 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) return newStateDB(root, db, snaps) } -// NewSlotDB creates a new State DB based on the provided StateDB. 
-// With parallel, each execution slot would have its own StateDB. -func NewSlotDB(db *StateDB, systemAddr common.Address, txIndex int, baseTxIndex int, keepSystem bool, - unconfirmedDBs *sync.Map /*map[int]*StateDB*/) *StateDB { - slotDB := db.CopyForSlot() - slotDB.txIndex = txIndex - slotDB.originalRoot = db.originalRoot - slotDB.parallel.baseStateDB = db - slotDB.parallel.baseTxIndex = baseTxIndex - slotDB.parallel.systemAddress = systemAddr - slotDB.parallel.systemAddressOpsCount = 0 - slotDB.parallel.keepSystemAddressBalance = keepSystem - slotDB.storagePool = NewStoragePool() - slotDB.EnableWriteOnSharedStorage() - for index := baseTxIndex + 1; index < slotDB.txIndex; index++ { // txIndex - unconfirmedDB, ok := unconfirmedDBs.Load(index) - if ok { - slotDB.parallel.unconfirmedDBInShot[index] = unconfirmedDB.(*StateDB) - } - } - - // All transactions will pay gas fee to the systemAddr at the end, this address is - // deemed to conflict, we handle it specially, clear it now and set it back to the main - // StateDB later; - // But there are transactions that will try to read systemAddr's balance, such as: - // https://bscscan.com/tx/0xcd69755be1d2f55af259441ff5ee2f312830b8539899e82488a21e85bc121a2a. - // It will trigger transaction redo and keepSystem will be marked as true. 
- if !keepSystem { - slotDB.SetBalance(systemAddr, big.NewInt(0)) - } - - return slotDB -} - // NewWithSharedPool creates a new state with sharedStorge on layer 1.5 func NewWithSharedPool(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) { statedb, err := newStateDB(root, db, snaps) @@ -342,200 +297,17 @@ func newStateDB(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, return sdb, nil } -func (s *StateDB) getStateObjectFromStateObjects(addr common.Address) (*StateObject, bool) { - return s.loadStateObj(addr) -} - -// RevertSlotDB keep the Read list for conflict detect, -// discard all state changes except: -// - nonce and balance of from address -// - balance of system address: will be used on merge to update SystemAddress's balance -func (s *StateDB) RevertSlotDB(from common.Address) { - s.parallel.kvChangesInSlot = make(map[common.Address]StateKeys) - - // balance := s.parallel.balanceChangesInSlot[from] - s.parallel.nonceChangesInSlot = make(map[common.Address]struct{}) - s.parallel.balanceChangesInSlot = make(map[common.Address]struct{}, 1) - s.parallel.addrStateChangesInSlot = make(map[common.Address]bool) // 0: created, 1: deleted - - selfStateObject := s.parallel.dirtiedStateObjectsInSlot[from] - systemAddress := s.parallel.systemAddress - systemStateObject := s.parallel.dirtiedStateObjectsInSlot[systemAddress] - s.parallel.dirtiedStateObjectsInSlot = make(map[common.Address]*StateObject, 2) - // keep these elements - s.parallel.dirtiedStateObjectsInSlot[from] = selfStateObject - s.parallel.dirtiedStateObjectsInSlot[systemAddress] = systemStateObject - s.parallel.balanceChangesInSlot[from] = struct{}{} - s.parallel.balanceChangesInSlot[systemAddress] = struct{}{} - s.parallel.nonceChangesInSlot[from] = struct{}{} -} - -// PrepareForParallel prepares for state db to be used in parallel execution mode. 
-func (s *StateDB) PrepareForParallel() { - s.isParallel = true - s.parallel.stateObjects = &StateObjectSyncMap{} +func (s *StateDB) getBaseStateDB() *StateDB { + return s } -// MergeSlotDB is for Parallel execution mode, when the transaction has been -// finalized(dirty -> pending) on execution slot, the execution results should be -// merged back to the main StateDB. -// And it will return and keep the slot's change list for later conflict detect. -func (s *StateDB) MergeSlotDB(slotDb *StateDB, slotReceipt *types.Receipt, txIndex int) { - // receipt.Logs use unified log index within a block - // align slotDB's log index to the block stateDB's logSize - for _, l := range slotReceipt.Logs { - l.Index += s.logSize - } - s.logSize += slotDb.logSize - - // before merge, pay the gas fee first: AddBalance to consensus.SystemAddress - systemAddress := slotDb.parallel.systemAddress - if slotDb.parallel.keepSystemAddressBalance { - s.SetBalance(systemAddress, slotDb.GetBalance(systemAddress)) - } else { - s.AddBalance(systemAddress, slotDb.GetBalance(systemAddress)) - } - - // only merge dirty objects - addressesToPrefetch := make([][]byte, 0, len(slotDb.stateObjectsDirty)) - for addr := range slotDb.stateObjectsDirty { - if _, exist := s.stateObjectsDirty[addr]; !exist { - s.stateObjectsDirty[addr] = struct{}{} - } - // system address is EOA account, it should have no storage change - if addr == systemAddress { - continue - } - - // stateObjects: KV, balance, nonce... 
- dirtyObj, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr] - if !ok { - log.Error("parallel merge, but dirty object not exist!", "SlotIndex", slotDb.parallel.SlotIndex, "txIndex:", slotDb.txIndex, "addr", addr) - continue - } - mainObj, exist := s.loadStateObj(addr) - if !exist { // fixme: it is also state change - // addr not exist on main DB, do ownership transfer - // dirtyObj.db = s - // dirtyObj.finalise(true) // true: prefetch on dispatcher - mainObj = dirtyObj.deepCopy(s) - mainObj.finalise(true) - s.storeStateObj(addr, mainObj) - // fixme: should not delete, would cause unconfirmed DB incorrect? - // delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership, fixme: shared read? - if dirtyObj.deleted { - // remove the addr from snapAccounts&snapStorage only when object is deleted. - // "deleted" is not equal to "snapDestructs", since createObject() will add an addr for - // snapDestructs to destroy previous object, while it will keep the addr in snapAccounts & snapAccounts - delete(s.snapAccounts, addr) - delete(s.snapStorage, addr) - } - } else { - // addr already in main DB, do merge: balance, KV, code, State(create, suicide) - // can not do copy or ownership transfer directly, since dirtyObj could have outdated - // data(may be updated within the conflict window) - - var newMainObj = mainObj // we don't need to copy the object since the storages are thread safe - if _, ok := slotDb.parallel.addrStateChangesInSlot[addr]; ok { - // there are 3 kinds of state change: - // 1.Suicide - // 2.Empty Delete - // 3.createObject - // a.AddBalance,SetState to an unexist or deleted(suicide, empty delete) address. - // b.CreateAccount: like DAO the fork, regenerate a account carry its balance without KV - // For these state change, do ownership transafer for efficiency: - // dirtyObj.db = s - // newMainObj = dirtyObj - newMainObj = dirtyObj.deepCopy(s) - // should not delete, would cause unconfirmed DB incorrect. 
- // delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership, fixme: shared read? - if dirtyObj.deleted { - // remove the addr from snapAccounts&snapStorage only when object is deleted. - // "deleted" is not equal to "snapDestructs", since createObject() will add an addr for - // snapDestructs to destroy previous object, while it will keep the addr in snapAccounts & snapAccounts - delete(s.snapAccounts, addr) - delete(s.snapStorage, addr) - } - } else { - // deepCopy a temporary *StateObject for safety, since slot could read the address, - // dispatch should avoid overwrite the StateObject directly otherwise, it could - // crash for: concurrent map iteration and map write - - if _, balanced := slotDb.parallel.balanceChangesInSlot[addr]; balanced { - newMainObj.SetBalance(dirtyObj.Balance()) - } - if _, coded := slotDb.parallel.codeChangesInSlot[addr]; coded { - newMainObj.code = dirtyObj.code - newMainObj.data.CodeHash = dirtyObj.data.CodeHash - newMainObj.dirtyCode = true - } - if keys, stated := slotDb.parallel.kvChangesInSlot[addr]; stated { - newMainObj.MergeSlotObject(s.db, dirtyObj, keys) - } - if _, nonced := slotDb.parallel.nonceChangesInSlot[addr]; nonced { - // dirtyObj.Nonce() should not be less than newMainObj - newMainObj.setNonce(dirtyObj.Nonce()) - } - } - newMainObj.finalise(true) // true: prefetch on dispatcher - // update the object - s.storeStateObj(addr, newMainObj) - } - addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure - } - - if s.prefetcher != nil && len(addressesToPrefetch) > 0 { - s.prefetcher.prefetch(s.originalRoot, addressesToPrefetch, emptyAddr) // prefetch for trie node of account - } - - for addr := range slotDb.stateObjectsPending { - if _, exist := s.stateObjectsPending[addr]; !exist { - s.stateObjectsPending[addr] = struct{}{} - } - } - - // slotDb.logs: logs will be kept in receipts, no need to do merge - - for hash, preimage := range slotDb.preimages { - 
s.preimages[hash] = preimage - } - if s.accessList != nil { - // fixme: accessList is not enabled yet, but it should use merge rather than overwrite Copy - s.accessList = slotDb.accessList.Copy() - } - - if slotDb.snaps != nil { - for k := range slotDb.snapDestructs { - // There could be a race condition for parallel transaction execution - // One transaction add balance 0 to an empty address, will delete it(delete empty is enabled). - // While another concurrent transaction could add a none-zero balance to it, make it not empty - // We fixed it by add a addr state read record for add balance 0 - s.snapParallelLock.Lock() - s.snapDestructs[k] = struct{}{} - s.snapParallelLock.Unlock() - } - - // slotDb.snapAccounts should be empty, comment out and to be deleted later - // for k, v := range slotDb.snapAccounts { - // s.snapAccounts[k] = v - // } - // slotDb.snapStorage should be empty, comment out and to be deleted later - // for k, v := range slotDb.snapStorage { - // temp := make(map[string][]byte) - // for kk, vv := range v { - // temp[kk] = vv - // } - // s.snapStorage[k] = temp - // } - } +func (s *StateDB) getStateObjectFromStateObjects(addr common.Address) (*StateObject, bool) { + return s.loadStateObj(addr) } func (s *StateDB) EnableWriteOnSharedStorage() { s.writeOnSharedStorage = true } -func (s *StateDB) SetSlotIndex(index int) { - s.parallel.SlotIndex = index -} // StartPrefetcher initializes a new trie prefetcher to pull in nodes from the // state trie concurrently while the state is mutated so that when we reach the @@ -674,1462 +446,771 @@ func (s *StateDB) AddRefund(gas uint64) { func (s *StateDB) SubRefund(gas uint64) { s.journal.append(refundChange{prev: s.refund}) if gas > s.refund { - if s.isParallel { - // we don't need to panic here if we read the wrong state, we just need to redo this transaction - log.Info(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, s.refund), "tx", s.thash.String()) - s.parallel.needsRedo = true - 
return - } panic(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, s.refund)) } s.refund -= gas } -// For Parallel Execution Mode, it can be seen as Penetrated Access: -// ------------------------------------------------------- -// | BaseTxIndex | Unconfirmed Txs... | Current TxIndex | -// ------------------------------------------------------- -// Access from the unconfirmed DB with range&priority: txIndex -1(previous tx) -> baseTxIndex + 1 -func (s *StateDB) getBalanceFromUnconfirmedDB(addr common.Address) *big.Int { - if addr == s.parallel.systemAddress { - // never get systemaddress from unconfirmed DB - return nil +// Exist reports whether the given account address exists in the state. +// Notably this also returns true for suicided accounts. +func (s *StateDB) Exist(addr common.Address) bool { + exist := s.getStateObject(addr) != nil + return exist +} + +// Empty returns whether the state object is either non-existent +// or empty according to the EIP161 specification (balance = nonce = code = 0) +func (s *StateDB) Empty(addr common.Address) bool { + so := s.getStateObject(addr) + empty := (so == nil || so.empty()) + return empty +} + +// GetBalance retrieves the balance from the given address or 0 if object not found +// GetFrom the dirty list => from unconfirmed DB => get from main stateDB +func (s *StateDB) GetBalance(addr common.Address) *big.Int { + balance := common.Big0 + stateObject := s.getStateObject(addr) + if stateObject != nil { + balance = stateObject.Balance() } + return balance +} - for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { - if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { - // 1.Refer the state of address, exist or not in dirtiedStateObjectsInSlot - if obj, exist := db.parallel.dirtiedStateObjectsInSlot[addr]; exist { - balanceHit := false - if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { - balanceHit = true - } - if _, exist := db.parallel.balanceChangesInSlot[addr]; exist { // only 
changed balance is reliable - balanceHit = true - } - if !balanceHit { - continue - } - balance := obj.Balance() - if obj.deleted { - balance = common.Big0 - } - return balance - } - } +func (s *StateDB) GetNonce(addr common.Address) uint64 { + var nonce uint64 = 0 + stateObject := s.getStateObject(addr) + if stateObject != nil { + nonce = stateObject.Nonce() } - return nil + + return nonce } -// Similar to getBalanceFromUnconfirmedDB -func (s *StateDB) getNonceFromUnconfirmedDB(addr common.Address) (uint64, bool) { - if addr == s.parallel.systemAddress { - // never get systemaddress from unconfirmed DB - return 0, false - } +// TxIndex returns the current transaction index set by Prepare. +func (s *StateDB) TxIndex() int { + return s.txIndex +} - for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { - if unconfirmedDb, ok := s.parallel.unconfirmedDBInShot[i]; ok { - nonceHit := false - if _, ok := unconfirmedDb.parallel.addrStateChangesInSlot[addr]; ok { - nonceHit = true - } else if _, ok := unconfirmedDb.parallel.nonceChangesInSlot[addr]; ok { - nonceHit = true - } - if !nonceHit { - // nonce refer not hit, try next unconfirmedDb - continue - } - // nonce hit, return the nonce - obj := unconfirmedDb.parallel.dirtiedStateObjectsInSlot[addr] - if obj == nil { - // could not exist, if it is changed but reverted - // fixme: revert should remove the change record - log.Debug("Get nonce from UnconfirmedDB, changed but object not exist, ", - "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) - continue - } - nonce := obj.Nonce() - // deleted object with nonce == 0 - if obj.deleted { - nonce = 0 - } - return nonce, true - } - } - return 0, false +// BlockHash returns the current block hash set by Prepare. +func (s *StateDB) BlockHash() common.Hash { + return s.bhash } -// Similar to getBalanceFromUnconfirmedDB -// It is not only for code, but also codeHash and codeSize, we return the *StateObject for convienence. 
-func (s *StateDB) getCodeFromUnconfirmedDB(addr common.Address) ([]byte, bool) { - if addr == s.parallel.systemAddress { - // never get systemaddress from unconfirmed DB - return nil, false - } +// BaseTxIndex returns the tx index that slot db based. +func (s *StateDB) BaseTxIndex() int { + return s.parallel.baseTxIndex +} - for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { - if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { - codeHit := false - if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { - codeHit = true - } - if _, exist := db.parallel.codeChangesInSlot[addr]; exist { - codeHit = true - } - if !codeHit { - // try next unconfirmedDb - continue - } - obj := db.parallel.dirtiedStateObjectsInSlot[addr] - if obj == nil { - // could not exist, if it is changed but reverted - // fixme: revert should remove the change record - log.Debug("Get code from UnconfirmedDB, changed but object not exist, ", - "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) - continue - } - code := obj.Code(s.db) - if obj.deleted { - code = nil - } - return code, true - } +func (s *StateDB) GetCode(addr common.Address) []byte { + stateObject := s.getStateObject(addr) + var code []byte + if stateObject != nil { + code = stateObject.Code(s.db) } - return nil, false + return code } -// Similar to getCodeFromUnconfirmedDB -// but differ when address is deleted or not exist -func (s *StateDB) getCodeHashFromUnconfirmedDB(addr common.Address) (common.Hash, bool) { - if addr == s.parallel.systemAddress { - // never get systemaddress from unconfirmed DB - return common.Hash{}, false +func (s *StateDB) GetCodeSize(addr common.Address) int { + var codeSize int = 0 + stateObject := s.getStateObject(addr) + if stateObject != nil { + codeSize = stateObject.CodeSize(s.db) } + return codeSize +} - for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { - if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { - hashHit := false - if _, exist := 
db.parallel.addrStateChangesInSlot[addr]; exist { - hashHit = true - } - if _, exist := db.parallel.codeChangesInSlot[addr]; exist { - hashHit = true - } - if !hashHit { - // try next unconfirmedDb - continue - } - - obj := db.parallel.dirtiedStateObjectsInSlot[addr] - if obj == nil { - // could not exist, if it is changed but reverted - // fixme: revert should remove the change record - log.Debug("Get codeHash from UnconfirmedDB, changed but object not exist, ", - "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) - continue - } - codeHash := common.Hash{} - if !obj.deleted { - codeHash = common.BytesToHash(obj.CodeHash()) - } - return codeHash, true - } +// return value of GetCodeHash: +// - common.Hash{}: the address does not exist +// - emptyCodeHash: the address exist, but code is empty +// - others: the address exist, and code is not empty +func (s *StateDB) GetCodeHash(addr common.Address) common.Hash { + stateObject := s.getStateObject(addr) + codeHash := common.Hash{} + if stateObject != nil { + codeHash = common.BytesToHash(stateObject.CodeHash()) } - return common.Hash{}, false + return codeHash } -// Similar to getCodeFromUnconfirmedDB -// It is for address state check of: Exist(), Empty() and HasSuicided() -// Since the unconfirmed DB should have done Finalise() with `deleteEmptyObjects = true` -// If the dirty address is empty or suicided, it will be marked as deleted, so we only need to return `deleted` or not. -func (s *StateDB) getAddrStateFromUnconfirmedDB(addr common.Address) (bool, bool) { - if addr == s.parallel.systemAddress { - // never get systemaddress from unconfirmed DB - return false, false +// GetState retrieves a value from the given account's storage trie. 
+func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { + stateObject := s.getStateObject(addr) + val := common.Hash{} + if stateObject != nil { + val = stateObject.GetState(s.db, hash) } + return val +} - // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) - for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { - if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { - if exist, ok := db.parallel.addrStateChangesInSlot[addr]; ok { - if _, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - // could not exist, if it is changed but reverted - // fixme: revert should remove the change record - log.Debug("Get addr State from UnconfirmedDB, changed but object not exist, ", - "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) - continue - } +// GetProof returns the Merkle proof for a given account. +func (s *StateDB) GetProof(addr common.Address) ([][]byte, error) { + return s.GetProofByHash(crypto.Keccak256Hash(addr.Bytes())) +} - return exist, true - } - } +// GetProofByHash returns the Merkle proof for a given account. 
+func (s *StateDB) GetProofByHash(addrHash common.Hash) ([][]byte, error) { + var proof proofList + if _, err := s.Trie(); err != nil { + return nil, err } - return false, false + err := s.trie.Prove(addrHash[:], 0, &proof) + return proof, err } -func (s *StateDB) getKVFromUnconfirmedDB(addr common.Address, key common.Hash) (common.Hash, bool) { - // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) - for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { - if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { - if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { // if deleted on merge, can get from main StateDB, ok but fixme: concurrent safe - if obj.deleted { - return common.Hash{}, true - } - if _, ok := db.parallel.kvChangesInSlot[addr]; ok { - if val, exist := obj.dirtyStorage.GetValue(key); exist { - return val, true - } - if val, exist := obj.pendingStorage.GetValue(key); exist { // fixme: can be removed - log.Error("Get KV from Unconfirmed StateDB, in pending", - "my txIndex", s.txIndex, "DB's txIndex", i, "addr", addr, - "key", key, "val", val) - return val, true - } - } - } - } +// GetStorageProof returns the Merkle proof for given storage slot. 
+func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, error) { + var proof proofList + trie := s.StorageTrie(a) + if trie == nil { + return proof, errors.New("storage trie for requested address does not exist") } - return common.Hash{}, false + err := trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof) + return proof, err } -func (s *StateDB) getStateObjectFromUnconfirmedDB(addr common.Address) (*StateObject, bool) { - // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) - for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { - if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { - if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { // if deleted on merge, can get from main StateDB, ok but fixme: concurrent safe - return obj, true - } - } +// GetStorageProofByHash returns the Merkle proof for given storage slot. +func (s *StateDB) GetStorageProofByHash(a common.Address, key common.Hash) ([][]byte, error) { + var proof proofList + trie := s.StorageTrie(a) + if trie == nil { + return proof, errors.New("storage trie for requested address does not exist") } - return nil, false + err := trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof) + return proof, err } -// Exist reports whether the given account address exists in the state. -// Notably this also returns true for suicided accounts. 
-func (s *StateDB) Exist(addr common.Address) bool { - if s.parallel.isSlotDB { - // 1.Try to get from dirty - if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - // dirty object should not be deleted, since deleted is only flagged on finalise - // and if it is suicided in contract call, suicide is taken as exist until it is finalised - // todo: add a check here, to be removed later - if obj.deleted || obj.suicided { - log.Error("Exist in dirty, but marked as deleted or suicided", - "txIndex", s.txIndex, "baseTxIndex:", s.parallel.baseTxIndex) - } - return true - } - // 2.Try to get from uncomfirmed & main DB - // 2.1 Already read before - if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok { - return exist - } - // 2.2 Try to get from unconfirmed DB if exist - if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { - s.parallel.addrStateReadsInSlot[addr] = exist // update and cache - return exist - } - } - // 3.Try to get from main StateDB - exist := s.getStateObjectNoSlot(addr) != nil - if s.parallel.isSlotDB { - s.parallel.addrStateReadsInSlot[addr] = exist // update and cache +// GetCommittedState retrieves a value from the given account's committed storage trie. 
+func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash { + stateObject := s.getStateObject(addr) + val := common.Hash{} + if stateObject != nil { + val = stateObject.GetCommittedState(s.db, hash) } - return exist + return val } -// Empty returns whether the state object is either non-existent -// or empty according to the EIP161 specification (balance = nonce = code = 0) -func (s *StateDB) Empty(addr common.Address) bool { - if s.parallel.isSlotDB { - // 1.Try to get from dirty - if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - // dirty object is light copied and fixup on need, - // empty could be wrong, except it is created with this TX - if _, ok := s.parallel.addrStateChangesInSlot[addr]; ok { - return obj.empty() - } - // so we have to check it manually - // empty means: Nonce == 0 && Balance == 0 && CodeHash == emptyCodeHash - if s.GetBalance(addr).Sign() != 0 { // check balance first, since it is most likely not zero - return false - } - if s.GetNonce(addr) != 0 { - return false - } - codeHash := s.GetCodeHash(addr) - return bytes.Equal(codeHash.Bytes(), emptyCodeHash) // code is empty, the object is empty - } - // 2.Try to get from uncomfirmed & main DB - // 2.1 Already read before - if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok { - // exist means not empty - return !exist - } - // 2.2 Try to get from unconfirmed DB if exist - if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { - s.parallel.addrStateReadsInSlot[addr] = exist // update and cache - return !exist - } - } +// Database retrieves the low level database supporting the lower level trie ops. +func (s *StateDB) Database() Database { + return s.db +} - so := s.getStateObjectNoSlot(addr) - empty := (so == nil || so.empty()) - if s.parallel.isSlotDB { - s.parallel.addrStateReadsInSlot[addr] = !empty // update and cache +// StorageTrie returns the storage trie of an account. 
+// The return value is a copy and is nil for non-existent accounts. +func (s *StateDB) StorageTrie(addr common.Address) Trie { + stateObject := s.getStateObject(addr) + if stateObject == nil { + return nil } - return empty + cpy := stateObject.deepCopy(s) + cpy.updateTrie(s.db) + return cpy.getTrie(s.db) } -// GetBalance retrieves the balance from the given address or 0 if object not found -// GetFrom the dirty list => from unconfirmed DB => get from main stateDB -func (s *StateDB) GetBalance(addr common.Address) *big.Int { - if s.parallel.isSlotDB { - if addr == s.parallel.systemAddress { - s.parallel.systemAddressOpsCount++ - } - // 1.Try to get from dirty - if _, ok := s.parallel.balanceChangesInSlot[addr]; ok { - if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - // on balance fixup, addr may not exist in dirtiedStateObjectsInSlot - // we intend to fixup balance based on unconfirmed DB or main DB - return obj.Balance() - } - } - // 2.Try to get from uncomfirmed DB or main DB - // 2.1 Already read before - if balance, ok := s.parallel.balanceReadsInSlot[addr]; ok { - return balance - } - // 2.2 Try to get from unconfirmed DB if exist - if balance := s.getBalanceFromUnconfirmedDB(addr); balance != nil { - s.parallel.balanceReadsInSlot[addr] = balance - return balance - } - } - // 3. 
Try to get from main StateObejct - balance := common.Big0 - stateObject := s.getStateObjectNoSlot(addr) +func (s *StateDB) HasSuicided(addr common.Address) bool { + stateObject := s.getStateObject(addr) if stateObject != nil { - balance = stateObject.Balance() - } - if s.parallel.isSlotDB { - s.parallel.balanceReadsInSlot[addr] = balance + return stateObject.suicided } - return balance + return false } -func (s *StateDB) GetNonce(addr common.Address) uint64 { - if s.parallel.isSlotDB { - // 1.Try to get from dirty - if _, ok := s.parallel.nonceChangesInSlot[addr]; ok { - if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - // on nonce fixup, addr may not exist in dirtiedStateObjectsInSlot - // we intend to fixup nonce based on unconfirmed DB or main DB - return obj.Nonce() - } - } - // 2.Try to get from uncomfirmed DB or main DB - // 2.1 Already read before - if nonce, ok := s.parallel.nonceReadsInSlot[addr]; ok { - return nonce - } - // 2.2 Try to get from unconfirmed DB if exist - if nonce, ok := s.getNonceFromUnconfirmedDB(addr); ok { - s.parallel.nonceReadsInSlot[addr] = nonce - return nonce - } - } - // 3.Try to get from main StateDB - var nonce uint64 = 0 - stateObject := s.getStateObjectNoSlot(addr) +/* + * SETTERS + */ + +// AddBalance adds amount to the account associated with addr. +func (s *StateDB) AddBalance(addr common.Address, amount *big.Int) { + stateObject := s.GetOrNewStateObject(addr) if stateObject != nil { - nonce = stateObject.Nonce() - } - if s.parallel.isSlotDB { - s.parallel.nonceReadsInSlot[addr] = nonce + stateObject.AddBalance(amount) } - - return nonce } -// TxIndex returns the current transaction index set by Prepare. -func (s *StateDB) TxIndex() int { - return s.txIndex +// SubBalance subtracts amount from the account associated with addr. 
+func (s *StateDB) SubBalance(addr common.Address, amount *big.Int) { + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + stateObject.SubBalance(amount) + } } -// BlockHash returns the current block hash set by Prepare. -func (s *StateDB) BlockHash() common.Hash { - return s.bhash +func (s *StateDB) SetBalance(addr common.Address, amount *big.Int) { + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + stateObject.SetBalance(amount) + } } -// BaseTxIndex returns the tx index that slot db based. -func (s *StateDB) BaseTxIndex() int { - return s.parallel.baseTxIndex +func (s *StateDB) SetNonce(addr common.Address, nonce uint64) { + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + stateObject.SetNonce(nonce) + } } -func (s *StateDB) IsParallelReadsValid() bool { - slotDB := s - if !slotDB.parallel.isSlotDB { - log.Error("IsSlotDBReadsValid slotDB should be slot DB", "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) - return false +func (s *StateDB) SetCode(addr common.Address, code []byte) { + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + codeHash := crypto.Keccak256Hash(code) + stateObject.SetCode(codeHash, code) } +} - mainDB := slotDB.parallel.baseStateDB - if mainDB.parallel.isSlotDB { - log.Error("IsSlotDBReadsValid s should be main DB", "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) - return false - } - // for nonce - for addr, nonceSlot := range slotDB.parallel.nonceReadsInSlot { - nonceMain := mainDB.GetNonce(addr) - if nonceSlot != nonceMain { - log.Debug("IsSlotDBReadsValid nonce read is invalid", "addr", addr, - "nonceSlot", nonceSlot, "nonceMain", nonceMain, "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) - return false - } - } - // balance - for addr, balanceSlot := range slotDB.parallel.balanceReadsInSlot { - if addr != s.parallel.systemAddress { // skip balance check 
for system address - balanceMain := mainDB.GetBalance(addr) - if balanceSlot.Cmp(balanceMain) != 0 { - log.Debug("IsSlotDBReadsValid balance read is invalid", "addr", addr, - "balanceSlot", balanceSlot, "balanceMain", balanceMain, "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) - return false - } - } - } - // check code - for addr, codeSlot := range slotDB.parallel.codeReadsInSlot { - codeMain := mainDB.GetCode(addr) - if !bytes.Equal(codeSlot, codeMain) { - log.Debug("IsSlotDBReadsValid code read is invalid", "addr", addr, - "len codeSlot", len(codeSlot), "len codeMain", len(codeMain), "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) - return false - } - } - // check codeHash - for addr, codeHashSlot := range slotDB.parallel.codeHashReadsInSlot { - codeHashMain := mainDB.GetCodeHash(addr) - if !bytes.Equal(codeHashSlot.Bytes(), codeHashMain.Bytes()) { - log.Debug("IsSlotDBReadsValid codehash read is invalid", "addr", addr, - "codeHashSlot", codeHashSlot, "codeHashMain", codeHashMain, "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) - return false - } - } - // check KV - for addr, slotStorage := range slotDB.parallel.kvReadsInSlot { - conflict := false - slotStorage.Range(func(keySlot, valSlot interface{}) bool { - valMain := mainDB.GetState(addr, keySlot.(common.Hash)) - if !bytes.Equal(valSlot.(common.Hash).Bytes(), valMain.Bytes()) { - log.Debug("IsSlotDBReadsValid KV read is invalid", "addr", addr, - "key", keySlot.(common.Hash), "valSlot", valSlot.(common.Hash), - "valMain", valMain, "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) - conflict = true - return false // return false, Range will be terminated. 
- } - return true // return true, Range will try next KV - }) - if conflict { - return false - } +func (s *StateDB) SetState(addr common.Address, key, value common.Hash) { + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + stateObject.SetState(s.db, key, value) } - // addr state check - for addr, stateSlot := range slotDB.parallel.addrStateReadsInSlot { - stateMain := false // addr not exist - if mainDB.getStateObjectNoSlot(addr) != nil { - stateMain = true // addr exist in main DB - } - if stateSlot != stateMain { - // skip addr state check for system address - if addr != s.parallel.systemAddress { - log.Debug("IsSlotDBReadsValid addrState read invalid(true: exist, false: not exist)", - "addr", addr, "stateSlot", stateSlot, "stateMain", stateMain, - "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) - return false - } - } +} + +// SetStorage replaces the entire storage for the specified account with given +// storage. This function should only be used for debugging. +func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) { + stateObject := s.GetOrNewStateObject(addr) // fixme: parallel mode? 
+ if stateObject != nil { + stateObject.SetStorage(storage) } - // snapshot destructs check +} - for addr, destructRead := range slotDB.parallel.addrSnapDestructsReadsInSlot { - mainObj := mainDB.getStateObjectNoSlot(addr) - if mainObj == nil { - log.Debug("IsSlotDBReadsValid snapshot destructs read invalid, address should exist", - "addr", addr, "destruct", destructRead, - "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) - return false - } - _, destructMain := mainDB.snapDestructs[addr] // addr not exist - if destructRead != destructMain { - log.Debug("IsSlotDBReadsValid snapshot destructs read invalid", - "addr", addr, "destructRead", destructRead, "destructMain", destructMain, - "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) +// Suicide marks the given account as suicided. +// This clears the account balance. +// +// The account's state object is still available until the state is committed, +// getStateObject will return a non-nil account after Suicide. +func (s *StateDB) Suicide(addr common.Address) bool { + var stateObject *StateObject + if stateObject == nil { + // 3.Try to get from main StateDB + stateObject = s.getStateObject(addr) + if stateObject == nil { + log.Error("Suicide addr not exist", "txIndex", s.txIndex, "addr", addr) return false } } - return true -} + s.journal.append(suicideChange{ + account: &addr, + prev: stateObject.suicided, // todo: must be false? + prevbalance: new(big.Int).Set(s.GetBalance(addr)), + }) -// For most of the transactions, systemAddressOpsCount should be 3: -// one for SetBalance(0) on NewSlotDB() -// the other is for AddBalance(GasFee) at the end. 
-// (systemAddressOpsCount > 3) means the transaction tries to access systemAddress, in -// this case, we should redo and keep its balance on NewSlotDB() -func (s *StateDB) SystemAddressRedo() bool { - return s.parallel.systemAddressOpsCount > 4 + stateObject.markSuicided() + stateObject.data.Balance = new(big.Int) + return true } -// NeedsRedo returns true if there is any clear reason that we need to redo this transaction -func (s *StateDB) NeedsRedo() bool { - return s.parallel.needsRedo -} +// +// Setting, updating & deleting state object methods. +// -func (s *StateDB) GetCode(addr common.Address) []byte { - if s.parallel.isSlotDB { - // 1.Try to get from dirty - if _, ok := s.parallel.codeChangesInSlot[addr]; ok { - if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - // on code fixup, addr may not exist in dirtiedStateObjectsInSlot - // we intend to fixup code based on unconfirmed DB or main DB - code := obj.Code(s.db) - return code - } - } - // 2.Try to get from uncomfirmed DB or main DB - // 2.1 Already read before - if code, ok := s.parallel.codeReadsInSlot[addr]; ok { - return code - } - // 2.2 Try to get from unconfirmed DB if exist - if code, ok := s.getCodeFromUnconfirmedDB(addr); ok { - s.parallel.codeReadsInSlot[addr] = code - return code - } +// updateStateObject writes the given object to the trie. +func (s *StateDB) updateStateObject(obj *StateObject) { + // Track the amount of time wasted on updating the account from the trie + if metrics.EnabledExpensive { + defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now()) } - - // 3. 
Try to get from main StateObejct - stateObject := s.getStateObjectNoSlot(addr) - var code []byte - if stateObject != nil { - code = stateObject.Code(s.db) + // Encode the account and update the account trie + addr := obj.Address() + data := obj.encodeData + var err error + if data == nil { + data, err = rlp.EncodeToBytes(obj) + if err != nil { + panic(fmt.Errorf("can't encode object at %x: %v", addr[:], err)) + } } - if s.parallel.isSlotDB { - s.parallel.codeReadsInSlot[addr] = code + if err = s.trie.TryUpdate(addr[:], data); err != nil { + s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err)) } - return code } -func (s *StateDB) GetCodeSize(addr common.Address) int { - if s.parallel.isSlotDB { - // 1.Try to get from dirty - if _, ok := s.parallel.codeChangesInSlot[addr]; ok { - if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - // on code fixup, addr may not exist in dirtiedStateObjectsInSlot - // we intend to fixup code based on unconfirmed DB or main DB - return obj.CodeSize(s.db) - } - } - // 2.Try to get from uncomfirmed DB or main DB - // 2.1 Already read before - if code, ok := s.parallel.codeReadsInSlot[addr]; ok { - return len(code) // len(nil) is 0 too - } - // 2.2 Try to get from unconfirmed DB if exist - if code, ok := s.getCodeFromUnconfirmedDB(addr); ok { - s.parallel.codeReadsInSlot[addr] = code - return len(code) // len(nil) is 0 too - } - } - - // 3. Try to get from main StateObejct - var codeSize int = 0 - var code []byte - stateObject := s.getStateObjectNoSlot(addr) - - if stateObject != nil { - code = stateObject.Code(s.db) - codeSize = stateObject.CodeSize(s.db) +// deleteStateObject removes the given object from the state trie. 
+func (s *StateDB) deleteStateObject(obj *StateObject) { + // Track the amount of time wasted on deleting the account from the trie + if metrics.EnabledExpensive { + defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now()) } - if s.parallel.isSlotDB { - s.parallel.codeReadsInSlot[addr] = code + // Delete the account from the trie + addr := obj.Address() + if err := s.trie.TryDelete(addr[:]); err != nil { + s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err)) } - return codeSize } -// return value of GetCodeHash: -// - common.Hash{}: the address does not exist -// - emptyCodeHash: the address exist, but code is empty -// - others: the address exist, and code is not empty -func (s *StateDB) GetCodeHash(addr common.Address) common.Hash { - if s.parallel.isSlotDB { - // 1.Try to get from dirty - if _, ok := s.parallel.codeChangesInSlot[addr]; ok { - if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - // on code fixup, addr may not exist in dirtiedStateObjectsInSlot - // we intend to fixup balance based on unconfirmed DB or main DB - return common.BytesToHash(obj.CodeHash()) - } - } - // 2.Try to get from uncomfirmed DB or main DB - // 2.1 Already read before - if codeHash, ok := s.parallel.codeHashReadsInSlot[addr]; ok { - return codeHash - } - // 2.2 Try to get from unconfirmed DB if exist - if codeHash, ok := s.getCodeHashFromUnconfirmedDB(addr); ok { - s.parallel.codeHashReadsInSlot[addr] = codeHash - return codeHash - } - } - - // 3. Try to get from main StateObejct - stateObject := s.getStateObjectNoSlot(addr) - codeHash := common.Hash{} - if stateObject != nil { - codeHash = common.BytesToHash(stateObject.CodeHash()) - } - if s.parallel.isSlotDB { - s.parallel.codeHashReadsInSlot[addr] = codeHash +// getStateObject retrieves a state object given by the address, returning nil if +// the object is not found or was deleted in this execution context. 
If you need +// to differentiate between non-existent/just-deleted, use getDeletedStateObject. +func (s *StateDB) getStateObject(addr common.Address) *StateObject { + if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted { + return obj } - return codeHash + return nil } -// GetState retrieves a value from the given account's storage trie. -// For parallel mode wih, get from the state in order: -// -> self dirty, both Slot & MainProcessor -// -> pending of self: Slot on merge -// -> pending of unconfirmed DB -// -> pending of main StateDB -// -> origin -func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { - if s.parallel.isSlotDB { - - // 1.Try to get from dirty - if exist, ok := s.parallel.addrStateChangesInSlot[addr]; ok { - if !exist { - return common.Hash{} - } - obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot - return obj.GetState(s.db, hash) +func (s *StateDB) getStateObjectFromSnapshotOrTrie(addr common.Address) (data *Account, ok bool) { + var err error + // If no live objects are available, attempt to use snapshots + if s.snap != nil { + if metrics.EnabledExpensive { + defer func(start time.Time) { s.SnapshotAccountReads += time.Since(start) }(time.Now()) } - if keys, ok := s.parallel.kvChangesInSlot[addr]; ok { - if _, ok := keys[hash]; ok { - obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot - return obj.GetState(s.db, hash) + var acc *snapshot.Account + if acc, err = s.snap.Account(crypto.HashData(s.hasher, addr.Bytes())); err == nil { + if acc == nil { + return nil, false } - } - // 2.Try to get from uncomfirmed DB or main DB - // 2.1 Already read before - if storage, ok := s.parallel.kvReadsInSlot[addr]; ok { - if val, ok := storage.GetValue(hash); ok { - return val + data = &Account{ + Nonce: acc.Nonce, + Balance: acc.Balance, + CodeHash: acc.CodeHash, + Root: common.BytesToHash(acc.Root), + } + if 
len(data.CodeHash) == 0 { + data.CodeHash = emptyCodeHash + } + if data.Root == (common.Hash{}) { + data.Root = emptyRoot } } - // 2.2 Try to get from unconfirmed DB if exist - if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok { - if s.parallel.kvReadsInSlot[addr] == nil { - s.parallel.kvReadsInSlot[addr] = newStorage(false) + } + // If snapshot unavailable or reading from it failed, load from the database + if s.snap == nil || err != nil { + if s.trie == nil { + tr, err := s.db.OpenTrie(s.originalRoot) + if err != nil { + s.setError(fmt.Errorf("failed to open trie tree")) + return nil, false } - s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache - return val + s.trie = tr + } + if metrics.EnabledExpensive { + defer func(start time.Time) { s.AccountReads += time.Since(start) }(time.Now()) + } + enc, err := s.trie.TryGet(addr.Bytes()) + if err != nil { + s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %v", addr.Bytes(), err)) + return nil, false + } + if len(enc) == 0 { + return nil, false + } + data = new(Account) + if err := rlp.DecodeBytes(enc, data); err != nil { + log.Error("Failed to decode state object", "addr", addr, "err", err) + return nil, false } } + return data, true +} - // 3.Get from main StateDB - stateObject := s.getStateObjectNoSlot(addr) - val := common.Hash{} - if stateObject != nil { - val = stateObject.GetState(s.db, hash) +// getDeletedStateObject is similar to getStateObject, but instead of returning +// nil for a deleted state object, it returns the actual object with the deleted +// flag set. This is needed by the state journal to revert to the correct s- +// destructed object instead of wiping all knowledge about the state object. 
+func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject { + // Prefer live objects if any is available + if obj, _ := s.getStateObjectFromStateObjects(addr); obj != nil { + return obj } - if s.parallel.isSlotDB { - if s.parallel.kvReadsInSlot[addr] == nil { - s.parallel.kvReadsInSlot[addr] = newStorage(false) - } - s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache + data, ok := s.getStateObjectFromSnapshotOrTrie(addr) + if !ok { + return nil } - return val + // Insert into the live set + // if obj, ok := s.loadStateObj(addr); ok { + // fixme: concurrent not safe, merge could update it... + // return obj + //} + obj := newObject(s, s.isParallel, addr, *data) + s.storeStateObj(addr, obj) + return obj } -// GetProof returns the Merkle proof for a given account. -func (s *StateDB) GetProof(addr common.Address) ([][]byte, error) { - return s.GetProofByHash(crypto.Keccak256Hash(addr.Bytes())) -} +// func (s *StateDB) SetStateObject(object *StateObject) { +// s.storeStateObj(object.Address(), object) +// } -// GetProofByHash returns the Merkle proof for a given account. -func (s *StateDB) GetProofByHash(addrHash common.Hash) ([][]byte, error) { - var proof proofList - if _, err := s.Trie(); err != nil { - return nil, err +// GetOrNewStateObject retrieves a state object or create a new state object if nil. +// dirtyInSlot -> Unconfirmed DB -> main DB -> snapshot, no? create one +func (s *StateDB) GetOrNewStateObject(addr common.Address) *StateObject { + var stateObject *StateObject = nil + if stateObject == nil { + stateObject = s.getStateObject(addr) } - err := s.trie.Prove(addrHash[:], 0, &proof) - return proof, err -} - -// GetStorageProof returns the Merkle proof for given storage slot. 
-func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, error) { - var proof proofList - trie := s.StorageTrie(a) - if trie == nil { - return proof, errors.New("storage trie for requested address does not exist") + if stateObject == nil || stateObject.deleted || stateObject.suicided { + stateObject = s.createObject(addr) } - err := trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof) - return proof, err + return stateObject } -// GetStorageProofByHash returns the Merkle proof for given storage slot. -func (s *StateDB) GetStorageProofByHash(a common.Address, key common.Hash) ([][]byte, error) { - var proof proofList - trie := s.StorageTrie(a) - if trie == nil { - return proof, errors.New("storage trie for requested address does not exist") - } - err := trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof) - return proof, err -} +// createObject creates a new state object. If there is an existing account with +// the given address, it is overwritten and returned as the second return value. -// GetCommittedState retrieves a value from the given account's committed storage trie. -func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash { - if s.parallel.isSlotDB { - // 1.No need to get from pending of itself even on merge, since stateobject in SlotDB won't do finalise - // 2.Try to get from uncomfirmed DB or main DB - // KVs in unconfirmed DB can be seen as pending storage - // KVs in main DB are merged from SlotDB and has done finalise() on merge, can be seen as pending storage too. 
- // 2.1 Already read before - if storage, ok := s.parallel.kvReadsInSlot[addr]; ok { - if val, ok := storage.GetValue(hash); ok { - return val - } - } - // 2.2 Try to get from unconfirmed DB if exist - if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok { - if s.parallel.kvReadsInSlot[addr] == nil { - s.parallel.kvReadsInSlot[addr] = newStorage(false) - } - s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache - return val +// prev is used for CreateAccount to get its balance +// Parallel mode: +// if prev in dirty: revert is ok +// if prev in unconfirmed DB: addr state read record, revert should not put it back +// if prev in main DB: addr state read record, revert should not put it back +// if pre no exist: addr state read record, + +// `prev` is used to handle revert, to recover with the `prev` object +// In Parallel mode, we only need to recover to `prev` in SlotDB, +// a.if it is not in SlotDB, `revert` will remove it from the SlotDB +// b.if it is exist in SlotDB, `revert` will recover to the `prev` in SlotDB +// c.as `snapDestructs` it is the same +func (s *StateDB) createObject(addr common.Address) (newobj *StateObject) { + prev := s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that! + var prevdestruct bool + + if s.snap != nil && prev != nil { + _, prevdestruct = s.snapDestructs[prev.address] + if !prevdestruct { + // To destroy the previous trie node first and update the trie tree + // with the new object on block commit. + s.snapDestructs[prev.address] = struct{}{} } } - // 3. 
Try to get from main DB - stateObject := s.getStateObjectNoSlot(addr) - val := common.Hash{} - if stateObject != nil { - val = stateObject.GetCommittedState(s.db, hash) - } - if s.parallel.isSlotDB { - if s.parallel.kvReadsInSlot[addr] == nil { - s.parallel.kvReadsInSlot[addr] = newStorage(false) - } - s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache + newobj = newObject(s, s.isParallel, addr, Account{}) + newobj.setNonce(0) // sets the object to dirty + if prev == nil { + s.journal.append(createObjectChange{account: &addr}) + } else { + s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct}) } - return val -} -// Database retrieves the low level database supporting the lower level trie ops. -func (s *StateDB) Database() Database { - return s.db + s.storeStateObj(addr, newobj) + return newobj } -// StorageTrie returns the storage trie of an account. -// The return value is a copy and is nil for non-existent accounts. -func (s *StateDB) StorageTrie(addr common.Address) Trie { - stateObject := s.getStateObject(addr) - if stateObject == nil { +// CreateAccount explicitly creates a state object. If a state object with the address +// already exists the balance is carried over to the new account. +// +// CreateAccount is called during the EVM CREATE operation. The situation might arise that +// a contract does the following: +// +// 1. sends funds to sha(account ++ (nonce + 1)) +// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1) +// +// Carrying over the balance ensures that Ether doesn't disappear. 
+func (s *StateDB) CreateAccount(addr common.Address) { + // no matter it is got from dirty, unconfirmed or main DB + // if addr not exist, preBalance will be common.Big0, it is same as new(big.Int) which + // is the value newObject(), + preBalance := s.GetBalance(addr) + newObj := s.createObject(addr) + newObj.setBalance(new(big.Int).Set(preBalance)) // new big.Int for newObj +} + +func (s *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error { + so := s.getStateObject(addr) + if so == nil { return nil } - cpy := stateObject.deepCopy(s) - cpy.updateTrie(s.db) - return cpy.getTrie(s.db) -} + it := trie.NewIterator(so.getTrie(s.db).NodeIterator(nil)) -func (s *StateDB) HasSuicided(addr common.Address) bool { - if s.parallel.isSlotDB { - // 1.Try to get from dirty - if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - return obj.suicided + for it.Next() { + key := common.BytesToHash(s.trie.GetKey(it.Key)) + if value, dirty := so.dirtyStorage.GetValue(key); dirty { + if !cb(key, value) { + return nil + } + continue } - // 2.Try to get from uncomfirmed - if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { - return !exist + + if len(it.Value) > 0 { + _, content, _, err := rlp.Split(it.Value) + if err != nil { + return err + } + if !cb(key, common.BytesToHash(content)) { + return nil + } } } - stateObject := s.getStateObjectNoSlot(addr) - if stateObject != nil { - return stateObject.suicided - } - return false + return nil } -/* - * SETTERS - */ - -// the source mainObj should be got from the main StateDB -// we have to update its nonce, balance, code if they have updated in the unconfirmed DBs -/* -func (s *StateDB) unconfirmedLightCopy(mainObj *StateObject) *StateObject { - newObj := mainObj.lightCopy(s) // copied nonce, balance, code from base DB - - // do balance fixup only when it exist in unconfirmed DB - if nonce, ok := s.getNonceFromUnconfirmedDB(mainObj.address); ok { - // code got from unconfirmed DB - 
newObj.setNonce(nonce) +// Copy creates a deep, independent copy of the state. +// Snapshots of the copied state cannot be applied to the copy. +func (s *StateDB) Copy() *StateDB { + // Copy all the basic fields, initialize the memory ones + state := &StateDB{ + db: s.db, + trie: s.db.CopyTrie(s.trie), + stateObjects: make(map[common.Address]*StateObject, len(s.journal.dirties)), + stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)), + stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)), + storagePool: s.storagePool, + refund: s.refund, + logs: make(map[common.Hash][]*types.Log, len(s.logs)), + logSize: s.logSize, + preimages: make(map[common.Hash][]byte, len(s.preimages)), + journal: newJournal(), + hasher: crypto.NewKeccakState(), + parallel: ParallelState{}, } + // Copy the dirty states, logs, and preimages + for addr := range s.journal.dirties { + // As documented [here](https://github.com/ethereum/go-ethereum/pull/16485#issuecomment-380438527), + // and in the Finalise-method, there is a case where an object is in the journal but not + // in the stateObjects: OOG after touch on ripeMD prior to Byzantium. Thus, we need to check for + // nil + if object, exist := s.getStateObjectFromStateObjects(addr); exist { + // Even though the original object is dirty, we are not copying the journal, + // so we need to make sure that anyside effect the journal would have caused + // during a commit (or similar op) is already applied to the copy. 
+ state.storeStateObj(addr, object.deepCopy(state)) - // do balance fixup - if balance := s.getBalanceFromUnconfirmedDB(mainObj.address); balance != nil { - // balance got from unconfirmed DB - newObj.setBalance(balance) + state.stateObjectsDirty[addr] = struct{}{} // Mark the copy dirty to force internal (code/state) commits + state.stateObjectsPending[addr] = struct{}{} // Mark the copy pending to force external (account) commits + } } - // do code fixup - if codeObj, ok := s.getCodeFromUnconfirmedDB(mainObj.address); ok { - newObj.setCode(crypto.Keccak256Hash(codeObj), codeObj) // fixme: to confirm if we should use "codeObj.Code(db)" - newObj.dirtyCode = false // copy does not make the code dirty, + // Above, we don't copy the actual journal. This means that if the copy is copied, the + // loop above will be a no-op, since the copy's journal is empty. + // Thus, here we iterate over stateObjects, to enable copies of copies + for addr := range s.stateObjectsPending { + if _, exist := state.getStateObjectFromStateObjects(addr); !exist { + object, _ := s.getStateObjectFromStateObjects(addr) + state.storeStateObj(addr, object.deepCopy(state)) + } + state.stateObjectsPending[addr] = struct{}{} } - return newObj -} -*/ - -// AddBalance adds amount to the account associated with addr. -func (s *StateDB) AddBalance(addr common.Address, amount *big.Int) { - // if s.parallel.isSlotDB { - // add balance will perform a read operation first - // s.parallel.balanceReadsInSlot[addr] = struct{}{} // fixme: to make the the balance valid, since unconfirmed would refer it. - // if amount.Sign() == 0 { - // if amount == 0, no balance change, but there is still an empty check. 
- // take this empty check as addr state read(create, suicide, empty delete) - // s.parallel.addrStateReadsInSlot[addr] = struct{}{} - // } - // } - - stateObject := s.GetOrNewStateObject(addr) - if stateObject != nil { - if s.parallel.isSlotDB { - if addr == s.parallel.systemAddress { - s.parallel.systemAddressOpsCount++ - } - // if amount.Sign() != 0 { // todo: to reenable it - if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.lightCopy(s) // light copy from main DB - // do balance fixup from the confirmed DB, it could be more reliable than main DB - balance := s.GetBalance(addr) - newStateObject.setBalance(balance) - // s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance() // could read from main DB or unconfirmed DB - newStateObject.AddBalance(amount) - s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - s.parallel.balanceChangesInSlot[addr] = struct{}{} - return - } - // already dirty, make sure the balance if fixed up - // if stateObject.Balance() - if addr != s.parallel.systemAddress { - if stateObject.Balance().Cmp(s.GetBalance(addr)) != 0 { - log.Warn("AddBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr, - "stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", s.GetBalance(addr)) - stateObject.setBalance(s.GetBalance(addr)) - } - } + for addr := range s.stateObjectsDirty { + if _, exist := state.getStateObjectFromStateObjects(addr); !exist { + object, _ := s.getStateObjectFromStateObjects(addr) + state.storeStateObj(addr, object.deepCopy(state)) } - stateObject.AddBalance(amount) - if s.parallel.isSlotDB { - s.parallel.balanceChangesInSlot[addr] = struct{}{} + state.stateObjectsDirty[addr] = struct{}{} + } + for hash, logs := range s.logs { + cpy := make([]*types.Log, len(logs)) + for i, l := range logs { + cpy[i] = new(types.Log) + *cpy[i] = *l } + state.logs[hash] = cpy + } + for hash, preimage := range s.preimages { + state.preimages[hash] = 
preimage + } + // Do we need to copy the access list? In practice: No. At the start of a + // transaction, the access list is empty. In practice, we only ever copy state + // _between_ transactions/blocks, never in the middle of a transaction. + // However, it doesn't cost us much to copy an empty list, so we do it anyway + // to not blow up if we ever decide copy it in the middle of a transaction + if s.accessList != nil { + state.accessList = s.accessList.Copy() } -} -// SubBalance subtracts amount from the account associated with addr. -func (s *StateDB) SubBalance(addr common.Address, amount *big.Int) { - // if s.parallel.isSlotDB { - // if amount.Sign() != 0 { - // unlike add, sub 0 balance will not touch empty object - // s.parallel.balanceReadsInSlot[addr] = struct{}{} - // } - // } - stateObject := s.GetOrNewStateObject(addr) - if stateObject != nil { - if s.parallel.isSlotDB { - if addr == s.parallel.systemAddress { - s.parallel.systemAddressOpsCount++ - } - - // if amount.Sign() != 0 { // todo: to reenable it - if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.lightCopy(s) // light copy from main DB - // do balance fixup from the confirmed DB, it could be more reliable than main DB - balance := s.GetBalance(addr) - newStateObject.setBalance(balance) - // s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance() - newStateObject.SubBalance(amount) - s.parallel.balanceChangesInSlot[addr] = struct{}{} - s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - return - } - // already dirty, make sure the balance if fixed - // if stateObject.Balance() - if addr != s.parallel.systemAddress { - if stateObject.Balance().Cmp(s.GetBalance(addr)) != 0 { - log.Warn("SubBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr, - "stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", s.GetBalance(addr)) - stateObject.setBalance(s.GetBalance(addr)) - } - } + // If there's a 
prefetcher running, make an inactive copy of it that can + // only access data but does not actively preload (since the user will not + // know that they need to explicitly terminate an active copy). + if s.prefetcher != nil { + state.prefetcher = s.prefetcher.copy() + } + if s.snaps != nil { + // In order for the miner to be able to use and make additions + // to the snapshot tree, we need to copy that aswell. + // Otherwise, any block mined by ourselves will cause gaps in the tree, + // and force the miner to operate trie-backed only + state.snaps = s.snaps + state.snap = s.snap + // deep copy needed + state.snapDestructs = make(map[common.Address]struct{}) + for k, v := range s.snapDestructs { + state.snapDestructs[k] = v } - stateObject.SubBalance(amount) - if s.parallel.isSlotDB { - s.parallel.balanceChangesInSlot[addr] = struct{}{} + state.snapAccounts = make(map[common.Address][]byte) + for k, v := range s.snapAccounts { + state.snapAccounts[k] = v } - - } -} - -func (s *StateDB) SetBalance(addr common.Address, amount *big.Int) { - stateObject := s.GetOrNewStateObject(addr) - if stateObject != nil { - if s.parallel.isSlotDB { - if addr == s.parallel.systemAddress { - s.parallel.systemAddressOpsCount++ - } - if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.lightCopy(s) - // update balance for revert, in case child contract is revertted, - // it should revert to the previous balance - balance := s.GetBalance(addr) - newStateObject.setBalance(balance) - newStateObject.SetBalance(amount) - s.parallel.balanceChangesInSlot[addr] = struct{}{} - s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - return + state.snapStorage = make(map[common.Address]map[string][]byte) + for k, v := range s.snapStorage { + temp := make(map[string][]byte) + for kk, vv := range v { + temp[kk] = vv } - - } - stateObject.SetBalance(amount) - if s.parallel.isSlotDB { - s.parallel.balanceChangesInSlot[addr] = struct{}{} + 
state.snapStorage[k] = temp } } + return state } -func (s *StateDB) SetNonce(addr common.Address, nonce uint64) { - stateObject := s.GetOrNewStateObject(addr) - if stateObject != nil { - if s.parallel.isSlotDB { - if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.lightCopy(s) - noncePre := s.GetNonce(addr) - newStateObject.setNonce(noncePre) // nonce fixup - newStateObject.SetNonce(nonce) - s.parallel.nonceChangesInSlot[addr] = struct{}{} - s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - return - } - noncePre := s.GetNonce(addr) - stateObject.setNonce(noncePre) // nonce fixup - } - stateObject.SetNonce(nonce) - if s.parallel.isSlotDB { - s.parallel.nonceChangesInSlot[addr] = struct{}{} - } - } +/* +var addressStructPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]struct{}, defaultNumOfSlots) }, } -func (s *StateDB) SetCode(addr common.Address, code []byte) { - stateObject := s.GetOrNewStateObject(addr) - if stateObject != nil { - codeHash := crypto.Keccak256Hash(code) - if s.parallel.isSlotDB { - if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.lightCopy(s) - codePre := s.GetCode(addr) // code fixup - codeHashPre := crypto.Keccak256Hash(codePre) - newStateObject.setCode(codeHashPre, codePre) - - newStateObject.SetCode(codeHash, code) - s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - s.parallel.codeChangesInSlot[addr] = struct{}{} - return - } - codePre := s.GetCode(addr) // code fixup - codeHashPre := crypto.Keccak256Hash(codePre) - stateObject.setCode(codeHashPre, codePre) - } - stateObject.SetCode(codeHash, code) - if s.parallel.isSlotDB { - s.parallel.codeChangesInSlot[addr] = struct{}{} +var journalPool = sync.Pool{ + New: func() interface{} { + return &journal{ + dirties: make(map[common.Address]int, defaultNumOfSlots), + entries: make([]journalEntry, 0, defaultNumOfSlots), } - } + }, } -func (s *StateDB) 
SetState(addr common.Address, key, value common.Hash) { - stateObject := s.GetOrNewStateObject(addr) // attention: if StateObject's lightCopy, its storage is only a part of the full storage, - if stateObject != nil { - if s.parallel.isSlotDB { - if s.parallel.baseTxIndex+1 == s.txIndex { - // we check if state is unchanged - // only when current transaction is the next transaction to be committed - // fixme: there is a bug, block: 14,962,284, - // stateObject is in dirty (light copy), but the key is in mainStateDB - // stateObject dirty -> committed, will skip mainStateDB dirty - if s.GetState(addr, key) == value { - log.Debug("Skip set same state", "baseTxIndex", s.parallel.baseTxIndex, - "txIndex", s.txIndex, "addr", addr, - "key", key, "value", value) - return - } - } +var stateKeysPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]StateKeys, defaultNumOfSlots) }, +} - if s.parallel.kvChangesInSlot[addr] == nil { - s.parallel.kvChangesInSlot[addr] = make(StateKeys) // make(Storage, defaultNumOfSlots) - } - - if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.lightCopy(s) - newStateObject.SetState(s.db, key, value) - s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - return - } - // do State Update - } - stateObject.SetState(s.db, key, value) - } +var stateObjectsPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]*StateObject, defaultNumOfSlots) }, } -// SetStorage replaces the entire storage for the specified account with given -// storage. This function should only be used for debugging. -func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) { - stateObject := s.GetOrNewStateObject(addr) // fixme: parallel mode? 
- if stateObject != nil { - stateObject.SetStorage(storage) - } +var balancePool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]*big.Int, defaultNumOfSlots) }, } -// Suicide marks the given account as suicided. -// This clears the account balance. -// -// The account's state object is still available until the state is committed, -// getStateObject will return a non-nil account after Suicide. -func (s *StateDB) Suicide(addr common.Address) bool { - var stateObject *StateObject - if s.parallel.isSlotDB { - // 1.Try to get from dirty, it could be suicided inside of contract call - stateObject = s.parallel.dirtiedStateObjectsInSlot[addr] - if stateObject == nil { - // 2.Try to get from uncomfirmed, if deleted return false, since the address does not exist - if obj, ok := s.getStateObjectFromUnconfirmedDB(addr); ok { - stateObject = obj - s.parallel.addrStateReadsInSlot[addr] = !stateObject.deleted // true: exist, false: deleted - if stateObject.deleted { - log.Error("Suicide addr alreay deleted in confirmed DB", "txIndex", s.txIndex, "addr", addr) - return false - } - } - } - } - if stateObject == nil { - // 3.Try to get from main StateDB - stateObject = s.getStateObjectNoSlot(addr) - if stateObject == nil { - s.parallel.addrStateReadsInSlot[addr] = false // true: exist, false: deleted - log.Error("Suicide addr not exist", "txIndex", s.txIndex, "addr", addr) - return false - } - s.parallel.addrStateReadsInSlot[addr] = true // true: exist, false: deleted - } +var snapAccountPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address][]byte, defaultNumOfSlots) }, +} - s.journal.append(suicideChange{ - account: &addr, - prev: stateObject.suicided, // todo: must be false? 
- prevbalance: new(big.Int).Set(s.GetBalance(addr)), - }) +var snapStoragePool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]map[string][]byte, defaultNumOfSlots) }, +} - if s.parallel.isSlotDB { - if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - // do copy-on-write for suicide "write" - newStateObject := stateObject.lightCopy(s) - newStateObject.markSuicided() - newStateObject.data.Balance = new(big.Int) - s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist any more, - // s.parallel.nonceChangesInSlot[addr] = struct{}{} - s.parallel.balanceChangesInSlot[addr] = struct{}{} - s.parallel.codeChangesInSlot[addr] = struct{}{} - // s.parallel.kvChangesInSlot[addr] = make(StateKeys) // all key changes are discarded - return true - } - s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist any more, - s.parallel.balanceChangesInSlot[addr] = struct{}{} - s.parallel.codeChangesInSlot[addr] = struct{}{} - } +var snapStorageValuePool = sync.Pool{ + New: func() interface{} { return make(map[string][]byte, defaultNumOfSlots) }, +} - stateObject.markSuicided() - stateObject.data.Balance = new(big.Int) - return true +var logsPool = sync.Pool{ + New: func() interface{} { return make(map[common.Hash][]*types.Log, defaultNumOfSlots) }, } -// -// Setting, updating & deleting state object methods. -// +func (s *StateDB) SlotDBPutSyncPool() { + // for key := range s.parallel.codeReadsInSlot { + // delete(s.parallel.codeReadsInSlot, key) + //} + //addressStructPool.Put(s.parallel.codeReadsInSlot) -// updateStateObject writes the given object to the trie. 
-func (s *StateDB) updateStateObject(obj *StateObject) { - // Track the amount of time wasted on updating the account from the trie - if metrics.EnabledExpensive { - defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now()) - } - // Encode the account and update the account trie - addr := obj.Address() - data := obj.encodeData - var err error - if data == nil { - data, err = rlp.EncodeToBytes(obj) - if err != nil { - panic(fmt.Errorf("can't encode object at %x: %v", addr[:], err)) - } - } - if err = s.trie.TryUpdate(addr[:], data); err != nil { - s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err)) + for key := range s.parallel.codeChangesInSlot { + delete(s.parallel.codeChangesInSlot, key) } -} + addressStructPool.Put(s.parallel.codeChangesInSlot) -// deleteStateObject removes the given object from the state trie. -func (s *StateDB) deleteStateObject(obj *StateObject) { - // Track the amount of time wasted on deleting the account from the trie - if metrics.EnabledExpensive { - defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now()) - } - // Delete the account from the trie - addr := obj.Address() - if err := s.trie.TryDelete(addr[:]); err != nil { - s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err)) + for key := range s.parallel.balanceChangesInSlot { + delete(s.parallel.balanceChangesInSlot, key) } -} + addressStructPool.Put(s.parallel.balanceChangesInSlot) -// getStateObject retrieves a state object given by the address, returning nil if -// the object is not found or was deleted in this execution context. If you need -// to differentiate between non-existent/just-deleted, use getDeletedStateObject. 
-// fixme: avoid getStateObjectNoSlot, may be we define a new struct SlotDB which inherit StateDB -func (s *StateDB) getStateObjectNoSlot(addr common.Address) *StateObject { - if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted { - return obj + for key := range s.parallel.balanceReadsInSlot { + delete(s.parallel.balanceReadsInSlot, key) } - return nil -} + balancePool.Put(s.parallel.balanceReadsInSlot) -// for parallel execution mode, try to get dirty StateObject in slot first. -func (s *StateDB) getStateObject(addr common.Address) *StateObject { - if s.parallel.isSlotDB { - if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - return obj - } + // for key := range s.parallel.addrStateReadsInSlot { + // delete(s.parallel.addrStateReadsInSlot, key) + // } + // addressStructPool.Put(s.parallel.addrStateReadsInSlot) + + for key := range s.parallel.nonceChangesInSlot { + delete(s.parallel.nonceChangesInSlot, key) } + addressStructPool.Put(s.parallel.nonceChangesInSlot) - if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted { - return obj + for key := range s.stateObjectsPending { + delete(s.stateObjectsPending, key) } - return nil -} + addressStructPool.Put(s.stateObjectsPending) -// getDeletedStateObject is similar to getStateObject, but instead of returning -// nil for a deleted state object, it returns the actual object with the deleted -// flag set. This is needed by the state journal to revert to the correct s- -// destructed object instead of wiping all knowledge about the state object. 
-func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject { - // Prefer live objects if any is available - if obj, _ := s.getStateObjectFromStateObjects(addr); obj != nil { - return obj + for key := range s.stateObjectsDirty { + delete(s.stateObjectsDirty, key) } - // If no live objects are available, attempt to use snapshots - var ( - data *Account - err error - ) - if s.snap != nil { - if metrics.EnabledExpensive { - defer func(start time.Time) { s.SnapshotAccountReads += time.Since(start) }(time.Now()) - } - var acc *snapshot.Account - if acc, err = s.snap.Account(crypto.HashData(s.hasher, addr.Bytes())); err == nil { - if acc == nil { - return nil - } - data = &Account{ - Nonce: acc.Nonce, - Balance: acc.Balance, - CodeHash: acc.CodeHash, - Root: common.BytesToHash(acc.Root), - } - if len(data.CodeHash) == 0 { - data.CodeHash = emptyCodeHash - } - if data.Root == (common.Hash{}) { - data.Root = emptyRoot - } - } + addressStructPool.Put(s.stateObjectsDirty) + + for key := range s.journal.dirties { + delete(s.journal.dirties, key) } - // If snapshot unavailable or reading from it failed, load from the database - if s.snap == nil || err != nil { - if s.trie == nil { - tr, err := s.db.OpenTrie(s.originalRoot) - if err != nil { - s.setError(fmt.Errorf("failed to open trie tree")) - return nil - } - s.trie = tr - } - if metrics.EnabledExpensive { - defer func(start time.Time) { s.AccountReads += time.Since(start) }(time.Now()) - } - enc, err := s.trie.TryGet(addr.Bytes()) - if err != nil { - s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %v", addr.Bytes(), err)) - return nil - } - if len(enc) == 0 { - return nil - } - data = new(Account) - if err := rlp.DecodeBytes(enc, data); err != nil { - log.Error("Failed to decode state object", "addr", addr, "err", err) - return nil - } + s.journal.entries = s.journal.entries[:0] + journalPool.Put(s.journal) + + for key := range s.parallel.kvChangesInSlot { + delete(s.parallel.kvChangesInSlot, key) } - 
// Insert into the live set - // if obj, ok := s.loadStateObj(addr); ok { - // fixme: concurrent not safe, merge could update it... - // return obj - //} - obj := newObject(s, s.isParallel, addr, *data) - s.SetStateObject(obj) - return obj -} + stateKeysPool.Put(s.parallel.kvChangesInSlot) -func (s *StateDB) SetStateObject(object *StateObject) { - s.storeStateObj(object.Address(), object) -} + // for key := range s.parallel.kvReadsInSlot { + // delete(s.parallel.kvReadsInSlot, key) + // } + // stateKeysPool.Put(s.parallel.kvReadsInSlot) -// GetOrNewStateObject retrieves a state object or create a new state object if nil. -// dirtyInSlot -> Unconfirmed DB -> main DB -> snapshot, no? create one -func (s *StateDB) GetOrNewStateObject(addr common.Address) *StateObject { - var stateObject *StateObject = nil - exist := true - if s.parallel.isSlotDB { - if stateObject, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - return stateObject - } - stateObject, _ = s.getStateObjectFromUnconfirmedDB(addr) + for key := range s.parallel.dirtiedStateObjectsInSlot { + delete(s.parallel.dirtiedStateObjectsInSlot, key) } + stateObjectsPool.Put(s.parallel.dirtiedStateObjectsInSlot) - if stateObject == nil { - stateObject = s.getStateObjectNoSlot(addr) - } - if stateObject == nil || stateObject.deleted || stateObject.suicided { - stateObject = s.createObject(addr) - exist = false + for key := range s.snapDestructs { + delete(s.snapDestructs, key) } + addressStructPool.Put(s.snapDestructs) - if s.parallel.isSlotDB { - s.parallel.addrStateReadsInSlot[addr] = exist // true: exist, false: not exist + for key := range s.snapAccounts { + delete(s.snapAccounts, key) } - return stateObject -} + snapAccountPool.Put(s.snapAccounts) -// createObject creates a new state object. If there is an existing account with -// the given address, it is overwritten and returned as the second return value. 
+ for key, storage := range s.snapStorage { + for key := range storage { + delete(storage, key) + } + snapStorageValuePool.Put(storage) + delete(s.snapStorage, key) + } + snapStoragePool.Put(s.snapStorage) -// prev is used for CreateAccount to get its balance -// Parallel mode: -// if prev in dirty: revert is ok -// if prev in unconfirmed DB: addr state read record, revert should not put it back -// if prev in main DB: addr state read record, revert should not put it back -// if pre no exist: addr state read record, - -// `prev` is used to handle revert, to recover with the `prev` object -// In Parallel mode, we only need to recover to `prev` in SlotDB, -// a.if it is not in SlotDB, `revert` will remove it from the SlotDB -// b.if it is exist in SlotDB, `revert` will recover to the `prev` in SlotDB -// c.as `snapDestructs` it is the same -func (s *StateDB) createObject(addr common.Address) (newobj *StateObject) { - var prev *StateObject = nil - if s.parallel.isSlotDB { - // do not get from unconfirmed DB, since it will has problem on revert - prev = s.parallel.dirtiedStateObjectsInSlot[addr] - } else { - prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that! - } - - var prevdestruct bool - - if s.snap != nil && prev != nil { - _, prevdestruct = s.snapDestructs[prev.address] // fixme, record the snapshot read for create Account - if s.parallel.isSlotDB { - s.parallel.addrSnapDestructsReadsInSlot[addr] = prevdestruct - } - if !prevdestruct { - // To destroy the previous trie node first and update the trie tree - // with the new object on block commit. 
- s.snapDestructs[prev.address] = struct{}{} - - } - } - newobj = newObject(s, s.isParallel, addr, Account{}) - newobj.setNonce(0) // sets the object to dirty - if prev == nil { - s.journal.append(createObjectChange{account: &addr}) - } else { - s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct}) - } - - if s.parallel.isSlotDB { - // s.parallel.dirtiedStateObjectsInSlot[addr] = newobj // would change the bahavior of AddBalance... - s.parallel.addrStateChangesInSlot[addr] = true // the object sis created - s.parallel.nonceChangesInSlot[addr] = struct{}{} - s.parallel.balanceChangesInSlot[addr] = struct{}{} - s.parallel.codeChangesInSlot[addr] = struct{}{} - // notice: all the KVs are cleared if any - s.parallel.kvChangesInSlot[addr] = make(StateKeys) - } else { - s.SetStateObject(newobj) - } - return newobj -} - -// CreateAccount explicitly creates a state object. If a state object with the address -// already exists the balance is carried over to the new account. -// -// CreateAccount is called during the EVM CREATE operation. The situation might arise that -// a contract does the following: -// -// 1. sends funds to sha(account ++ (nonce + 1)) -// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1) -// -// Carrying over the balance ensures that Ether doesn't disappear. 
-func (s *StateDB) CreateAccount(addr common.Address) { - // no matter it is got from dirty, unconfirmed or main DB - // if addr not exist, preBalance will be common.Big0, it is same as new(big.Int) which - // is the value newObject(), - preBalance := s.GetBalance(addr) // parallel balance read will be recorded inside of GetBalance - newObj := s.createObject(addr) - newObj.setBalance(new(big.Int).Set(preBalance)) // new big.Int for newObj -} - -func (s *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error { - so := s.getStateObject(addr) - if so == nil { - return nil - } - it := trie.NewIterator(so.getTrie(s.db).NodeIterator(nil)) - - for it.Next() { - key := common.BytesToHash(s.trie.GetKey(it.Key)) - if value, dirty := so.dirtyStorage.GetValue(key); dirty { - if !cb(key, value) { - return nil - } - continue - } - - if len(it.Value) > 0 { - _, content, _, err := rlp.Split(it.Value) - if err != nil { - return err - } - if !cb(key, common.BytesToHash(content)) { - return nil - } - } + for key := range s.logs { + delete(s.logs, key) } - return nil + logsPool.Put(s.logs) } +*/ +// CopyForSlot copy all the basic fields, initialize the memory ones +func (s *StateDB) CopyForSlot() *ParallelStateDB { + parallel := ParallelState{ + // use base(dispatcher) slot db's stateObjects. + // It is a SyncMap, only readable to slot, not writable + stateObjects: s.parallel.stateObjects, + unconfirmedDBInShot: make(map[int]*ParallelStateDB, 100), -// Copy creates a deep, independent copy of the state. -// Snapshots of the copied state cannot be applied to the copy. 
-func (s *StateDB) Copy() *StateDB { - // Copy all the basic fields, initialize the memory ones - state := &StateDB{ - db: s.db, - trie: s.db.CopyTrie(s.trie), - stateObjects: make(map[common.Address]*StateObject, len(s.journal.dirties)), - stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)), - stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)), - storagePool: s.storagePool, - refund: s.refund, - logs: make(map[common.Hash][]*types.Log, len(s.logs)), - logSize: s.logSize, - preimages: make(map[common.Hash][]byte, len(s.preimages)), - journal: newJournal(), - hasher: crypto.NewKeccakState(), - parallel: ParallelState{}, - } - // Copy the dirty states, logs, and preimages - for addr := range s.journal.dirties { - // As documented [here](https://github.com/ethereum/go-ethereum/pull/16485#issuecomment-380438527), - // and in the Finalise-method, there is a case where an object is in the journal but not - // in the stateObjects: OOG after touch on ripeMD prior to Byzantium. Thus, we need to check for - // nil - if object, exist := s.getStateObjectFromStateObjects(addr); exist { - // Even though the original object is dirty, we are not copying the journal, - // so we need to make sure that anyside effect the journal would have caused - // during a commit (or similar op) is already applied to the copy. 
- state.storeStateObj(addr, object.deepCopy(state)) + codeReadsInSlot: make(map[common.Address][]byte, 10), // addressStructPool.Get().(map[common.Address]struct{}), + codeHashReadsInSlot: make(map[common.Address]common.Hash), + codeChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + kvChangesInSlot: make(map[common.Address]StateKeys), // stateKeysPool.Get().(map[common.Address]StateKeys), + kvReadsInSlot: make(map[common.Address]Storage, 100), // stateKeysPool.Get().(map[common.Address]Storage), + balanceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + balanceReadsInSlot: make(map[common.Address]*big.Int), // addressStructPool.Get().(map[common.Address]struct{}), + addrStateReadsInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), + addrStateChangesInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), + nonceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + nonceReadsInSlot: make(map[common.Address]uint64), + addrSnapDestructsReadsInSlot: make(map[common.Address]bool), - state.stateObjectsDirty[addr] = struct{}{} // Mark the copy dirty to force internal (code/state) commits - state.stateObjectsPending[addr] = struct{}{} // Mark the copy pending to force external (account) commits - } - } - // Above, we don't copy the actual journal. This means that if the copy is copied, the - // loop above will be a no-op, since the copy's journal is empty. 
- // Thus, here we iterate over stateObjects, to enable copies of copies - for addr := range s.stateObjectsPending { - if _, exist := state.getStateObjectFromStateObjects(addr); !exist { - object, _ := s.getStateObjectFromStateObjects(addr) - state.storeStateObj(addr, object.deepCopy(state)) - } - state.stateObjectsPending[addr] = struct{}{} - } - for addr := range s.stateObjectsDirty { - if _, exist := state.getStateObjectFromStateObjects(addr); !exist { - object, _ := s.getStateObjectFromStateObjects(addr) - state.storeStateObj(addr, object.deepCopy(state)) - } - state.stateObjectsDirty[addr] = struct{}{} + isSlotDB: true, + dirtiedStateObjectsInSlot: make(map[common.Address]*StateObject), // stateObjectsPool.Get().(map[common.Address]*StateObject), } - for hash, logs := range s.logs { - cpy := make([]*types.Log, len(logs)) - for i, l := range logs { - cpy[i] = new(types.Log) - *cpy[i] = *l - } - state.logs[hash] = cpy + state := &ParallelStateDB{ + StateDB{ + db: s.db, + trie: s.db.CopyTrie(s.trie), + stateObjects: make(map[common.Address]*StateObject), // replaced by parallel.stateObjects in parallel mode + stateObjectsPending: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + stateObjectsDirty: make(map[common.Address]struct{}), //addressStructPool.Get().(map[common.Address]struct{}), + refund: s.refund, // should be 0 + logs: make(map[common.Hash][]*types.Log, defaultNumOfSlots), // logsPool.Get().(map[common.Hash][]*types.Log), + logSize: 0, + preimages: make(map[common.Hash][]byte, len(s.preimages)), + journal: newJournal(), // journalPool.Get().(*journal), + hasher: crypto.NewKeccakState(), + isParallel: true, + parallel: parallel, + }, } for hash, preimage := range s.preimages { state.preimages[hash] = preimage } - // Do we need to copy the access list? In practice: No. At the start of a - // transaction, the access list is empty. 
In practice, we only ever copy state - // _between_ transactions/blocks, never in the middle of a transaction. - // However, it doesn't cost us much to copy an empty list, so we do it anyway - // to not blow up if we ever decide copy it in the middle of a transaction - if s.accessList != nil { - state.accessList = s.accessList.Copy() - } - // If there's a prefetcher running, make an inactive copy of it that can - // only access data but does not actively preload (since the user will not - // know that they need to explicitly terminate an active copy). - if s.prefetcher != nil { - state.prefetcher = s.prefetcher.copy() - } if s.snaps != nil { // In order for the miner to be able to use and make additions // to the snapshot tree, we need to copy that aswell. @@ -2138,271 +1219,71 @@ func (s *StateDB) Copy() *StateDB { state.snaps = s.snaps state.snap = s.snap // deep copy needed - state.snapDestructs = make(map[common.Address]struct{}) + state.snapDestructs = make(map[common.Address]struct{}) //addressStructPool.Get().(map[common.Address]struct{}) + s.snapParallelLock.RLock() for k, v := range s.snapDestructs { state.snapDestructs[k] = v } - state.snapAccounts = make(map[common.Address][]byte) + s.snapParallelLock.RUnlock() + // + state.snapAccounts = make(map[common.Address][]byte) // snapAccountPool.Get().(map[common.Address][]byte) for k, v := range s.snapAccounts { state.snapAccounts[k] = v } - state.snapStorage = make(map[common.Address]map[string][]byte) + state.snapStorage = make(map[common.Address]map[string][]byte) // snapStoragePool.Get().(map[common.Address]map[string][]byte) for k, v := range s.snapStorage { - temp := make(map[string][]byte) + temp := make(map[string][]byte) // snapStorageValuePool.Get().(map[string][]byte) for kk, vv := range v { temp[kk] = vv } state.snapStorage[k] = temp } + // trie prefetch should be done by dispacther on StateObject Merge, + // disable it in parallel slot + // state.prefetcher = s.prefetcher } - return state -} - -/* 
-var addressStructPool = sync.Pool{ - New: func() interface{} { return make(map[common.Address]struct{}, defaultNumOfSlots) }, -} -var journalPool = sync.Pool{ - New: func() interface{} { - return &journal{ - dirties: make(map[common.Address]int, defaultNumOfSlots), - entries: make([]journalEntry, 0, defaultNumOfSlots), - } - }, + return state } -var stateKeysPool = sync.Pool{ - New: func() interface{} { return make(map[common.Address]StateKeys, defaultNumOfSlots) }, +// Snapshot returns an identifier for the current revision of the state. +func (s *StateDB) Snapshot() int { + id := s.nextRevisionId + s.nextRevisionId++ + s.validRevisions = append(s.validRevisions, revision{id, s.journal.length()}) + return id } -var stateObjectsPool = sync.Pool{ - New: func() interface{} { return make(map[common.Address]*StateObject, defaultNumOfSlots) }, -} +// RevertToSnapshot reverts all state changes made since the given revision. +func (s *StateDB) RevertToSnapshot(revid int) { + // Find the snapshot in the stack of valid snapshots. + idx := sort.Search(len(s.validRevisions), func(i int) bool { + return s.validRevisions[i].id >= revid + }) + if idx == len(s.validRevisions) || s.validRevisions[idx].id != revid { + panic(fmt.Errorf("revision id %v cannot be reverted", revid)) + } + snapshot := s.validRevisions[idx].journalIndex -var balancePool = sync.Pool{ - New: func() interface{} { return make(map[common.Address]*big.Int, defaultNumOfSlots) }, + // Replay the journal to undo changes and remove invalidated snapshots + s.journal.revert(s, snapshot) + s.validRevisions = s.validRevisions[:idx] } -var snapAccountPool = sync.Pool{ - New: func() interface{} { return make(map[common.Address][]byte, defaultNumOfSlots) }, +// GetRefund returns the current value of the refund counter. 
+func (s *StateDB) GetRefund() uint64 { + return s.refund } -var snapStoragePool = sync.Pool{ - New: func() interface{} { return make(map[common.Address]map[string][]byte, defaultNumOfSlots) }, -} - -var snapStorageValuePool = sync.Pool{ - New: func() interface{} { return make(map[string][]byte, defaultNumOfSlots) }, -} - -var logsPool = sync.Pool{ - New: func() interface{} { return make(map[common.Hash][]*types.Log, defaultNumOfSlots) }, -} - -func (s *StateDB) SlotDBPutSyncPool() { - // for key := range s.parallel.codeReadsInSlot { - // delete(s.parallel.codeReadsInSlot, key) - //} - //addressStructPool.Put(s.parallel.codeReadsInSlot) - - for key := range s.parallel.codeChangesInSlot { - delete(s.parallel.codeChangesInSlot, key) - } - addressStructPool.Put(s.parallel.codeChangesInSlot) - - for key := range s.parallel.balanceChangesInSlot { - delete(s.parallel.balanceChangesInSlot, key) - } - addressStructPool.Put(s.parallel.balanceChangesInSlot) - - for key := range s.parallel.balanceReadsInSlot { - delete(s.parallel.balanceReadsInSlot, key) - } - balancePool.Put(s.parallel.balanceReadsInSlot) - - // for key := range s.parallel.addrStateReadsInSlot { - // delete(s.parallel.addrStateReadsInSlot, key) - // } - // addressStructPool.Put(s.parallel.addrStateReadsInSlot) - - for key := range s.parallel.nonceChangesInSlot { - delete(s.parallel.nonceChangesInSlot, key) - } - addressStructPool.Put(s.parallel.nonceChangesInSlot) - - for key := range s.stateObjectsPending { - delete(s.stateObjectsPending, key) - } - addressStructPool.Put(s.stateObjectsPending) - - for key := range s.stateObjectsDirty { - delete(s.stateObjectsDirty, key) - } - addressStructPool.Put(s.stateObjectsDirty) - - for key := range s.journal.dirties { - delete(s.journal.dirties, key) - } - s.journal.entries = s.journal.entries[:0] - journalPool.Put(s.journal) - - for key := range s.parallel.kvChangesInSlot { - delete(s.parallel.kvChangesInSlot, key) - } - stateKeysPool.Put(s.parallel.kvChangesInSlot) 
- - // for key := range s.parallel.kvReadsInSlot { - // delete(s.parallel.kvReadsInSlot, key) - // } - // stateKeysPool.Put(s.parallel.kvReadsInSlot) - - for key := range s.parallel.dirtiedStateObjectsInSlot { - delete(s.parallel.dirtiedStateObjectsInSlot, key) - } - stateObjectsPool.Put(s.parallel.dirtiedStateObjectsInSlot) - - for key := range s.snapDestructs { - delete(s.snapDestructs, key) - } - addressStructPool.Put(s.snapDestructs) - - for key := range s.snapAccounts { - delete(s.snapAccounts, key) - } - snapAccountPool.Put(s.snapAccounts) - - for key, storage := range s.snapStorage { - for key := range storage { - delete(storage, key) - } - snapStorageValuePool.Put(storage) - delete(s.snapStorage, key) - } - snapStoragePool.Put(s.snapStorage) - - for key := range s.logs { - delete(s.logs, key) - } - logsPool.Put(s.logs) -} -*/ -// CopyForSlot copy all the basic fields, initialize the memory ones -func (s *StateDB) CopyForSlot() *StateDB { - parallel := ParallelState{ - // use base(dispatcher) slot db's stateObjects. 
- // It is a SyncMap, only readable to slot, not writable - stateObjects: s.parallel.stateObjects, - unconfirmedDBInShot: make(map[int]*StateDB, 100), - - codeReadsInSlot: make(map[common.Address][]byte, 10), // addressStructPool.Get().(map[common.Address]struct{}), - codeHashReadsInSlot: make(map[common.Address]common.Hash), - codeChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), - kvChangesInSlot: make(map[common.Address]StateKeys), // stateKeysPool.Get().(map[common.Address]StateKeys), - kvReadsInSlot: make(map[common.Address]Storage, 100), // stateKeysPool.Get().(map[common.Address]Storage), - balanceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), - balanceReadsInSlot: make(map[common.Address]*big.Int), // addressStructPool.Get().(map[common.Address]struct{}), - addrStateReadsInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), - addrStateChangesInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), - nonceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), - nonceReadsInSlot: make(map[common.Address]uint64), - addrSnapDestructsReadsInSlot: make(map[common.Address]bool), - - isSlotDB: true, - dirtiedStateObjectsInSlot: make(map[common.Address]*StateObject), // stateObjectsPool.Get().(map[common.Address]*StateObject), - } - state := &StateDB{ - db: s.db, - trie: s.db.CopyTrie(s.trie), - stateObjects: make(map[common.Address]*StateObject), // replaced by parallel.stateObjects in parallel mode - stateObjectsPending: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), - stateObjectsDirty: make(map[common.Address]struct{}), //addressStructPool.Get().(map[common.Address]struct{}), - refund: s.refund, // should be 0 - logs: make(map[common.Hash][]*types.Log, 
defaultNumOfSlots), // logsPool.Get().(map[common.Hash][]*types.Log), - logSize: 0, - preimages: make(map[common.Hash][]byte, len(s.preimages)), - journal: newJournal(), // journalPool.Get().(*journal), - hasher: crypto.NewKeccakState(), - isParallel: true, - parallel: parallel, - } - - for hash, preimage := range s.preimages { - state.preimages[hash] = preimage - } - - if s.snaps != nil { - // In order for the miner to be able to use and make additions - // to the snapshot tree, we need to copy that aswell. - // Otherwise, any block mined by ourselves will cause gaps in the tree, - // and force the miner to operate trie-backed only - state.snaps = s.snaps - state.snap = s.snap - // deep copy needed - state.snapDestructs = make(map[common.Address]struct{}) //addressStructPool.Get().(map[common.Address]struct{}) - s.snapParallelLock.RLock() - for k, v := range s.snapDestructs { - state.snapDestructs[k] = v - } - s.snapParallelLock.RUnlock() - // - state.snapAccounts = make(map[common.Address][]byte) // snapAccountPool.Get().(map[common.Address][]byte) - for k, v := range s.snapAccounts { - state.snapAccounts[k] = v - } - state.snapStorage = make(map[common.Address]map[string][]byte) // snapStoragePool.Get().(map[common.Address]map[string][]byte) - for k, v := range s.snapStorage { - temp := make(map[string][]byte) // snapStorageValuePool.Get().(map[string][]byte) - for kk, vv := range v { - temp[kk] = vv - } - state.snapStorage[k] = temp - } - // trie prefetch should be done by dispacther on StateObject Merge, - // disable it in parallel slot - // state.prefetcher = s.prefetcher - } - return state -} - -// Snapshot returns an identifier for the current revision of the state. -func (s *StateDB) Snapshot() int { - id := s.nextRevisionId - s.nextRevisionId++ - s.validRevisions = append(s.validRevisions, revision{id, s.journal.length()}) - return id -} - -// RevertToSnapshot reverts all state changes made since the given revision. 
-func (s *StateDB) RevertToSnapshot(revid int) { - // Find the snapshot in the stack of valid snapshots. - idx := sort.Search(len(s.validRevisions), func(i int) bool { - return s.validRevisions[i].id >= revid - }) - if idx == len(s.validRevisions) || s.validRevisions[idx].id != revid { - panic(fmt.Errorf("revision id %v cannot be reverted", revid)) - } - snapshot := s.validRevisions[idx].journalIndex - - // Replay the journal to undo changes and remove invalidated snapshots - s.journal.revert(s, snapshot) - s.validRevisions = s.validRevisions[:idx] -} - -// GetRefund returns the current value of the refund counter. -func (s *StateDB) GetRefund() uint64 { - return s.refund -} - -// GetRefund returns the current value of the refund counter. -func (s *StateDB) WaitPipeVerification() error { - // We need wait for the parent trie to commit - if s.snap != nil { - if valid := s.snap.WaitAndGetVerifyRes(); !valid { - return fmt.Errorf("verification on parent snap failed") - } - } - return nil +// GetRefund returns the current value of the refund counter. 
+func (s *StateDB) WaitPipeVerification() error { + // We need wait for the parent trie to commit + if s.snap != nil { + if valid := s.snap.WaitAndGetVerifyRes(); !valid { + return fmt.Errorf("verification on parent snap failed") + } + } + return nil } // Finalise finalises the state by removing the s destructed objects and clears @@ -2955,149 +1836,1486 @@ func (s *StateDB) Commit(failPostCommitFunc func(), postCommitFuncs ...func() er return common.Hash{}, nil, r } } - root := s.stateRoot - if s.pipeCommit { - root = s.expectedRoot - } - - return root, diffLayer, nil + root := s.stateRoot + if s.pipeCommit { + root = s.expectedRoot + } + + return root, diffLayer, nil +} + +func (s *StateDB) DiffLayerToSnap(diffLayer *types.DiffLayer) (map[common.Address]struct{}, map[common.Address][]byte, map[common.Address]map[string][]byte, error) { + snapDestructs := make(map[common.Address]struct{}) + snapAccounts := make(map[common.Address][]byte) + snapStorage := make(map[common.Address]map[string][]byte) + + for _, des := range diffLayer.Destructs { + snapDestructs[des] = struct{}{} + } + for _, account := range diffLayer.Accounts { + snapAccounts[account.Account] = account.Blob + } + for _, storage := range diffLayer.Storages { + // should never happen + if len(storage.Keys) != len(storage.Vals) { + return nil, nil, nil, errors.New("invalid diffLayer: length of keys and values mismatch") + } + snapStorage[storage.Account] = make(map[string][]byte, len(storage.Keys)) + n := len(storage.Keys) + for i := 0; i < n; i++ { + snapStorage[storage.Account][storage.Keys[i]] = storage.Vals[i] + } + } + return snapDestructs, snapAccounts, snapStorage, nil +} + +func (s *StateDB) SnapToDiffLayer() ([]common.Address, []types.DiffAccount, []types.DiffStorage) { + destructs := make([]common.Address, 0, len(s.snapDestructs)) + for account := range s.snapDestructs { + destructs = append(destructs, account) + } + accounts := make([]types.DiffAccount, 0, len(s.snapAccounts)) + for 
accountHash, account := range s.snapAccounts { + accounts = append(accounts, types.DiffAccount{ + Account: accountHash, + Blob: account, + }) + } + storages := make([]types.DiffStorage, 0, len(s.snapStorage)) + for accountHash, storage := range s.snapStorage { + keys := make([]string, 0, len(storage)) + values := make([][]byte, 0, len(storage)) + for k, v := range storage { + keys = append(keys, k) + values = append(values, v) + } + storages = append(storages, types.DiffStorage{ + Account: accountHash, + Keys: keys, + Vals: values, + }) + } + return destructs, accounts, storages +} + +// PrepareAccessList handles the preparatory steps for executing a state transition with +// regards to both EIP-2929 and EIP-2930: +// +// - Add sender to access list (2929) +// - Add destination to access list (2929) +// - Add precompiles to access list (2929) +// - Add the contents of the optional tx access list (2930) +// +// This method should only be called if Yolov3/Berlin/2929+2930 is applicable at the current number. 
+func (s *StateDB) PrepareAccessList(sender common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) { + s.AddAddressToAccessList(sender) + if dst != nil { + s.AddAddressToAccessList(*dst) + // If it's a create-tx, the destination will be added inside evm.create + } + for _, addr := range precompiles { + s.AddAddressToAccessList(addr) + } + for _, el := range list { + s.AddAddressToAccessList(el.Address) + for _, key := range el.StorageKeys { + s.AddSlotToAccessList(el.Address, key) + } + } +} + +// AddAddressToAccessList adds the given address to the access list +func (s *StateDB) AddAddressToAccessList(addr common.Address) { + if s.accessList == nil { + s.accessList = newAccessList() + } + if s.accessList.AddAddress(addr) { + s.journal.append(accessListAddAccountChange{&addr}) + } +} + +// AddSlotToAccessList adds the given (address, slot)-tuple to the access list +func (s *StateDB) AddSlotToAccessList(addr common.Address, slot common.Hash) { + if s.accessList == nil { + s.accessList = newAccessList() + } + addrMod, slotMod := s.accessList.AddSlot(addr, slot) + if addrMod { + // In practice, this should not happen, since there is no way to enter the + // scope of 'address' without having the 'address' become already added + // to the access list (via call-variant, create, etc). + // Better safe than sorry, though + s.journal.append(accessListAddAccountChange{&addr}) + } + if slotMod { + s.journal.append(accessListAddSlotChange{ + address: &addr, + slot: &slot, + }) + } +} + +// AddressInAccessList returns true if the given address is in the access list. +func (s *StateDB) AddressInAccessList(addr common.Address) bool { + if s.accessList == nil { + return false + } + return s.accessList.ContainsAddress(addr) +} + +// SlotInAccessList returns true if the given (address, slot)-tuple is in the access list. 
+func (s *StateDB) SlotInAccessList(addr common.Address, slot common.Hash) (addressPresent bool, slotPresent bool) { + if s.accessList == nil { + return false, false + } + return s.accessList.Contains(addr, slot) +} + +func (s *StateDB) GetDirtyAccounts() []common.Address { + accounts := make([]common.Address, 0, len(s.stateObjectsDirty)) + for account := range s.stateObjectsDirty { + accounts = append(accounts, account) + } + return accounts +} + +func (s *StateDB) GetStorage(address common.Address) *sync.Map { + return s.storagePool.getStorage(address) +} + +// PrepareForParallel prepares for state db to be used in parallel execution mode. +func (s *StateDB) PrepareForParallel() { + s.isParallel = true + s.parallel.stateObjects = &StateObjectSyncMap{} +} + +// MergeSlotDB is for Parallel execution mode, when the transaction has been +// finalized(dirty -> pending) on execution slot, the execution results should be +// merged back to the main StateDB. +// And it will return and keep the slot's change list for later conflict detect. 
+func (s *StateDB) MergeSlotDB(slotDb *ParallelStateDB, slotReceipt *types.Receipt, txIndex int) { + // receipt.Logs use unified log index within a block + // align slotDB's log index to the block stateDB's logSize + for _, l := range slotReceipt.Logs { + l.Index += s.logSize + } + s.logSize += slotDb.logSize + + // before merge, pay the gas fee first: AddBalance to consensus.SystemAddress + systemAddress := slotDb.parallel.systemAddress + if slotDb.parallel.keepSystemAddressBalance { + s.SetBalance(systemAddress, slotDb.GetBalance(systemAddress)) + } else { + s.AddBalance(systemAddress, slotDb.GetBalance(systemAddress)) + } + + // only merge dirty objects + addressesToPrefetch := make([][]byte, 0, len(slotDb.stateObjectsDirty)) + for addr := range slotDb.stateObjectsDirty { + if _, exist := s.stateObjectsDirty[addr]; !exist { + s.stateObjectsDirty[addr] = struct{}{} + } + // system address is EOA account, it should have no storage change + if addr == systemAddress { + continue + } + + // stateObjects: KV, balance, nonce... + dirtyObj, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr] + if !ok { + log.Error("parallel merge, but dirty object not exist!", "SlotIndex", slotDb.parallel.SlotIndex, "txIndex:", slotDb.txIndex, "addr", addr) + continue + } + mainObj, exist := s.loadStateObj(addr) + if !exist { // fixme: it is also state change + // addr not exist on main DB, do ownership transfer + // dirtyObj.db = s + // dirtyObj.finalise(true) // true: prefetch on dispatcher + mainObj = dirtyObj.deepCopy(s) + mainObj.finalise(true) + s.storeStateObj(addr, mainObj) + // fixme: should not delete, would cause unconfirmed DB incorrect? + // delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership, fixme: shared read? + if dirtyObj.deleted { + // remove the addr from snapAccounts&snapStorage only when object is deleted. 
+			// "deleted" is not equal to "snapDestructs", since createObject() will add an addr for
+			// snapDestructs to destroy previous object, while it will keep the addr in snapAccounts & snapStorage
+			delete(s.snapAccounts, addr)
+			delete(s.snapStorage, addr)
+		}
+	} else {
+		// addr already in main DB, do merge: balance, KV, code, State(create, suicide)
+		// can not do copy or ownership transfer directly, since dirtyObj could have outdated
+		// data(may be updated within the conflict window)
+
+		var newMainObj = mainObj // we don't need to copy the object since the storages are thread safe
+		if _, ok := slotDb.parallel.addrStateChangesInSlot[addr]; ok {
+			// there are 3 kinds of state change:
+			// 1.Suicide
+			// 2.Empty Delete
+			// 3.createObject
+			//   a.AddBalance,SetState to a nonexistent or deleted(suicide, empty delete) address.
+			//   b.CreateAccount: like the DAO fork, regenerate an account carrying its balance without KV
+			// For these state changes, do ownership transfer for efficiency:
+			//   dirtyObj.db = s
+			//   newMainObj = dirtyObj
+			newMainObj = dirtyObj.deepCopy(s)
+			// should not delete, would cause unconfirmed DB incorrect.
+			// delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership, fixme: shared read?
+			if dirtyObj.deleted {
+				// remove the addr from snapAccounts&snapStorage only when object is deleted.
+ // "deleted" is not equal to "snapDestructs", since createObject() will add an addr for + // snapDestructs to destroy previous object, while it will keep the addr in snapAccounts & snapAccounts + delete(s.snapAccounts, addr) + delete(s.snapStorage, addr) + } + } else { + // deepCopy a temporary *StateObject for safety, since slot could read the address, + // dispatch should avoid overwrite the StateObject directly otherwise, it could + // crash for: concurrent map iteration and map write + + if _, balanced := slotDb.parallel.balanceChangesInSlot[addr]; balanced { + newMainObj.SetBalance(dirtyObj.Balance()) + } + if _, coded := slotDb.parallel.codeChangesInSlot[addr]; coded { + newMainObj.code = dirtyObj.code + newMainObj.data.CodeHash = dirtyObj.data.CodeHash + newMainObj.dirtyCode = true + } + if keys, stated := slotDb.parallel.kvChangesInSlot[addr]; stated { + newMainObj.MergeSlotObject(s.db, dirtyObj, keys) + } + if _, nonced := slotDb.parallel.nonceChangesInSlot[addr]; nonced { + // dirtyObj.Nonce() should not be less than newMainObj + newMainObj.setNonce(dirtyObj.Nonce()) + } + } + newMainObj.finalise(true) // true: prefetch on dispatcher + // update the object + s.storeStateObj(addr, newMainObj) + } + addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure + } + + if s.prefetcher != nil && len(addressesToPrefetch) > 0 { + s.prefetcher.prefetch(s.originalRoot, addressesToPrefetch, emptyAddr) // prefetch for trie node of account + } + + for addr := range slotDb.stateObjectsPending { + if _, exist := s.stateObjectsPending[addr]; !exist { + s.stateObjectsPending[addr] = struct{}{} + } + } + + // slotDb.logs: logs will be kept in receipts, no need to do merge + + for hash, preimage := range slotDb.preimages { + s.preimages[hash] = preimage + } + if s.accessList != nil { + // fixme: accessList is not enabled yet, but it should use merge rather than overwrite Copy + s.accessList = slotDb.accessList.Copy() + } + + if 
slotDb.snaps != nil { + for k := range slotDb.snapDestructs { + // There could be a race condition for parallel transaction execution + // One transaction add balance 0 to an empty address, will delete it(delete empty is enabled). + // While another concurrent transaction could add a none-zero balance to it, make it not empty + // We fixed it by add a addr state read record for add balance 0 + s.snapParallelLock.Lock() + s.snapDestructs[k] = struct{}{} + s.snapParallelLock.Unlock() + } + + // slotDb.snapAccounts should be empty, comment out and to be deleted later + // for k, v := range slotDb.snapAccounts { + // s.snapAccounts[k] = v + // } + // slotDb.snapStorage should be empty, comment out and to be deleted later + // for k, v := range slotDb.snapStorage { + // temp := make(map[string][]byte) + // for kk, vv := range v { + // temp[kk] = vv + // } + // s.snapStorage[k] = temp + // } + } +} + +type ParallelStateDB struct { + StateDB +} + +// NewSlotDB creates a new State DB based on the provided StateDB. +// With parallel, each execution slot would have its own StateDB. 
// NewSlotDB builds a slot-local ParallelStateDB on top of db (via CopyForSlot),
// wires the slot's tx index, base StateDB/base tx index, system address
// bookkeeping, a fresh StoragePool, and snapshots the currently known
// unconfirmed slot DBs in the (baseTxIndex, txIndex) window.
func NewSlotDB(db *StateDB, systemAddr common.Address, txIndex int, baseTxIndex int, keepSystem bool,
	unconfirmedDBs *sync.Map /*map[int]*ParallelStateDB*/) *ParallelStateDB {
	slotDB := db.CopyForSlot()
	slotDB.txIndex = txIndex
	slotDB.originalRoot = db.originalRoot
	slotDB.parallel.baseStateDB = db
	slotDB.parallel.baseTxIndex = baseTxIndex
	slotDB.parallel.systemAddress = systemAddr
	slotDB.parallel.systemAddressOpsCount = 0
	slotDB.parallel.keepSystemAddressBalance = keepSystem
	slotDB.storagePool = NewStoragePool()
	slotDB.EnableWriteOnSharedStorage()
	// Record the unconfirmed slot DBs between the base tx and this tx.
	// NOTE(review): "unconfirmedDBInShot" looks like a typo of
	// "unconfirmedDBInSlot"; the field is declared elsewhere — rename there if confirmed.
	for index := baseTxIndex + 1; index < slotDB.txIndex; index++ { // txIndex
		unconfirmedDB, ok := unconfirmedDBs.Load(index)
		if ok {
			slotDB.parallel.unconfirmedDBInShot[index] = unconfirmedDB.(*ParallelStateDB)
		}
	}

	// All transactions will pay gas fee to the systemAddr at the end, this address is
	// deemed to conflict, we handle it specially, clear it now and set it back to the main
	// StateDB later;
	// But there are transactions that will try to read systemAddr's balance, such as:
	// https://bscscan.com/tx/0xcd69755be1d2f55af259441ff5ee2f312830b8539899e82488a21e85bc121a2a.
	// It will trigger transaction redo and keepSystem will be marked as true.
	if !keepSystem {
		slotDB.SetBalance(systemAddr, big.NewInt(0))
	}

	return slotDB
}

// RevertSlotDB keeps the read lists for conflict detection and
// discards all state changes except:
//   - nonce and balance of the from address
//   - balance of the system address: will be used on merge to update SystemAddress's balance
func (s *ParallelStateDB) RevertSlotDB(from common.Address) {
	s.parallel.kvChangesInSlot = make(map[common.Address]StateKeys)

	// balance := s.parallel.balanceChangesInSlot[from]
	s.parallel.nonceChangesInSlot = make(map[common.Address]struct{})
	s.parallel.balanceChangesInSlot = make(map[common.Address]struct{}, 1)
	s.parallel.addrStateChangesInSlot = make(map[common.Address]bool) // true: created, false: deleted

	selfStateObject := s.parallel.dirtiedStateObjectsInSlot[from]
	systemAddress := s.parallel.systemAddress
	systemStateObject := s.parallel.dirtiedStateObjectsInSlot[systemAddress]
	s.parallel.dirtiedStateObjectsInSlot = make(map[common.Address]*StateObject, 2)
	// keep these elements
	s.parallel.dirtiedStateObjectsInSlot[from] = selfStateObject
	s.parallel.dirtiedStateObjectsInSlot[systemAddress] = systemStateObject
	s.parallel.balanceChangesInSlot[from] = struct{}{}
	s.parallel.balanceChangesInSlot[systemAddress] = struct{}{}
	s.parallel.nonceChangesInSlot[from] = struct{}{}
}

// getBaseStateDB returns the embedded StateDB of this slot DB.
func (s *ParallelStateDB) getBaseStateDB() *StateDB {
	return &s.StateDB
}

// SetSlotIndex records which execution slot this DB belongs to.
func (s *ParallelStateDB) SetSlotIndex(index int) {
	s.parallel.SlotIndex = index
}

// for parallel execution mode, try to get the dirty StateObject in the slot first.
// it is mainly used by journal revert right now.
// getStateObject returns the slot-dirty object for addr if one exists,
// otherwise falls back to the non-slot lookup path.
func (s *ParallelStateDB) getStateObject(addr common.Address) *StateObject {
	if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
		return obj
	}
	// can not call s.StateDB.getStateObject(), since `newObject` needs ParallelStateDB as the interface
	return s.getStateObjectNoSlot(addr)
}

// storeStateObj publishes a confirmed state object into the shared
// s.parallel.stateObjects map, rebinding it to the base StateDB first.
// The store is guarded by the base DB's storeParallelLock and only inserts
// when no object is present yet (first writer wins).
func (s *ParallelStateDB) storeStateObj(addr common.Address, stateObject *StateObject) {
	// When a state object is stored into s.parallel.stateObjects,
	// it belongs to the base StateDB, it is confirmed and valid.
	stateObject.db = s.parallel.baseStateDB
	stateObject.dbItf = s.parallel.baseStateDB
	// the object could be created in the SlotDB, if it got the object from DB and
	// updated it to the shared `s.parallel.stateObjects`
	stateObject.db.storeParallelLock.Lock()
	if _, ok := s.parallel.stateObjects.Load(addr); !ok {
		s.parallel.stateObjects.Store(addr, stateObject)
	}
	stateObject.db.storeParallelLock.Unlock()
}

// getStateObjectNoSlot looks the object up outside the slot-dirty set and
// hides deleted objects from the caller (returns nil for them).
func (s *ParallelStateDB) getStateObjectNoSlot(addr common.Address) *StateObject {
	if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted {
		return obj
	}
	return nil
}

// createObject creates a new state object. If there is an existing account with
// the given address, it is overwritten and returned as the second return value.

// prev is used for CreateAccount to get its balance
// Parallel mode:
// if prev in dirty:  revert is ok
// if prev in unconfirmed DB:  addr state read record, revert should not put it back
// if prev in main DB:  addr state read record, revert should not put it back
// if prev does not exist:  addr state read record

// `prev` is used to handle revert, to recover with the `prev` object
// In Parallel mode, we only need to recover to `prev` in SlotDB,
//  a.if it is not in SlotDB, `revert` will remove it from the SlotDB
//  b.if it exists in SlotDB, `revert` will recover to the `prev` in SlotDB
//  c.as for `snapDestructs` it is the same
func (s *ParallelStateDB) createObject(addr common.Address) (newobj *StateObject) {
	// do not get from unconfirmed DB, since it would cause problems on revert
	prev := s.parallel.dirtiedStateObjectsInSlot[addr]

	var prevdestruct bool

	if s.snap != nil && prev != nil {
		_, prevdestruct = s.snapDestructs[prev.address] // fixme, record the snapshot read for create Account
		s.parallel.addrSnapDestructsReadsInSlot[addr] = prevdestruct
		if !prevdestruct {
			// To destroy the previous trie node first and update the trie tree
			// with the new object on block commit.
			s.snapDestructs[prev.address] = struct{}{}
		}
	}
	newobj = newObject(s, s.isParallel, addr, Account{})
	newobj.setNonce(0) // sets the object to dirty
	if prev == nil {
		s.journal.append(createObjectChange{account: &addr})
	} else {
		s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct})
	}

	// s.parallel.dirtiedStateObjectsInSlot[addr] = newobj // would change the behavior of AddBalance...
	s.parallel.addrStateChangesInSlot[addr] = true // the object is created
	s.parallel.nonceChangesInSlot[addr] = struct{}{}
	s.parallel.balanceChangesInSlot[addr] = struct{}{}
	s.parallel.codeChangesInSlot[addr] = struct{}{}
	// notice: all the KVs are cleared if any
	s.parallel.kvChangesInSlot[addr] = make(StateKeys)
	return newobj
}

// getDeletedStateObject is similar to getStateObject, but instead of returning
// nil for a deleted state object, it returns the actual object with the deleted
// flag set. This is needed by the state journal to revert to the correct
// self-destructed object instead of wiping all knowledge about the state object.
func (s *ParallelStateDB) getDeletedStateObject(addr common.Address) *StateObject {
	// Prefer live objects if any is available
	if obj, _ := s.getStateObjectFromStateObjects(addr); obj != nil {
		return obj
	}
	data, ok := s.getStateObjectFromSnapshotOrTrie(addr)
	if !ok {
		return nil
	}
	// Insert into the live set
	// if obj, ok := s.loadStateObj(addr); ok {
	//  fixme: concurrent not safe, merge could update it...
	// 	return obj
	// }
	// this is why we have to use a separate getDeletedStateObject for ParallelStateDB:
	// `s` has to be the ParallelStateDB
	obj := newObject(s, s.isParallel, addr, *data)
	s.storeStateObj(addr, obj)
	// s.SetStateObject(obj)
	return obj
}

// GetOrNewStateObject retrieves a state object or creates a new state object if nil.
// dirtyInSlot -> Unconfirmed DB -> main DB -> snapshot; if none, create one
create one +func (s *ParallelStateDB) GetOrNewStateObject(addr common.Address) *StateObject { + var stateObject *StateObject = nil + exist := true + if stateObject, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + return stateObject + } + stateObject, _ = s.getStateObjectFromUnconfirmedDB(addr) + + if stateObject == nil { + stateObject = s.getStateObjectNoSlot(addr) // try to get from base db + } + if stateObject == nil || stateObject.deleted || stateObject.suicided { + stateObject = s.createObject(addr) + exist = false + } + + s.parallel.addrStateReadsInSlot[addr] = exist // true: exist, false: not exist + return stateObject +} + +// Exist reports whether the given account address exists in the state. +// Notably this also returns true for suicided accounts. +func (s *ParallelStateDB) Exist(addr common.Address) bool { + // 1.Try to get from dirty + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // dirty object should not be deleted, since deleted is only flagged on finalise + // and if it is suicided in contract call, suicide is taken as exist until it is finalised + // todo: add a check here, to be removed later + if obj.deleted || obj.suicided { + log.Error("Exist in dirty, but marked as deleted or suicided", + "txIndex", s.txIndex, "baseTxIndex:", s.parallel.baseTxIndex) + } + return true + } + // 2.Try to get from uncomfirmed & main DB + // 2.1 Already read before + if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok { + return exist + } + // 2.2 Try to get from unconfirmed DB if exist + if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { + s.parallel.addrStateReadsInSlot[addr] = exist // update and cache + return exist + } + + // 3.Try to get from main StateDB + exist := s.getStateObjectNoSlot(addr) != nil + s.parallel.addrStateReadsInSlot[addr] = exist // update and cache + return exist +} + +// Empty returns whether the state object is either non-existent +// or empty according to the EIP161 specification (balance = 
nonce = code = 0) +func (s *ParallelStateDB) Empty(addr common.Address) bool { + // 1.Try to get from dirty + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // dirty object is light copied and fixup on need, + // empty could be wrong, except it is created with this TX + if _, ok := s.parallel.addrStateChangesInSlot[addr]; ok { + return obj.empty() + } + // so we have to check it manually + // empty means: Nonce == 0 && Balance == 0 && CodeHash == emptyCodeHash + if s.GetBalance(addr).Sign() != 0 { // check balance first, since it is most likely not zero + return false + } + if s.GetNonce(addr) != 0 { + return false + } + codeHash := s.GetCodeHash(addr) + return bytes.Equal(codeHash.Bytes(), emptyCodeHash) // code is empty, the object is empty + } + // 2.Try to get from uncomfirmed & main DB + // 2.1 Already read before + if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok { + // exist means not empty + return !exist + } + // 2.2 Try to get from unconfirmed DB if exist + if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { + s.parallel.addrStateReadsInSlot[addr] = exist // update and cache + return !exist + } + + so := s.getStateObjectNoSlot(addr) + empty := (so == nil || so.empty()) + s.parallel.addrStateReadsInSlot[addr] = !empty // update and cache + return empty +} + +// GetBalance retrieves the balance from the given address or 0 if object not found +// GetFrom the dirty list => from unconfirmed DB => get from main stateDB +func (s *ParallelStateDB) GetBalance(addr common.Address) *big.Int { + if addr == s.parallel.systemAddress { + s.parallel.systemAddressOpsCount++ + } + // 1.Try to get from dirty + if _, ok := s.parallel.balanceChangesInSlot[addr]; ok { + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on balance fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup balance based on unconfirmed DB or main DB + return obj.Balance() + } + } + // 2.Try to get from uncomfirmed DB or main 
DB + // 2.1 Already read before + if balance, ok := s.parallel.balanceReadsInSlot[addr]; ok { + return balance + } + // 2.2 Try to get from unconfirmed DB if exist + if balance := s.getBalanceFromUnconfirmedDB(addr); balance != nil { + s.parallel.balanceReadsInSlot[addr] = balance + return balance + } + + // 3. Try to get from main StateObejct + balance := common.Big0 + stateObject := s.getStateObjectNoSlot(addr) + if stateObject != nil { + balance = stateObject.Balance() + } + s.parallel.balanceReadsInSlot[addr] = balance + return balance +} + +func (s *ParallelStateDB) GetNonce(addr common.Address) uint64 { + // 1.Try to get from dirty + if _, ok := s.parallel.nonceChangesInSlot[addr]; ok { + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on nonce fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup nonce based on unconfirmed DB or main DB + return obj.Nonce() + } + } + // 2.Try to get from uncomfirmed DB or main DB + // 2.1 Already read before + if nonce, ok := s.parallel.nonceReadsInSlot[addr]; ok { + return nonce + } + // 2.2 Try to get from unconfirmed DB if exist + if nonce, ok := s.getNonceFromUnconfirmedDB(addr); ok { + s.parallel.nonceReadsInSlot[addr] = nonce + return nonce + } + + // 3.Try to get from main StateDB + var nonce uint64 = 0 + stateObject := s.getStateObjectNoSlot(addr) + if stateObject != nil { + nonce = stateObject.Nonce() + } + s.parallel.nonceReadsInSlot[addr] = nonce + + return nonce +} + +func (s *ParallelStateDB) GetCode(addr common.Address) []byte { + // 1.Try to get from dirty + if _, ok := s.parallel.codeChangesInSlot[addr]; ok { + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on code fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup code based on unconfirmed DB or main DB + code := obj.Code(s.db) + return code + } + } + // 2.Try to get from uncomfirmed DB or main DB + // 2.1 Already read before + if code, ok := 
s.parallel.codeReadsInSlot[addr]; ok { + return code + } + // 2.2 Try to get from unconfirmed DB if exist + if code, ok := s.getCodeFromUnconfirmedDB(addr); ok { + s.parallel.codeReadsInSlot[addr] = code + return code + } + + // 3. Try to get from main StateObejct + stateObject := s.getStateObjectNoSlot(addr) + var code []byte + if stateObject != nil { + code = stateObject.Code(s.db) + } + s.parallel.codeReadsInSlot[addr] = code + return code +} + +func (s *ParallelStateDB) GetCodeSize(addr common.Address) int { + // 1.Try to get from dirty + if _, ok := s.parallel.codeChangesInSlot[addr]; ok { + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on code fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup code based on unconfirmed DB or main DB + return obj.CodeSize(s.db) + } + } + // 2.Try to get from uncomfirmed DB or main DB + // 2.1 Already read before + if code, ok := s.parallel.codeReadsInSlot[addr]; ok { + return len(code) // len(nil) is 0 too + } + // 2.2 Try to get from unconfirmed DB if exist + if code, ok := s.getCodeFromUnconfirmedDB(addr); ok { + s.parallel.codeReadsInSlot[addr] = code + return len(code) // len(nil) is 0 too + } + + // 3. 
Try to get from main StateObejct + var codeSize int = 0 + var code []byte + stateObject := s.getStateObjectNoSlot(addr) + + if stateObject != nil { + code = stateObject.Code(s.db) + codeSize = stateObject.CodeSize(s.db) + } + s.parallel.codeReadsInSlot[addr] = code + return codeSize +} + +// return value of GetCodeHash: +// - common.Hash{}: the address does not exist +// - emptyCodeHash: the address exist, but code is empty +// - others: the address exist, and code is not empty +func (s *ParallelStateDB) GetCodeHash(addr common.Address) common.Hash { + // 1.Try to get from dirty + if _, ok := s.parallel.codeChangesInSlot[addr]; ok { + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on code fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup balance based on unconfirmed DB or main DB + return common.BytesToHash(obj.CodeHash()) + } + } + // 2.Try to get from uncomfirmed DB or main DB + // 2.1 Already read before + if codeHash, ok := s.parallel.codeHashReadsInSlot[addr]; ok { + return codeHash + } + // 2.2 Try to get from unconfirmed DB if exist + if codeHash, ok := s.getCodeHashFromUnconfirmedDB(addr); ok { + s.parallel.codeHashReadsInSlot[addr] = codeHash + return codeHash + } + // 3. Try to get from main StateObejct + stateObject := s.getStateObjectNoSlot(addr) + codeHash := common.Hash{} + if stateObject != nil { + codeHash = common.BytesToHash(stateObject.CodeHash()) + } + s.parallel.codeHashReadsInSlot[addr] = codeHash + return codeHash +} + +// GetState retrieves a value from the given account's storage trie. 
// For parallel mode, get the state in this order:
//    -> self dirty, both Slot & MainProcessor
//    -> pending of self: Slot on merge
//    -> pending of unconfirmed DB
//    -> pending of main StateDB
//    -> origin
func (s *ParallelStateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
	// 1.Try to get from dirty
	if exist, ok := s.parallel.addrStateChangesInSlot[addr]; ok {
		if !exist {
			// the address was deleted in this slot (suicide/empty delete)
			return common.Hash{}
		}
		obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot
		return obj.GetState(s.db, hash)
	}
	if keys, ok := s.parallel.kvChangesInSlot[addr]; ok {
		if _, ok := keys[hash]; ok {
			obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot
			return obj.GetState(s.db, hash)
		}
	}
	// 2.Try to get from unconfirmed DB or main DB
	// 2.1 Already read before
	if storage, ok := s.parallel.kvReadsInSlot[addr]; ok {
		if val, ok := storage.GetValue(hash); ok {
			return val
		}
	}
	// 2.2 Try to get from unconfirmed DB if present
	if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok {
		if s.parallel.kvReadsInSlot[addr] == nil {
			s.parallel.kvReadsInSlot[addr] = newStorage(false)
		}
		s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache
		return val
	}

	// 3.Get from main StateDB
	stateObject := s.getStateObjectNoSlot(addr)
	val := common.Hash{}
	if stateObject != nil {
		val = stateObject.GetState(s.db, hash)
	}
	if s.parallel.kvReadsInSlot[addr] == nil {
		s.parallel.kvReadsInSlot[addr] = newStorage(false)
	}
	s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache
	return val
}

// GetCommittedState retrieves a value from the given account's committed storage trie.
func (s *ParallelStateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
	// 1.No need to get from pending of itself even on merge, since a stateObject in SlotDB won't do finalise
	// 2.Try to get from unconfirmed DB or main DB
	//   KVs in unconfirmed DB can be seen as pending storage;
	//   KVs in main DB are merged from SlotDB and have done finalise() on merge, so they can be seen as pending storage too.
	// 2.1 Already read before
	if storage, ok := s.parallel.kvReadsInSlot[addr]; ok {
		if val, ok := storage.GetValue(hash); ok {
			return val
		}
	}
	// 2.2 Try to get from unconfirmed DB if present
	if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok {
		if s.parallel.kvReadsInSlot[addr] == nil {
			s.parallel.kvReadsInSlot[addr] = newStorage(false)
		}
		s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache
		return val
	}

	// 3.Try to get from main DB
	stateObject := s.getStateObjectNoSlot(addr)
	val := common.Hash{}
	if stateObject != nil {
		val = stateObject.GetCommittedState(s.db, hash)
	}
	if s.parallel.kvReadsInSlot[addr] == nil {
		s.parallel.kvReadsInSlot[addr] = newStorage(false)
	}
	s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache
	return val
}

// HasSuicided reports whether the account at addr has been marked suicided,
// checking slot-dirty first, then the unconfirmed DBs, then the main DB.
func (s *ParallelStateDB) HasSuicided(addr common.Address) bool {
	// 1.Try to get from dirty
	if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
		return obj.suicided
	}
	// 2.Try to get from unconfirmed
	if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok {
		return !exist
	}

	stateObject := s.getStateObjectNoSlot(addr)
	if stateObject != nil {
		return stateObject.suicided
	}
	return false
}

// AddBalance adds amount to the account associated with addr.
// AddBalance adds amount to the account at addr, doing copy-on-write into the
// slot-dirty set and fixing up the base balance from unconfirmed/main DB first.
func (s *ParallelStateDB) AddBalance(addr common.Address, amount *big.Int) {
	// add balance will perform a read operation first
	// s.parallel.balanceReadsInSlot[addr] = struct{}{} // fixme: to make the balance valid, since unconfirmed would refer to it.
	// if amount.Sign() == 0 {
	//   if amount == 0, no balance change, but there is still an empty check.
	//   take this empty check as an addr state read (create, suicide, empty delete)
	//   s.parallel.addrStateReadsInSlot[addr] = struct{}{}
	// }

	stateObject := s.GetOrNewStateObject(addr)
	if stateObject != nil {
		if addr == s.parallel.systemAddress {
			s.parallel.systemAddressOpsCount++
		}
		// if amount.Sign() != 0 { // todo: to reenable it
		if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
			newStateObject := stateObject.lightCopy(s) // light copy from main DB
			// do balance fixup from the confirmed DB, it could be more reliable than main DB
			balance := s.GetBalance(addr)
			newStateObject.setBalance(balance)
			// s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance() // could read from main DB or unconfirmed DB
			newStateObject.AddBalance(amount)
			s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
			s.parallel.balanceChangesInSlot[addr] = struct{}{}
			return
		}
		// already dirty, make sure the balance is fixed up
		// if stateObject.Balance()
		if addr != s.parallel.systemAddress {
			if stateObject.Balance().Cmp(s.GetBalance(addr)) != 0 {
				log.Warn("AddBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr,
					"stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", s.GetBalance(addr))
				stateObject.setBalance(s.GetBalance(addr))
			}
		}

		stateObject.AddBalance(amount)
		s.parallel.balanceChangesInSlot[addr] = struct{}{}
	}
}

// SubBalance subtracts amount from the account associated with addr,
// with the same copy-on-write and balance-fixup behavior as AddBalance.
func (s *ParallelStateDB) SubBalance(addr common.Address, amount *big.Int) {
	// if amount.Sign() != 0 {
	//   unlike add, sub 0 balance will not touch an empty object
	//   s.parallel.balanceReadsInSlot[addr] = struct{}{}
	// }

	stateObject := s.GetOrNewStateObject(addr)
	if stateObject != nil {
		if addr == s.parallel.systemAddress {
			s.parallel.systemAddressOpsCount++
		}

		// if amount.Sign() != 0 { // todo: to reenable it
		if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
			newStateObject := stateObject.lightCopy(s) // light copy from main DB
			// do balance fixup from the confirmed DB, it could be more reliable than main DB
			balance := s.GetBalance(addr)
			newStateObject.setBalance(balance)
			// s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance()
			newStateObject.SubBalance(amount)
			s.parallel.balanceChangesInSlot[addr] = struct{}{}
			s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
			return
		}
		// already dirty, make sure the balance is fixed up
		// if stateObject.Balance()
		if addr != s.parallel.systemAddress {
			if stateObject.Balance().Cmp(s.GetBalance(addr)) != 0 {
				log.Warn("SubBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr,
					"stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", s.GetBalance(addr))
				stateObject.setBalance(s.GetBalance(addr))
			}
		}

		stateObject.SubBalance(amount)
		s.parallel.balanceChangesInSlot[addr] = struct{}{}
	}
}

// SetBalance sets the balance of the account at addr, copy-on-write into the slot.
func (s *ParallelStateDB) SetBalance(addr common.Address, amount *big.Int) {
	stateObject := s.GetOrNewStateObject(addr)
	if stateObject != nil {
		if addr == s.parallel.systemAddress {
			s.parallel.systemAddressOpsCount++
		}
		if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
			newStateObject := stateObject.lightCopy(s)
			// update balance for revert; in case a child contract is reverted,
			// it should revert to the previous balance
			balance := s.GetBalance(addr)
			newStateObject.setBalance(balance)
			newStateObject.SetBalance(amount)
			s.parallel.balanceChangesInSlot[addr] = struct{}{}
			s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
			return
		}

		stateObject.SetBalance(amount)
		s.parallel.balanceChangesInSlot[addr] = struct{}{}
	}
}

// SetNonce sets the nonce of the account at addr, fixing up the previous
// nonce from unconfirmed/main DB so the journal can revert correctly.
func (s *ParallelStateDB) SetNonce(addr common.Address, nonce uint64) {
	stateObject := s.GetOrNewStateObject(addr)
	if stateObject != nil {
		if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
			newStateObject := stateObject.lightCopy(s)
			noncePre := s.GetNonce(addr)
			newStateObject.setNonce(noncePre) // nonce fixup
			newStateObject.SetNonce(nonce)
			s.parallel.nonceChangesInSlot[addr] = struct{}{}
			s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
			return
		}
		noncePre := s.GetNonce(addr)
		stateObject.setNonce(noncePre) // nonce fixup

		stateObject.SetNonce(nonce)
		s.parallel.nonceChangesInSlot[addr] = struct{}{}
	}
}

// SetCode sets the contract code of the account at addr, fixing up the
// previous code from unconfirmed/main DB so the journal can revert correctly.
func (s *ParallelStateDB) SetCode(addr common.Address, code []byte) {
	stateObject := s.GetOrNewStateObject(addr)
	if stateObject != nil {
		codeHash := crypto.Keccak256Hash(code)
		if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
			newStateObject := stateObject.lightCopy(s)
			codePre := s.GetCode(addr) // code fixup
			codeHashPre := crypto.Keccak256Hash(codePre)
			newStateObject.setCode(codeHashPre, codePre)

			newStateObject.SetCode(codeHash, code)
			s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
			s.parallel.codeChangesInSlot[addr] = struct{}{}
			return
		}
		codePre := s.GetCode(addr) // code fixup
		codeHashPre := crypto.Keccak256Hash(codePre)
		stateObject.setCode(codeHashPre, codePre)

		stateObject.SetCode(codeHash, code)
		s.parallel.codeChangesInSlot[addr] = struct{}{}
	}
}

// SetState writes a storage slot of the account at addr, copy-on-write into
// the slot-dirty set and recording the key in kvChangesInSlot.
func (s *ParallelStateDB) SetState(addr common.Address, key, value common.Hash) {
	stateObject := s.GetOrNewStateObject(addr) // attention: if StateObject is a lightCopy, its storage is only a part of the full storage
	if stateObject != nil {
		if s.parallel.baseTxIndex+1 == s.txIndex {
			// we check if state is unchanged
			// only when the current transaction is the next transaction to be committed
			// fixme: there is a bug, block: 14,962,284,
			//        stateObject is in dirty (light copy), but the key is in mainStateDB
			//        stateObject dirty -> committed, will skip mainStateDB dirty
			if s.GetState(addr, key) == value {
				log.Debug("Skip set same state", "baseTxIndex", s.parallel.baseTxIndex,
					"txIndex", s.txIndex, "addr", addr,
					"key", key, "value", value)
				return
			}
		}

		if s.parallel.kvChangesInSlot[addr] == nil {
			s.parallel.kvChangesInSlot[addr] = make(StateKeys) // make(Storage, defaultNumOfSlots)
		}

		if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
			newStateObject := stateObject.lightCopy(s)
			newStateObject.SetState(s.db, key, value)
			s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
			return
		}
		// do State Update
		stateObject.SetState(s.db, key, value)
	}
}

// Suicide marks the given account as suicided.
// This clears the account balance.
//
// The account's state object is still available until the state is committed,
// getStateObject will return a non-nil account after Suicide.
// Suicide marks the account at addr as suicided and zeroes its balance,
// doing copy-on-write into the slot-dirty set. Returns false when the
// address does not exist (or is already deleted) anywhere visible to the slot.
func (s *ParallelStateDB) Suicide(addr common.Address) bool {
	var stateObject *StateObject
	// 1.Try to get from dirty, it could be suicided inside of a contract call
	stateObject = s.parallel.dirtiedStateObjectsInSlot[addr]
	if stateObject == nil {
		// 2.Try to get from unconfirmed; if deleted return false, since the address does not exist
		// NOTE(review): the log message typo "alreay" is a runtime string and is kept as-is here.
		if obj, ok := s.getStateObjectFromUnconfirmedDB(addr); ok {
			stateObject = obj
			s.parallel.addrStateReadsInSlot[addr] = !stateObject.deleted // true: exist, false: deleted
			if stateObject.deleted {
				log.Error("Suicide addr alreay deleted in confirmed DB", "txIndex", s.txIndex, "addr", addr)
				return false
			}
		}
	}

	if stateObject == nil {
		// 3.Try to get from main StateDB
		stateObject = s.getStateObjectNoSlot(addr)
		if stateObject == nil {
			s.parallel.addrStateReadsInSlot[addr] = false // true: exist, false: deleted
			log.Error("Suicide addr not exist", "txIndex", s.txIndex, "addr", addr)
			return false
		}
		s.parallel.addrStateReadsInSlot[addr] = true // true: exist, false: deleted
	}

	s.journal.append(suicideChange{
		account:     &addr,
		prev:        stateObject.suicided, // todo: must be false?
		prevbalance: new(big.Int).Set(s.GetBalance(addr)),
	})

	if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
		// do copy-on-write for the suicide "write"
		newStateObject := stateObject.lightCopy(s)
		newStateObject.markSuicided()
		newStateObject.data.Balance = new(big.Int)
		s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
		s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist any more
		// s.parallel.nonceChangesInSlot[addr] = struct{}{}
		s.parallel.balanceChangesInSlot[addr] = struct{}{}
		s.parallel.codeChangesInSlot[addr] = struct{}{}
		// s.parallel.kvChangesInSlot[addr] = make(StateKeys) // all key changes are discarded
		return true
	}
	s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist any more
	s.parallel.balanceChangesInSlot[addr] = struct{}{}
	s.parallel.codeChangesInSlot[addr] = struct{}{}

	stateObject.markSuicided()
	stateObject.data.Balance = new(big.Int)
	return true
}

// CreateAccount explicitly creates a state object. If a state object with the address
// already exists the balance is carried over to the new account.
//
// CreateAccount is called during the EVM CREATE operation. The situation might arise that
// a contract does the following:
//
//   1. sends funds to sha(account ++ (nonce + 1))
//   2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1)
//
// Carrying over the balance ensures that Ether doesn't disappear.
func (s *ParallelStateDB) CreateAccount(addr common.Address) {
	// no matter whether it is got from dirty, unconfirmed or main DB:
	// if addr does not exist, preBalance will be common.Big0, which is the same as
	// the new(big.Int) value set by newObject()
	preBalance := s.GetBalance(addr) // parallel balance read will be recorded inside of GetBalance
	newObj := s.createObject(addr)
	newObj.setBalance(new(big.Int).Set(preBalance)) // new big.Int for newObj
}

// RevertToSnapshot reverts all state changes made since the given revision.
func (s *ParallelStateDB) RevertToSnapshot(revid int) {
	// Find the snapshot in the stack of valid snapshots.
	idx := sort.Search(len(s.validRevisions), func(i int) bool {
		return s.validRevisions[i].id >= revid
	})
	if idx == len(s.validRevisions) || s.validRevisions[idx].id != revid {
		panic(fmt.Errorf("revision id %v cannot be reverted", revid))
	}
	snapshot := s.validRevisions[idx].journalIndex

	// Replay the journal to undo changes and remove invalidated snapshots
	s.journal.revert(s, snapshot)
	s.validRevisions = s.validRevisions[:idx]
}

// AddRefund adds gas to the refund counter.
// journal.append will use ParallelStateDB for revert.
func (s *ParallelStateDB) AddRefund(gas uint64) { // fixme: not needed
	s.journal.append(refundChange{prev: s.refund})
	s.refund += gas
}

// SubRefund removes gas from the refund counter.
+// This method will panic if the refund counter goes below zero +func (s *ParallelStateDB) SubRefund(gas uint64) { // fixme: not needed + s.journal.append(refundChange{prev: s.refund}) + if gas > s.refund { + // we don't need to panic here if we read the wrong state in parallelm mode + // we just need to redo this transaction + log.Info(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, s.refund), "tx", s.thash.String()) + s.parallel.needsRedo = true + return + } + s.refund -= gas +} + +// For Parallel Execution Mode, it can be seen as Penetrated Access: +// ------------------------------------------------------- +// | BaseTxIndex | Unconfirmed Txs... | Current TxIndex | +// ------------------------------------------------------- +// Access from the unconfirmed DB with range&priority: txIndex -1(previous tx) -> baseTxIndex + 1 +func (s *ParallelStateDB) getBalanceFromUnconfirmedDB(addr common.Address) *big.Int { + if addr == s.parallel.systemAddress { + // never get systemaddress from unconfirmed DB + return nil + } + + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + // 1.Refer the state of address, exist or not in dirtiedStateObjectsInSlot + if obj, exist := db.parallel.dirtiedStateObjectsInSlot[addr]; exist { + balanceHit := false + if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { + balanceHit = true + } + if _, exist := db.parallel.balanceChangesInSlot[addr]; exist { // only changed balance is reliable + balanceHit = true + } + if !balanceHit { + continue + } + balance := obj.Balance() + if obj.deleted { + balance = common.Big0 + } + return balance + } + } + } + return nil +} + +// Similar to getBalanceFromUnconfirmedDB +func (s *ParallelStateDB) getNonceFromUnconfirmedDB(addr common.Address) (uint64, bool) { + if addr == s.parallel.systemAddress { + // never get systemaddress from unconfirmed DB + return 0, false + } + + for i := s.txIndex - 1; i > 
s.parallel.baseTxIndex; i-- { + if unconfirmedDb, ok := s.parallel.unconfirmedDBInShot[i]; ok { + nonceHit := false + if _, ok := unconfirmedDb.parallel.addrStateChangesInSlot[addr]; ok { + nonceHit = true + } else if _, ok := unconfirmedDb.parallel.nonceChangesInSlot[addr]; ok { + nonceHit = true + } + if !nonceHit { + // nonce refer not hit, try next unconfirmedDb + continue + } + // nonce hit, return the nonce + obj := unconfirmedDb.parallel.dirtiedStateObjectsInSlot[addr] + if obj == nil { + // could not exist, if it is changed but reverted + // fixme: revert should remove the change record + log.Debug("Get nonce from UnconfirmedDB, changed but object not exist, ", + "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) + continue + } + nonce := obj.Nonce() + // deleted object with nonce == 0 + if obj.deleted { + nonce = 0 + } + return nonce, true + } + } + return 0, false +} + +// Similar to getBalanceFromUnconfirmedDB +// It is not only for code, but also codeHash and codeSize, we return the *StateObject for convienence. 
+func (s *ParallelStateDB) getCodeFromUnconfirmedDB(addr common.Address) ([]byte, bool) { + if addr == s.parallel.systemAddress { + // never get systemaddress from unconfirmed DB + return nil, false + } + + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + codeHit := false + if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { + codeHit = true + } + if _, exist := db.parallel.codeChangesInSlot[addr]; exist { + codeHit = true + } + if !codeHit { + // try next unconfirmedDb + continue + } + obj := db.parallel.dirtiedStateObjectsInSlot[addr] + if obj == nil { + // could not exist, if it is changed but reverted + // fixme: revert should remove the change record + log.Debug("Get code from UnconfirmedDB, changed but object not exist, ", + "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) + continue + } + code := obj.Code(s.db) + if obj.deleted { + code = nil + } + return code, true + } + } + return nil, false +} + +// Similar to getCodeFromUnconfirmedDB +// but differ when address is deleted or not exist +func (s *ParallelStateDB) getCodeHashFromUnconfirmedDB(addr common.Address) (common.Hash, bool) { + if addr == s.parallel.systemAddress { + // never get systemaddress from unconfirmed DB + return common.Hash{}, false + } + + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + hashHit := false + if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { + hashHit = true + } + if _, exist := db.parallel.codeChangesInSlot[addr]; exist { + hashHit = true + } + if !hashHit { + // try next unconfirmedDb + continue + } + + obj := db.parallel.dirtiedStateObjectsInSlot[addr] + if obj == nil { + // could not exist, if it is changed but reverted + // fixme: revert should remove the change record + log.Debug("Get codeHash from UnconfirmedDB, changed but object not exist, ", + "txIndex", s.txIndex, "referred txIndex", i, "addr", 
addr) + continue + } + codeHash := common.Hash{} + if !obj.deleted { + codeHash = common.BytesToHash(obj.CodeHash()) + } + return codeHash, true + } + } + return common.Hash{}, false +} + +// Similar to getCodeFromUnconfirmedDB +// It is for address state check of: Exist(), Empty() and HasSuicided() +// Since the unconfirmed DB should have done Finalise() with `deleteEmptyObjects = true` +// If the dirty address is empty or suicided, it will be marked as deleted, so we only need to return `deleted` or not. +func (s *ParallelStateDB) getAddrStateFromUnconfirmedDB(addr common.Address) (bool, bool) { + if addr == s.parallel.systemAddress { + // never get systemaddress from unconfirmed DB + return false, false + } + + // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + if exist, ok := db.parallel.addrStateChangesInSlot[addr]; ok { + if _, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + // could not exist, if it is changed but reverted + // fixme: revert should remove the change record + log.Debug("Get addr State from UnconfirmedDB, changed but object not exist, ", + "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) + continue + } + + return exist, true + } + } + } + return false, false } -func (s *StateDB) DiffLayerToSnap(diffLayer *types.DiffLayer) (map[common.Address]struct{}, map[common.Address][]byte, map[common.Address]map[string][]byte, error) { - snapDestructs := make(map[common.Address]struct{}) - snapAccounts := make(map[common.Address][]byte) - snapStorage := make(map[common.Address]map[string][]byte) - - for _, des := range diffLayer.Destructs { - snapDestructs[des] = struct{}{} - } - for _, account := range diffLayer.Accounts { - snapAccounts[account.Account] = account.Blob - } - for _, storage := range diffLayer.Storages { - // should never happen - if len(storage.Keys) != len(storage.Vals) { - 
return nil, nil, nil, errors.New("invalid diffLayer: length of keys and values mismatch") - } - snapStorage[storage.Account] = make(map[string][]byte, len(storage.Keys)) - n := len(storage.Keys) - for i := 0; i < n; i++ { - snapStorage[storage.Account][storage.Keys[i]] = storage.Vals[i] +func (s *ParallelStateDB) getKVFromUnconfirmedDB(addr common.Address, key common.Hash) (common.Hash, bool) { + // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { // if deleted on merge, can get from main StateDB, ok but fixme: concurrent safe + if obj.deleted { + return common.Hash{}, true + } + if _, ok := db.parallel.kvChangesInSlot[addr]; ok { + if val, exist := obj.dirtyStorage.GetValue(key); exist { + return val, true + } + if val, exist := obj.pendingStorage.GetValue(key); exist { // fixme: can be removed + log.Error("Get KV from Unconfirmed StateDB, in pending", + "my txIndex", s.txIndex, "DB's txIndex", i, "addr", addr, + "key", key, "val", val) + return val, true + } + } + } } } - return snapDestructs, snapAccounts, snapStorage, nil + return common.Hash{}, false } -func (s *StateDB) SnapToDiffLayer() ([]common.Address, []types.DiffAccount, []types.DiffStorage) { - destructs := make([]common.Address, 0, len(s.snapDestructs)) - for account := range s.snapDestructs { - destructs = append(destructs, account) - } - accounts := make([]types.DiffAccount, 0, len(s.snapAccounts)) - for accountHash, account := range s.snapAccounts { - accounts = append(accounts, types.DiffAccount{ - Account: accountHash, - Blob: account, - }) - } - storages := make([]types.DiffStorage, 0, len(s.snapStorage)) - for accountHash, storage := range s.snapStorage { - keys := make([]string, 0, len(storage)) - values := make([][]byte, 0, len(storage)) - for k, v := range storage { - keys = append(keys, 
k) - values = append(values, v) +func (s *ParallelStateDB) getStateObjectFromUnconfirmedDB(addr common.Address) (*StateObject, bool) { + // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { // if deleted on merge, can get from main StateDB, ok but fixme: concurrent safe + return obj, true + } } - storages = append(storages, types.DiffStorage{ - Account: accountHash, - Keys: keys, - Vals: values, - }) } - return destructs, accounts, storages + return nil, false } -// PrepareAccessList handles the preparatory steps for executing a state transition with -// regards to both EIP-2929 and EIP-2930: -// -// - Add sender to access list (2929) -// - Add destination to access list (2929) -// - Add precompiles to access list (2929) -// - Add the contents of the optional tx access list (2930) -// -// This method should only be called if Yolov3/Berlin/2929+2930 is applicable at the current number. 
-func (s *StateDB) PrepareAccessList(sender common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) { - s.AddAddressToAccessList(sender) - if dst != nil { - s.AddAddressToAccessList(*dst) - // If it's a create-tx, the destination will be added inside evm.create +func (s *ParallelStateDB) IsParallelReadsValid() bool { + slotDB := s + if !slotDB.parallel.isSlotDB { + log.Error("IsSlotDBReadsValid slotDB should be slot DB", "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) + return false } - for _, addr := range precompiles { - s.AddAddressToAccessList(addr) + + mainDB := slotDB.parallel.baseStateDB + if mainDB.parallel.isSlotDB { + log.Error("IsSlotDBReadsValid s should be main DB", "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) + return false } - for _, el := range list { - s.AddAddressToAccessList(el.Address) - for _, key := range el.StorageKeys { - s.AddSlotToAccessList(el.Address, key) + // for nonce + for addr, nonceSlot := range slotDB.parallel.nonceReadsInSlot { + nonceMain := mainDB.GetNonce(addr) + if nonceSlot != nonceMain { + log.Debug("IsSlotDBReadsValid nonce read is invalid", "addr", addr, + "nonceSlot", nonceSlot, "nonceMain", nonceMain, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false } } -} - -// AddAddressToAccessList adds the given address to the access list -func (s *StateDB) AddAddressToAccessList(addr common.Address) { - if s.accessList == nil { - s.accessList = newAccessList() - } - if s.accessList.AddAddress(addr) { - s.journal.append(accessListAddAccountChange{&addr}) + // balance + for addr, balanceSlot := range slotDB.parallel.balanceReadsInSlot { + if addr != s.parallel.systemAddress { // skip balance check for system address + balanceMain := mainDB.GetBalance(addr) + if balanceSlot.Cmp(balanceMain) != 0 { + log.Debug("IsSlotDBReadsValid balance read is invalid", "addr", addr, + 
"balanceSlot", balanceSlot, "balanceMain", balanceMain, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } } -} - -// AddSlotToAccessList adds the given (address, slot)-tuple to the access list -func (s *StateDB) AddSlotToAccessList(addr common.Address, slot common.Hash) { - if s.accessList == nil { - s.accessList = newAccessList() + // check code + for addr, codeSlot := range slotDB.parallel.codeReadsInSlot { + codeMain := mainDB.GetCode(addr) + if !bytes.Equal(codeSlot, codeMain) { + log.Debug("IsSlotDBReadsValid code read is invalid", "addr", addr, + "len codeSlot", len(codeSlot), "len codeMain", len(codeMain), "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } } - addrMod, slotMod := s.accessList.AddSlot(addr, slot) - if addrMod { - // In practice, this should not happen, since there is no way to enter the - // scope of 'address' without having the 'address' become already added - // to the access list (via call-variant, create, etc). 
- // Better safe than sorry, though - s.journal.append(accessListAddAccountChange{&addr}) + // check codeHash + for addr, codeHashSlot := range slotDB.parallel.codeHashReadsInSlot { + codeHashMain := mainDB.GetCodeHash(addr) + if !bytes.Equal(codeHashSlot.Bytes(), codeHashMain.Bytes()) { + log.Debug("IsSlotDBReadsValid codehash read is invalid", "addr", addr, + "codeHashSlot", codeHashSlot, "codeHashMain", codeHashMain, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } } - if slotMod { - s.journal.append(accessListAddSlotChange{ - address: &addr, - slot: &slot, + // check KV + for addr, slotStorage := range slotDB.parallel.kvReadsInSlot { + conflict := false + slotStorage.Range(func(keySlot, valSlot interface{}) bool { + valMain := mainDB.GetState(addr, keySlot.(common.Hash)) + if !bytes.Equal(valSlot.(common.Hash).Bytes(), valMain.Bytes()) { + log.Debug("IsSlotDBReadsValid KV read is invalid", "addr", addr, + "key", keySlot.(common.Hash), "valSlot", valSlot.(common.Hash), + "valMain", valMain, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + conflict = true + return false // return false, Range will be terminated. + } + return true // return true, Range will try next KV }) + if conflict { + return false + } } -} - -// AddressInAccessList returns true if the given address is in the access list. 
-func (s *StateDB) AddressInAccessList(addr common.Address) bool { - if s.accessList == nil { - return false + // addr state check + for addr, stateSlot := range slotDB.parallel.addrStateReadsInSlot { + stateMain := false // addr not exist + if mainDB.getStateObject(addr) != nil { + stateMain = true // addr exist in main DB + } + if stateSlot != stateMain { + // skip addr state check for system address + if addr != s.parallel.systemAddress { + log.Debug("IsSlotDBReadsValid addrState read invalid(true: exist, false: not exist)", + "addr", addr, "stateSlot", stateSlot, "stateMain", stateMain, + "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } } - return s.accessList.ContainsAddress(addr) -} + // snapshot destructs check -// SlotInAccessList returns true if the given (address, slot)-tuple is in the access list. -func (s *StateDB) SlotInAccessList(addr common.Address, slot common.Hash) (addressPresent bool, slotPresent bool) { - if s.accessList == nil { - return false, false + for addr, destructRead := range slotDB.parallel.addrSnapDestructsReadsInSlot { + mainObj := mainDB.getStateObject(addr) + if mainObj == nil { + log.Debug("IsSlotDBReadsValid snapshot destructs read invalid, address should exist", + "addr", addr, "destruct", destructRead, + "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + _, destructMain := mainDB.snapDestructs[addr] // addr not exist + if destructRead != destructMain { + log.Debug("IsSlotDBReadsValid snapshot destructs read invalid", + "addr", addr, "destructRead", destructRead, "destructMain", destructMain, + "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } } - return s.accessList.Contains(addr, slot) + + return true } -func (s *StateDB) GetDirtyAccounts() []common.Address { - accounts := 
make([]common.Address, 0, len(s.stateObjectsDirty)) - for account := range s.stateObjectsDirty { - accounts = append(accounts, account) - } - return accounts +// For most of the transactions, systemAddressOpsCount should be 3: +// one for SetBalance(0) on NewSlotDB() +// the other is for AddBalance(GasFee) at the end. +// (systemAddressOpsCount > 3) means the transaction tries to access systemAddress, in +// this case, we should redo and keep its balance on NewSlotDB() +func (s *ParallelStateDB) SystemAddressRedo() bool { + return s.parallel.systemAddressOpsCount > 4 } -func (s *StateDB) GetStorage(address common.Address) *sync.Map { - return s.storagePool.getStorage(address) +// NeedsRedo returns true if there is any clear reason that we need to redo this transaction +func (s *ParallelStateDB) NeedsRedo() bool { + return s.parallel.needsRedo } diff --git a/core/state/statedb.go.bak b/core/state/statedb.go.bak new file mode 100644 index 0000000000..76f32e32b7 --- /dev/null +++ b/core/state/statedb.go.bak @@ -0,0 +1,3383 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package state provides a caching layer atop the Ethereum state trie. 
+package state + +import ( + "bytes" + "errors" + "fmt" + "math/big" + "runtime" + "sort" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/gopool" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state/snapshot" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" +) + +const defaultNumOfSlots = 100 + +type revision struct { + id int + journalIndex int +} + +var ( + // emptyRoot is the known root hash of an empty trie. + emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + + emptyAddr = crypto.Keccak256Hash(common.Address{}.Bytes()) +) + +type proofList [][]byte + +func (n *proofList) Put(key []byte, value []byte) error { + *n = append(*n, value) + return nil +} + +func (n *proofList) Delete(key []byte) error { + panic("not supported") +} + +type StateKeys map[common.Hash]struct{} + +type StateObjectSyncMap struct { + sync.Map +} + +func (s *StateObjectSyncMap) LoadStateObject(addr common.Address) (*StateObject, bool) { + stateObject, ok := s.Load(addr) + if !ok { + return nil, ok + } + return stateObject.(*StateObject), ok +} + +func (s *StateObjectSyncMap) StoreStateObject(addr common.Address, stateObject *StateObject) { + s.Store(addr, stateObject) +} + +// loadStateObj is the entry for loading state object from stateObjects in StateDB or stateObjects in parallel +func (s *StateDB) loadStateObj(addr common.Address) (*StateObject, bool) { + if s.isParallel { + return s.parallel.stateObjects.LoadStateObject(addr) + } + obj, ok := s.stateObjects[addr] + return obj, ok +} + +// storeStateObj is the entry for storing state object to stateObjects in StateDB or stateObjects in parallel +func 
(s *StateDB) storeStateObj(addr common.Address, stateObject *StateObject) { + if s.isParallel { + // When a state object is stored into s.parallel.stateObjects, + // it belongs to base StateDB, it is confirmed and valid. + stateObject.db.storeParallelLock.Lock() + s.parallel.stateObjects.Store(addr, stateObject) + stateObject.db.storeParallelLock.Unlock() + } else { + s.stateObjects[addr] = stateObject + } +} + +// deleteStateObj is the entry for deleting state object to stateObjects in StateDB or stateObjects in parallel +func (s *StateDB) deleteStateObj(addr common.Address) { + if s.isParallel { + s.parallel.stateObjects.Delete(addr) + } else { + delete(s.stateObjects, addr) + } +} + +// For parallel mode only +type ParallelState struct { + isSlotDB bool // denotes StateDB is used in slot, we will try to remove it + SlotIndex int // fixme: to be removed + // stateObjects holds the state objects in the base slot db + // the reason for using stateObjects instead of stateObjects on the outside is + // we need a thread safe map to hold state objects since there are many slots will read + // state objects from it; + // And we will merge all the changes made by the concurrent slot into it. + stateObjects *StateObjectSyncMap + + baseStateDB *StateDB // for parallel mode, there will be a base StateDB in dispatcher routine. + baseTxIndex int // slotDB is created base on this tx index. + dirtiedStateObjectsInSlot map[common.Address]*StateObject + unconfirmedDBInShot map[int]*ParallelStateDB // do unconfirmed reference in same slot. + + // we will record the read detail for conflict check and + // the changed addr or key for object merge, the changed detail can be acheived from the dirty object + nonceChangesInSlot map[common.Address]struct{} + nonceReadsInSlot map[common.Address]uint64 + balanceChangesInSlot map[common.Address]struct{} // the address's balance has been changed + balanceReadsInSlot map[common.Address]*big.Int // the address's balance has been read and used. 
+ // codeSize can be derived based on code, but codeHash can not directly derived based on code + // - codeSize is 0 for address not exist or empty code + // - codeHash is `common.Hash{}` for address not exist, emptyCodeHash(`Keccak256Hash(nil)`) for empty code + // so we use codeReadsInSlot & codeHashReadsInSlot to keep code and codeHash, codeSize is derived from code + codeReadsInSlot map[common.Address][]byte // empty if address not exist or no code in this address + codeHashReadsInSlot map[common.Address]common.Hash + codeChangesInSlot map[common.Address]struct{} + kvReadsInSlot map[common.Address]Storage + kvChangesInSlot map[common.Address]StateKeys // value will be kept in dirtiedStateObjectsInSlot + // Actions such as SetCode, Suicide will change address's state. + // Later call like Exist(), Empty(), HasSuicided() depend on the address's state. + addrStateReadsInSlot map[common.Address]bool // true: exist, false: not exist or deleted + addrStateChangesInSlot map[common.Address]bool // true: created, false: deleted + addrSnapDestructsReadsInSlot map[common.Address]bool + // addrSnapDestructsChangesInSlot map[common.Address]struct{} // no use to get from unconfirmed DB for efficiency + + // Transaction will pay gas fee to system address. + // Parallel execution will clear system address's balance at first, in order to maintain transaction's + // gas fee value. Normal transaction will access system address twice, otherwise it means the transaction + // needs real system address's balance, the transaction will be marked redo with keepSystemAddressBalance = true + systemAddress common.Address + systemAddressOpsCount int + keepSystemAddressBalance bool + + // we may need to redo for some specific reasons, like we read the wrong state and need to panic in sequential mode in SubRefund + needsRedo bool +} + +// StateDB structs within the ethereum protocol are used to store anything +// within the merkle trie. 
StateDBs take care of caching and storing +// nested states. It's the general query interface to retrieve: +// * Contracts +// * Accounts +type StateDB struct { + db Database + prefetcherLock sync.Mutex + prefetcher *triePrefetcher + originalRoot common.Hash // The pre-state root, before any changes were made + expectedRoot common.Hash // The state root in the block header + stateRoot common.Hash // The calculation result of IntermediateRoot + + trie Trie + hasher crypto.KeccakState + diffLayer *types.DiffLayer + diffTries map[common.Address]Trie + diffCode map[common.Hash][]byte + lightProcessed bool + fullProcessed bool + pipeCommit bool + + snapMux sync.Mutex + snaps *snapshot.Tree + snap snapshot.Snapshot + storeParallelLock sync.RWMutex + snapParallelLock sync.RWMutex // for parallel mode, for main StateDB, slot will read snapshot, while processor will write. + snapDestructs map[common.Address]struct{} + snapAccounts map[common.Address][]byte + snapStorage map[common.Address]map[string][]byte + + // This map holds 'live' objects, which will get modified while processing a state transition. + stateObjects map[common.Address]*StateObject + stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie + stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution + storagePool *StoragePool // sharedPool to store L1 originStorage of stateObjects + writeOnSharedStorage bool // Write to the shared origin storage of a stateObject while reading from the underlying storage layer. + + isParallel bool + parallel ParallelState // to keep all the parallel execution elements + + // DB error. + // State objects are used by the consensus core and VM which are + // unable to deal with database-level errors. Any error that occurs + // during a database read is memoized here and will eventually be returned + // by StateDB.Commit. 
+ dbErr error + + // The refund counter, also used by state transitioning. + refund uint64 + + thash, bhash common.Hash + txIndex int + logs map[common.Hash][]*types.Log + logSize uint + + preimages map[common.Hash][]byte + + // Per-transaction access list + accessList *accessList + + // Journal of state modifications. This is the backbone of + // Snapshot and RevertToSnapshot. + journal *journal + validRevisions []revision + nextRevisionId int + + // Measurements gathered during execution for debugging purposes + MetricsMux sync.Mutex + AccountReads time.Duration + AccountHashes time.Duration + AccountUpdates time.Duration + AccountCommits time.Duration + StorageReads time.Duration + StorageHashes time.Duration + StorageUpdates time.Duration + StorageCommits time.Duration + SnapshotAccountReads time.Duration + SnapshotStorageReads time.Duration + SnapshotCommits time.Duration +} + +// New creates a new state from a given trie. +func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) { + return newStateDB(root, db, snaps) +} + +// NewWithSharedPool creates a new state with sharedStorge on layer 1.5 +func NewWithSharedPool(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) { + statedb, err := newStateDB(root, db, snaps) + if err != nil { + return nil, err + } + statedb.storagePool = NewStoragePool() + return statedb, nil +} + +func newStateDB(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) { + sdb := &StateDB{ + db: db, + originalRoot: root, + snaps: snaps, + stateObjects: make(map[common.Address]*StateObject, defaultNumOfSlots), + parallel: ParallelState{ + SlotIndex: -1, + }, + stateObjectsPending: make(map[common.Address]struct{}, defaultNumOfSlots), + stateObjectsDirty: make(map[common.Address]struct{}, defaultNumOfSlots), + txIndex: -1, + logs: make(map[common.Hash][]*types.Log, defaultNumOfSlots), + preimages: make(map[common.Hash][]byte), + journal: newJournal(), + hasher: 
crypto.NewKeccakState(), + } + if sdb.snaps != nil { + if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap != nil { + sdb.snapDestructs = make(map[common.Address]struct{}) + sdb.snapAccounts = make(map[common.Address][]byte) + sdb.snapStorage = make(map[common.Address]map[string][]byte) + } + } + + snapVerified := sdb.snap != nil && sdb.snap.Verified() + tr, err := db.OpenTrie(root) + // return error when 1. failed to open trie and 2. the snap is nil or the snap is not nil and done verification + if err != nil && (sdb.snap == nil || snapVerified) { + return nil, err + } + sdb.trie = tr + sdb.EnableWriteOnSharedStorage() // fixme:remove when s.originStorage[key] is enabled + return sdb, nil +} + +func (s *StateDB) getBaseStateDB() *StateDB { + return s +} + +func (s *StateDB) getStateObjectFromStateObjects(addr common.Address) (*StateObject, bool) { + return s.loadStateObj(addr) +} + +func (s *StateDB) EnableWriteOnSharedStorage() { + s.writeOnSharedStorage = true +} + +// StartPrefetcher initializes a new trie prefetcher to pull in nodes from the +// state trie concurrently while the state is mutated so that when we reach the +// commit phase, most of the needed data is already hot. +func (s *StateDB) StartPrefetcher(namespace string) { + s.prefetcherLock.Lock() + defer s.prefetcherLock.Unlock() + if s.prefetcher != nil { + s.prefetcher.close() + s.prefetcher = nil + } + if s.snap != nil { + s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, namespace) + } +} + +// StopPrefetcher terminates a running prefetcher and reports any leftover stats +// from the gathered metrics. 
+func (s *StateDB) StopPrefetcher() { + s.prefetcherLock.Lock() + defer s.prefetcherLock.Unlock() + if s.prefetcher != nil { + s.prefetcher.close() + s.prefetcher = nil + } +} + +// Mark that the block is processed by diff layer +func (s *StateDB) SetExpectedStateRoot(root common.Hash) { + s.expectedRoot = root +} + +// Mark that the block is processed by diff layer +func (s *StateDB) MarkLightProcessed() { + s.lightProcessed = true +} + +// Enable the pipeline commit function of statedb +func (s *StateDB) EnablePipeCommit() { + if s.snap != nil { + s.pipeCommit = true + } +} + +// Mark that the block is full processed +func (s *StateDB) MarkFullProcessed() { + s.fullProcessed = true +} + +func (s *StateDB) IsLightProcessed() bool { + return s.lightProcessed +} + +// setError remembers the first non-nil error it is called with. +func (s *StateDB) setError(err error) { + if s.dbErr == nil { + s.dbErr = err + } +} + +func (s *StateDB) Error() error { + return s.dbErr +} + +// Not thread safe +func (s *StateDB) Trie() (Trie, error) { + if s.trie == nil { + err := s.WaitPipeVerification() + if err != nil { + return nil, err + } + tr, err := s.db.OpenTrie(s.originalRoot) + if err != nil { + return nil, err + } + s.trie = tr + } + return s.trie, nil +} + +func (s *StateDB) SetDiff(diffLayer *types.DiffLayer, diffTries map[common.Address]Trie, diffCode map[common.Hash][]byte) { + s.diffLayer, s.diffTries, s.diffCode = diffLayer, diffTries, diffCode +} + +func (s *StateDB) SetSnapData(snapDestructs map[common.Address]struct{}, snapAccounts map[common.Address][]byte, + snapStorage map[common.Address]map[string][]byte) { + s.snapDestructs, s.snapAccounts, s.snapStorage = snapDestructs, snapAccounts, snapStorage +} + +func (s *StateDB) AddLog(log *types.Log) { + s.journal.append(addLogChange{txhash: s.thash}) + + log.TxHash = s.thash + log.BlockHash = s.bhash + log.TxIndex = uint(s.txIndex) + log.Index = s.logSize + s.logs[s.thash] = append(s.logs[s.thash], log) + s.logSize++ 
+} + +func (s *StateDB) GetLogs(hash common.Hash) []*types.Log { + return s.logs[hash] +} + +func (s *StateDB) Logs() []*types.Log { + var logs []*types.Log + for _, lgs := range s.logs { + logs = append(logs, lgs...) + } + return logs +} + +// AddPreimage records a SHA3 preimage seen by the VM. +func (s *StateDB) AddPreimage(hash common.Hash, preimage []byte) { + if _, ok := s.preimages[hash]; !ok { + s.journal.append(addPreimageChange{hash: hash}) + pi := make([]byte, len(preimage)) + copy(pi, preimage) + s.preimages[hash] = pi + } +} + +// Preimages returns a list of SHA3 preimages that have been submitted. +func (s *StateDB) Preimages() map[common.Hash][]byte { + return s.preimages +} + +// AddRefund adds gas to the refund counter +func (s *StateDB) AddRefund(gas uint64) { + s.journal.append(refundChange{prev: s.refund}) + s.refund += gas +} + +// SubRefund removes gas from the refund counter. +// This method will panic if the refund counter goes below zero +func (s *StateDB) SubRefund(gas uint64) { + s.journal.append(refundChange{prev: s.refund}) + if gas > s.refund { + panic(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, s.refund)) + } + s.refund -= gas +} + +// Exist reports whether the given account address exists in the state. +// Notably this also returns true for suicided accounts. 
+func (s *StateDB) Exist(addr common.Address) bool { + log.Debug("StateDB Exist", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + // 3.Try to get from main StateDB + exist := s.getStateObject(addr) != nil + return exist +} + +// Empty returns whether the state object is either non-existent +// or empty according to the EIP161 specification (balance = nonce = code = 0) +func (s *StateDB) Empty(addr common.Address) bool { + log.Debug("StateDB Empty", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + so := s.getStateObject(addr) + empty := (so == nil || so.empty()) + return empty +} + +// GetBalance retrieves the balance from the given address or 0 if object not found +// GetFrom the dirty list => from unconfirmed DB => get from main stateDB +func (s *StateDB) GetBalance(addr common.Address) *big.Int { + log.Debug("StateDB GetBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + if s.parallel.SlotIndex != -1 { + log.Debug("StateDB GetBalance in slot", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + } + balance := common.Big0 + stateObject := s.getStateObject(addr) + if stateObject != nil { + balance = stateObject.Balance() + } + return balance +} + +func (s *StateDB) GetNonce(addr common.Address) uint64 { + log.Debug("StateDB GetNonce", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + var nonce uint64 = 0 + stateObject := s.getStateObject(addr) + if stateObject != nil { + nonce = stateObject.Nonce() + } + + return nonce +} + +// TxIndex returns the current transaction index set by Prepare. +func (s *StateDB) TxIndex() int { + return s.txIndex +} + +// BlockHash returns the current block hash set by Prepare. +func (s *StateDB) BlockHash() common.Hash { + return s.bhash +} + +// BaseTxIndex returns the tx index that slot db based. 
+func (s *StateDB) BaseTxIndex() int { + return s.parallel.baseTxIndex +} + +func (s *StateDB) GetCode(addr common.Address) []byte { + log.Debug("StateDB GetCode", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + stateObject := s.getStateObject(addr) + var code []byte + if stateObject != nil { + code = stateObject.Code(s.db) + } + return code +} + +func (s *StateDB) GetCodeSize(addr common.Address) int { + log.Debug("StateDB GetCodeSize", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + var codeSize int = 0 + stateObject := s.getStateObject(addr) + if stateObject != nil { + codeSize = stateObject.CodeSize(s.db) + } + return codeSize +} + +// return value of GetCodeHash: +// - common.Hash{}: the address does not exist +// - emptyCodeHash: the address exist, but code is empty +// - others: the address exist, and code is not empty +func (s *StateDB) GetCodeHash(addr common.Address) common.Hash { + log.Debug("StateDB GetCodeHash", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + stateObject := s.getStateObject(addr) + codeHash := common.Hash{} + if stateObject != nil { + codeHash = common.BytesToHash(stateObject.CodeHash()) + } + return codeHash +} + +// GetState retrieves a value from the given account's storage trie. +func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { + log.Debug("StateDB GetState", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + stateObject := s.getStateObject(addr) + val := common.Hash{} + if stateObject != nil { + val = stateObject.GetState(s.db, hash) + } + return val +} + +// GetProof returns the Merkle proof for a given account. +func (s *StateDB) GetProof(addr common.Address) ([][]byte, error) { + return s.GetProofByHash(crypto.Keccak256Hash(addr.Bytes())) +} + +// GetProofByHash returns the Merkle proof for a given account. 
+func (s *StateDB) GetProofByHash(addrHash common.Hash) ([][]byte, error) { + var proof proofList + if _, err := s.Trie(); err != nil { + return nil, err + } + err := s.trie.Prove(addrHash[:], 0, &proof) + return proof, err +} + +// GetStorageProof returns the Merkle proof for given storage slot. +func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, error) { + var proof proofList + trie := s.StorageTrie(a) + if trie == nil { + return proof, errors.New("storage trie for requested address does not exist") + } + err := trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof) + return proof, err +} + +// GetStorageProofByHash returns the Merkle proof for given storage slot. +func (s *StateDB) GetStorageProofByHash(a common.Address, key common.Hash) ([][]byte, error) { + var proof proofList + trie := s.StorageTrie(a) + if trie == nil { + return proof, errors.New("storage trie for requested address does not exist") + } + err := trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof) + return proof, err +} + +// GetCommittedState retrieves a value from the given account's committed storage trie. +func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash { + log.Debug("StateDB GetCommittedState", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + stateObject := s.getStateObject(addr) + val := common.Hash{} + if stateObject != nil { + val = stateObject.GetCommittedState(s.db, hash) + } + return val +} + +// Database retrieves the low level database supporting the lower level trie ops. +func (s *StateDB) Database() Database { + return s.db +} + +// StorageTrie returns the storage trie of an account. +// The return value is a copy and is nil for non-existent accounts. 
+func (s *StateDB) StorageTrie(addr common.Address) Trie { + stateObject := s.getStateObject(addr) + if stateObject == nil { + return nil + } + cpy := stateObject.deepCopy(s) + cpy.updateTrie(s.db) + return cpy.getTrie(s.db) +} + +func (s *StateDB) HasSuicided(addr common.Address) bool { + stateObject := s.getStateObject(addr) + if stateObject != nil { + return stateObject.suicided + } + return false +} + +/* + * SETTERS + */ + +// AddBalance adds amount to the account associated with addr. +func (s *StateDB) AddBalance(addr common.Address, amount *big.Int) { + log.Debug("StateDB AddBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + stateObject.AddBalance(amount) + } +} + +// SubBalance subtracts amount from the account associated with addr. +func (s *StateDB) SubBalance(addr common.Address, amount *big.Int) { + log.Debug("StateDB SubBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + stateObject.SubBalance(amount) + } +} + +func (s *StateDB) SetBalance(addr common.Address, amount *big.Int) { + log.Debug("StateDB SetBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + stateObject.SetBalance(amount) + } +} + +func (s *StateDB) SetNonce(addr common.Address, nonce uint64) { + log.Debug("StateDB SetNonce", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + stateObject.SetNonce(nonce) + } +} + +func (s *StateDB) SetCode(addr common.Address, code []byte) { + log.Debug("StateDB SetCode", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + codeHash := crypto.Keccak256Hash(code) + stateObject.SetCode(codeHash, code) + } +} + +func (s 
*StateDB) SetState(addr common.Address, key, value common.Hash) { + log.Debug("StateDB SetState", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + stateObject.SetState(s.db, key, value) + } +} + +// SetStorage replaces the entire storage for the specified account with given +// storage. This function should only be used for debugging. +func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) { + stateObject := s.GetOrNewStateObject(addr) // fixme: parallel mode? + if stateObject != nil { + stateObject.SetStorage(storage) + } +} + +// Suicide marks the given account as suicided. +// This clears the account balance. +// +// The account's state object is still available until the state is committed, +// getStateObject will return a non-nil account after Suicide. +func (s *StateDB) Suicide(addr common.Address) bool { + log.Debug("StateDB Suicide", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + var stateObject *StateObject + if stateObject == nil { + // 3.Try to get from main StateDB + stateObject = s.getStateObject(addr) + if stateObject == nil { + log.Error("Suicide addr not exist", "txIndex", s.txIndex, "addr", addr) + return false + } + } + + s.journal.append(suicideChange{ + account: &addr, + prev: stateObject.suicided, // todo: must be false? + prevbalance: new(big.Int).Set(s.GetBalance(addr)), + }) + + stateObject.markSuicided() + stateObject.data.Balance = new(big.Int) + return true +} + +// +// Setting, updating & deleting state object methods. +// + +// updateStateObject writes the given object to the trie. 
+func (s *StateDB) updateStateObject(obj *StateObject) { + log.Debug("StateDB updateStateObject", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + // Track the amount of time wasted on updating the account from the trie + if metrics.EnabledExpensive { + defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now()) + } + // Encode the account and update the account trie + addr := obj.Address() + data := obj.encodeData + var err error + if data == nil { + data, err = rlp.EncodeToBytes(obj) + if err != nil { + panic(fmt.Errorf("can't encode object at %x: %v", addr[:], err)) + } + } + if err = s.trie.TryUpdate(addr[:], data); err != nil { + s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err)) + } +} + +// deleteStateObject removes the given object from the state trie. +func (s *StateDB) deleteStateObject(obj *StateObject) { + // Track the amount of time wasted on deleting the account from the trie + if metrics.EnabledExpensive { + defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now()) + } + // Delete the account from the trie + addr := obj.Address() + if err := s.trie.TryDelete(addr[:]); err != nil { + s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err)) + } +} + +// getStateObject retrieves a state object given by the address, returning nil if +// the object is not found or was deleted in this execution context. If you need +// to differentiate between non-existent/just-deleted, use getDeletedStateObject. 
+func (s *StateDB) getStateObject(addr common.Address) *StateObject { + if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted { + return obj + } + return nil +} + +func (s *StateDB) getStateObjectFromSnapshotOrTrie(addr common.Address) (data *Account, ok bool) { + var err error + // If no live objects are available, attempt to use snapshots + if s.snap != nil { + if metrics.EnabledExpensive { + defer func(start time.Time) { s.SnapshotAccountReads += time.Since(start) }(time.Now()) + } + var acc *snapshot.Account + if acc, err = s.snap.Account(crypto.HashData(s.hasher, addr.Bytes())); err == nil { + if acc == nil { + return nil, false + } + data = &Account{ + Nonce: acc.Nonce, + Balance: acc.Balance, + CodeHash: acc.CodeHash, + Root: common.BytesToHash(acc.Root), + } + if len(data.CodeHash) == 0 { + data.CodeHash = emptyCodeHash + } + if data.Root == (common.Hash{}) { + data.Root = emptyRoot + } + } + } + // If snapshot unavailable or reading from it failed, load from the database + if s.snap == nil || err != nil { + if s.trie == nil { + tr, err := s.db.OpenTrie(s.originalRoot) + if err != nil { + s.setError(fmt.Errorf("failed to open trie tree")) + return nil, false + } + s.trie = tr + } + if metrics.EnabledExpensive { + defer func(start time.Time) { s.AccountReads += time.Since(start) }(time.Now()) + } + enc, err := s.trie.TryGet(addr.Bytes()) + if err != nil { + s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %v", addr.Bytes(), err)) + return nil, false + } + if len(enc) == 0 { + return nil, false + } + data = new(Account) + if err := rlp.DecodeBytes(enc, data); err != nil { + log.Error("Failed to decode state object", "addr", addr, "err", err) + return nil, false + } + } + return data, true +} + +// getDeletedStateObject is similar to getStateObject, but instead of returning +// nil for a deleted state object, it returns the actual object with the deleted +// flag set. 
This is needed by the state journal to revert to the correct s- +// destructed object instead of wiping all knowledge about the state object. +func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject { + // Prefer live objects if any is available + if obj, _ := s.getStateObjectFromStateObjects(addr); obj != nil { + return obj + } + data, ok := s.getStateObjectFromSnapshotOrTrie(addr) + if !ok { + return nil + } + // Insert into the live set + // if obj, ok := s.loadStateObj(addr); ok { + // fixme: concurrent not safe, merge could update it... + // return obj + //} + obj := newObject(s, s.isParallel, addr, *data) + s.storeStateObj(addr, obj) + return obj +} + +// func (s *StateDB) SetStateObject(object *StateObject) { +// s.storeStateObj(object.Address(), object) +// } + +// GetOrNewStateObject retrieves a state object or create a new state object if nil. +// dirtyInSlot -> Unconfirmed DB -> main DB -> snapshot, no? create one +func (s *StateDB) GetOrNewStateObject(addr common.Address) *StateObject { + var stateObject *StateObject = nil + if stateObject == nil { + stateObject = s.getStateObject(addr) + } + if stateObject == nil || stateObject.deleted || stateObject.suicided { + stateObject = s.createObject(addr) + } + return stateObject +} + +// createObject creates a new state object. If there is an existing account with +// the given address, it is overwritten and returned as the second return value. 
+ +// prev is used for CreateAccount to get its balance +// Parallel mode: +// if prev in dirty: revert is ok +// if prev in unconfirmed DB: addr state read record, revert should not put it back +// if prev in main DB: addr state read record, revert should not put it back +// if pre no exist: addr state read record, + +// `prev` is used to handle revert, to recover with the `prev` object +// In Parallel mode, we only need to recover to `prev` in SlotDB, +// a.if it is not in SlotDB, `revert` will remove it from the SlotDB +// b.if it is exist in SlotDB, `revert` will recover to the `prev` in SlotDB +// c.as `snapDestructs` it is the same +func (s *StateDB) createObject(addr common.Address) (newobj *StateObject) { + prev := s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that! + var prevdestruct bool + + if s.snap != nil && prev != nil { + _, prevdestruct = s.snapDestructs[prev.address] + if !prevdestruct { + // To destroy the previous trie node first and update the trie tree + // with the new object on block commit. + s.snapDestructs[prev.address] = struct{}{} + } + } + newobj = newObject(s, s.isParallel, addr, Account{}) + newobj.setNonce(0) // sets the object to dirty + if prev == nil { + s.journal.append(createObjectChange{account: &addr}) + } else { + s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct}) + } + + s.storeStateObj(addr, newobj) + return newobj +} + +// CreateAccount explicitly creates a state object. If a state object with the address +// already exists the balance is carried over to the new account. +// +// CreateAccount is called during the EVM CREATE operation. The situation might arise that +// a contract does the following: +// +// 1. sends funds to sha(account ++ (nonce + 1)) +// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1) +// +// Carrying over the balance ensures that Ether doesn't disappear. 
+func (s *StateDB) CreateAccount(addr common.Address) { + // no matter it is got from dirty, unconfirmed or main DB + // if addr not exist, preBalance will be common.Big0, it is same as new(big.Int) which + // is the value newObject(), + preBalance := s.GetBalance(addr) + newObj := s.createObject(addr) + newObj.setBalance(new(big.Int).Set(preBalance)) // new big.Int for newObj +} + +func (s *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error { + so := s.getStateObject(addr) + if so == nil { + return nil + } + it := trie.NewIterator(so.getTrie(s.db).NodeIterator(nil)) + + for it.Next() { + key := common.BytesToHash(s.trie.GetKey(it.Key)) + if value, dirty := so.dirtyStorage.GetValue(key); dirty { + if !cb(key, value) { + return nil + } + continue + } + + if len(it.Value) > 0 { + _, content, _, err := rlp.Split(it.Value) + if err != nil { + return err + } + if !cb(key, common.BytesToHash(content)) { + return nil + } + } + } + return nil +} + +// Copy creates a deep, independent copy of the state. +// Snapshots of the copied state cannot be applied to the copy. 
+func (s *StateDB) Copy() *StateDB {
+	// Copy all the basic fields, initialize the memory ones
+	state := &StateDB{
+		db:                  s.db,
+		trie:                s.db.CopyTrie(s.trie),
+		stateObjects:        make(map[common.Address]*StateObject, len(s.journal.dirties)),
+		stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)),
+		stateObjectsDirty:   make(map[common.Address]struct{}, len(s.journal.dirties)),
+		storagePool:         s.storagePool,
+		refund:              s.refund,
+		logs:                make(map[common.Hash][]*types.Log, len(s.logs)),
+		logSize:             s.logSize,
+		preimages:           make(map[common.Hash][]byte, len(s.preimages)),
+		journal:             newJournal(),
+		hasher:              crypto.NewKeccakState(),
+		parallel:            ParallelState{},
+	}
+	// Copy the dirty states, logs, and preimages
+	for addr := range s.journal.dirties {
+		// As documented [here](https://github.com/ethereum/go-ethereum/pull/16485#issuecomment-380438527),
+		// and in the Finalise-method, there is a case where an object is in the journal but not
+		// in the stateObjects: OOG after touch on ripeMD prior to Byzantium. Thus, we need to check for
+		// nil
+		if object, exist := s.getStateObjectFromStateObjects(addr); exist {
+			// Even though the original object is dirty, we are not copying the journal,
+			// so we need to make sure that any side effect the journal would have caused
+			// during a commit (or similar op) is already applied to the copy.
+			state.storeStateObj(addr, object.deepCopy(state))
+
+			state.stateObjectsDirty[addr] = struct{}{}   // Mark the copy dirty to force internal (code/state) commits
+			state.stateObjectsPending[addr] = struct{}{} // Mark the copy pending to force external (account) commits
+		}
+	}
+	// Above, we don't copy the actual journal. This means that if the copy is copied, the
+	// loop above will be a no-op, since the copy's journal is empty.
+	// Thus, here we iterate over stateObjects, to enable copies of copies
+	for addr := range s.stateObjectsPending {
+		if _, exist := state.getStateObjectFromStateObjects(addr); !exist {
+			object, _ := s.getStateObjectFromStateObjects(addr)
+			state.storeStateObj(addr, object.deepCopy(state))
+		}
+		state.stateObjectsPending[addr] = struct{}{}
+	}
+	for addr := range s.stateObjectsDirty {
+		if _, exist := state.getStateObjectFromStateObjects(addr); !exist {
+			object, _ := s.getStateObjectFromStateObjects(addr)
+			state.storeStateObj(addr, object.deepCopy(state))
+		}
+		state.stateObjectsDirty[addr] = struct{}{}
+	}
+	for hash, logs := range s.logs {
+		cpy := make([]*types.Log, len(logs))
+		for i, l := range logs {
+			cpy[i] = new(types.Log)
+			*cpy[i] = *l
+		}
+		state.logs[hash] = cpy
+	}
+	for hash, preimage := range s.preimages {
+		state.preimages[hash] = preimage
+	}
+	// Do we need to copy the access list? In practice: No. At the start of a
+	// transaction, the access list is empty. In practice, we only ever copy state
+	// _between_ transactions/blocks, never in the middle of a transaction.
+	// However, it doesn't cost us much to copy an empty list, so we do it anyway
+	// to not blow up if we ever decide copy it in the middle of a transaction
+	if s.accessList != nil {
+		state.accessList = s.accessList.Copy()
+	}
+
+	// If there's a prefetcher running, make an inactive copy of it that can
+	// only access data but does not actively preload (since the user will not
+	// know that they need to explicitly terminate an active copy).
+	if s.prefetcher != nil {
+		state.prefetcher = s.prefetcher.copy()
+	}
+	if s.snaps != nil {
+		// In order for the miner to be able to use and make additions
+		// to the snapshot tree, we need to copy that as well.
+ // Otherwise, any block mined by ourselves will cause gaps in the tree, + // and force the miner to operate trie-backed only + state.snaps = s.snaps + state.snap = s.snap + // deep copy needed + state.snapDestructs = make(map[common.Address]struct{}) + for k, v := range s.snapDestructs { + state.snapDestructs[k] = v + } + state.snapAccounts = make(map[common.Address][]byte) + for k, v := range s.snapAccounts { + state.snapAccounts[k] = v + } + state.snapStorage = make(map[common.Address]map[string][]byte) + for k, v := range s.snapStorage { + temp := make(map[string][]byte) + for kk, vv := range v { + temp[kk] = vv + } + state.snapStorage[k] = temp + } + } + return state +} + +/* +var addressStructPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]struct{}, defaultNumOfSlots) }, +} + +var journalPool = sync.Pool{ + New: func() interface{} { + return &journal{ + dirties: make(map[common.Address]int, defaultNumOfSlots), + entries: make([]journalEntry, 0, defaultNumOfSlots), + } + }, +} + +var stateKeysPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]StateKeys, defaultNumOfSlots) }, +} + +var stateObjectsPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]*StateObject, defaultNumOfSlots) }, +} + +var balancePool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]*big.Int, defaultNumOfSlots) }, +} + +var snapAccountPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address][]byte, defaultNumOfSlots) }, +} + +var snapStoragePool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]map[string][]byte, defaultNumOfSlots) }, +} + +var snapStorageValuePool = sync.Pool{ + New: func() interface{} { return make(map[string][]byte, defaultNumOfSlots) }, +} + +var logsPool = sync.Pool{ + New: func() interface{} { return make(map[common.Hash][]*types.Log, defaultNumOfSlots) }, +} + +func (s *StateDB) SlotDBPutSyncPool() { + // for key := range 
s.parallel.codeReadsInSlot { + // delete(s.parallel.codeReadsInSlot, key) + //} + //addressStructPool.Put(s.parallel.codeReadsInSlot) + + for key := range s.parallel.codeChangesInSlot { + delete(s.parallel.codeChangesInSlot, key) + } + addressStructPool.Put(s.parallel.codeChangesInSlot) + + for key := range s.parallel.balanceChangesInSlot { + delete(s.parallel.balanceChangesInSlot, key) + } + addressStructPool.Put(s.parallel.balanceChangesInSlot) + + for key := range s.parallel.balanceReadsInSlot { + delete(s.parallel.balanceReadsInSlot, key) + } + balancePool.Put(s.parallel.balanceReadsInSlot) + + // for key := range s.parallel.addrStateReadsInSlot { + // delete(s.parallel.addrStateReadsInSlot, key) + // } + // addressStructPool.Put(s.parallel.addrStateReadsInSlot) + + for key := range s.parallel.nonceChangesInSlot { + delete(s.parallel.nonceChangesInSlot, key) + } + addressStructPool.Put(s.parallel.nonceChangesInSlot) + + for key := range s.stateObjectsPending { + delete(s.stateObjectsPending, key) + } + addressStructPool.Put(s.stateObjectsPending) + + for key := range s.stateObjectsDirty { + delete(s.stateObjectsDirty, key) + } + addressStructPool.Put(s.stateObjectsDirty) + + for key := range s.journal.dirties { + delete(s.journal.dirties, key) + } + s.journal.entries = s.journal.entries[:0] + journalPool.Put(s.journal) + + for key := range s.parallel.kvChangesInSlot { + delete(s.parallel.kvChangesInSlot, key) + } + stateKeysPool.Put(s.parallel.kvChangesInSlot) + + // for key := range s.parallel.kvReadsInSlot { + // delete(s.parallel.kvReadsInSlot, key) + // } + // stateKeysPool.Put(s.parallel.kvReadsInSlot) + + for key := range s.parallel.dirtiedStateObjectsInSlot { + delete(s.parallel.dirtiedStateObjectsInSlot, key) + } + stateObjectsPool.Put(s.parallel.dirtiedStateObjectsInSlot) + + for key := range s.snapDestructs { + delete(s.snapDestructs, key) + } + addressStructPool.Put(s.snapDestructs) + + for key := range s.snapAccounts { + delete(s.snapAccounts, key) 
+ } + snapAccountPool.Put(s.snapAccounts) + + for key, storage := range s.snapStorage { + for key := range storage { + delete(storage, key) + } + snapStorageValuePool.Put(storage) + delete(s.snapStorage, key) + } + snapStoragePool.Put(s.snapStorage) + + for key := range s.logs { + delete(s.logs, key) + } + logsPool.Put(s.logs) +} +*/ +// CopyForSlot copy all the basic fields, initialize the memory ones +func (s *StateDB) CopyForSlot() *ParallelStateDB { + parallel := ParallelState{ + // use base(dispatcher) slot db's stateObjects. + // It is a SyncMap, only readable to slot, not writable + stateObjects: s.parallel.stateObjects, + unconfirmedDBInShot: make(map[int]*ParallelStateDB, 100), + + codeReadsInSlot: make(map[common.Address][]byte, 10), // addressStructPool.Get().(map[common.Address]struct{}), + codeHashReadsInSlot: make(map[common.Address]common.Hash), + codeChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + kvChangesInSlot: make(map[common.Address]StateKeys), // stateKeysPool.Get().(map[common.Address]StateKeys), + kvReadsInSlot: make(map[common.Address]Storage, 100), // stateKeysPool.Get().(map[common.Address]Storage), + balanceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + balanceReadsInSlot: make(map[common.Address]*big.Int), // addressStructPool.Get().(map[common.Address]struct{}), + addrStateReadsInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), + addrStateChangesInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), + nonceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + nonceReadsInSlot: make(map[common.Address]uint64), + addrSnapDestructsReadsInSlot: make(map[common.Address]bool), + + isSlotDB: true, + dirtiedStateObjectsInSlot: make(map[common.Address]*StateObject), // 
stateObjectsPool.Get().(map[common.Address]*StateObject), + } + state := &ParallelStateDB{ + StateDB{ + db: s.db, + trie: s.db.CopyTrie(s.trie), + stateObjects: make(map[common.Address]*StateObject), // replaced by parallel.stateObjects in parallel mode + stateObjectsPending: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + stateObjectsDirty: make(map[common.Address]struct{}), //addressStructPool.Get().(map[common.Address]struct{}), + refund: s.refund, // should be 0 + logs: make(map[common.Hash][]*types.Log, defaultNumOfSlots), // logsPool.Get().(map[common.Hash][]*types.Log), + logSize: 0, + preimages: make(map[common.Hash][]byte, len(s.preimages)), + journal: newJournal(), // journalPool.Get().(*journal), + hasher: crypto.NewKeccakState(), + isParallel: true, + parallel: parallel, + }, + } + for hash, preimage := range s.preimages { + state.preimages[hash] = preimage + } + + if s.snaps != nil { + // In order for the miner to be able to use and make additions + // to the snapshot tree, we need to copy that aswell. 
+		// Otherwise, any block mined by ourselves will cause gaps in the tree,
+		// and force the miner to operate trie-backed only
+		state.snaps = s.snaps
+		state.snap = s.snap
+		// deep copy needed
+		state.snapDestructs = make(map[common.Address]struct{}) //addressStructPool.Get().(map[common.Address]struct{})
+		s.snapParallelLock.RLock()
+		for k, v := range s.snapDestructs {
+			state.snapDestructs[k] = v
+		}
+		s.snapParallelLock.RUnlock()
+		//
+		state.snapAccounts = make(map[common.Address][]byte) // snapAccountPool.Get().(map[common.Address][]byte)
+		for k, v := range s.snapAccounts {
+			state.snapAccounts[k] = v
+		}
+		state.snapStorage = make(map[common.Address]map[string][]byte) // snapStoragePool.Get().(map[common.Address]map[string][]byte)
+		for k, v := range s.snapStorage {
+			temp := make(map[string][]byte) // snapStorageValuePool.Get().(map[string][]byte)
+			for kk, vv := range v {
+				temp[kk] = vv
+			}
+			state.snapStorage[k] = temp
+		}
+		// trie prefetch should be done by dispatcher on StateObject Merge,
+		// disable it in parallel slot
+		// state.prefetcher = s.prefetcher
+	}
+
+	return state
+}
+
+// Snapshot returns an identifier for the current revision of the state.
+func (s *StateDB) Snapshot() int {
+	id := s.nextRevisionId
+	s.nextRevisionId++
+	s.validRevisions = append(s.validRevisions, revision{id, s.journal.length()})
+	return id
+}
+
+// RevertToSnapshot reverts all state changes made since the given revision.
+func (s *StateDB) RevertToSnapshot(revid int) {
+	// Find the snapshot in the stack of valid snapshots.
+	idx := sort.Search(len(s.validRevisions), func(i int) bool {
+		return s.validRevisions[i].id >= revid
+	})
+	if idx == len(s.validRevisions) || s.validRevisions[idx].id != revid {
+		panic(fmt.Errorf("revision id %v cannot be reverted", revid))
+	}
+	snapshot := s.validRevisions[idx].journalIndex
+
+	// Replay the journal to undo changes and remove invalidated snapshots
+	s.journal.revert(s, snapshot)
+	s.validRevisions = s.validRevisions[:idx]
+}
+
+// GetRefund returns the current value of the refund counter.
+func (s *StateDB) GetRefund() uint64 {
+	return s.refund
+}
+
+// WaitPipeVerification waits until the parent snapshot layer has been verified.
+func (s *StateDB) WaitPipeVerification() error {
+	// We need to wait for the parent trie to commit
+	if s.snap != nil {
+		if valid := s.snap.WaitAndGetVerifyRes(); !valid {
+			return fmt.Errorf("verification on parent snap failed")
+		}
+	}
+	return nil
+}
+
+// Finalise finalises the state by removing the s destructed objects and clears
+// the journal as well as the refunds. Finalise, however, will not push any updates
+// into the tries just yet. Only IntermediateRoot or Commit will do that.
+func (s *StateDB) Finalise(deleteEmptyObjects bool) { // fixme: concurrent safe...
+	addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties))
+	for addr := range s.journal.dirties {
+		var obj *StateObject
+		var exist bool
+		if s.parallel.isSlotDB {
+			obj = s.parallel.dirtiedStateObjectsInSlot[addr]
+			if obj != nil {
+				exist = true
+			} else {
+				log.Error("StateDB Finalise dirty addr not in dirtiedStateObjectsInSlot",
+					"addr", addr)
+			}
+		} else {
+			obj, exist = s.getStateObjectFromStateObjects(addr)
+		}
+		if !exist {
+			// ripeMD is 'touched' at block 1714175, in tx 0x1237f737031e40bcde4a8b7e717b2d15e3ecadfe49bb1bbc71ee9deb09c6fcf2
+			// That tx goes out of gas, and although the notion of 'touched' does not exist there, the
+			// touch-event will still be recorded in the journal.
Since ripeMD is a special snowflake, + // it will persist in the journal even though the journal is reverted. In this special circumstance, + // it may exist in `s.journal.dirties` but not in `s.stateObjects`. + // Thus, we can safely ignore it here + continue + } + if obj.suicided || (deleteEmptyObjects && obj.empty()) { + if s.parallel.isSlotDB { + s.parallel.addrStateChangesInSlot[addr] = false // false: deleted + } + obj.deleted = true + + // If state snapshotting is active, also mark the destruction there. + // Note, we can't do this only at the end of a block because multiple + // transactions within the same block might self destruct and then + // ressurrect an account; but the snapshotter needs both events. + if s.snap != nil { + s.snapDestructs[obj.address] = struct{}{} // We need to maintain account deletions explicitly (will remain set indefinitely) + delete(s.snapAccounts, obj.address) // Clear out any previously updated account data (may be recreated via a ressurrect) + delete(s.snapStorage, obj.address) // Clear out any previously updated storage data (may be recreated via a ressurrect) + } + } else { + // 1.none parallel mode, we do obj.finalise(true) as normal + // 2.with parallel mode, we do obj.finalise(true) on dispatcher, not on slot routine + // obj.finalise(true) will clear its dirtyStorage, will make prefetch broken. + if !s.isParallel || !s.parallel.isSlotDB { + obj.finalise(true) // Prefetch slots in the background + } + } + if _, exist := s.stateObjectsPending[addr]; !exist { + s.stateObjectsPending[addr] = struct{}{} + } + if _, exist := s.stateObjectsDirty[addr]; !exist { + s.stateObjectsDirty[addr] = struct{}{} + // At this point, also ship the address off to the precacher. 
The precacher + // will start loading tries, and when the change is eventually committed, + // the commit-phase will be a lot faster + addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure + } + } + if s.prefetcher != nil && len(addressesToPrefetch) > 0 { + s.prefetcher.prefetch(s.originalRoot, addressesToPrefetch, emptyAddr) + } + // Invalidate journal because reverting across transactions is not allowed. + s.clearJournalAndRefund() +} + +// IntermediateRoot computes the current root hash of the state trie. +// It is called in between transactions to get the root hash that +// goes into transaction receipts. +func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { + if s.lightProcessed { + s.StopPrefetcher() + return s.trie.Hash() + } + // Finalise all the dirty storage states and write them into the tries + s.Finalise(deleteEmptyObjects) + s.AccountsIntermediateRoot() + return s.StateIntermediateRoot() +} + +func (s *StateDB) AccountsIntermediateRoot() { + tasks := make(chan func()) + finishCh := make(chan struct{}) + defer close(finishCh) + wg := sync.WaitGroup{} + for i := 0; i < runtime.NumCPU(); i++ { + go func() { + for { + select { + case task := <-tasks: + task() + case <-finishCh: + return + } + } + }() + } + + // Although naively it makes sense to retrieve the account trie and then do + // the contract storage and account updates sequentially, that short circuits + // the account prefetcher. Instead, let's process all the storage updates + // first, giving the account prefeches just a few more milliseconds of time + // to pull useful data from disk. + for addr := range s.stateObjectsPending { + if obj, _ := s.getStateObjectFromStateObjects(addr); !obj.deleted { + wg.Add(1) + tasks <- func() { + obj.updateRoot(s.db) + // If state snapshotting is active, cache the data til commit. 
Note, this + // update mechanism is not symmetric to the deletion, because whereas it is + // enough to track account updates at commit time, deletions need tracking + // at transaction boundary level to ensure we capture state clearing. + if s.snap != nil && !obj.deleted { + s.snapMux.Lock() + // It is possible to add unnecessary change, but it is fine. + s.snapAccounts[obj.address] = snapshot.SlimAccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash) + s.snapMux.Unlock() + } + data, err := rlp.EncodeToBytes(obj) + if err != nil { + panic(fmt.Errorf("can't encode object at %x: %v", addr[:], err)) + } + obj.encodeData = data + wg.Done() + } + } + } + wg.Wait() +} + +func (s *StateDB) StateIntermediateRoot() common.Hash { + // If there was a trie prefetcher operating, it gets aborted and irrevocably + // modified after we start retrieving tries. Remove it from the statedb after + // this round of use. + // + // This is weird pre-byzantium since the first tx runs with a prefetcher and + // the remainder without, but pre-byzantium even the initial prefetcher is + // useless, so no sleep lost. + prefetcher := s.prefetcher + defer func() { + s.prefetcherLock.Lock() + if s.prefetcher != nil { + s.prefetcher.close() + s.prefetcher = nil + } + // try not use defer inside defer + s.prefetcherLock.Unlock() + }() + + // Now we're about to start to write changes to the trie. The trie is so far + // _untouched_. We can check with the prefetcher, if it can give us a trie + // which has the same root, but also has some content loaded into it. 
+ if prefetcher != nil { + if trie := prefetcher.trie(s.originalRoot); trie != nil { + s.trie = trie + } + } + if s.trie == nil { + tr, err := s.db.OpenTrie(s.originalRoot) + if err != nil { + panic(fmt.Sprintf("Failed to open trie tree %s", s.originalRoot)) + } + s.trie = tr + } + usedAddrs := make([][]byte, 0, len(s.stateObjectsPending)) + for addr := range s.stateObjectsPending { + if obj, _ := s.getStateObjectFromStateObjects(addr); obj.deleted { + s.deleteStateObject(obj) + } else { + s.updateStateObject(obj) + } + usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure + } + if prefetcher != nil { + prefetcher.used(s.originalRoot, usedAddrs) + } + if len(s.stateObjectsPending) > 0 { + s.stateObjectsPending = make(map[common.Address]struct{}) + } + // Track the amount of time wasted on hashing the account trie + if metrics.EnabledExpensive { + defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now()) + } + root := s.trie.Hash() + return root +} + +// Prepare sets the current transaction hash and index and block hash which is +// used when the EVM emits new state logs. +func (s *StateDB) Prepare(thash, bhash common.Hash, ti int) { + s.thash = thash + s.bhash = bhash + s.txIndex = ti + s.accessList = nil +} + +func (s *StateDB) clearJournalAndRefund() { + if len(s.journal.entries) > 0 { + s.journal = newJournal() + s.refund = 0 + } + s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entires +} + +func (s *StateDB) LightCommit() (common.Hash, *types.DiffLayer, error) { + codeWriter := s.db.TrieDB().DiskDB().NewBatch() + + // light process already verified it, expectedRoot is trustworthy. 
+ root := s.expectedRoot + + commitFuncs := []func() error{ + func() error { + for codeHash, code := range s.diffCode { + rawdb.WriteCode(codeWriter, codeHash, code) + if codeWriter.ValueSize() >= ethdb.IdealBatchSize { + if err := codeWriter.Write(); err != nil { + return err + } + codeWriter.Reset() + } + } + if codeWriter.ValueSize() > 0 { + if err := codeWriter.Write(); err != nil { + return err + } + } + return nil + }, + func() error { + tasks := make(chan func()) + taskResults := make(chan error, len(s.diffTries)) + tasksNum := 0 + finishCh := make(chan struct{}) + defer close(finishCh) + threads := gopool.Threads(len(s.diffTries)) + + for i := 0; i < threads; i++ { + go func() { + for { + select { + case task := <-tasks: + task() + case <-finishCh: + return + } + } + }() + } + + for account, diff := range s.diffTries { + tmpAccount := account + tmpDiff := diff + tasks <- func() { + root, err := tmpDiff.Commit(nil) + if err != nil { + taskResults <- err + return + } + s.db.CacheStorage(crypto.Keccak256Hash(tmpAccount[:]), root, tmpDiff) + taskResults <- nil + } + tasksNum++ + } + + for i := 0; i < tasksNum; i++ { + err := <-taskResults + if err != nil { + return err + } + } + + // commit account trie + var account Account + root, err := s.trie.Commit(func(_ [][]byte, _ []byte, leaf []byte, parent common.Hash) error { + if err := rlp.DecodeBytes(leaf, &account); err != nil { + return nil + } + if account.Root != emptyRoot { + s.db.TrieDB().Reference(account.Root, parent) + } + return nil + }) + if err != nil { + return err + } + if root != emptyRoot { + s.db.CacheAccount(root, s.trie) + } + return nil + }, + func() error { + if s.snap != nil { + if metrics.EnabledExpensive { + defer func(start time.Time) { s.SnapshotCommits += time.Since(start) }(time.Now()) + } + // Only update if there's a state transition (skip empty Clique blocks) + if parent := s.snap.Root(); parent != root { + // for light commit, always do sync commit + if err := s.snaps.Update(root, 
parent, s.snapDestructs, s.snapAccounts, s.snapStorage, nil); err != nil { + log.Warn("Failed to update snapshot tree", "from", parent, "to", root, "err", err) + } + // Keep n diff layers in the memory + // - head layer is paired with HEAD state + // - head-1 layer is paired with HEAD-1 state + // - head-(n-1) layer(bottom-most diff layer) is paired with HEAD-(n-1)state + if err := s.snaps.Cap(root, s.snaps.CapLimit()); err != nil { + log.Warn("Failed to cap snapshot tree", "root", root, "layers", s.snaps.CapLimit(), "err", err) + } + } + } + return nil + }, + } + commitRes := make(chan error, len(commitFuncs)) + for _, f := range commitFuncs { + tmpFunc := f + go func() { + commitRes <- tmpFunc() + }() + } + for i := 0; i < len(commitFuncs); i++ { + r := <-commitRes + if r != nil { + return common.Hash{}, nil, r + } + } + s.snap, s.snapDestructs, s.snapAccounts, s.snapStorage = nil, nil, nil, nil + s.diffTries, s.diffCode = nil, nil + return root, s.diffLayer, nil +} + +// Commit writes the state to the underlying in-memory trie database. 
+func (s *StateDB) Commit(failPostCommitFunc func(), postCommitFuncs ...func() error) (common.Hash, *types.DiffLayer, error) { + if s.dbErr != nil { + return common.Hash{}, nil, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr) + } + // Finalize any pending changes and merge everything into the tries + if s.lightProcessed { + root, diff, err := s.LightCommit() + if err != nil { + return root, diff, err + } + for _, postFunc := range postCommitFuncs { + err = postFunc() + if err != nil { + return root, diff, err + } + } + return root, diff, nil + } + var diffLayer *types.DiffLayer + var verified chan struct{} + var snapUpdated chan struct{} + if s.snap != nil { + diffLayer = &types.DiffLayer{} + } + if s.pipeCommit { + // async commit the MPT + verified = make(chan struct{}) + snapUpdated = make(chan struct{}) + } + + commmitTrie := func() error { + commitErr := func() error { + if s.stateRoot = s.StateIntermediateRoot(); s.fullProcessed && s.expectedRoot != s.stateRoot { + return fmt.Errorf("invalid merkle root (remote: %x local: %x)", s.expectedRoot, s.stateRoot) + } + tasks := make(chan func()) + taskResults := make(chan error, len(s.stateObjectsDirty)) + tasksNum := 0 + finishCh := make(chan struct{}) + + threads := gopool.Threads(len(s.stateObjectsDirty)) + wg := sync.WaitGroup{} + for i := 0; i < threads; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case task := <-tasks: + task() + case <-finishCh: + return + } + } + }() + } + + if s.snap != nil { + for addr := range s.stateObjectsDirty { + if obj, _ := s.getStateObjectFromStateObjects(addr); !obj.deleted { + if obj.code != nil && obj.dirtyCode { + diffLayer.Codes = append(diffLayer.Codes, types.DiffCode{ + Hash: common.BytesToHash(obj.CodeHash()), + Code: obj.code, + }) + } + } + } + } + + for addr := range s.stateObjectsDirty { + if obj, _ := s.getStateObjectFromStateObjects(addr); !obj.deleted { + // Write any contract code associated with the state object + tasks <- 
func() { + // Write any storage changes in the state object to its storage trie + if err := obj.CommitTrie(s.db); err != nil { + taskResults <- err + } + taskResults <- nil + } + tasksNum++ + } + } + + for i := 0; i < tasksNum; i++ { + err := <-taskResults + if err != nil { + close(finishCh) + return err + } + } + close(finishCh) + + // The onleaf func is called _serially_, so we can reuse the same account + // for unmarshalling every time. + var account Account + root, err := s.trie.Commit(func(_ [][]byte, _ []byte, leaf []byte, parent common.Hash) error { + if err := rlp.DecodeBytes(leaf, &account); err != nil { + return nil + } + if account.Root != emptyRoot { + s.db.TrieDB().Reference(account.Root, parent) + } + return nil + }) + if err != nil { + return err + } + if root != emptyRoot { + s.db.CacheAccount(root, s.trie) + } + for _, postFunc := range postCommitFuncs { + err = postFunc() + if err != nil { + return err + } + } + wg.Wait() + return nil + }() + + if s.pipeCommit { + if commitErr == nil { + <-snapUpdated + s.snaps.Snapshot(s.stateRoot).MarkValid() + } else { + // The blockchain will do the further rewind if write block not finish yet + if failPostCommitFunc != nil { + <-snapUpdated + failPostCommitFunc() + } + log.Error("state verification failed", "err", commitErr) + } + close(verified) + } + return commitErr + } + + commitFuncs := []func() error{ + func() error { + codeWriter := s.db.TrieDB().DiskDB().NewBatch() + for addr := range s.stateObjectsDirty { + if obj, _ := s.getStateObjectFromStateObjects(addr); !obj.deleted { + if obj.code != nil && obj.dirtyCode { + rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code) + obj.dirtyCode = false + if codeWriter.ValueSize() > ethdb.IdealBatchSize { + if err := codeWriter.Write(); err != nil { + return err + } + codeWriter.Reset() + } + } + } + } + if codeWriter.ValueSize() > 0 { + if err := codeWriter.Write(); err != nil { + log.Crit("Failed to commit dirty codes", "error", err) + 
return err + } + } + return nil + }, + func() error { + // If snapshotting is enabled, update the snapshot tree with this new version + if s.snap != nil { + if metrics.EnabledExpensive { + defer func(start time.Time) { s.SnapshotCommits += time.Since(start) }(time.Now()) + } + if s.pipeCommit { + defer close(snapUpdated) + } + // Only update if there's a state transition (skip empty Clique blocks) + if parent := s.snap.Root(); parent != s.expectedRoot { + if err := s.snaps.Update(s.expectedRoot, parent, s.snapDestructs, s.snapAccounts, s.snapStorage, verified); err != nil { + log.Warn("Failed to update snapshot tree", "from", parent, "to", s.expectedRoot, "err", err) + } + // Keep n diff layers in the memory + // - head layer is paired with HEAD state + // - head-1 layer is paired with HEAD-1 state + // - head-(n-1) layer(bottom-most diff layer) is paired with HEAD-(n-1)state + go func() { + if err := s.snaps.Cap(s.expectedRoot, s.snaps.CapLimit()); err != nil { + log.Warn("Failed to cap snapshot tree", "root", s.expectedRoot, "layers", s.snaps.CapLimit(), "err", err) + } + }() + } + } + return nil + }, + func() error { + if s.snap != nil { + diffLayer.Destructs, diffLayer.Accounts, diffLayer.Storages = s.SnapToDiffLayer() + } + return nil + }, + } + if s.pipeCommit { + go commmitTrie() + } else { + commitFuncs = append(commitFuncs, commmitTrie) + } + commitRes := make(chan error, len(commitFuncs)) + for _, f := range commitFuncs { + tmpFunc := f + go func() { + commitRes <- tmpFunc() + }() + } + for i := 0; i < len(commitFuncs); i++ { + r := <-commitRes + if r != nil { + return common.Hash{}, nil, r + } + } + root := s.stateRoot + if s.pipeCommit { + root = s.expectedRoot + } + + return root, diffLayer, nil +} + +func (s *StateDB) DiffLayerToSnap(diffLayer *types.DiffLayer) (map[common.Address]struct{}, map[common.Address][]byte, map[common.Address]map[string][]byte, error) { + snapDestructs := make(map[common.Address]struct{}) + snapAccounts := 
make(map[common.Address][]byte) + snapStorage := make(map[common.Address]map[string][]byte) + + for _, des := range diffLayer.Destructs { + snapDestructs[des] = struct{}{} + } + for _, account := range diffLayer.Accounts { + snapAccounts[account.Account] = account.Blob + } + for _, storage := range diffLayer.Storages { + // should never happen + if len(storage.Keys) != len(storage.Vals) { + return nil, nil, nil, errors.New("invalid diffLayer: length of keys and values mismatch") + } + snapStorage[storage.Account] = make(map[string][]byte, len(storage.Keys)) + n := len(storage.Keys) + for i := 0; i < n; i++ { + snapStorage[storage.Account][storage.Keys[i]] = storage.Vals[i] + } + } + return snapDestructs, snapAccounts, snapStorage, nil +} + +func (s *StateDB) SnapToDiffLayer() ([]common.Address, []types.DiffAccount, []types.DiffStorage) { + destructs := make([]common.Address, 0, len(s.snapDestructs)) + for account := range s.snapDestructs { + destructs = append(destructs, account) + } + accounts := make([]types.DiffAccount, 0, len(s.snapAccounts)) + for accountHash, account := range s.snapAccounts { + accounts = append(accounts, types.DiffAccount{ + Account: accountHash, + Blob: account, + }) + } + storages := make([]types.DiffStorage, 0, len(s.snapStorage)) + for accountHash, storage := range s.snapStorage { + keys := make([]string, 0, len(storage)) + values := make([][]byte, 0, len(storage)) + for k, v := range storage { + keys = append(keys, k) + values = append(values, v) + } + storages = append(storages, types.DiffStorage{ + Account: accountHash, + Keys: keys, + Vals: values, + }) + } + return destructs, accounts, storages +} + +// PrepareAccessList handles the preparatory steps for executing a state transition with +// regards to both EIP-2929 and EIP-2930: +// +// - Add sender to access list (2929) +// - Add destination to access list (2929) +// - Add precompiles to access list (2929) +// - Add the contents of the optional tx access list (2930) +// +// This 
method should only be called if Yolov3/Berlin/2929+2930 is applicable at the current number. +func (s *StateDB) PrepareAccessList(sender common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) { + s.AddAddressToAccessList(sender) + if dst != nil { + s.AddAddressToAccessList(*dst) + // If it's a create-tx, the destination will be added inside evm.create + } + for _, addr := range precompiles { + s.AddAddressToAccessList(addr) + } + for _, el := range list { + s.AddAddressToAccessList(el.Address) + for _, key := range el.StorageKeys { + s.AddSlotToAccessList(el.Address, key) + } + } +} + +// AddAddressToAccessList adds the given address to the access list +func (s *StateDB) AddAddressToAccessList(addr common.Address) { + if s.accessList == nil { + s.accessList = newAccessList() + } + if s.accessList.AddAddress(addr) { + s.journal.append(accessListAddAccountChange{&addr}) + } +} + +// AddSlotToAccessList adds the given (address, slot)-tuple to the access list +func (s *StateDB) AddSlotToAccessList(addr common.Address, slot common.Hash) { + if s.accessList == nil { + s.accessList = newAccessList() + } + addrMod, slotMod := s.accessList.AddSlot(addr, slot) + if addrMod { + // In practice, this should not happen, since there is no way to enter the + // scope of 'address' without having the 'address' become already added + // to the access list (via call-variant, create, etc). + // Better safe than sorry, though + s.journal.append(accessListAddAccountChange{&addr}) + } + if slotMod { + s.journal.append(accessListAddSlotChange{ + address: &addr, + slot: &slot, + }) + } +} + +// AddressInAccessList returns true if the given address is in the access list. +func (s *StateDB) AddressInAccessList(addr common.Address) bool { + if s.accessList == nil { + return false + } + return s.accessList.ContainsAddress(addr) +} + +// SlotInAccessList returns true if the given (address, slot)-tuple is in the access list. 
+func (s *StateDB) SlotInAccessList(addr common.Address, slot common.Hash) (addressPresent bool, slotPresent bool) { + if s.accessList == nil { + return false, false + } + return s.accessList.Contains(addr, slot) +} + +func (s *StateDB) GetDirtyAccounts() []common.Address { + accounts := make([]common.Address, 0, len(s.stateObjectsDirty)) + for account := range s.stateObjectsDirty { + accounts = append(accounts, account) + } + return accounts +} + +func (s *StateDB) GetStorage(address common.Address) *sync.Map { + return s.storagePool.getStorage(address) +} + +// PrepareForParallel prepares for state db to be used in parallel execution mode. +func (s *StateDB) PrepareForParallel() { + s.isParallel = true + s.parallel.stateObjects = &StateObjectSyncMap{} +} + +// MergeSlotDB is for Parallel execution mode, when the transaction has been +// finalized(dirty -> pending) on execution slot, the execution results should be +// merged back to the main StateDB. +// And it will return and keep the slot's change list for later conflict detect. 
+func (s *StateDB) MergeSlotDB(slotDb *ParallelStateDB, slotReceipt *types.Receipt, txIndex int) { + // receipt.Logs use unified log index within a block + // align slotDB's log index to the block stateDB's logSize + for _, l := range slotReceipt.Logs { + l.Index += s.logSize + } + s.logSize += slotDb.logSize + + // before merge, pay the gas fee first: AddBalance to consensus.SystemAddress + systemAddress := slotDb.parallel.systemAddress + if slotDb.parallel.keepSystemAddressBalance { + s.SetBalance(systemAddress, slotDb.GetBalance(systemAddress)) + } else { + s.AddBalance(systemAddress, slotDb.GetBalance(systemAddress)) + } + + // only merge dirty objects + addressesToPrefetch := make([][]byte, 0, len(slotDb.stateObjectsDirty)) + for addr := range slotDb.stateObjectsDirty { + if _, exist := s.stateObjectsDirty[addr]; !exist { + s.stateObjectsDirty[addr] = struct{}{} + } + // system address is EOA account, it should have no storage change + if addr == systemAddress { + continue + } + + // stateObjects: KV, balance, nonce... + dirtyObj, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr] + if !ok { + log.Error("parallel merge, but dirty object not exist!", "SlotIndex", slotDb.parallel.SlotIndex, "txIndex:", slotDb.txIndex, "addr", addr) + continue + } + mainObj, exist := s.loadStateObj(addr) + if !exist { // fixme: it is also state change + // addr not exist on main DB, do ownership transfer + // dirtyObj.db = s + // dirtyObj.finalise(true) // true: prefetch on dispatcher + mainObj = dirtyObj.deepCopy(s) + mainObj.finalise(true) + s.storeStateObj(addr, mainObj) + // fixme: should not delete, would cause unconfirmed DB incorrect? + // delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership, fixme: shared read? + if dirtyObj.deleted { + // remove the addr from snapAccounts&snapStorage only when object is deleted. 
+ // "deleted" is not equal to "snapDestructs", since createObject() will add an addr for + // snapDestructs to destroy previous object, while it will keep the addr in snapAccounts & snapAccounts + delete(s.snapAccounts, addr) + delete(s.snapStorage, addr) + } + } else { + // addr already in main DB, do merge: balance, KV, code, State(create, suicide) + // can not do copy or ownership transfer directly, since dirtyObj could have outdated + // data(may be updated within the conflict window) + + var newMainObj = mainObj // we don't need to copy the object since the storages are thread safe + if _, ok := slotDb.parallel.addrStateChangesInSlot[addr]; ok { + // there are 3 kinds of state change: + // 1.Suicide + // 2.Empty Delete + // 3.createObject + // a.AddBalance,SetState to an unexist or deleted(suicide, empty delete) address. + // b.CreateAccount: like DAO the fork, regenerate a account carry its balance without KV + // For these state change, do ownership transafer for efficiency: + // dirtyObj.db = s + // newMainObj = dirtyObj + newMainObj = dirtyObj.deepCopy(s) + // should not delete, would cause unconfirmed DB incorrect. + // delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership, fixme: shared read? + if dirtyObj.deleted { + // remove the addr from snapAccounts&snapStorage only when object is deleted. 
+ // "deleted" is not equal to "snapDestructs", since createObject() will add an addr for + // snapDestructs to destroy previous object, while it will keep the addr in snapAccounts & snapAccounts + delete(s.snapAccounts, addr) + delete(s.snapStorage, addr) + } + } else { + // deepCopy a temporary *StateObject for safety, since slot could read the address, + // dispatch should avoid overwrite the StateObject directly otherwise, it could + // crash for: concurrent map iteration and map write + + if _, balanced := slotDb.parallel.balanceChangesInSlot[addr]; balanced { + newMainObj.SetBalance(dirtyObj.Balance()) + } + if _, coded := slotDb.parallel.codeChangesInSlot[addr]; coded { + newMainObj.code = dirtyObj.code + newMainObj.data.CodeHash = dirtyObj.data.CodeHash + newMainObj.dirtyCode = true + } + if keys, stated := slotDb.parallel.kvChangesInSlot[addr]; stated { + newMainObj.MergeSlotObject(s.db, dirtyObj, keys) + } + if _, nonced := slotDb.parallel.nonceChangesInSlot[addr]; nonced { + // dirtyObj.Nonce() should not be less than newMainObj + newMainObj.setNonce(dirtyObj.Nonce()) + } + } + newMainObj.finalise(true) // true: prefetch on dispatcher + // update the object + s.storeStateObj(addr, newMainObj) + } + addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure + } + + if s.prefetcher != nil && len(addressesToPrefetch) > 0 { + s.prefetcher.prefetch(s.originalRoot, addressesToPrefetch, emptyAddr) // prefetch for trie node of account + } + + for addr := range slotDb.stateObjectsPending { + if _, exist := s.stateObjectsPending[addr]; !exist { + s.stateObjectsPending[addr] = struct{}{} + } + } + + // slotDb.logs: logs will be kept in receipts, no need to do merge + + for hash, preimage := range slotDb.preimages { + s.preimages[hash] = preimage + } + if s.accessList != nil { + // fixme: accessList is not enabled yet, but it should use merge rather than overwrite Copy + s.accessList = slotDb.accessList.Copy() + } + + if 
slotDb.snaps != nil { + for k := range slotDb.snapDestructs { + // There could be a race condition for parallel transaction execution + // One transaction add balance 0 to an empty address, will delete it(delete empty is enabled). + // While another concurrent transaction could add a none-zero balance to it, make it not empty + // We fixed it by add a addr state read record for add balance 0 + s.snapParallelLock.Lock() + s.snapDestructs[k] = struct{}{} + s.snapParallelLock.Unlock() + } + + // slotDb.snapAccounts should be empty, comment out and to be deleted later + // for k, v := range slotDb.snapAccounts { + // s.snapAccounts[k] = v + // } + // slotDb.snapStorage should be empty, comment out and to be deleted later + // for k, v := range slotDb.snapStorage { + // temp := make(map[string][]byte) + // for kk, vv := range v { + // temp[kk] = vv + // } + // s.snapStorage[k] = temp + // } + } +} + +type ParallelStateDB struct { + StateDB +} + +// NewSlotDB creates a new State DB based on the provided StateDB. +// With parallel, each execution slot would have its own StateDB. 
+func NewSlotDB(db *StateDB, systemAddr common.Address, txIndex int, baseTxIndex int, keepSystem bool, + unconfirmedDBs *sync.Map /*map[int]*ParallelStateDB*/) *ParallelStateDB { + slotDB := db.CopyForSlot() + slotDB.txIndex = txIndex + slotDB.originalRoot = db.originalRoot + slotDB.parallel.baseStateDB = db + slotDB.parallel.baseTxIndex = baseTxIndex + slotDB.parallel.systemAddress = systemAddr + slotDB.parallel.systemAddressOpsCount = 0 + slotDB.parallel.keepSystemAddressBalance = keepSystem + slotDB.storagePool = NewStoragePool() + slotDB.EnableWriteOnSharedStorage() + for index := baseTxIndex + 1; index < slotDB.txIndex; index++ { // txIndex + unconfirmedDB, ok := unconfirmedDBs.Load(index) + if ok { + slotDB.parallel.unconfirmedDBInShot[index] = unconfirmedDB.(*ParallelStateDB) + } + } + + // All transactions will pay gas fee to the systemAddr at the end, this address is + // deemed to conflict, we handle it specially, clear it now and set it back to the main + // StateDB later; + // But there are transactions that will try to read systemAddr's balance, such as: + // https://bscscan.com/tx/0xcd69755be1d2f55af259441ff5ee2f312830b8539899e82488a21e85bc121a2a. + // It will trigger transaction redo and keepSystem will be marked as true. 
+ if !keepSystem { + slotDB.SetBalance(systemAddr, big.NewInt(0)) + } + + return slotDB +} + +// RevertSlotDB keep the Read list for conflict detect, +// discard all state changes except: +// - nonce and balance of from address +// - balance of system address: will be used on merge to update SystemAddress's balance +func (s *ParallelStateDB) RevertSlotDB(from common.Address) { + s.parallel.kvChangesInSlot = make(map[common.Address]StateKeys) + + // balance := s.parallel.balanceChangesInSlot[from] + s.parallel.nonceChangesInSlot = make(map[common.Address]struct{}) + s.parallel.balanceChangesInSlot = make(map[common.Address]struct{}, 1) + s.parallel.addrStateChangesInSlot = make(map[common.Address]bool) // 0: created, 1: deleted + + selfStateObject := s.parallel.dirtiedStateObjectsInSlot[from] + systemAddress := s.parallel.systemAddress + systemStateObject := s.parallel.dirtiedStateObjectsInSlot[systemAddress] + s.parallel.dirtiedStateObjectsInSlot = make(map[common.Address]*StateObject, 2) + // keep these elements + s.parallel.dirtiedStateObjectsInSlot[from] = selfStateObject + s.parallel.dirtiedStateObjectsInSlot[systemAddress] = systemStateObject + s.parallel.balanceChangesInSlot[from] = struct{}{} + s.parallel.balanceChangesInSlot[systemAddress] = struct{}{} + s.parallel.nonceChangesInSlot[from] = struct{}{} +} + +func (s *ParallelStateDB) getBaseStateDB() *StateDB { + return &s.StateDB +} + +func (s *ParallelStateDB) SetSlotIndex(index int) { + s.parallel.SlotIndex = index +} + +// for parallel execution mode, try to get dirty StateObject in slot first. +// it is mainly used by journal revert right now. 
+func (s *ParallelStateDB) getStateObject(addr common.Address) *StateObject { + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + return obj + } + // can not call s.StateDB.getStateObject(), since `newObject` need ParallelStateDB as the interface + return s.getStateObjectNoSlot(addr) +} + +func (s *ParallelStateDB) storeStateObj(addr common.Address, stateObject *StateObject) { + // When a state object is stored into s.parallel.stateObjects, + // it belongs to base StateDB, it is confirmed and valid. + stateObject.db = s.parallel.baseStateDB + stateObject.dbItf = s.parallel.baseStateDB + // the object could be create in SlotDB, if it got the object from DB and + // update it to the shared `s.parallel.stateObjects`` + stateObject.db.storeParallelLock.Lock() + if _, ok := s.parallel.stateObjects.Load(addr); !ok { + s.parallel.stateObjects.Store(addr, stateObject) + } + stateObject.db.storeParallelLock.Unlock() +} + +func (s *ParallelStateDB) getStateObjectNoSlot(addr common.Address) *StateObject { + if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted { + return obj + } + return nil +} + +// createObject creates a new state object. If there is an existing account with +// the given address, it is overwritten and returned as the second return value. 
// prev is used for CreateAccount to get its balance
// Parallel mode:
// if prev in dirty: revert is ok
// if prev in unconfirmed DB: addr state read record, revert should not put it back
// if prev in main DB: addr state read record, revert should not put it back
// if prev does not exist: addr state read record,

// `prev` is used to handle revert, to recover with the `prev` object
// In Parallel mode, we only need to recover to `prev` in SlotDB,
// a.if it is not in SlotDB, `revert` will remove it from the SlotDB
// b.if it exists in SlotDB, `revert` will recover to the `prev` in SlotDB
// c.as `snapDestructs` it is the same
func (s *ParallelStateDB) createObject(addr common.Address) (newobj *StateObject) {
	log.Debug("ParallelStateDB createObject", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())

	// do not get from unconfirmed DB, since it will has problem on revert
	prev := s.parallel.dirtiedStateObjectsInSlot[addr]

	var prevdestruct bool

	if s.snap != nil && prev != nil {
		_, prevdestruct = s.snapDestructs[prev.address] // fixme, record the snapshot read for create Account
		s.parallel.addrSnapDestructsReadsInSlot[addr] = prevdestruct
		if !prevdestruct {
			// To destroy the previous trie node first and update the trie tree
			// with the new object on block commit.
			s.snapDestructs[prev.address] = struct{}{}
		}
	}
	newobj = newObject(s, s.isParallel, addr, Account{})
	newobj.setNonce(0) // sets the object to dirty
	if prev == nil {
		s.journal.append(createObjectChange{account: &addr})
	} else {
		s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct})
	}

	// s.parallel.dirtiedStateObjectsInSlot[addr] = newobj // would change the behavior of AddBalance...
	// record the creation in the slot-local change sets so conflict detection
	// and merge see this address as fully rewritten by this transaction
	s.parallel.addrStateChangesInSlot[addr] = true // the object is created
	s.parallel.nonceChangesInSlot[addr] = struct{}{}
	s.parallel.balanceChangesInSlot[addr] = struct{}{}
	s.parallel.codeChangesInSlot[addr] = struct{}{}
	// notice: all the KVs are cleared if any
	s.parallel.kvChangesInSlot[addr] = make(StateKeys)
	return newobj
}

// getDeletedStateObject is similar to getStateObject, but instead of returning
// nil for a deleted state object, it returns the actual object with the deleted
// flag set. This is needed by the state journal to revert to the correct s-
// destructed object instead of wiping all knowledge about the state object.
func (s *ParallelStateDB) getDeletedStateObject(addr common.Address) *StateObject {
	// Prefer live objects if any is available
	if obj, _ := s.getStateObjectFromStateObjects(addr); obj != nil {
		return obj
	}
	data, ok := s.getStateObjectFromSnapshotOrTrie(addr)
	if !ok {
		return nil
	}
	// Insert into the live set
	// if obj, ok := s.loadStateObj(addr); ok {
	//	 fixme: concurrent not safe, merge could update it...
	//	 return obj
	// }
	// this is why we have to use a separate getDeletedStateObject for ParallelStateDB
	// `s` has to be the ParallelStateDB
	obj := newObject(s, s.isParallel, addr, *data)
	s.storeStateObj(addr, obj)
	// s.SetStateObject(obj)
	return obj
}

// GetOrNewStateObject retrieves a state object or create a new state object if nil.
// dirtyInSlot -> Unconfirmed DB -> main DB -> snapshot, no?
// create one
func (s *ParallelStateDB) GetOrNewStateObject(addr common.Address) *StateObject {
	log.Debug("ParallelStateDB GetOrNewStateObject", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())

	var stateObject *StateObject = nil
	exist := true
	if stateObject, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
		return stateObject
	}
	stateObject, _ = s.getStateObjectFromUnconfirmedDB(addr)

	if stateObject == nil {
		stateObject = s.getStateObjectNoSlot(addr) // try to get from base db
	}
	if stateObject == nil || stateObject.deleted || stateObject.suicided {
		stateObject = s.createObject(addr)
		exist = false
	}

	// record what this transaction observed, for later conflict detection
	s.parallel.addrStateReadsInSlot[addr] = exist // true: exist, false: not exist
	return stateObject
}

// Exist reports whether the given account address exists in the state.
// Notably this also returns true for suicided accounts.
func (s *ParallelStateDB) Exist(addr common.Address) bool {
	log.Debug("ParallelStateDB Exist", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
	// 1.Try to get from dirty
	if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
		// dirty object should not be deleted, since deleted is only flagged on finalise
		// and if it is suicided in contract call, suicide is taken as exist until it is finalised
		// todo: add a check here, to be removed later
		if obj.deleted || obj.suicided {
			log.Error("Exist in dirty, but marked as deleted or suicided",
				"txIndex", s.txIndex, "baseTxIndex:", s.parallel.baseTxIndex)
		}
		return true
	}
	// 2.Try to get from unconfirmed & main DB
	// 2.1 Already read before
	if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok {
		return exist
	}
	// 2.2 Try to get from unconfirmed DB if exist
	if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok {
		s.parallel.addrStateReadsInSlot[addr] = exist // update and cache
		return exist
	}

	// 3.Try to get from main StateDB
	exist := s.getStateObjectNoSlot(addr) != nil
	s.parallel.addrStateReadsInSlot[addr] = exist // update and cache
	return exist
}

// Empty returns whether the state object is either non-existent
// or empty according to the EIP161 specification (balance = nonce = code = 0)
func (s *ParallelStateDB) Empty(addr common.Address) bool {
	log.Debug("ParallelStateDB Empty", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
	// 1.Try to get from dirty
	if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
		// dirty object is light copied and fixup on need,
		// empty could be wrong, except it is created with this TX
		if _, ok := s.parallel.addrStateChangesInSlot[addr]; ok {
			return obj.empty()
		}
		// so we have to check it manually
		// empty means: Nonce == 0 && Balance == 0 && CodeHash == emptyCodeHash
		if s.GetBalance(addr).Sign() != 0 { // check balance first, since it is most likely not zero
			return false
		}
		if s.GetNonce(addr) != 0 {
			return false
		}
		codeHash := s.GetCodeHash(addr)
		return bytes.Equal(codeHash.Bytes(), emptyCodeHash) // code is empty, the object is empty
	}
	// 2.Try to get from unconfirmed & main DB
	// 2.1 Already read before
	if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok {
		// exist means not empty
		return !exist
	}
	// 2.2 Try to get from unconfirmed DB if exist
	if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok {
		s.parallel.addrStateReadsInSlot[addr] = exist // update and cache
		return !exist
	}

	so := s.getStateObjectNoSlot(addr)
	empty := (so == nil || so.empty())
	s.parallel.addrStateReadsInSlot[addr] = !empty // update and cache
	return empty
}

// GetBalance retrieves the balance from the given address or 0 if object not found
// GetFrom the dirty list => from unconfirmed DB => get from main stateDB
func (s *ParallelStateDB) GetBalance(addr common.Address) *big.Int {
	log.Debug("ParallelStateDB GetBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
	if addr == s.parallel.systemAddress {
		s.parallel.systemAddressOpsCount++
	}
	// 1.Try to get from dirty
	if _, ok := s.parallel.balanceChangesInSlot[addr]; ok {
		if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
			// on balance fixup, addr may not exist in dirtiedStateObjectsInSlot
			// we intend to fixup balance based on unconfirmed DB or main DB
			return obj.Balance()
		}
	}
	// 2.Try to get from unconfirmed DB or main DB
	// 2.1 Already read before
	if balance, ok := s.parallel.balanceReadsInSlot[addr]; ok {
		return balance
	}
	// 2.2 Try to get from unconfirmed DB if exist
	if balance := s.getBalanceFromUnconfirmedDB(addr); balance != nil {
		s.parallel.balanceReadsInSlot[addr] = balance
		return balance
	}

	// 3. Try to get from main StateObject
	balance := common.Big0
	stateObject := s.getStateObjectNoSlot(addr)
	if stateObject != nil {
		balance = stateObject.Balance()
	}
	s.parallel.balanceReadsInSlot[addr] = balance
	return balance
}

// GetNonce retrieves the nonce for addr following the same lookup order as
// GetBalance: slot-dirty -> cached read -> unconfirmed DB -> main StateDB.
func (s *ParallelStateDB) GetNonce(addr common.Address) uint64 {
	log.Debug("ParallelStateDB GetNonce", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
	// 1.Try to get from dirty
	if _, ok := s.parallel.nonceChangesInSlot[addr]; ok {
		if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
			// on nonce fixup, addr may not exist in dirtiedStateObjectsInSlot
			// we intend to fixup nonce based on unconfirmed DB or main DB
			return obj.Nonce()
		}
	}
	// 2.Try to get from unconfirmed DB or main DB
	// 2.1 Already read before
	if nonce, ok := s.parallel.nonceReadsInSlot[addr]; ok {
		return nonce
	}
	// 2.2 Try to get from unconfirmed DB if exist
	if nonce, ok := s.getNonceFromUnconfirmedDB(addr); ok {
		s.parallel.nonceReadsInSlot[addr] = nonce
		return nonce
	}

	// 3.Try to get from main StateDB
	var nonce uint64 = 0
	stateObject := s.getStateObjectNoSlot(addr)
	if stateObject != nil {
		nonce = stateObject.Nonce()
	}
	s.parallel.nonceReadsInSlot[addr] = nonce

	return nonce
}

// GetCode retrieves the contract code for addr, lookup order as GetBalance.
func (s *ParallelStateDB) GetCode(addr common.Address) []byte {
	log.Debug("ParallelStateDB GetCode", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
	// 1.Try to get from dirty
	if _, ok := s.parallel.codeChangesInSlot[addr]; ok {
		if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
			// on code fixup, addr may not exist in dirtiedStateObjectsInSlot
			// we intend to fixup code based on unconfirmed DB or main DB
			code := obj.Code(s.db)
			return code
		}
	}
	// 2.Try to get from unconfirmed DB or main DB
	// 2.1 Already read before
	if code, ok := s.parallel.codeReadsInSlot[addr]; ok {
		return code
	}
	// 2.2 Try to get from unconfirmed DB if exist
	if code, ok := s.getCodeFromUnconfirmedDB(addr); ok {
		s.parallel.codeReadsInSlot[addr] = code
		return code
	}

	// 3. Try to get from main StateObject
	stateObject := s.getStateObjectNoSlot(addr)
	var code []byte
	if stateObject != nil {
		code = stateObject.Code(s.db)
	}
	s.parallel.codeReadsInSlot[addr] = code
	return code
}

// GetCodeSize retrieves the size of the contract code for addr; the code
// itself is cached in codeReadsInSlot so later GetCode calls hit the cache.
func (s *ParallelStateDB) GetCodeSize(addr common.Address) int {
	log.Debug("ParallelStateDB GetCodeSize", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())

	// 1.Try to get from dirty
	if _, ok := s.parallel.codeChangesInSlot[addr]; ok {
		if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
			// on code fixup, addr may not exist in dirtiedStateObjectsInSlot
			// we intend to fixup code based on unconfirmed DB or main DB
			return obj.CodeSize(s.db)
		}
	}
	// 2.Try to get from unconfirmed DB or main DB
	// 2.1 Already read before
	if code, ok := s.parallel.codeReadsInSlot[addr]; ok {
		return len(code) // len(nil) is 0 too
	}
	// 2.2 Try to get from unconfirmed DB if exist
	if code, ok := s.getCodeFromUnconfirmedDB(addr); ok {
		s.parallel.codeReadsInSlot[addr] = code
		return len(code) // len(nil) is 0 too
	}

	// 3. Try to get from main StateObject
	var codeSize int = 0
	var code []byte
	stateObject := s.getStateObjectNoSlot(addr)

	if stateObject != nil {
		code = stateObject.Code(s.db)
		codeSize = stateObject.CodeSize(s.db)
	}
	s.parallel.codeReadsInSlot[addr] = code
	return codeSize
}

// return value of GetCodeHash:
// - common.Hash{}: the address does not exist
// - emptyCodeHash: the address exist, but code is empty
// - others: the address exist, and code is not empty
func (s *ParallelStateDB) GetCodeHash(addr common.Address) common.Hash {
	log.Debug("ParallelStateDB GetCodeHash", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())

	// 1.Try to get from dirty
	if _, ok := s.parallel.codeChangesInSlot[addr]; ok {
		if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
			// on code fixup, addr may not exist in dirtiedStateObjectsInSlot
			// we intend to fixup balance based on unconfirmed DB or main DB
			return common.BytesToHash(obj.CodeHash())
		}
	}
	// 2.Try to get from unconfirmed DB or main DB
	// 2.1 Already read before
	if codeHash, ok := s.parallel.codeHashReadsInSlot[addr]; ok {
		return codeHash
	}
	// 2.2 Try to get from unconfirmed DB if exist
	if codeHash, ok := s.getCodeHashFromUnconfirmedDB(addr); ok {
		s.parallel.codeHashReadsInSlot[addr] = codeHash
		return codeHash
	}
	// 3. Try to get from main StateObject
	stateObject := s.getStateObjectNoSlot(addr)
	codeHash := common.Hash{}
	if stateObject != nil {
		codeHash = common.BytesToHash(stateObject.CodeHash())
	}
	s.parallel.codeHashReadsInSlot[addr] = codeHash
	return codeHash
}

// GetState retrieves a value from the given account's storage trie.
// For parallel mode, get from the state in order:
// -> self dirty, both Slot & MainProcessor
// -> pending of self: Slot on merge
// -> pending of unconfirmed DB
// -> pending of main StateDB
// -> origin
func (s *ParallelStateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
	log.Debug("ParallelStateDB GetState", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())

	// 1.Try to get from dirty
	if exist, ok := s.parallel.addrStateChangesInSlot[addr]; ok {
		if !exist {
			// address was destructed in this slot, storage reads as empty
			return common.Hash{}
		}
		obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot
		return obj.GetState(s.db, hash)
	}
	if keys, ok := s.parallel.kvChangesInSlot[addr]; ok {
		if _, ok := keys[hash]; ok {
			obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot
			return obj.GetState(s.db, hash)
		}
	}
	// 2.Try to get from unconfirmed DB or main DB
	// 2.1 Already read before
	if storage, ok := s.parallel.kvReadsInSlot[addr]; ok {
		if val, ok := storage.GetValue(hash); ok {
			return val
		}
	}
	// 2.2 Try to get from unconfirmed DB if exist
	if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok {
		if s.parallel.kvReadsInSlot[addr] == nil {
			s.parallel.kvReadsInSlot[addr] = newStorage(false)
		}
		s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache
		return val
	}

	// 3.Get from main StateDB
	stateObject := s.getStateObjectNoSlot(addr)
	val := common.Hash{}
	if stateObject != nil {
		val = stateObject.GetState(s.db, hash)
	}
	if s.parallel.kvReadsInSlot[addr] == nil {
		s.parallel.kvReadsInSlot[addr] = newStorage(false)
	}
	s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache
	return val
}

// GetCommittedState retrieves a value from the given account's committed storage trie.
func (s *ParallelStateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
	log.Debug("ParallelStateDB GetCommittedState", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
	// 1.No need to get from pending of itself even on merge, since stateobject in SlotDB won't do finalise
	// 2.Try to get from unconfirmed DB or main DB
	//   KVs in unconfirmed DB can be seen as pending storage
	//   KVs in main DB are merged from SlotDB and has done finalise() on merge, can be seen as pending storage too.
	// 2.1 Already read before
	if storage, ok := s.parallel.kvReadsInSlot[addr]; ok {
		if val, ok := storage.GetValue(hash); ok {
			return val
		}
	}
	// 2.2 Try to get from unconfirmed DB if exist
	if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok {
		if s.parallel.kvReadsInSlot[addr] == nil {
			s.parallel.kvReadsInSlot[addr] = newStorage(false)
		}
		s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache
		return val
	}

	// 3. Try to get from main DB
	stateObject := s.getStateObjectNoSlot(addr)
	val := common.Hash{}
	if stateObject != nil {
		val = stateObject.GetCommittedState(s.db, hash)
	}
	if s.parallel.kvReadsInSlot[addr] == nil {
		s.parallel.kvReadsInSlot[addr] = newStorage(false)
	}
	s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache
	return val
}

// HasSuicided reports whether addr has been marked suicided in this slot,
// an unconfirmed DB (where deleted implies suicided-or-empty), or the main DB.
func (s *ParallelStateDB) HasSuicided(addr common.Address) bool {
	log.Debug("ParallelStateDB HasSuicided", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
	// 1.Try to get from dirty
	if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok {
		return obj.suicided
	}
	// 2.Try to get from unconfirmed
	if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok {
		// NOTE(review): a deleted object in unconfirmed DB is treated as
		// suicided here, although it could also have been emptied — confirm
		return !exist
	}

	stateObject := s.getStateObjectNoSlot(addr)
	if stateObject != nil {
		return stateObject.suicided
	}
	return false
}

// AddBalance adds amount to the account associated with addr.
func (s *ParallelStateDB) AddBalance(addr common.Address, amount *big.Int) {
	// add balance will perform a read operation first
	// s.parallel.balanceReadsInSlot[addr] = struct{}{} // fixme: to make the balance valid, since unconfirmed would refer it.
	// if amount.Sign() == 0 {
	//	 if amount == 0, no balance change, but there is still an empty check.
	//	 take this empty check as addr state read(create, suicide, empty delete)
	//	 s.parallel.addrStateReadsInSlot[addr] = struct{}{}
	// }
	log.Debug("ParallelStateDB AddBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())

	stateObject := s.GetOrNewStateObject(addr)
	if stateObject != nil {
		if addr == s.parallel.systemAddress {
			s.parallel.systemAddressOpsCount++
		}
		// if amount.Sign() != 0 { // todo: to reenable it
		if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
			// first write in this slot: copy-on-write, then fix the balance up
			// from the layered read path before applying the delta
			newStateObject := stateObject.lightCopy(s) // light copy from main DB
			// do balance fixup from the confirmed DB, it could be more reliable than main DB
			balance := s.GetBalance(addr)
			newStateObject.setBalance(balance)
			// s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance() // could read from main DB or unconfirmed DB
			newStateObject.AddBalance(amount)
			s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
			s.parallel.balanceChangesInSlot[addr] = struct{}{}
			return
		}
		// already dirty, make sure the balance is fixed up
		// if stateObject.Balance()
		if addr != s.parallel.systemAddress {
			if stateObject.Balance().Cmp(s.GetBalance(addr)) != 0 {
				log.Warn("AddBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr,
					"stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", s.GetBalance(addr))
				stateObject.setBalance(s.GetBalance(addr))
			}
		}

		stateObject.AddBalance(amount)
		s.parallel.balanceChangesInSlot[addr] = struct{}{}
	}
}

// SubBalance subtracts amount from the account associated with addr.
func (s *ParallelStateDB) SubBalance(addr common.Address, amount *big.Int) {
	// if amount.Sign() != 0 {
	//	 unlike add, sub 0 balance will not touch empty object
	//	 s.parallel.balanceReadsInSlot[addr] = struct{}{}
	// }
	log.Debug("ParallelStateDB SubBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())

	stateObject := s.GetOrNewStateObject(addr)
	if stateObject != nil {
		if addr == s.parallel.systemAddress {
			s.parallel.systemAddressOpsCount++
		}

		// if amount.Sign() != 0 { // todo: to reenable it
		if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
			// first write in this slot: copy-on-write with balance fixup
			newStateObject := stateObject.lightCopy(s) // light copy from main DB
			// do balance fixup from the confirmed DB, it could be more reliable than main DB
			balance := s.GetBalance(addr)
			newStateObject.setBalance(balance)
			// s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance()
			newStateObject.SubBalance(amount)
			s.parallel.balanceChangesInSlot[addr] = struct{}{}
			s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
			return
		}
		// already dirty, make sure the balance is fixed up
		// if stateObject.Balance()
		if addr != s.parallel.systemAddress {
			if stateObject.Balance().Cmp(s.GetBalance(addr)) != 0 {
				log.Warn("SubBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr,
					"stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", s.GetBalance(addr))
				stateObject.setBalance(s.GetBalance(addr))
			}
		}

		stateObject.SubBalance(amount)
		s.parallel.balanceChangesInSlot[addr] = struct{}{}
	}
}

// SetBalance sets the balance for addr, copy-on-write on first write in slot.
func (s *ParallelStateDB) SetBalance(addr common.Address, amount *big.Int) {
	log.Debug("ParallelStateDB SetBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())

	stateObject := s.GetOrNewStateObject(addr)
	if stateObject != nil {
		if addr == s.parallel.systemAddress {
			s.parallel.systemAddressOpsCount++
		}
		if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
			newStateObject := stateObject.lightCopy(s)
			// update balance for revert, in case child contract is reverted,
			// it should revert to the previous balance
			balance := s.GetBalance(addr)
			newStateObject.setBalance(balance)
			newStateObject.SetBalance(amount)
			s.parallel.balanceChangesInSlot[addr] = struct{}{}
			s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
			return
		}

		stateObject.SetBalance(amount)
		s.parallel.balanceChangesInSlot[addr] = struct{}{}
	}
}

// SetNonce sets the nonce for addr, fixing the previous nonce up first so a
// journal revert restores the correct value.
func (s *ParallelStateDB) SetNonce(addr common.Address, nonce uint64) {
	log.Debug("ParallelStateDB SetNonce", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())

	stateObject := s.GetOrNewStateObject(addr)
	if stateObject != nil {
		if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
			newStateObject := stateObject.lightCopy(s)
			noncePre := s.GetNonce(addr)
			newStateObject.setNonce(noncePre) // nonce fixup
			newStateObject.SetNonce(nonce)
			s.parallel.nonceChangesInSlot[addr] = struct{}{}
			s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
			return
		}
		noncePre := s.GetNonce(addr)
		stateObject.setNonce(noncePre) // nonce fixup

		stateObject.SetNonce(nonce)
		s.parallel.nonceChangesInSlot[addr] = struct{}{}
	}
}

// SetCode sets the contract code for addr, fixing the previous code up first
// so a journal revert restores the correct code and code hash.
func (s *ParallelStateDB) SetCode(addr common.Address, code []byte) {
	log.Debug("ParallelStateDB SetCode", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())

	stateObject := s.GetOrNewStateObject(addr)
	if stateObject != nil {
		codeHash := crypto.Keccak256Hash(code)
		if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
			newStateObject := stateObject.lightCopy(s)
			codePre := s.GetCode(addr) // code fixup
			codeHashPre := crypto.Keccak256Hash(codePre)
			newStateObject.setCode(codeHashPre, codePre)

			newStateObject.SetCode(codeHash, code)
			s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
			s.parallel.codeChangesInSlot[addr] = struct{}{}
			return
		}
		codePre := s.GetCode(addr) // code fixup
		codeHashPre := crypto.Keccak256Hash(codePre)
		stateObject.setCode(codeHashPre, codePre)

		stateObject.SetCode(codeHash, code)
		s.parallel.codeChangesInSlot[addr] = struct{}{}
	}
}

// SetState stores value for key in addr's storage, copy-on-write on first
// write; a no-op write is skipped only when this tx directly follows baseTxIndex.
func (s *ParallelStateDB) SetState(addr common.Address, key, value common.Hash) {
	log.Debug("ParallelStateDB SetState", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())

	stateObject := s.GetOrNewStateObject(addr) // attention: if StateObject's lightCopy, its storage is only a part of the full storage,
	if stateObject != nil {
		if s.parallel.baseTxIndex+1 == s.txIndex {
			// we check if state is unchanged
			// only when current transaction is the next transaction to be committed
			// fixme: there is a bug, block: 14,962,284,
			// stateObject is in dirty (light copy), but the key is in mainStateDB
			// stateObject dirty -> committed, will skip mainStateDB dirty
			if s.GetState(addr, key) == value {
				log.Debug("Skip set same state", "baseTxIndex", s.parallel.baseTxIndex,
					"txIndex", s.txIndex, "addr", addr,
					"key", key, "value", value)
				return
			}
		}

		if s.parallel.kvChangesInSlot[addr] == nil {
			s.parallel.kvChangesInSlot[addr] = make(StateKeys) // make(Storage, defaultNumOfSlots)
		}

		if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
			newStateObject := stateObject.lightCopy(s)
			newStateObject.SetState(s.db, key, value)
			s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
			return
		}
		// do State Update
		stateObject.SetState(s.db, key, value)
	}
}

// Suicide marks the given account as suicided.
// This clears the account balance.
//
// The account's state object is still available until the state is committed,
// getStateObject will return a non-nil account after Suicide.
func (s *ParallelStateDB) Suicide(addr common.Address) bool {
	log.Debug("ParallelStateDB Suicide", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())

	var stateObject *StateObject
	// 1.Try to get from dirty, it could be suicided inside of contract call
	stateObject = s.parallel.dirtiedStateObjectsInSlot[addr]
	if stateObject == nil {
		// 2.Try to get from unconfirmed, if deleted return false, since the address does not exist
		if obj, ok := s.getStateObjectFromUnconfirmedDB(addr); ok {
			stateObject = obj
			s.parallel.addrStateReadsInSlot[addr] = !stateObject.deleted // true: exist, false: deleted
			if stateObject.deleted {
				log.Error("Suicide addr alreay deleted in confirmed DB", "txIndex", s.txIndex, "addr", addr)
				return false
			}
		}
	}

	if stateObject == nil {
		// 3.Try to get from main StateDB
		stateObject = s.getStateObjectNoSlot(addr)
		if stateObject == nil {
			s.parallel.addrStateReadsInSlot[addr] = false // true: exist, false: deleted
			log.Error("Suicide addr not exist", "txIndex", s.txIndex, "addr", addr)
			return false
		}
		s.parallel.addrStateReadsInSlot[addr] = true // true: exist, false: deleted
	}

	// journal the change before mutating so revert can restore it
	s.journal.append(suicideChange{
		account:     &addr,
		prev:        stateObject.suicided, // todo: must be false?
		prevbalance: new(big.Int).Set(s.GetBalance(addr)),
	})

	if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
		// do copy-on-write for suicide "write"
		newStateObject := stateObject.lightCopy(s)
		newStateObject.markSuicided()
		newStateObject.data.Balance = new(big.Int)
		s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject
		s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist any more,
		// s.parallel.nonceChangesInSlot[addr] = struct{}{}
		s.parallel.balanceChangesInSlot[addr] = struct{}{}
		s.parallel.codeChangesInSlot[addr] = struct{}{}
		// s.parallel.kvChangesInSlot[addr] = make(StateKeys) // all key changes are discarded
		return true
	}
	s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist any more,
	s.parallel.balanceChangesInSlot[addr] = struct{}{}
	s.parallel.codeChangesInSlot[addr] = struct{}{}

	stateObject.markSuicided()
	stateObject.data.Balance = new(big.Int)
	return true
}

// CreateAccount explicitly creates a state object. If a state object with the address
// already exists the balance is carried over to the new account.
//
// CreateAccount is called during the EVM CREATE operation. The situation might arise that
// a contract does the following:
//
//   1. sends funds to sha(account ++ (nonce + 1))
//   2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1)
//
// Carrying over the balance ensures that Ether doesn't disappear.
+func (s *ParallelStateDB) CreateAccount(addr common.Address) { + // no matter it is got from dirty, unconfirmed or main DB + // if addr not exist, preBalance will be common.Big0, it is same as new(big.Int) which + // is the value newObject(), + preBalance := s.GetBalance(addr) // parallel balance read will be recorded inside of GetBalance + newObj := s.createObject(addr) + newObj.setBalance(new(big.Int).Set(preBalance)) // new big.Int for newObj +} + +// RevertToSnapshot reverts all state changes made since the given revision. +func (s *ParallelStateDB) RevertToSnapshot(revid int) { + // Find the snapshot in the stack of valid snapshots. + idx := sort.Search(len(s.validRevisions), func(i int) bool { + return s.validRevisions[i].id >= revid + }) + if idx == len(s.validRevisions) || s.validRevisions[idx].id != revid { + panic(fmt.Errorf("revision id %v cannot be reverted", revid)) + } + snapshot := s.validRevisions[idx].journalIndex + + // Replay the journal to undo changes and remove invalidated snapshots + s.journal.revert(s, snapshot) + s.validRevisions = s.validRevisions[:idx] +} + +// AddRefund adds gas to the refund counter +// journal.append will use ParallelState for revert +func (s *ParallelStateDB) AddRefund(gas uint64) { // fixme: not needed + s.journal.append(refundChange{prev: s.refund}) + s.refund += gas +} + +// SubRefund removes gas from the refund counter. 
// This method will panic if the refund counter goes below zero
func (s *ParallelStateDB) SubRefund(gas uint64) { // fixme: not needed
	s.journal.append(refundChange{prev: s.refund})
	if gas > s.refund {
		// we don't need to panic here if we read the wrong state in parallel mode
		// we just need to redo this transaction
		log.Info(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, s.refund), "tx", s.thash.String())
		s.parallel.needsRedo = true
		return
	}
	s.refund -= gas
}

// For Parallel Execution Mode, it can be seen as Penetrated Access:
// -------------------------------------------------------
// | BaseTxIndex | Unconfirmed Txs... | Current TxIndex |
// -------------------------------------------------------
// Access from the unconfirmed DB with range&priority: txIndex -1(previous tx) -> baseTxIndex + 1
func (s *ParallelStateDB) getBalanceFromUnconfirmedDB(addr common.Address) *big.Int {
	if addr == s.parallel.systemAddress {
		// never get systemaddress from unconfirmed DB
		return nil
	}

	// walk newest-to-oldest so the most recent unconfirmed write wins
	for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- {
		if db, ok := s.parallel.unconfirmedDBInShot[i]; ok {
			// 1.Refer the state of address, exist or not in dirtiedStateObjectsInSlot
			if obj, exist := db.parallel.dirtiedStateObjectsInSlot[addr]; exist {
				balanceHit := false
				if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist {
					balanceHit = true
				}
				if _, exist := db.parallel.balanceChangesInSlot[addr]; exist { // only changed balance is reliable
					balanceHit = true
				}
				if !balanceHit {
					continue
				}
				balance := obj.Balance()
				if obj.deleted {
					balance = common.Big0
				}
				return balance
			}
		}
	}
	return nil
}

// Similar to getBalanceFromUnconfirmedDB
func (s *ParallelStateDB) getNonceFromUnconfirmedDB(addr common.Address) (uint64, bool) {
	if addr == s.parallel.systemAddress {
		// never get systemaddress from unconfirmed DB
		return 0, false
	}

	for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- {
		if unconfirmedDb, ok := s.parallel.unconfirmedDBInShot[i]; ok {
			nonceHit := false
			if _, ok := unconfirmedDb.parallel.addrStateChangesInSlot[addr]; ok {
				nonceHit = true
			} else if _, ok := unconfirmedDb.parallel.nonceChangesInSlot[addr]; ok {
				nonceHit = true
			}
			if !nonceHit {
				// nonce refer not hit, try next unconfirmedDb
				continue
			}
			// nonce hit, return the nonce
			obj := unconfirmedDb.parallel.dirtiedStateObjectsInSlot[addr]
			if obj == nil {
				// could not exist, if it is changed but reverted
				// fixme: revert should remove the change record
				log.Debug("Get nonce from UnconfirmedDB, changed but object not exist, ",
					"txIndex", s.txIndex, "referred txIndex", i, "addr", addr)
				continue
			}
			nonce := obj.Nonce()
			// deleted object with nonce == 0
			if obj.deleted {
				nonce = 0
			}
			return nonce, true
		}
	}
	return 0, false
}

// Similar to getBalanceFromUnconfirmedDB
// It is not only for code, but also codeHash and codeSize share this lookup pattern.
func (s *ParallelStateDB) getCodeFromUnconfirmedDB(addr common.Address) ([]byte, bool) {
	if addr == s.parallel.systemAddress {
		// never get systemaddress from unconfirmed DB
		return nil, false
	}

	// walk newest-to-oldest so the most recent unconfirmed write wins
	for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- {
		if db, ok := s.parallel.unconfirmedDBInShot[i]; ok {
			codeHit := false
			if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist {
				codeHit = true
			}
			if _, exist := db.parallel.codeChangesInSlot[addr]; exist {
				codeHit = true
			}
			if !codeHit {
				// try next unconfirmedDb
				continue
			}
			obj := db.parallel.dirtiedStateObjectsInSlot[addr]
			if obj == nil {
				// could not exist, if it is changed but reverted
				// fixme: revert should remove the change record
				log.Debug("Get code from UnconfirmedDB, changed but object not exist, ",
					"txIndex", s.txIndex, "referred txIndex", i, "addr", addr)
				continue
			}
			code := obj.Code(s.db)
			if obj.deleted {
				code = nil
			}
			return code, true
		}
	}
	return nil, false
}

// Similar to getCodeFromUnconfirmedDB
// but differ when address is deleted or not exist
func (s *ParallelStateDB) getCodeHashFromUnconfirmedDB(addr common.Address) (common.Hash, bool) {
	if addr == s.parallel.systemAddress {
		// never get systemaddress from unconfirmed DB
		return common.Hash{}, false
	}

	for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- {
		if db, ok := s.parallel.unconfirmedDBInShot[i]; ok {
			hashHit := false
			if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist {
				hashHit = true
			}
			if _, exist := db.parallel.codeChangesInSlot[addr]; exist {
				hashHit = true
			}
			if !hashHit {
				// try next unconfirmedDb
				continue
			}

			obj := db.parallel.dirtiedStateObjectsInSlot[addr]
			if obj == nil {
				// could not exist, if it is changed but reverted
				// fixme: revert should remove the change record
				log.Debug("Get codeHash from UnconfirmedDB, changed but object not exist, ",
					"txIndex", s.txIndex, "referred txIndex", i, "addr", addr)
				continue
			}
			// deleted object reports the zero hash (address does not exist)
			codeHash := common.Hash{}
			if !obj.deleted {
				codeHash = common.BytesToHash(obj.CodeHash())
			}
			return codeHash, true
		}
	}
	return common.Hash{}, false
}

// Similar to getCodeFromUnconfirmedDB
// It is for address state check of: Exist(), Empty() and HasSuicided()
// Since the unconfirmed DB should have done Finalise() with `deleteEmptyObjects = true`
// If the dirty address is empty or suicided, it will be marked as deleted, so we only need to return `deleted` or not.
func (s *ParallelStateDB) getAddrStateFromUnconfirmedDB(addr common.Address) (bool, bool) {
	if addr == s.parallel.systemAddress {
		// never get systemaddress from unconfirmed DB
		return false, false
	}

	// check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx)
	for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- {
		if db, ok := s.parallel.unconfirmedDBInShot[i]; ok {
			if exist, ok := db.parallel.addrStateChangesInSlot[addr]; ok {
				if _, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; !ok {
					// could not exist, if it is changed but reverted
					// fixme: revert should remove the change record
					log.Debug("Get addr State from UnconfirmedDB, changed but object not exist, ",
						"txIndex", s.txIndex, "referred txIndex", i, "addr", addr)
					continue
				}

				return exist, true
			}
		}
	}
	return false, false
}

// getKVFromUnconfirmedDB looks key up in the unconfirmed DBs, newest first;
// a deleted object reports the zero hash as the (found) value.
func (s *ParallelStateDB) getKVFromUnconfirmedDB(addr common.Address, key common.Hash) (common.Hash, bool) {
	// check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx)
	for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- {
		if db, ok := s.parallel.unconfirmedDBInShot[i]; ok {
			if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { // if deleted on merge, can get from main StateDB, ok but fixme: concurrent safe
				if obj.deleted {
					return common.Hash{}, true
				}
				if _, ok := db.parallel.kvChangesInSlot[addr]; ok {
					if val, exist := obj.dirtyStorage.GetValue(key); exist {
						return val, true
					}
					if val, exist := obj.pendingStorage.GetValue(key); exist { // fixme: can be removed
						log.Error("Get KV from Unconfirmed StateDB, in pending",
							"my txIndex", s.txIndex, "DB's txIndex", i, "addr", addr,
							"key", key, "val", val)
						return val, true
					}
				}
			}
		}
	}
	return common.Hash{}, false
}

// getStateObjectFromUnconfirmedDB returns the dirty object for addr from the
// newest unconfirmed DB that holds one.
func (s *ParallelStateDB) getStateObjectFromUnconfirmedDB(addr common.Address) (*StateObject, bool) {
	// check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx)
	for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- {
		if db, ok := s.parallel.unconfirmedDBInShot[i]; ok {
			if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { // if deleted on merge, can get from main StateDB, ok but fixme: concurrent safe
				return obj, true
			}
		}
	}
	return nil, false
}

func (s *ParallelStateDB) IsParallelReadsValid() bool {
	slotDB := s
	if !slotDB.parallel.isSlotDB {
		log.Error("IsSlotDBReadsValid slotDB should be slot DB", "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex)
		return false
	}

	mainDB := slotDB.parallel.baseStateDB
	if mainDB.parallel.isSlotDB {
		log.Error("IsSlotDBReadsValid s should be main DB", "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex)
		return false
	}
	// for nonce
	for addr, nonceSlot := range slotDB.parallel.nonceReadsInSlot {
		nonceMain := mainDB.GetNonce(addr)
		if nonceSlot != nonceMain {
			log.Debug("IsSlotDBReadsValid nonce read is invalid", "addr", addr,
				"nonceSlot", nonceSlot, "nonceMain", nonceMain, "SlotIndex", slotDB.parallel.SlotIndex,
				"txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex)
			return false
		}
	}
	// balance
	for addr, balanceSlot := range slotDB.parallel.balanceReadsInSlot {
		if addr != s.parallel.systemAddress { // skip balance check for system address
			balanceMain := mainDB.GetBalance(addr)
			if balanceSlot.Cmp(balanceMain) != 0 {
				log.Debug("IsSlotDBReadsValid balance read is invalid", "addr",
addr, + "balanceSlot", balanceSlot, "balanceMain", balanceMain, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + } + // check code + for addr, codeSlot := range slotDB.parallel.codeReadsInSlot { + codeMain := mainDB.GetCode(addr) + if !bytes.Equal(codeSlot, codeMain) { + log.Debug("IsSlotDBReadsValid code read is invalid", "addr", addr, + "len codeSlot", len(codeSlot), "len codeMain", len(codeMain), "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + // check codeHash + for addr, codeHashSlot := range slotDB.parallel.codeHashReadsInSlot { + codeHashMain := mainDB.GetCodeHash(addr) + if !bytes.Equal(codeHashSlot.Bytes(), codeHashMain.Bytes()) { + log.Debug("IsSlotDBReadsValid codehash read is invalid", "addr", addr, + "codeHashSlot", codeHashSlot, "codeHashMain", codeHashMain, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + // check KV + for addr, slotStorage := range slotDB.parallel.kvReadsInSlot { + conflict := false + slotStorage.Range(func(keySlot, valSlot interface{}) bool { + valMain := mainDB.GetState(addr, keySlot.(common.Hash)) + if !bytes.Equal(valSlot.(common.Hash).Bytes(), valMain.Bytes()) { + log.Debug("IsSlotDBReadsValid KV read is invalid", "addr", addr, + "key", keySlot.(common.Hash), "valSlot", valSlot.(common.Hash), + "valMain", valMain, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + conflict = true + return false // return false, Range will be terminated. 
+ } + return true // return true, Range will try next KV + }) + if conflict { + return false + } + } + // addr state check + for addr, stateSlot := range slotDB.parallel.addrStateReadsInSlot { + stateMain := false // addr not exist + if mainDB.getStateObject(addr) != nil { + stateMain = true // addr exist in main DB + } + if stateSlot != stateMain { + // skip addr state check for system address + if addr != s.parallel.systemAddress { + log.Debug("IsSlotDBReadsValid addrState read invalid(true: exist, false: not exist)", + "addr", addr, "stateSlot", stateSlot, "stateMain", stateMain, + "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + } + // snapshot destructs check + + for addr, destructRead := range slotDB.parallel.addrSnapDestructsReadsInSlot { + mainObj := mainDB.getStateObject(addr) + if mainObj == nil { + log.Debug("IsSlotDBReadsValid snapshot destructs read invalid, address should exist", + "addr", addr, "destruct", destructRead, + "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + _, destructMain := mainDB.snapDestructs[addr] // addr not exist + if destructRead != destructMain { + log.Debug("IsSlotDBReadsValid snapshot destructs read invalid", + "addr", addr, "destructRead", destructRead, "destructMain", destructMain, + "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + + return true +} + +// For most of the transactions, systemAddressOpsCount should be 3: +// one for SetBalance(0) on NewSlotDB() +// the other is for AddBalance(GasFee) at the end. 
+// (systemAddressOpsCount > 3) means the transaction tries to access systemAddress, in +// this case, we should redo and keep its balance on NewSlotDB() +func (s *ParallelStateDB) SystemAddressRedo() bool { + return s.parallel.systemAddressOpsCount > 4 +} + +// NeedsRedo returns true if there is any clear reason that we need to redo this transaction +func (s *ParallelStateDB) NeedsRedo() bool { + return s.parallel.needsRedo +} diff --git a/core/state_processor.go b/core/state_processor.go index 9e6da7fee1..4dde8c064a 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -77,7 +77,7 @@ type ParallelStateProcessor struct { // txReqAccountSorted map[common.Address][]*ParallelTxRequest // fixme: *ParallelTxRequest => ParallelTxRequest? slotState []*SlotState // idle, or pending messages mergedTxIndex int // the latest finalized tx index - slotDBsToRelease []*state.StateDB + slotDBsToRelease []*state.ParallelStateDB debugErrorRedoNum int debugConflictRedoNum int } @@ -398,8 +398,8 @@ func (p *LightStateProcessor) LightProcess(diffLayer *types.DiffLayer, block *ty type SlotState struct { pendingTxReqChan chan struct{} pendingConfirmChan chan *ParallelTxResult - pendingTxReqList []*ParallelTxRequest // maintained by dispatcher for dispatch policy - slotdbChan chan *state.StateDB // dispatch will create and send this slotDB to slot + pendingTxReqList []*ParallelTxRequest // maintained by dispatcher for dispatch policy + slotdbChan chan *state.ParallelStateDB // dispatch will create and send this slotDB to slot // txReqUnits []*ParallelDispatchUnit // only dispatch can accesssd unconfirmedStateDBs *sync.Map // [int]*state.StateDB // fixme: concurrent safe, not use sync.Map? } @@ -411,7 +411,7 @@ type ParallelTxResult struct { err error // to describe error message? 
txReq *ParallelTxRequest receipt *types.Receipt - slotDB *state.StateDB // if updated, it is not equal to txReq.slotDB + slotDB *state.ParallelStateDB // if updated, it is not equal to txReq.slotDB gpSlot *GasPool evm *vm.EVM result *ExecutionResult @@ -420,7 +420,7 @@ type ParallelTxResult struct { type ParallelTxRequest struct { txIndex int tx *types.Transaction - slotDB *state.StateDB + slotDB *state.ParallelStateDB gasLimit uint64 msg types.Message block *types.Block @@ -441,7 +441,7 @@ func (p *ParallelStateProcessor) init() { for i := 0; i < p.parallelNum; i++ { p.slotState[i] = &SlotState{ - slotdbChan: make(chan *state.StateDB, 1), + slotdbChan: make(chan *state.ParallelStateDB, 1), pendingTxReqChan: make(chan struct{}, 1), pendingConfirmChan: make(chan *ParallelTxResult, p.queueSize), } @@ -894,7 +894,7 @@ func (p *ParallelStateProcessor) resetState(txNum int, statedb *state.StateDB) { statedb.PrepareForParallel() - p.slotDBsToRelease = make([]*state.StateDB, 0, txNum) + p.slotDBsToRelease = make([]*state.ParallelStateDB, 0, txNum) /* stateDBsToRelease := p.slotDBsToRelease go func() { @@ -916,10 +916,12 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat header = block.Header() gp = new(GasPool).AddGas(block.GasLimit()) ) - log.Info("ProcessParallel", "block", header.Number) var receipts = make([]*types.Receipt, 0) txNum := len(block.Transactions()) p.resetState(txNum, statedb) + if txNum > 0 { + log.Info("ProcessParallel", "block", header.Number) + } // Iterate over and process the individual transactions posa, isPoSA := p.engine.(consensus.PoSA) @@ -1150,7 +1152,7 @@ func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainCon return receipt, err } -func applyTransactionStageExecution(msg types.Message, gp *GasPool, statedb *state.StateDB, evm *vm.EVM) (*vm.EVM, *ExecutionResult, error) { +func applyTransactionStageExecution(msg types.Message, gp *GasPool, statedb *state.ParallelStateDB, evm *vm.EVM) 
(*vm.EVM, *ExecutionResult, error) { // Create a new context to be used in the EVM environment. txContext := NewEVMTxContext(msg) evm.Reset(txContext, statedb) @@ -1164,7 +1166,7 @@ func applyTransactionStageExecution(msg types.Message, gp *GasPool, statedb *sta return evm, result, err } -func applyTransactionStageFinalization(evm *vm.EVM, result *ExecutionResult, msg types.Message, config *params.ChainConfig, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, receiptProcessors ...ReceiptProcessor) (*types.Receipt, error) { +func applyTransactionStageFinalization(evm *vm.EVM, result *ExecutionResult, msg types.Message, config *params.ChainConfig, statedb *state.ParallelStateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, receiptProcessors ...ReceiptProcessor) (*types.Receipt, error) { // Update the state with pending changes. var root []byte if config.IsByzantium(header.Number) { From 79afba039a42841a89459fd4c788d0ea72faa1d8 Mon Sep 17 00:00:00 2001 From: setunapo Date: Fri, 29 Apr 2022 21:57:44 +0800 Subject: [PATCH 10/18] 0505: For a Good Dispatcher ** WBNB balance makeup by GetBalanceOpCode & depth add a lock to fix WBNB make up concurrent crash wbnb makeup by balance depth add a new interface GetBalanceOpCode ** universal unconfirmed DB ** dispatch: static & dynamic, configurable ** redesign dispatcher module shold avoid redundant redo steal tx when idle Aggresive result confirm in Stage2, when all tx have been executed no steal in stage 2 configurable: maxRedoCounterInstage1 > fixups: - mergedTxIndex concurrent access, disorder - fix systemAddrRedo - make sure all slot are stopped before next block --- core/state/journal.go | 2 + core/state/statedb.go | 241 ++++++++++++-- core/state_processor.go | 695 ++++++++++++++++++++++++++++++++-------- core/vm/evm.go | 1 + core/vm/instructions.go | 2 +- core/vm/interface.go | 2 + 6 files changed, 798 insertions(+), 145 deletions(-) diff --git a/core/state/journal.go 
b/core/state/journal.go index e267205688..dbb552c142 100644 --- a/core/state/journal.go +++ b/core/state/journal.go @@ -170,7 +170,9 @@ func (ch resetObjectChange) revert(dber StateDBer) { s.storeStateObj(ch.prev.address, ch.prev) } if !ch.prevdestruct && s.snap != nil { + s.snapParallelLock.Lock() delete(s.snapDestructs, ch.prev.address) + s.snapParallelLock.Unlock() } } diff --git a/core/state/statedb.go b/core/state/statedb.go index 8289e8cad0..2b9f954047 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -52,6 +52,18 @@ var ( emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") emptyAddr = crypto.Keccak256Hash(common.Address{}.Bytes()) + + // https://bscscan.com/address/0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c + WBNBAddress = common.HexToAddress("0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c") + // EVM use big-endian mode, so as the MethodID + WBNBAddress_deposit = []byte{0xd0, 0xe3, 0x0d, 0xb0} // "0xd0e30db0": Keccak-256("deposit()") + WBNBAddress_withdraw = []byte{0x2e, 0x1a, 0x7d, 0x4d} // "0x2e1a7d4d": Keccak-256("withdraw(uint256)") + WBNBAddress_totalSupply = []byte{0x18, 0x16, 0x0d, 0xdd} // "0x18160ddd": Keccak-256("totalSupply()") + WBNBAddress_approve = []byte{0x09, 0x5e, 0xa7, 0xb3} // "0x095ea7b3": Keccak-256("approve(address,uint256)") + WBNBAddress_transfer = []byte{0xa9, 0x05, 0x9c, 0xbb} // "0xa9059cbb": Keccak-256("transfer(address,uint256)") + WBNBAddress_transferFrom = []byte{0x23, 0xb8, 0x72, 0xdd} // "0x23b872dd": Keccak-256("transferFrom(address,address,uint256)") + // unknown WBNB interface 1: {0xDD, 0x62,0xED, 0x3E} in block: 14,248,627 + // unknown WBNB interface 2: {0x70, 0xa0,0x82, 0x31} in block: 14,249,300 ) type proofList [][]byte @@ -477,6 +489,10 @@ func (s *StateDB) GetBalance(addr common.Address) *big.Int { return balance } +func (s *StateDB) GetBalanceOpCode(addr common.Address) *big.Int { + return s.GetBalance(addr) +} + func (s *StateDB) GetNonce(addr common.Address) 
uint64 { var nonce uint64 = 0 stateObject := s.getStateObject(addr) @@ -864,12 +880,14 @@ func (s *StateDB) createObject(addr common.Address) (newobj *StateObject) { var prevdestruct bool if s.snap != nil && prev != nil { + s.snapParallelLock.Lock() // fixme: with new dispatch policy, the ending Tx could runing, while the block have processed. _, prevdestruct = s.snapDestructs[prev.address] if !prevdestruct { // To destroy the previous trie node first and update the trie tree // with the new object on block commit. s.snapDestructs[prev.address] = struct{}{} } + s.snapParallelLock.Unlock() } newobj = newObject(s, s.isParallel, addr, Account{}) newobj.setNonce(0) // sets the object to dirty @@ -1191,7 +1209,7 @@ func (s *StateDB) CopyForSlot() *ParallelStateDB { dirtiedStateObjectsInSlot: make(map[common.Address]*StateObject), // stateObjectsPool.Get().(map[common.Address]*StateObject), } state := &ParallelStateDB{ - StateDB{ + StateDB: StateDB{ db: s.db, trie: s.db.CopyTrie(s.trie), stateObjects: make(map[common.Address]*StateObject), // replaced by parallel.stateObjects in parallel mode @@ -1206,6 +1224,10 @@ func (s *StateDB) CopyForSlot() *ParallelStateDB { isParallel: true, parallel: parallel, }, + wbnbMakeUp: true, + // wbnbBalanceAccessed: 0, + // wbnbBalanceAccessedExpected: 0, + balanceUpdateDepth: 0, } for hash, preimage := range s.preimages { state.preimages[hash] = preimage @@ -1325,9 +1347,11 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { // fixme: concurrent safe. // transactions within the same block might self destruct and then // ressurrect an account; but the snapshotter needs both events. 
if s.snap != nil { + s.snapParallelLock.Lock() s.snapDestructs[obj.address] = struct{}{} // We need to maintain account deletions explicitly (will remain set indefinitely) - delete(s.snapAccounts, obj.address) // Clear out any previously updated account data (may be recreated via a ressurrect) - delete(s.snapStorage, obj.address) // Clear out any previously updated storage data (may be recreated via a ressurrect) + s.snapParallelLock.Unlock() + delete(s.snapAccounts, obj.address) // Clear out any previously updated account data (may be recreated via a ressurrect) + delete(s.snapStorage, obj.address) // Clear out any previously updated storage data (may be recreated via a ressurrect) } } else { // 1.none parallel mode, we do obj.finalise(true) as normal @@ -2143,8 +2167,18 @@ func (s *StateDB) MergeSlotDB(slotDb *ParallelStateDB, slotReceipt *types.Receip } } +func (s *StateDB) ParallelMakeUp(addr common.Address, input []byte) { + // do nothing, this API is for parallel mode +} + type ParallelStateDB struct { StateDB + wbnbMakeUp bool // default true, we can not do WBNB make up only when supported API call is received. + // wbnbBalanceAccessed int // how many times the WBNB's balance is acccessed, i.e. `GetBalance`, `AddBalance`, `SubBalance`, `SetBalance` + // wbnbBalanceAccessedExpected int // how many times the WBNB contract is called. + wbnbMakeUpLock sync.RWMutex // we may make up WBNB's balanace of the unconfirmed DB, while other slot read it. + // wbnbContractCalled int // how many times the WBNB contract is called. + balanceUpdateDepth int } // NewSlotDB creates a new State DB based on the provided StateDB. 
@@ -2266,6 +2300,7 @@ func (s *ParallelStateDB) createObject(addr common.Address) (newobj *StateObject var prevdestruct bool if s.snap != nil && prev != nil { + s.snapParallelLock.Lock() _, prevdestruct = s.snapDestructs[prev.address] // fixme, record the snapshot read for create Account s.parallel.addrSnapDestructsReadsInSlot[addr] = prevdestruct if !prevdestruct { @@ -2273,6 +2308,8 @@ // with the new object on block commit. s.snapDestructs[prev.address] = struct{}{} } + s.snapParallelLock.Unlock() + } newobj = newObject(s, s.isParallel, addr, Account{}) newobj.setNonce(0) // sets the object to dirty @@ -2445,6 +2482,15 @@ func (s *ParallelStateDB) GetBalance(addr common.Address) *big.Int { return balance } +func (s *ParallelStateDB) GetBalanceOpCode(addr common.Address) *big.Int { + if addr == WBNBAddress { + // s.wbnbBalanceAccessed++ + s.wbnbMakeUp = false + log.Debug("GetBalanceOpCode for WBNB", "txIndex", s.TxIndex()) + } + return s.GetBalance(addr) +} + func (s *ParallelStateDB) GetNonce(addr common.Address) uint64 { // 1.Try to get from dirty if _, ok := s.parallel.nonceChangesInSlot[addr]; ok { @@ -2684,12 +2730,18 @@ func (s *ParallelStateDB) AddBalance(addr common.Address, amount *big.Int) { // take this empty check as addr state read(create, suicide, empty delete) // s.parallel.addrStateReadsInSlot[addr] = struct{}{} // } - + s.balanceUpdateDepth++ + defer func() { + s.balanceUpdateDepth-- + }() stateObject := s.GetOrNewStateObject(addr) if stateObject != nil { if addr == s.parallel.systemAddress { s.parallel.systemAddressOpsCount++ } + //else if addr == WBNBAddress { + // s.wbnbBalanceAccessed++ + //} // if amount.Sign() != 0 { // todo: to reenable it if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { newStateObject := stateObject.lightCopy(s) // light copy from main DB @@ -2705,10 +2757,11 @@ func (s *ParallelStateDB) AddBalance(addr common.Address, amount 
*big.Int) { // already dirty, make sure the balance if fixed up // if stateObject.Balance() if addr != s.parallel.systemAddress { - if stateObject.Balance().Cmp(s.GetBalance(addr)) != 0 { - log.Warn("AddBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr, - "stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", s.GetBalance(addr)) - stateObject.setBalance(s.GetBalance(addr)) + balance := s.GetBalance(addr) + if stateObject.Balance().Cmp(balance) != 0 { + log.Warn("AddBalance in dirty, but balance has not do fixup", "txIndex", s.txIndex, "addr", addr, + "stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", balance) + stateObject.setBalance(balance) } } @@ -2723,12 +2776,19 @@ func (s *ParallelStateDB) SubBalance(addr common.Address, amount *big.Int) { // unlike add, sub 0 balance will not touch empty object // s.parallel.balanceReadsInSlot[addr] = struct{}{} // } + s.balanceUpdateDepth++ + defer func() { + s.balanceUpdateDepth-- + }() stateObject := s.GetOrNewStateObject(addr) if stateObject != nil { if addr == s.parallel.systemAddress { s.parallel.systemAddressOpsCount++ } + // else if addr == WBNBAddress { + // s.wbnbBalanceAccessed++ + // } // if amount.Sign() != 0 { // todo: to reenable it if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { @@ -2745,10 +2805,11 @@ func (s *ParallelStateDB) SubBalance(addr common.Address, amount *big.Int) { // already dirty, make sure the balance if fixed // if stateObject.Balance() if addr != s.parallel.systemAddress { - if stateObject.Balance().Cmp(s.GetBalance(addr)) != 0 { + balance := s.GetBalance(addr) + if stateObject.Balance().Cmp(balance) != 0 { log.Warn("SubBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr, - "stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", s.GetBalance(addr)) - stateObject.setBalance(s.GetBalance(addr)) + "stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", balance) + 
stateObject.setBalance(balance) } } @@ -2758,11 +2819,19 @@ func (s *ParallelStateDB) SubBalance(addr common.Address, amount *big.Int) { } func (s *ParallelStateDB) SetBalance(addr common.Address, amount *big.Int) { + s.balanceUpdateDepth++ + defer func() { + s.balanceUpdateDepth-- + }() + stateObject := s.GetOrNewStateObject(addr) if stateObject != nil { if addr == s.parallel.systemAddress { s.parallel.systemAddressOpsCount++ } + // else if addr == WBNBAddress { + // s.wbnbBalanceAccessed++ + // } if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { newStateObject := stateObject.lightCopy(s) // update balance for revert, in case child contract is revertted, @@ -2774,7 +2843,11 @@ func (s *ParallelStateDB) SetBalance(addr common.Address, amount *big.Int) { s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject return } - + // do balance fixup + if addr != s.parallel.systemAddress { + balance := s.GetBalance(addr) + stateObject.setBalance(balance) + } stateObject.SetBalance(amount) s.parallel.balanceChangesInSlot[addr] = struct{}{} } @@ -2985,6 +3058,8 @@ func (s *ParallelStateDB) getBalanceFromUnconfirmedDB(addr common.Address) *big. 
for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + db.wbnbMakeUpLock.RLock() + defer db.wbnbMakeUpLock.RUnlock() // 1.Refer the state of address, exist or not in dirtiedStateObjectsInSlot if obj, exist := db.parallel.dirtiedStateObjectsInSlot[addr]; exist { balanceHit := false @@ -3016,11 +3091,11 @@ func (s *ParallelStateDB) getNonceFromUnconfirmedDB(addr common.Address) (uint64 } for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { - if unconfirmedDb, ok := s.parallel.unconfirmedDBInShot[i]; ok { + if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { nonceHit := false - if _, ok := unconfirmedDb.parallel.addrStateChangesInSlot[addr]; ok { + if _, ok := db.parallel.addrStateChangesInSlot[addr]; ok { nonceHit = true - } else if _, ok := unconfirmedDb.parallel.nonceChangesInSlot[addr]; ok { + } else if _, ok := db.parallel.nonceChangesInSlot[addr]; ok { nonceHit = true } if !nonceHit { @@ -3028,7 +3103,9 @@ func (s *ParallelStateDB) getNonceFromUnconfirmedDB(addr common.Address) (uint64 continue } // nonce hit, return the nonce - obj := unconfirmedDb.parallel.dirtiedStateObjectsInSlot[addr] + db.wbnbMakeUpLock.RLock() + defer db.wbnbMakeUpLock.RUnlock() + obj := db.parallel.dirtiedStateObjectsInSlot[addr] if obj == nil { // could not exist, if it is changed but reverted // fixme: revert should remove the change record @@ -3068,6 +3145,8 @@ func (s *ParallelStateDB) getCodeFromUnconfirmedDB(addr common.Address) ([]byte, // try next unconfirmedDb continue } + db.wbnbMakeUpLock.RLock() + defer db.wbnbMakeUpLock.RUnlock() obj := db.parallel.dirtiedStateObjectsInSlot[addr] if obj == nil { // could not exist, if it is changed but reverted @@ -3107,7 +3186,8 @@ func (s *ParallelStateDB) getCodeHashFromUnconfirmedDB(addr common.Address) (com // try next unconfirmedDb continue } - + db.wbnbMakeUpLock.RLock() + defer db.wbnbMakeUpLock.RUnlock() obj := db.parallel.dirtiedStateObjectsInSlot[addr] if obj == nil { 
// could not exist, if it is changed but reverted @@ -3140,6 +3220,8 @@ func (s *ParallelStateDB) getAddrStateFromUnconfirmedDB(addr common.Address) (bo for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { if exist, ok := db.parallel.addrStateChangesInSlot[addr]; ok { + db.wbnbMakeUpLock.RLock() + defer db.wbnbMakeUpLock.RUnlock() if _, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; !ok { // could not exist, if it is changed but reverted // fixme: revert should remove the change record @@ -3159,6 +3241,8 @@ func (s *ParallelStateDB) getKVFromUnconfirmedDB(addr common.Address, key common // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + db.wbnbMakeUpLock.RLock() + defer db.wbnbMakeUpLock.RUnlock() if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { // if deleted on merge, can get from main StateDB, ok but fixme: concurrent safe if obj.deleted { return common.Hash{}, true @@ -3184,6 +3268,8 @@ func (s *ParallelStateDB) getStateObjectFromUnconfirmedDB(addr common.Address) ( // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + db.wbnbMakeUpLock.RLock() + defer db.wbnbMakeUpLock.RUnlock() if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { // if deleted on merge, can get from main StateDB, ok but fixme: concurrent safe return obj, true } @@ -3192,7 +3278,7 @@ func (s *ParallelStateDB) getStateObjectFromUnconfirmedDB(addr common.Address) ( return nil, false } -func (s *ParallelStateDB) IsParallelReadsValid() bool { +func (s *ParallelStateDB) IsParallelReadsValid(isStage2 bool) bool { slotDB := s if !slotDB.parallel.isSlotDB { log.Error("IsSlotDBReadsValid slotDB should be slot DB", "SlotIndex", 
slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) @@ -3208,6 +3294,12 @@ func (s *ParallelStateDB) IsParallelReadsValid() bool { for addr, nonceSlot := range slotDB.parallel.nonceReadsInSlot { nonceMain := mainDB.GetNonce(addr) if nonceSlot != nonceMain { + if isStage2 { //? + log.Debug("IsSlotDBReadsValid skip nonce check in stage 2", + "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) + continue + } + log.Debug("IsSlotDBReadsValid nonce read is invalid", "addr", addr, "nonceSlot", nonceSlot, "nonceMain", nonceMain, "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) @@ -3219,6 +3311,33 @@ func (s *ParallelStateDB) IsParallelReadsValid() bool { if addr != s.parallel.systemAddress { // skip balance check for system address balanceMain := mainDB.GetBalance(addr) if balanceSlot.Cmp(balanceMain) != 0 { + if addr == WBNBAddress && slotDB.WBNBMakeUp() { // WBNB balance make up + if isStage2 { + log.Debug("IsSlotDBReadsValid skip makeup for WBNB in stage 2", + "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) + continue // stage2 will skip WBNB check, no balance makeup + } + balanceDelta := new(big.Int).Sub(balanceMain, balanceSlot) + slotDB.wbnbMakeUpLock.Lock() + slotDB.AddBalance(addr, balanceDelta) // fixme: concurrent not safe, unconfirmed read + slotDB.wbnbMakeUpLock.Unlock() + /* + if _, exist := slotDB.stateObjectsPending[addr]; !exist { + slotDB.stateObjectsPending[addr] = struct{}{} + } + if _, exist := slotDB.stateObjectsDirty[addr]; !exist { + // only read, but never change WBNB's balance or state + // log.Warn("IsSlotDBReadsValid balance makeup for WBNB, but it is not in dirty", + // "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) + slotDB.stateObjectsDirty[addr] = struct{}{} + } + */ + log.Debug("IsSlotDBReadsValid balance makeup for WBNB", + "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, + "updated WBNB balance", 
slotDB.GetBalance(addr)) + continue + } + log.Debug("IsSlotDBReadsValid balance read is invalid", "addr", addr, "balanceSlot", balanceSlot, "balanceMain", balanceMain, "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) @@ -3283,7 +3402,6 @@ func (s *ParallelStateDB) IsParallelReadsValid() bool { } } // snapshot destructs check - for addr, destructRead := range slotDB.parallel.addrSnapDestructsReadsInSlot { mainObj := mainDB.getStateObject(addr) if mainObj == nil { @@ -3293,7 +3411,9 @@ func (s *ParallelStateDB) IsParallelReadsValid() bool { "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) return false } + s.snapParallelLock.RLock() // fixme: this lock is not needed _, destructMain := mainDB.snapDestructs[addr] // addr not exist + s.snapParallelLock.RUnlock() if destructRead != destructMain { log.Debug("IsSlotDBReadsValid snapshot destructs read invalid", "addr", addr, "destructRead", destructRead, "destructMain", destructMain, @@ -3319,3 +3439,86 @@ func (s *ParallelStateDB) SystemAddressRedo() bool { func (s *ParallelStateDB) NeedsRedo() bool { return s.parallel.needsRedo } + +/** + * WBNB makeup is allowed when WBNB'balance is only accessed through contract Call. + * If it is accessed not through contract all, e.g., by `address.balance`, `address.transfer(amount)`, + * we can not do balance make up. + */ +/* +fixme: not work... wbnbBalanceAccessedExpected is not correct... 
+dumped log: +wbnbBalanceAccessed=3 wbnbBalanceAccessedExpected=0 +wbnbBalanceAccessed=9 wbnbBalanceAccessedExpected=4 +wbnbBalanceAccessed=9 wbnbBalanceAccessedExpected=4 +wbnbBalanceAccessed=12 wbnbBalanceAccessedExpected=2 +wbnbBalanceAccessed=10 wbnbBalanceAccessedExpected=2 +wbnbBalanceAccessed=12 wbnbBalanceAccessedExpected=4 +wbnbBalanceAccessed=9 wbnbBalanceAccessedExpected=4 +wbnbBalanceAccessed=13 wbnbBalanceAccessedExpected=2 +wbnbBalanceAccessed=7 wbnbBalanceAccessedExpected=2 +wbnbBalanceAccessed=9 wbnbBalanceAccessedExpected=4 +wbnbBalanceAccessed=9 wbnbBalanceAccessedExpected=4 +wbnbBalanceAccessed=9 wbnbBalanceAccessedExpected=4 +wbnbBalanceAccessed=9 wbnbBalanceAccessedExpected=4 +wbnbBalanceAccessed=9 wbnbBalanceAccessedExpected=4 +wbnbBalanceAccessed=9 wbnbBalanceAccessedExpected=4 +*/ +func (s *ParallelStateDB) WBNBMakeUp() bool { + return s.wbnbMakeUp +} + +func (s *ParallelStateDB) ParallelMakeUp(addr common.Address, input []byte) { + if addr == WBNBAddress { + if len(input) < 4 { + // should never less than 4 + // log.Warn("ParallelMakeUp for WBNB input size invalid", "input size", len(input), "input", input) + s.wbnbMakeUp = false + return + } + methodId := input[:4] + if bytes.Equal(methodId, WBNBAddress_deposit) { + // log.Debug("ParallelMakeUp for WBNB deposit", "input size", len(input), "input", input) + // s.wbnbBalanceAccessedExpected += 2 // AddBalance() + return + } + if bytes.Equal(methodId, WBNBAddress_withdraw) { + // log.Debug("ParallelMakeUp for WBNB withdraw", "input size", len(input), "input", input) + // ** If from's balance is not enough, it will revert ==> +2, only AddBalance() + // ** if from's balance is enough, ==> +4, AddBalance(), SubBalance() for transfer + // attention, WBNB contract's balance should always sufficient + // s.wbnbBalanceAccessedExpected += 4 + + // as noted above, withdraw's access depends revert or not. + // we have to hack RevertToSnapshot to get the really access count, disable right now. 
+ // s.wbnbMakeUp = false + return + } + if bytes.Equal(methodId, WBNBAddress_approve) { + // log.Debug("ParallelMakeUp for WBNB approve", "input size", len(input), "input", input) + // s.wbnbBalanceAccessedExpected += 2 + return + } + if bytes.Equal(methodId, WBNBAddress_transfer) { + // log.Debug("ParallelMakeUp for WBNB transfer", "input size", len(input), "input", input) + // This is WBNB token transfer, not balance transfer + // s.wbnbBalanceAccessedExpected += 2 + return + } + if bytes.Equal(methodId, WBNBAddress_transferFrom) { + // log.Debug("ParallelMakeUp for WBNB transferFrom", "input size", len(input), "input", input) + // This is WBNB token transfer, not balance transfer + // s.wbnbBalanceAccessedExpected += 2 + return + } + // if bytes.Equal(methodId, WBNBAddress_totalSupply) { + // log.Debug("ParallelMakeUp for WBNB, not for totalSupply", "input size", len(input), "input", input) + // s.wbnbMakeUp = false // can not makeup + // return + // } + + log.Warn("ParallelMakeUp for WBNB unknown method id", "input size", len(input), "input", input) + s.wbnbMakeUp = false + } + +} diff --git a/core/state_processor.go b/core/state_processor.go index 4dde8c064a..5371fc8c02 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -23,7 +23,9 @@ import ( "math/big" "math/rand" "runtime" + "strconv" "sync" + "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" @@ -48,8 +50,13 @@ const ( recentDiffLayerTimeout = 5 farDiffLayerTimeout = 2 maxUnitSize = 10 + dispatchPolicyStatic = 1 + dispatchPolicyDynamic = 2 // not supported + maxRedoCounterInstage1 = 2 // try 2, 4, 10, or no limit? ) +var dispatchPolicy = dispatchPolicyStatic + // StateProcessor is a basic Processor, which takes care of transitioning // state from one point to another. 
// @@ -71,15 +78,23 @@ func NewStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consen // add for parallel executions type ParallelStateProcessor struct { StateProcessor - parallelNum int // leave a CPU to dispatcher - queueSize int // parallel slot's maximum number of pending Txs - txResultChan chan *ParallelTxResult // to notify dispatcher that a tx is done + parallelNum int // leave a CPU to dispatcher + queueSize int // parallel slot's maximum number of pending Txs + pendingConfirmChan chan *ParallelTxResult + pendingConfirmResults map[int][]*ParallelTxResult // tx could be executed several times, with several result to check + txResultChan chan *ParallelTxResult // to notify dispatcher that a tx is done // txReqAccountSorted map[common.Address][]*ParallelTxRequest // fixme: *ParallelTxRequest => ParallelTxRequest? slotState []*SlotState // idle, or pending messages - mergedTxIndex int // the latest finalized tx index + mergedTxIndex int // the latest finalized tx index, fixme: use Atomic slotDBsToRelease []*state.ParallelStateDB - debugErrorRedoNum int debugConflictRedoNum int + unconfirmedStateDBs *sync.Map // [int]*state.StateDB // fixme: concurrent safe, not use sync.Map? 
+ stopSlotChan chan int // fixme: use struct{}{}, to make sure all slots are idle + stopConfirmChan chan struct{} // fixme: use struct{}{}, to make sure all slots are idle + allTxReqs []*ParallelTxRequest + txReqExecuteRecord map[int]int // the execute count of each Tx + txReqExecuteCount int + confirmInStage2 bool } func NewParallelStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consensus.Engine, parallelNum int, queueSize int) *ParallelStateProcessor { @@ -396,12 +411,16 @@ func (p *LightStateProcessor) LightProcess(diffLayer *types.DiffLayer, block *ty } type SlotState struct { - pendingTxReqChan chan struct{} - pendingConfirmChan chan *ParallelTxResult - pendingTxReqList []*ParallelTxRequest // maintained by dispatcher for dispatch policy - slotdbChan chan *state.ParallelStateDB // dispatch will create and send this slotDB to slot + pendingTxReqChan chan *ParallelTxRequest + pendingTxReqShadowChan chan *ParallelTxRequest + pendingTxReqList []*ParallelTxRequest // maintained by dispatcher for dispatch policy + slotdbChan chan *state.ParallelStateDB // dispatch will create and send this slotDB to slot + activatedId int32 // 0: normal slot, 1: shadow slot + // idle bool + // shadowIdle bool + stopChan chan struct{} + stopShadowChan chan struct{} // txReqUnits []*ParallelDispatchUnit // only dispatch can access it - unconfirmedStateDBs *sync.Map // [int]*state.StateDB // fixme: concurrent safe, not use sync.Map? 
} type ParallelTxResult struct { @@ -418,17 +437,19 @@ type ParallelTxResult struct { } type ParallelTxRequest struct { - txIndex int - tx *types.Transaction - slotDB *state.ParallelStateDB + txIndex int + staticSlotIndex int // static dispatched id + tx *types.Transaction + // slotDB *state.ParallelStateDB gasLimit uint64 msg types.Message block *types.Block vmConfig vm.Config bloomProcessor *AsyncReceiptBloomGenerator usedGas *uint64 - waitTxChan chan struct{} - curTxChan chan struct{} + curTxChan chan int + systemAddrRedo bool + runnable int32 // we only run a Tx once or if it needs redo } // to create and start the execution slot goroutines @@ -437,29 +458,37 @@ func (p *ParallelStateProcessor) init() { "CPUNum", runtime.NumCPU(), "QueueSize", p.queueSize) p.txResultChan = make(chan *ParallelTxResult, p.parallelNum) + p.stopSlotChan = make(chan int, 1) + p.stopConfirmChan = make(chan struct{}, 1) p.slotState = make([]*SlotState, p.parallelNum) - for i := 0; i < p.parallelNum; i++ { p.slotState[i] = &SlotState{ - slotdbChan: make(chan *state.ParallelStateDB, 1), - pendingTxReqChan: make(chan struct{}, 1), - pendingConfirmChan: make(chan *ParallelTxResult, p.queueSize), + slotdbChan: make(chan *state.ParallelStateDB, 1), + pendingTxReqChan: make(chan *ParallelTxRequest, 1), + pendingTxReqShadowChan: make(chan *ParallelTxRequest, 1), + stopChan: make(chan struct{}, 1), + stopShadowChan: make(chan struct{}, 1), } // start the shadow slot first go func(slotIndex int) { - p.runShadowSlotLoop(slotIndex) // this loop will be permanent live + p.runSlotLoop(slotIndex, 1) // this loop will be permanent live }(i) // start the slot's goroutine go func(slotIndex int) { - p.runSlotLoop(slotIndex) // this loop will be permanent live + p.runSlotLoop(slotIndex, 0) // this loop will be permanent live }(i) } + + p.pendingConfirmChan = make(chan *ParallelTxResult, 400) + go func() { + p.runConfirmLoop() // this loop will be permanent live + }() } +/* // for parallel execute, we put 
contracts of same address in a slot, // since these txs probably would have conflicts -/* func (p *ParallelStateProcessor) queueSameToAddress(txReq *ParallelTxRequest) bool { txToAddr := txReq.tx.To() // To() == nil means contract creation, no same To address @@ -488,10 +517,10 @@ func (p *ParallelStateProcessor) queueSameToAddress(txReq *ParallelTxRequest) bo } return false } -*/ + // for parallel execute, we put contracts of same address in a slot, // since these txs probably would have conflicts -/* + func (p *ParallelStateProcessor) queueSameFromAddress(txReq *ParallelTxRequest) bool { txFromAddr := txReq.msg.From() for i, slot := range p.slotState { @@ -512,8 +541,7 @@ func (p *ParallelStateProcessor) queueSameFromAddress(txReq *ParallelTxRequest) } return false } -*/ -/* + func (p *ParallelStateProcessor) dispatchToHungrySlot(statedb *state.StateDB, txReq *ParallelTxRequest) bool { var workload int = len(p.slotState[0].pendingTxReqList) var slotIndex int = 0 @@ -528,11 +556,6 @@ func (p *ParallelStateProcessor) dispatchToHungrySlot(statedb *state.StateDB, tx return false } - if workload == 0 && txReq.slotDB == nil { - // Create a SlotDB for idle slot to save an IPC channel cost for updateSlotDB - txReq.slotDB = state.NewSlotDB(statedb, consensus.SystemAddress, p.mergedTxIndex, false) - } - log.Debug("dispatch To Hungry Slot", "slot", slotIndex, "workload", workload, "txIndex", txReq.txIndex) slot := p.slotState[slotIndex] select { @@ -547,7 +570,6 @@ func (p *ParallelStateProcessor) dispatchToHungrySlot(statedb *state.StateDB, tx return false } */ - // 1.Sliding Window: // txReqAccountSorted @@ -559,15 +581,22 @@ func (p *ParallelStateProcessor) dispatchToHungrySlot(statedb *state.StateDB, tx // Unit make policy: // 1.From // 2.To... + /* type ParallelDispatchUnit struct { + unitId int // unit with same id is likely has dependency, but to put them in same slot. 
+ // ** preemptible: true + // the unit can be preempted by dispatcher and be reallocated to other slot. + // unit with same Id will be reallocated together. + // ** preemptible: false + // can not be preempted, maybe it is the first unconfirmed unit of the current slot. + preemptible bool startTxIndex int endTxIndex int txsSize int txReqs []*ParallelTxRequest } */ - // Try best to make the unit full, it is full when: // ** maxUnitSize reached // ** tx index range reached @@ -586,7 +615,6 @@ type ParallelDispatchUnit struct { // ** make sure same From in same slot // ** try to make it balanced, queue to the most hungry slot for new Address func (p *ParallelStateProcessor) doStaticDispatch(mainStatedb *state.StateDB, txReqs []*ParallelTxRequest) { - fromSlotMap := make(map[common.Address]int, 100) toSlotMap := make(map[common.Address]int, 100) for _, txReq := range txReqs { @@ -620,6 +648,7 @@ func (p *ParallelStateProcessor) doStaticDispatch(mainStatedb *state.StateDB, tx } slot := p.slotState[slotIndex] + txReq.staticSlotIndex = slotIndex // txreq is better to be executed in this slot slot.pendingTxReqList = append(slot.pendingTxReqList, txReq) } } @@ -670,16 +699,12 @@ func (p *ParallelStateProcessor) waitUntilNextTxDone(statedb *state.StateDB, gp var result *ParallelTxResult for { result = <-p.txResultChan - // slot may request new slotDB, if slotDB is outdated - // such as: - // tx in pending tx request, previous tx in same queue is likely "damaged" the slotDB - // tx redo for conflict - // tx stage 1 failed, nonce out of order... 
+ // slot may request new slotDB, if a TxReq do not have valid parallel state db if result.updateSlotDB { // the target slot is waiting for new slotDB slotState := p.slotState[result.slotIndex] slotDB := state.NewSlotDB(statedb, consensus.SystemAddress, result.txReq.txIndex, - p.mergedTxIndex, result.keepSystem, slotState.unconfirmedStateDBs) + p.mergedTxIndex, result.keepSystem, p.unconfirmedStateDBs) slotDB.SetSlotIndex(result.slotIndex) p.slotDBsToRelease = append(p.slotDBsToRelease, slotDB) slotState.slotdbChan <- slotDB @@ -688,15 +713,20 @@ func (p *ParallelStateProcessor) waitUntilNextTxDone(statedb *state.StateDB, gp // ok, the tx result is valid and can be merged break } + // log.Info("waitUntilNextTxDone receive a result", "result.slotIndex", result.slotIndex, + // "TxIndex", result.txReq.txIndex, "result.receipt.GasUsed", result.receipt.GasUsed) if err := gp.SubGas(result.receipt.GasUsed); err != nil { log.Error("gas limit reached", "block", result.txReq.block.Number(), "txIndex", result.txReq.txIndex, "GasUsed", result.receipt.GasUsed, "gp.Gas", gp.Gas()) } - resultSlotIndex := result.slotIndex resultTxIndex := result.txReq.txIndex - resultSlotState := p.slotState[resultSlotIndex] - resultSlotState.pendingTxReqList = resultSlotState.pendingTxReqList[1:] + // no need to delete in static dispatch + // if dispatchPolicy == dispatchPolicyDynamic { + // resultSlotIndex := result.slotIndex + // resultSlotState := p.slotState[resultSlotIndex] + // resultSlotState.pendingTxReqList = resultSlotState.pendingTxReqList[1:] + // } statedb.MergeSlotDB(result.slotDB, result.receipt, resultTxIndex) if resultTxIndex != p.mergedTxIndex+1 { @@ -704,14 +734,17 @@ func (p *ParallelStateProcessor) waitUntilNextTxDone(statedb *state.StateDB, gp "p.mergedTxIndex", p.mergedTxIndex) } p.mergedTxIndex = resultTxIndex + log.Debug("waitUntilNextTxDone result is merged", "result.slotIndex", result.slotIndex, + "TxIndex", result.txReq.txIndex, "p.mergedTxIndex", p.mergedTxIndex) + // 
notify the following Tx, it is merged, // todo(optimize): if next tx is in same slot, it does not need to wait; save this channel cost. - close(result.txReq.curTxChan) + result.txReq.curTxChan <- p.mergedTxIndex + // close(result.txReq.curTxChan) return result } -func (p *ParallelStateProcessor) executeInSlot(slotIndex int, txReq *ParallelTxRequest) *ParallelTxResult { - slotDB := txReq.slotDB +func (p *ParallelStateProcessor) executeInSlot(slotIndex int, txReq *ParallelTxRequest, slotDB *state.ParallelStateDB) *ParallelTxResult { slotDB.Prepare(txReq.tx.Hash(), txReq.block.Hash(), txReq.txIndex) blockContext := NewEVMBlockContext(txReq.block.Header(), p.bc, nil) // can share blockContext within a block for efficiency vmenv := vm.NewEVM(blockContext, vm.TxContext{}, slotDB, p.config, txReq.vmConfig) @@ -720,12 +753,15 @@ func (p *ParallelStateProcessor) executeInSlot(slotIndex int, txReq *ParallelTxR gpSlot := new(GasPool).AddGas(txReq.gasLimit) // block.GasLimit() evm, result, err := applyTransactionStageExecution(txReq.msg, gpSlot, slotDB, vmenv) - if err != nil { + // txReq.runnable must be false, switch it to true + atomic.CompareAndSwapInt32(&txReq.runnable, 0, 1) + + // the error could be caused by unconfirmed balance reference, // the balance could be insufficient to pay its gas limit, which causes its preCheck.buyGas() to fail // redo could solve it. 
- log.Warn("In slot execution error", "error", err) + log.Warn("In slot execution error", "error", err, + "slotIndex", slotIndex, "txIndex", txReq.txIndex) return &ParallelTxResult{ updateSlotDB: false, slotIndex: slotIndex, @@ -738,7 +774,6 @@ func (p *ParallelStateProcessor) executeInSlot(slotIndex int, txReq *ParallelTxR result: result, } } - if result.Failed() { // if Tx is reverted, all its state change will be discarded slotDB.RevertSlotDB(txReq.msg.From()) @@ -751,13 +786,14 @@ func (p *ParallelStateProcessor) executeInSlot(slotIndex int, txReq *ParallelTxR txReq: txReq, receipt: nil, // receipt is generated in finalize stage slotDB: slotDB, - err: err, + err: nil, gpSlot: gpSlot, evm: evm, result: result, } } +/* func (p *ParallelStateProcessor) executeInShadowSlot(slotIndex int, txResult *ParallelTxResult) *ParallelTxResult { txReq := txResult.txReq txIndex := txReq.txIndex @@ -840,45 +876,378 @@ func (p *ParallelStateProcessor) executeInShadowSlot(slotIndex int, txResult *Pa txResult.updateSlotDB = false return txResult } +*/ + +func (p *ParallelStateProcessor) runConfirmLoop() { + for { + // ParallelTxResult is not confirmed yet + var unconfirmedResult *ParallelTxResult + select { + case <-p.stopConfirmChan: + log.Debug("runConfirmLoop stop received, drop all pendingConfirm", + "len(p.pendingConfirmChan)", len(p.pendingConfirmChan)) + for len(p.pendingConfirmChan) > 0 { + <-p.pendingConfirmChan + } + p.stopSlotChan <- -1 + continue + case unconfirmedResult = <-p.pendingConfirmChan: + } + txIndex := unconfirmedResult.txReq.txIndex + if _, ok := p.txReqExecuteRecord[txIndex]; !ok { + p.txReqExecuteRecord[txIndex] = 0 + p.txReqExecuteCount++ + } + p.txReqExecuteRecord[txIndex]++ + + log.Debug("runConfirmLoop receive", "txIndex", txIndex, "p.mergedTxIndex", p.mergedTxIndex) + p.pendingConfirmResults[txIndex] = append(p.pendingConfirmResults[txIndex], unconfirmedResult) + newTxMerged := false + for { + targetTxIndex := p.mergedTxIndex + 1 + if delivered := 
p.toConfirmTxIndex(targetTxIndex, false); !delivered { + break } + newTxMerged = true + } + txSize := len(p.allTxReqs) + if !p.confirmInStage2 && p.txReqExecuteCount == txSize { + log.Info("runConfirmLoop last txIndex received, enter stage 2", "txIndex", txIndex) + p.confirmInStage2 = true + for i := 0; i < txSize; i++ { + p.txReqExecuteRecord[txIndex] = 0 // clear it when entering stage2, for redo limit; NOTE(review): should the map key be i rather than txIndex? confirm + } + } + // if no Tx is merged, we will skip the stage 2 check + if !newTxMerged { + continue + } + + // stage 2, if all tx have been executed at least once, and its result has been received. + // in Stage 2, we will run check when merge is advanced. + if p.confirmInStage2 { + // more aggressive tx result confirm, even for these Txs not in turn + // now we will be more aggressive: + // do conflict check, as long as tx result is generated, + // if lucky, it is the Tx's turn, we will do conflict check with WBNB makeup + // otherwise, do conflict check without WBNB makeup, but we will ignore WBNB's balance conflict. 
+ // throw these likely conflicted tx back to re-execute + startTxIndex := p.mergedTxIndex + 2 // stage 2's will start from the next target merge index + log.Info("runConfirmLoop in stage 2", "startTxIndex", startTxIndex) + for txIndex := startTxIndex; txIndex < txSize; txIndex++ { + p.toConfirmTxIndex(txIndex, true) + } + } + + } + +} + +// do conflict detect +func (p *ParallelStateProcessor) hasConflict(txResult *ParallelTxResult, isStage2 bool) bool { + txReq := txResult.txReq + txIndex := txReq.txIndex + slotDB := txResult.slotDB + if txResult.err != nil { + log.Debug("redo, since in slot execute failed", "err", txResult.err) + return true + } else if slotDB.SystemAddressRedo() { + log.Debug("Stage Execution conflict for SystemAddressRedo", "txIndex", txIndex) + txResult.txReq.systemAddrRedo = true + return true + } else if slotDB.NeedsRedo() { + // if this is any reason that indicates this transaction needs to redo, skip the conflict check + return true + } else { + // to check if what the slot db read is correct. 
+ // refDetail := slotDB.UnconfirmedRefList() + if !slotDB.IsParallelReadsValid(isStage2) { + return true + } + } + return false +} + +func (p *ParallelStateProcessor) switchSlot(slot *SlotState, slotIndex int) { + if atomic.CompareAndSwapInt32(&slot.activatedId, 0, 1) { + // switch from normal to shadow slot + if len(slot.pendingTxReqShadowChan) == 0 { + slot.pendingTxReqShadowChan <- nil // only notify when target once + log.Debug("switchSlot to shadow", "slotIndex", slotIndex) + } + } else if atomic.CompareAndSwapInt32(&slot.activatedId, 1, 0) { + // switch from shadow to normal slot + if len(slot.pendingTxReqChan) == 0 { + slot.pendingTxReqChan <- nil // only notify when target once + log.Debug("switchSlot to normal", "slotIndex", slotIndex) + } + } +} + +// to confirm a serial TxResults with same txIndex +func (p *ParallelStateProcessor) toConfirmTxIndex(targetTxIndex int, isStage2 bool) bool { + // var targetTxIndex int + if targetTxIndex <= p.mergedTxIndex { + log.Warn("toConfirmTxIndex in stage 2, invalid txIndex", + "targetTxIndex", targetTxIndex, "isStage2", isStage2) + return false + } + if targetTxIndex == p.mergedTxIndex+1 && isStage2 { + // this is the one that can be merged, + // others are for likely conflict check, since it is not their turn. 
+ log.Warn("toConfirmTxIndex in stage 2, invalid txIndex", + "targetTxIndex", targetTxIndex, "isStage2", isStage2) + return false + } + + log.Debug("toConfirmTxIndex", "targetTxIndex", targetTxIndex, + "current mergedTxIndex", p.mergedTxIndex, "isStage2", isStage2) + for { + // handle a targetTxIndex in a loop + // targetTxIndex = p.mergedTxIndex + 1 + // select an unconfirmedResult to check + results := p.pendingConfirmResults[targetTxIndex] + resultsLen := len(results) + if resultsLen == 0 { // no pending result can be verified, break and wait for incoming results + log.Debug("toConfirmTxIndex resultsLen is 0", "targetTxIndex", targetTxIndex, + "merged TxIndex", p.mergedTxIndex, "isStage2", isStage2) + return false + } + lastResult := results[len(results)-1] // last is the most fresh, stack based priority + if !isStage2 { + // do not remove the confirmed result in Stage2, since the conflict check is guaranteed. + p.pendingConfirmResults[targetTxIndex] = p.pendingConfirmResults[targetTxIndex][:resultsLen-1] // remove from the queue + } + log.Debug("toConfirmTxIndex", "resultsLen", resultsLen, "txIndex", lastResult.txReq.txIndex, + "merged TxIndex", p.mergedTxIndex, "isStage2", isStage2) + + valid := p.toConfirmTxIndexResult(lastResult, isStage2) + executedSlotIndex := lastResult.slotIndex // could be stolen and executed in other slot. + staticSlotIndex := lastResult.txReq.staticSlotIndex + if !valid { + if resultsLen == 1 || isStage2 { // for Stage 2, we only check its latest result. 
+ p.debugConflictRedoNum++ + if !isStage2 || p.txReqExecuteRecord[lastResult.txReq.txIndex] < maxRedoCounterInstage1 { + lastResult.txReq.runnable = 1 // needs redo + } + slot := p.slotState[staticSlotIndex] + log.Debug("runConfirmLoop conflict", + "staticSlotIndex", staticSlotIndex, + "executedSlotIndex", executedSlotIndex, + "txIndex", lastResult.txReq.txIndex, + "activatedId", slot.activatedId, + "isStage2", isStage2, "slot.activatedId", slot.activatedId) + // if hit { // switch only it is hit, for none-hit, it is not that necessary, + p.switchSlot(slot, staticSlotIndex) + //} + log.Debug("runConfirmLoop conflict, switched", + "staticSlotIndex", staticSlotIndex, + "executedSlotIndex", executedSlotIndex, + "txIndex", lastResult.txReq.txIndex) + // this the last result for this txIndex, + // interrupt its current routine, and reschedule from the the other routine(shadow?) + return false + } else { + // try next + log.Debug("runConfirmLoop conflict, try next result of same txIndex", + "staticSlotIndex", staticSlotIndex, + "executedSlotIndex", executedSlotIndex, + "txIndex", lastResult.txReq.txIndex) + } + continue + } + if isStage2 { + // likely valid, but not sure, can not deliver + // fixme: need to handle txResult repeatedly check? 
+ return false + } + log.Debug("runConfirmLoop result to deliver", + "staticSlotIndex", staticSlotIndex, + "executedSlotIndex", executedSlotIndex, + "txIndex", lastResult.txReq.txIndex, "mergedTxIndex", p.mergedTxIndex) + // result is valid, deliver it to main processor + p.txResultChan <- lastResult + // wait until merged TxIndex is updated + <-lastResult.txReq.curTxChan + // close(result.txReq.curTxChan) // fixme: to close + + log.Debug("runConfirmLoop result is delivered", + "staticSlotIndex", staticSlotIndex, + "executedSlotIndex", executedSlotIndex, + "txIndex", lastResult.txReq.txIndex, "mergedTxIndex", p.mergedTxIndex) + if p.mergedTxIndex != (targetTxIndex) { + log.Warn("runConfirmLoop result delivered, but unexpected mergedTxIndex", + "mergedTxIndex", p.mergedTxIndex, "targetTxIndex", targetTxIndex) + } + // p.mergedTxIndex = targetTxIndex // fixme: cpu execute disorder, + return true // try validate next txIndex + } + +} -func (p *ParallelStateProcessor) runSlotLoop(slotIndex int) { +// to confirm one txResult +func (p *ParallelStateProcessor) toConfirmTxIndexResult(txResult *ParallelTxResult, isStage2 bool) bool { + txReq := txResult.txReq + // txIndex := txReq.txIndex + // slotDB := txResult.slotDB + if p.hasConflict(txResult, isStage2) { + return false + } + if isStage2 { // not its turn + return true // likely valid, not sure, no finalized right now. 
+ } + + // goroutine unsafe operation will be handled from here for safety + gasConsumed := txReq.gasLimit - txResult.gpSlot.Gas() + if gasConsumed != txResult.result.UsedGas { + log.Error("gasConsumed != result.UsedGas mismatch", + "gasConsumed", gasConsumed, "result.UsedGas", txResult.result.UsedGas) + } + + // ok, time to do finalize, stage2 should not be parallel + header := txReq.block.Header() + txResult.receipt, txResult.err = applyTransactionStageFinalization(txResult.evm, txResult.result, + txReq.msg, p.config, txResult.slotDB, header, + txReq.tx, txReq.usedGas, txReq.bloomProcessor) + txResult.updateSlotDB = false + return true +} + +func (p *ParallelStateProcessor) runSlotLoop(slotIndex int, slotType int32) { curSlot := p.slotState[slotIndex] + startTxIndex := 0 + var wakeupChan chan *ParallelTxRequest + var stopChan chan struct{} + + if slotType == 0 { // 0: normal, 1: shadow + wakeupChan = curSlot.pendingTxReqChan + stopChan = curSlot.stopChan + } else { + wakeupChan = curSlot.pendingTxReqShadowChan + stopChan = curSlot.stopShadowChan + } for { // wait for new TxReq - <-curSlot.pendingTxReqChan - - // receive a dispatched message - - // SlotDB create rational: - // ** for a dispatched tx, - // the slot should be idle, it is better to create a new SlotDB, since new Tx is not related to previous Tx - // ** for a queued tx, - // it is better to create a new SlotDB, since COW is used. 
- for _, txReq := range curSlot.pendingTxReqList { - if txReq.slotDB == nil { - result := &ParallelTxResult{ + // txReq := <-curSlot.pendingTxReqChan + select { + case <-stopChan: + log.Debug("runSlotLoop stop received", "slotIndex", slotIndex, "slotType", slotType) + p.stopSlotChan <- slotIndex + continue + case <-wakeupChan: + } + + traceMsg := "runSlotLoop " + strconv.Itoa(slotIndex) + if slotType == 1 { + traceMsg = traceMsg + " shadow" + } + + startTxIndex = p.mergedTxIndex + 1 + log.Debug("runSlotLoop started", "slotIndex", slotIndex, "startTxIndex", startTxIndex, "slotType", slotType) + if dispatchPolicy == dispatchPolicyStatic { + interrupted := false + for _, txReq := range curSlot.pendingTxReqList { + if txReq.txIndex < startTxIndex { + continue + } + // if interrupted, + if curSlot.activatedId != slotType { + log.Debug("runSlotLoop, switch loop", "slotIndex", slotIndex, + "txIndex ", txReq.txIndex, + "activatedId", curSlot.activatedId, "slotType", slotType) + interrupted = true + break + } + + if !atomic.CompareAndSwapInt32(&txReq.runnable, 1, 0) { + // not swapped: txReq.runnable == 0 + log.Debug("runSlotLoop, tx not runnable", "slotIndex", slotIndex, + "txIndex ", txReq.txIndex, "slotType", slotType) + continue + } + + resultUpdateDB := &ParallelTxResult{ updateSlotDB: true, slotIndex: slotIndex, err: nil, txReq: txReq, + keepSystem: txReq.systemAddrRedo, + } + p.txResultChan <- resultUpdateDB + slotDB := <-curSlot.slotdbChan + if slotDB == nil { // block is processed, fixme: no need to steal + break } - p.txResultChan <- result - txReq.slotDB = <-curSlot.slotdbChan + log.Debug("runSlotLoop executeInSlot", "slotIndex", slotIndex, + "txReq.txIndex", txReq.txIndex, "slotType", slotType) + result := p.executeInSlot(slotIndex, txReq, slotDB) + log.Debug("runSlotLoop executeInSlot done", "slotIndex", slotIndex, + "txReq.txIndex", txReq.txIndex, "slotType", slotType) + + p.unconfirmedStateDBs.Store(txReq.txIndex, slotDB) + log.Debug("runSlotLoop 
executeInSlot to send result", "slotIndex", slotIndex, + "txReq.txIndex", txReq.txIndex, "slotType", slotType) + + p.pendingConfirmChan <- result + } + // switched to the other slot. + if interrupted || p.confirmInStage2 { + continue + } + // txReq in this Slot have all been executed, try steal one from other slot. + // as long as the TxReq is runable, we steal it, mark it as stolen + // steal one by one + + for _, stealTxReq := range p.allTxReqs { + if !atomic.CompareAndSwapInt32(&stealTxReq.runnable, 1, 0) { + // not swapped: txReq.runnable == 0 + log.Debug("runSlotLoop, tx not runnable", "slotIndex", slotIndex, + "txIndex ", stealTxReq.txIndex, "slotType", slotType) + continue + } + resultUpdateDB := &ParallelTxResult{ + updateSlotDB: true, + slotIndex: slotIndex, + err: nil, + txReq: stealTxReq, + keepSystem: stealTxReq.systemAddrRedo, + } + p.txResultChan <- resultUpdateDB + slotDB := <-curSlot.slotdbChan + if slotDB == nil { // block is processed + break + } + log.Debug("runSlotLoop executeInSlot steal", "slotIndex", slotIndex, + "txReq.txIndex", stealTxReq.txIndex, "slotType", slotType) + + result := p.executeInSlot(slotIndex, stealTxReq, slotDB) + log.Debug("runSlotLoop executeInSlot steal done", "slotIndex", slotIndex, + "txReq.txIndex", stealTxReq.txIndex, "slotType", slotType) + p.unconfirmedStateDBs.Store(stealTxReq.txIndex, slotDB) + log.Debug("runSlotLoop executeInSlot steal to send result", "slotIndex", slotIndex, + "txReq.txIndex", stealTxReq.txIndex, "slotType", slotType) + p.pendingConfirmChan <- result } - result := p.executeInSlot(slotIndex, txReq) - curSlot.unconfirmedStateDBs.Store(txReq.txIndex, txReq.slotDB) - curSlot.pendingConfirmChan <- result - } - } -} -func (p *ParallelStateProcessor) runShadowSlotLoop(slotIndex int) { - curSlot := p.slotState[slotIndex] - for { - // ParallelTxResult from pendingConfirmChan is not confirmed yet - unconfirmedResult := <-curSlot.pendingConfirmChan - confirmedResult := p.executeInShadowSlot(slotIndex, 
unconfirmedResult) - p.txResultChan <- confirmedResult + } + /* + // disable dynamic right now. + else if dispatchPolicy == dispatchPolicyDynamic { + if txReq.slotDB == nil { + result := &ParallelTxResult{ + updateSlotDB: true, + slotIndex: slotIndex, + err: nil, + txReq: txReq, + } + p.txResultChan <- result + txReq.slotDB = <-curSlot.slotdbChan + } + result := p.executeInSlot(slotIndex, txReq) + p.unconfirmedStateDBs.Store(txReq.txIndex, txReq.slotDB) + p.pendingConfirmChan <- result + } + */ } } @@ -888,13 +1257,14 @@ func (p *ParallelStateProcessor) resetState(txNum int, statedb *state.StateDB) { return } p.mergedTxIndex = -1 - p.debugErrorRedoNum = 0 p.debugConflictRedoNum = 0 + p.confirmInStage2 = false // p.txReqAccountSorted = make(map[common.Address][]*ParallelTxRequest) // fixme: to be reused? statedb.PrepareForParallel() - + p.allTxReqs = make([]*ParallelTxRequest, 0) p.slotDBsToRelease = make([]*state.ParallelStateDB, 0, txNum) + /* stateDBsToRelease := p.slotDBsToRelease go func() { @@ -905,8 +1275,12 @@ func (p *ParallelStateProcessor) resetState(txNum int, statedb *state.StateDB) { */ for _, slot := range p.slotState { slot.pendingTxReqList = make([]*ParallelTxRequest, 0) - slot.unconfirmedStateDBs = new(sync.Map) // make(map[int]*state.StateDB), fixme: resue not new? + slot.activatedId = 0 } + p.unconfirmedStateDBs = new(sync.Map) // make(map[int]*state.ParallelStateDB) + p.pendingConfirmResults = make(map[int][]*ParallelTxResult, 200) + p.txReqExecuteRecord = make(map[int]int, 200) + p.txReqExecuteCount = 0 } // Implement BEP-130: Parallel Transaction Execution. 
@@ -920,7 +1294,7 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat txNum := len(block.Transactions()) p.resetState(txNum, statedb) if txNum > 0 { - log.Info("ProcessParallel", "block", header.Number) + log.Info("ProcessParallel", "block", header.Number, "txNum", txNum) } // Iterate over and process the individual transactions @@ -930,8 +1304,7 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat systemTxs := make([]*types.Transaction, 0, 2) signer, _, bloomProcessor := p.preExecute(block, statedb, cfg, true) - var waitTxChan, curTxChan chan struct{} - var txReqs []*ParallelTxRequest + // var txReqs []*ParallelTxRequest for i, tx := range block.Transactions() { if isPoSA { if isSystemTx, err := posa.IsSystemTransaction(tx, block.Header()); err != nil { @@ -952,60 +1325,132 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat } // parallel start, wrap an exec message, which will be dispatched to a slot - waitTxChan = curTxChan // can be nil, if this is the tx of first batch, otherwise, it is previous Tx's wait channel - curTxChan = make(chan struct{}, 1) - txReq := &ParallelTxRequest{ - txIndex: i, - tx: tx, - slotDB: nil, - gasLimit: block.GasLimit(), // gp.Gas(). - msg: msg, - block: block, - vmConfig: cfg, - bloomProcessor: bloomProcessor, - usedGas: usedGas, - waitTxChan: waitTxChan, - curTxChan: curTxChan, + txIndex: i, + staticSlotIndex: -1, + tx: tx, + gasLimit: block.GasLimit(), // gp.Gas(). + msg: msg, + block: block, + vmConfig: cfg, + bloomProcessor: bloomProcessor, + usedGas: usedGas, + curTxChan: make(chan int, 1), + systemAddrRedo: false, // set to true, when systemAddr access is detected. + runnable: 1, // 0: not runnable, 1: runnable } - txReqs = append(txReqs, txReq) - // from := txReq.msg.From() - // p.txReqAccountSorted[from] = append(p.txReqAccountSorted[from], txReq) - // Generate TxReqUnit every 80() transaction? 
- // if (i + 1) % *(p.parallelNum *10) == 0 { - // p.txReqAccountSorted = make(map[common.Address][]*ParallelTxRequest) // fixme: memory reuse? - // } - } - p.doStaticDispatch(statedb, txReqs) - for _, slot := range p.slotState { - slot.pendingTxReqChan <- struct{}{} + p.allTxReqs = append(p.allTxReqs, txReq) } - for { - if len(commonTxs)+len(systemTxs) == txNum { - break - } - - result := p.waitUntilNextTxDone(statedb, gp) - // update tx result - if result.err != nil { - log.Error("ProcessParallel a failed tx", "resultSlotIndex", result.slotIndex, - "resultTxIndex", result.txReq.txIndex, "result.err", result.err) - return statedb, nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", result.txReq.txIndex, result.txReq.tx.Hash().Hex(), result.err) + if dispatchPolicy == dispatchPolicyStatic { + p.doStaticDispatch(statedb, p.allTxReqs) // todo: put txReqs in unit? + // after static dispatch, we notify the slot to work. + for _, slot := range p.slotState { + slot.pendingTxReqChan <- nil } - - commonTxs = append(commonTxs, result.txReq.tx) - receipts = append(receipts, result.receipt) + // wait until all Txs have processed. 
+ for { + if len(commonTxs)+len(systemTxs) == txNum { + break + } + result := p.waitUntilNextTxDone(statedb, gp) + // update tx result + if result.err != nil { + log.Error("ProcessParallel a failed tx", "resultSlotIndex", result.slotIndex, + "resultTxIndex", result.txReq.txIndex, "result.err", result.err) + return statedb, nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", result.txReq.txIndex, result.txReq.tx.Hash().Hex(), result.err) + } + commonTxs = append(commonTxs, result.txReq.tx) + receipts = append(receipts, result.receipt) + } + // wait unitl all slot are stopped + for _, slot := range p.slotState { + // log.Info("ProcessParallel to stop", "slotIndex", i) + slot.stopChan <- struct{}{} + slot.stopShadowChan <- struct{}{} + stopCount := 0 + for { + select { + case updateDB := <-p.txResultChan: // in case a slot is requesting a new DB... + if updateDB.updateSlotDB { + slotState := p.slotState[updateDB.slotIndex] + // log.Info("ProcessParallel try to update slot db", "slotIndex", updateDB.slotIndex) + slotState.slotdbChan <- nil + continue + } + // else { + // log.Info("ProcessParallel unexpected txResultChan", "slotIndex", i) + // } + // case slotIndex := <-p.stopSlotChan: + case <-p.stopSlotChan: + // log.Info("ProcessParallel slot stopped", "slotIndex", slotIndex) + stopCount++ + } + if stopCount == 2 { + break + } + } + // log.Info("ProcessParallel shadow slot stopped", "slotIndex", i) + } + // wait until the confirm routine is stopped + log.Debug("ProcessParallel to stop confirm routine") + p.stopConfirmChan <- struct{}{} + <-p.stopSlotChan + log.Debug("ProcessParallel stopped confirm routine") } + /* + else if dispatchPolicy == dispatchPolicyDynamic { + for _, txReq := range txReqs { + // to optimize the for { for {} } loop code style? it is ok right now. + for { + if p.queueSameFromAddress(txReq) { + break + } + if p.queueSameToAddress(txReq) { + break + } + // if idle slot available, just dispatch and process next tx. 
+ if p.dispatchToHungrySlot(statedb, txReq) { + break + } + log.Debug("ProcessParallel no slot available, wait", "txIndex", txReq.txIndex) + // no idle slot, wait until a tx is executed and merged. + result := p.waitUntilNextTxDone(statedb, gp) + + // update tx result + if result.err != nil { + log.Warn("ProcessParallel a failed tx", "resultSlotIndex", result.slotIndex, + "resultTxIndex", result.txReq.txIndex, "result.err", result.err) + return statedb, nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", result.txReq.txIndex, result.txReq.tx.Hash().Hex(), result.err) + } + commonTxs = append(commonTxs, result.txReq.tx) + receipts = append(receipts, result.receipt) + } + } + // wait until all tx request are done + for len(commonTxs)+len(systemTxs) < txNum { + result := p.waitUntilNextTxDone(statedb, gp) + + // update tx result + if result.err != nil { + log.Error("ProcessParallel a failed tx", "resultSlotIndex", result.slotIndex, + "resultTxIndex", result.txReq.txIndex, "result.err", result.err) + return statedb, nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", result.txReq.txIndex, result.txReq.tx.Hash().Hex(), result.err) + } + + commonTxs = append(commonTxs, result.txReq.tx) + receipts = append(receipts, result.receipt) + } + } + */ // len(commonTxs) could be 0, such as: https://bscscan.com/block/14580486 if len(commonTxs) > 0 { log.Info("ProcessParallel tx all done", "block", header.Number, "usedGas", *usedGas, "txNum", txNum, "len(commonTxs)", len(commonTxs), - "errorNum", p.debugErrorRedoNum, "conflictNum", p.debugConflictRedoNum, - "redoRate(%)", 100*(p.debugErrorRedoNum+p.debugConflictRedoNum)/len(commonTxs)) + "redoRate(%)", 100*(p.debugConflictRedoNum)/len(commonTxs)) } allLogs, err := p.postExecute(block, statedb, &commonTxs, &receipts, &systemTxs, usedGas, bloomProcessor) return statedb, receipts, allLogs, *usedGas, err diff --git a/core/vm/evm.go b/core/vm/evm.go index 53e2e8797b..f21df8885d 100644 --- a/core/vm/evm.go +++ 
b/core/vm/evm.go @@ -277,6 +277,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas // The depth-check is already done, and precompiles handled above contract := NewContract(caller, AccountRef(addrCopy), value, gas) contract.SetCallCode(&addrCopy, evm.StateDB.GetCodeHash(addrCopy), code) + evm.StateDB.ParallelMakeUp(addr, input) ret, err = run(evm, contract, input, false) gas = contract.Gas } diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 0ecf28d59a..ae5c7079f3 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -259,7 +259,7 @@ func opAddress(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] func opBalance(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { slot := scope.Stack.peek() address := common.Address(slot.Bytes20()) - slot.SetFromBig(interpreter.evm.StateDB.GetBalance(address)) + slot.SetFromBig(interpreter.evm.StateDB.GetBalanceOpCode(address)) return nil, nil } diff --git a/core/vm/interface.go b/core/vm/interface.go index ad9b05d666..be263002b7 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -30,6 +30,7 @@ type StateDB interface { SubBalance(common.Address, *big.Int) AddBalance(common.Address, *big.Int) GetBalance(common.Address) *big.Int + GetBalanceOpCode(common.Address) *big.Int GetNonce(common.Address) uint64 SetNonce(common.Address, uint64) @@ -74,6 +75,7 @@ type StateDB interface { AddPreimage(common.Hash, []byte) ForEachStorage(common.Address, func(common.Hash, common.Hash) bool) error + ParallelMakeUp(addr common.Address, input []byte) } // CallContext provides a basic interface for the EVM calling conventions. 
The EVM From 34881e2f1b74ecd352e02c58ced9fb4d18d1749d Mon Sep 17 00:00:00 2001 From: setunapo Date: Thu, 5 May 2022 19:26:12 +0800 Subject: [PATCH 11/18] debugConflictRedoNum --- core/state_processor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/state_processor.go b/core/state_processor.go index 5371fc8c02..bc9268c897 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -1026,9 +1026,9 @@ func (p *ParallelStateProcessor) toConfirmTxIndex(targetTxIndex int, isStage2 bo staticSlotIndex := lastResult.txReq.staticSlotIndex if !valid { if resultsLen == 1 || isStage2 { // for Stage 2, we only check its latest result. - p.debugConflictRedoNum++ if !isStage2 || p.txReqExecuteRecord[lastResult.txReq.txIndex] < maxRedoCounterInstage1 { lastResult.txReq.runnable = 1 // needs redo + p.debugConflictRedoNum++ } slot := p.slotState[staticSlotIndex] log.Debug("runConfirmLoop conflict", From 76348fa69bd5dae6c5e69c499892dc31e7804d3c Mon Sep 17 00:00:00 2001 From: setunapo Date: Thu, 5 May 2022 19:25:48 +0800 Subject: [PATCH 12/18] remove debug logs --- core/state/statedb.go | 4 +- core/state_processor.go | 179 +--------------------------------------- 2 files changed, 5 insertions(+), 178 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index 2b9f954047..f00d587c02 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -2486,7 +2486,7 @@ func (s *ParallelStateDB) GetBalanceOpCode(addr common.Address) *big.Int { if addr == WBNBAddress { // s.wbnbBalanceAccessed++ s.wbnbMakeUp = false - log.Debug("GetBalanceOpCode for WBNB", "txIndex", s.TxIndex()) + // log.Debug("GetBalanceOpCode for WBNB", "txIndex", s.TxIndex()) } return s.GetBalance(addr) } @@ -3517,7 +3517,7 @@ func (s *ParallelStateDB) ParallelMakeUp(addr common.Address, input []byte) { // return // } - log.Warn("ParallelMakeUp for WBNB unknown method id", "input size", len(input), "input", input) + // log.Warn("ParallelMakeUp for WBNB unknown method 
id", "input size", len(input), "input", input) s.wbnbMakeUp = false } diff --git a/core/state_processor.go b/core/state_processor.go index bc9268c897..c31ebccdee 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -23,7 +23,6 @@ import ( "math/big" "math/rand" "runtime" - "strconv" "sync" "sync/atomic" "time" @@ -713,8 +712,6 @@ func (p *ParallelStateProcessor) waitUntilNextTxDone(statedb *state.StateDB, gp // ok, the tx result is valid and can be merged break } - // log.Info("waitUntilNextTxDone receive a result", "result.slotIndex", result.slotIndex, - // "TxIndex", result.txReq.txIndex, "result.receipt.GasUsed", result.receipt.GasUsed) if err := gp.SubGas(result.receipt.GasUsed); err != nil { log.Error("gas limit reached", "block", result.txReq.block.Number(), "txIndex", result.txReq.txIndex, "GasUsed", result.receipt.GasUsed, "gp.Gas", gp.Gas()) @@ -734,8 +731,8 @@ func (p *ParallelStateProcessor) waitUntilNextTxDone(statedb *state.StateDB, gp "p.mergedTxIndex", p.mergedTxIndex) } p.mergedTxIndex = resultTxIndex - log.Debug("waitUntilNextTxDone result is merged", "result.slotIndex", result.slotIndex, - "TxIndex", result.txReq.txIndex, "p.mergedTxIndex", p.mergedTxIndex) + // log.Debug("waitUntilNextTxDone result is merged", "result.slotIndex", result.slotIndex, + // "TxIndex", result.txReq.txIndex, "p.mergedTxIndex", p.mergedTxIndex) // notify the following Tx, it is merged, // todo(optimize): if next tx is in same slot, it do not need to wait; save this channel cost. @@ -793,99 +790,12 @@ func (p *ParallelStateProcessor) executeInSlot(slotIndex int, txReq *ParallelTxR } } -/* -func (p *ParallelStateProcessor) executeInShadowSlot(slotIndex int, txResult *ParallelTxResult) *ParallelTxResult { - txReq := txResult.txReq - txIndex := txReq.txIndex - slotDB := txReq.slotDB - header := txReq.block.Header() - - // wait until the previous tx is finalized. 
- if txReq.waitTxChan != nil { - <-txReq.waitTxChan // close the channel - } - - // do conflict detect - hasConflict := false - systemAddrConflict := false - if txResult.err != nil { - log.Debug("redo, since in slot execute failed", "err", txResult.err) - hasConflict = true - } else if slotDB.SystemAddressRedo() { - log.Debug("Stage Execution conflict for SystemAddressRedo", "Slot", slotIndex, - "txIndex", txIndex) - hasConflict = true - systemAddrConflict = true - } else if slotDB.NeedsRedo() { - // if this is any reason that indicates this transaction needs to redo, skip the conflict check - hasConflict = true - } else { - // to check if what the slot db read is correct. - // refDetail := slotDB.UnconfirmedRefList() - if !slotDB.IsParallelReadsValid() { - hasConflict = true - } - } - - if hasConflict { - p.debugConflictRedoNum++ - // re-run should not have conflict, since it has the latest world state. - redoResult := &ParallelTxResult{ - updateSlotDB: true, - keepSystem: systemAddrConflict, - slotIndex: slotIndex, - txReq: txReq, - } - p.txResultChan <- redoResult - updatedSlotDB := <-p.slotState[slotIndex].slotdbChan - updatedSlotDB.Prepare(txReq.tx.Hash(), txReq.block.Hash(), txIndex) - gpSlot := new(GasPool).AddGas(txReq.gasLimit) - - txResult.slotDB = updatedSlotDB - txResult.gpSlot = gpSlot - - blockContext := NewEVMBlockContext(header, p.bc, nil) // can share blockContext within a block for efficiency - vmenv := vm.NewEVM(blockContext, vm.TxContext{}, updatedSlotDB, p.config, txReq.vmConfig) - txResult.evm, txResult.result, txResult.err = applyTransactionStageExecution(txReq.msg, - gpSlot, updatedSlotDB, vmenv) - - if txResult.err != nil { - log.Error("Stage Execution conflict redo, error", txResult.err) - } - - if txResult.result.Failed() { - // if Tx is reverted, all its state change will be discarded - log.Debug("TX reverted?", "Slot", slotIndex, "txIndex", txIndex, - "result.Err", txResult.result.Err) - txResult.slotDB.RevertSlotDB(txReq.msg.From()) - 
} - } - - // goroutine unsafe operation will be handled from here for safety - gasConsumed := txReq.gasLimit - txResult.gpSlot.Gas() - if gasConsumed != txResult.result.UsedGas { - log.Error("gasConsumed != result.UsedGas mismatch", - "gasConsumed", gasConsumed, "result.UsedGas", txResult.result.UsedGas) - } - - // ok, time to do finalize, stage2 should not be parallel - txResult.receipt, txResult.err = applyTransactionStageFinalization(txResult.evm, txResult.result, - txReq.msg, p.config, txResult.slotDB, header, - txReq.tx, txReq.usedGas, txReq.bloomProcessor) - - txResult.updateSlotDB = false - return txResult -} -*/ - func (p *ParallelStateProcessor) runConfirmLoop() { for { // ParallelTxResult is not confirmed yet var unconfirmedResult *ParallelTxResult select { case <-p.stopConfirmChan: - log.Debug("runConfirmLoop stop received, drop all pendingConfirm", - "len(p.pendingConfirmChan)", len(p.pendingConfirmChan)) for len(p.pendingConfirmChan) > 0 { <-p.pendingConfirmChan } @@ -899,8 +809,6 @@ func (p *ParallelStateProcessor) runConfirmLoop() { p.txReqExecuteCount++ } p.txReqExecuteRecord[txIndex]++ - - log.Debug("runConfirmLoop receive", "txIndex", txIndex, "p.mergedTxIndex", p.mergedTxIndex) p.pendingConfirmResults[txIndex] = append(p.pendingConfirmResults[txIndex], unconfirmedResult) newTxMerged := false for { @@ -912,7 +820,6 @@ func (p *ParallelStateProcessor) runConfirmLoop() { } txSize := len(p.allTxReqs) if !p.confirmInStage2 && p.txReqExecuteCount == txSize { - log.Info("runConfirmLoop last txIndex received, enter stage 2", "txIndex", txIndex) p.confirmInStage2 = true for i := 0; i < txSize; i++ { p.txReqExecuteRecord[txIndex] = 0 // clear it when enter stage2, for redo limit @@ -933,7 +840,6 @@ func (p *ParallelStateProcessor) runConfirmLoop() { // otherwise, do conflict check without WBNB makeup, but we will ignor WBNB's balance conflict. 
// throw these likely conflicted tx back to re-execute startTxIndex := p.mergedTxIndex + 2 // stage 2's will start from the next target merge index - log.Info("runConfirmLoop in stage 2", "startTxIndex", startTxIndex) for txIndex := startTxIndex; txIndex < txSize; txIndex++ { p.toConfirmTxIndex(txIndex, true) } @@ -945,14 +851,10 @@ func (p *ParallelStateProcessor) runConfirmLoop() { // do conflict detect func (p *ParallelStateProcessor) hasConflict(txResult *ParallelTxResult, isStage2 bool) bool { - txReq := txResult.txReq - txIndex := txReq.txIndex slotDB := txResult.slotDB if txResult.err != nil { - log.Debug("redo, since in slot execute failed", "err", txResult.err) return true } else if slotDB.SystemAddressRedo() { - log.Debug("Stage Execution conflict for SystemAddressRedo", "txIndex", txIndex) txResult.txReq.systemAddrRedo = true return true } else if slotDB.NeedsRedo() { @@ -973,13 +875,11 @@ func (p *ParallelStateProcessor) switchSlot(slot *SlotState, slotIndex int) { // switch from normal to shadow slot if len(slot.pendingTxReqShadowChan) == 0 { slot.pendingTxReqShadowChan <- nil // only notify when target once - log.Debug("switchSlot to shadow", "slotIndex", slotIndex) } } else if atomic.CompareAndSwapInt32(&slot.activatedId, 1, 0) { // switch from shadow to normal slot if len(slot.pendingTxReqChan) == 0 { slot.pendingTxReqChan <- nil // only notify when target once - log.Debug("switchSlot to normal", "slotIndex", slotIndex) } } } @@ -1000,8 +900,6 @@ func (p *ParallelStateProcessor) toConfirmTxIndex(targetTxIndex int, isStage2 bo return false } - log.Debug("toConfirmTxIndex", "targetTxIndex", targetTxIndex, - "current mergedTxIndex", p.mergedTxIndex, "isStage2", isStage2) for { // handle a targetTxIndex in a loop // targetTxIndex = p.mergedTxIndex + 1 @@ -1009,8 +907,6 @@ func (p *ParallelStateProcessor) toConfirmTxIndex(targetTxIndex int, isStage2 bo results := p.pendingConfirmResults[targetTxIndex] resultsLen := len(results) if resultsLen == 0 { // no 
pending result can be verified, break and wait for incoming results - log.Debug("toConfirmTxIndex resultsLen is 0", "targetTxIndex", targetTxIndex, - "merged TxIndex", p.mergedTxIndex, "isStage2", isStage2) return false } lastResult := results[len(results)-1] // last is the most fresh, stack based priority @@ -1018,11 +914,8 @@ func (p *ParallelStateProcessor) toConfirmTxIndex(targetTxIndex int, isStage2 bo // not remove the confirm result in Stage2, since the conflict check is guranteed. p.pendingConfirmResults[targetTxIndex] = p.pendingConfirmResults[targetTxIndex][:resultsLen-1] // remove from the queue } - log.Debug("toConfirmTxIndex", "resultsLen", resultsLen, "txIndex", lastResult.txReq.txIndex, - "merged TxIndex", p.mergedTxIndex, "isStage2", isStage2) valid := p.toConfirmTxIndexResult(lastResult, isStage2) - executedSlotIndex := lastResult.slotIndex // could be stolen and executed in other slot. staticSlotIndex := lastResult.txReq.staticSlotIndex if !valid { if resultsLen == 1 || isStage2 { // for Stage 2, we only check its latest result. @@ -1031,28 +924,9 @@ func (p *ParallelStateProcessor) toConfirmTxIndex(targetTxIndex int, isStage2 bo p.debugConflictRedoNum++ } slot := p.slotState[staticSlotIndex] - log.Debug("runConfirmLoop conflict", - "staticSlotIndex", staticSlotIndex, - "executedSlotIndex", executedSlotIndex, - "txIndex", lastResult.txReq.txIndex, - "activatedId", slot.activatedId, - "isStage2", isStage2, "slot.activatedId", slot.activatedId) - // if hit { // switch only it is hit, for none-hit, it is not that necessary, + // interrupt its current routine, and switch to the other routine p.switchSlot(slot, staticSlotIndex) - //} - log.Debug("runConfirmLoop conflict, switched", - "staticSlotIndex", staticSlotIndex, - "executedSlotIndex", executedSlotIndex, - "txIndex", lastResult.txReq.txIndex) - // this the last result for this txIndex, - // interrupt its current routine, and reschedule from the the other routine(shadow?) 
return false - } else { - // try next - log.Debug("runConfirmLoop conflict, try next result of same txIndex", - "staticSlotIndex", staticSlotIndex, - "executedSlotIndex", executedSlotIndex, - "txIndex", lastResult.txReq.txIndex) } continue } @@ -1061,20 +935,12 @@ func (p *ParallelStateProcessor) toConfirmTxIndex(targetTxIndex int, isStage2 bo // fixme: need to handle txResult repeatedly check? return false } - log.Debug("runConfirmLoop result to deliver", - "staticSlotIndex", staticSlotIndex, - "executedSlotIndex", executedSlotIndex, - "txIndex", lastResult.txReq.txIndex, "mergedTxIndex", p.mergedTxIndex) // result is valid, deliver it to main processor p.txResultChan <- lastResult // wait until merged TxIndex is updated <-lastResult.txReq.curTxChan // close(result.txReq.curTxChan) // fixme: to close - log.Debug("runConfirmLoop result is delivered", - "staticSlotIndex", staticSlotIndex, - "executedSlotIndex", executedSlotIndex, - "txIndex", lastResult.txReq.txIndex, "mergedTxIndex", p.mergedTxIndex) if p.mergedTxIndex != (targetTxIndex) { log.Warn("runConfirmLoop result delivered, but unexpected mergedTxIndex", "mergedTxIndex", p.mergedTxIndex, "targetTxIndex", targetTxIndex) @@ -1131,19 +997,11 @@ func (p *ParallelStateProcessor) runSlotLoop(slotIndex int, slotType int32) { // txReq := <-curSlot.pendingTxReqChan select { case <-stopChan: - log.Debug("runSlotLoop stop received", "slotIndex", slotIndex, "slotType", slotType) p.stopSlotChan <- slotIndex continue case <-wakeupChan: } - - traceMsg := "runSlotLoop " + strconv.Itoa(slotIndex) - if slotType == 1 { - traceMsg = traceMsg + " shadow" - } - startTxIndex = p.mergedTxIndex + 1 - log.Debug("runSlotLoop started", "slotIndex", slotIndex, "startTxIndex", startTxIndex, "slotType", slotType) if dispatchPolicy == dispatchPolicyStatic { interrupted := false for _, txReq := range curSlot.pendingTxReqList { @@ -1152,17 +1010,12 @@ func (p *ParallelStateProcessor) runSlotLoop(slotIndex int, slotType int32) { } // if 
interrupted, if curSlot.activatedId != slotType { - log.Debug("runSlotLoop, switch loop", "slotIndex", slotIndex, - "txIndex ", txReq.txIndex, - "activatedId", curSlot.activatedId, "slotType", slotType) interrupted = true break } if !atomic.CompareAndSwapInt32(&txReq.runnable, 1, 0) { // not swapped: txReq.runnable == 0 - log.Debug("runSlotLoop, tx not runnable", "slotIndex", slotIndex, - "txIndex ", txReq.txIndex, "slotType", slotType) continue } @@ -1178,16 +1031,8 @@ func (p *ParallelStateProcessor) runSlotLoop(slotIndex int, slotType int32) { if slotDB == nil { // block is processed, fixme: no need to steal break } - log.Debug("runSlotLoop executeInSlot", "slotIndex", slotIndex, - "txReq.txIndex", txReq.txIndex, "slotType", slotType) result := p.executeInSlot(slotIndex, txReq, slotDB) - log.Debug("runSlotLoop executeInSlot done", "slotIndex", slotIndex, - "txReq.txIndex", txReq.txIndex, "slotType", slotType) - p.unconfirmedStateDBs.Store(txReq.txIndex, slotDB) - log.Debug("runSlotLoop executeInSlot to send result", "slotIndex", slotIndex, - "txReq.txIndex", txReq.txIndex, "slotType", slotType) - p.pendingConfirmChan <- result } // switched to the other slot. 
@@ -1201,8 +1046,6 @@ func (p *ParallelStateProcessor) runSlotLoop(slotIndex int, slotType int32) { for _, stealTxReq := range p.allTxReqs { if !atomic.CompareAndSwapInt32(&stealTxReq.runnable, 1, 0) { // not swapped: txReq.runnable == 0 - log.Debug("runSlotLoop, tx not runnable", "slotIndex", slotIndex, - "txIndex ", stealTxReq.txIndex, "slotType", slotType) continue } resultUpdateDB := &ParallelTxResult{ @@ -1217,15 +1060,9 @@ func (p *ParallelStateProcessor) runSlotLoop(slotIndex int, slotType int32) { if slotDB == nil { // block is processed break } - log.Debug("runSlotLoop executeInSlot steal", "slotIndex", slotIndex, - "txReq.txIndex", stealTxReq.txIndex, "slotType", slotType) result := p.executeInSlot(slotIndex, stealTxReq, slotDB) - log.Debug("runSlotLoop executeInSlot steal done", "slotIndex", slotIndex, - "txReq.txIndex", stealTxReq.txIndex, "slotType", slotType) p.unconfirmedStateDBs.Store(stealTxReq.txIndex, slotDB) - log.Debug("runSlotLoop executeInSlot steal to send result", "slotIndex", slotIndex, - "txReq.txIndex", stealTxReq.txIndex, "slotType", slotType) p.pendingConfirmChan <- result } @@ -1365,7 +1202,6 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat } // wait unitl all slot are stopped for _, slot := range p.slotState { - // log.Info("ProcessParallel to stop", "slotIndex", i) slot.stopChan <- struct{}{} slot.stopShadowChan <- struct{}{} stopCount := 0 @@ -1374,29 +1210,20 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat case updateDB := <-p.txResultChan: // in case a slot is requesting a new DB... 
if updateDB.updateSlotDB { slotState := p.slotState[updateDB.slotIndex] - // log.Info("ProcessParallel try to update slot db", "slotIndex", updateDB.slotIndex) slotState.slotdbChan <- nil continue } - // else { - // log.Info("ProcessParallel unexpected txResultChan", "slotIndex", i) - // } - // case slotIndex := <-p.stopSlotChan: case <-p.stopSlotChan: - // log.Info("ProcessParallel slot stopped", "slotIndex", slotIndex) stopCount++ } if stopCount == 2 { break } } - // log.Info("ProcessParallel shadow slot stopped", "slotIndex", i) } // wait until the confirm routine is stopped - log.Debug("ProcessParallel to stop confirm routine") p.stopConfirmChan <- struct{}{} <-p.stopSlotChan - log.Debug("ProcessParallel stopped confirm routine") } /* else if dispatchPolicy == dispatchPolicyDynamic { From 288a43653f607acb04edb9d3b4e7d473dde4ba8a Mon Sep 17 00:00:00 2001 From: setunapo Date: Fri, 6 May 2022 09:03:58 +0800 Subject: [PATCH 13/18] stage2CheckNumber --- core/state_processor.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/core/state_processor.go b/core/state_processor.go index c31ebccdee..bef1ac87c1 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -50,8 +50,9 @@ const ( farDiffLayerTimeout = 2 maxUnitSize = 10 dispatchPolicyStatic = 1 - dispatchPolicyDynamic = 2 // not supported - maxRedoCounterInstage1 = 2 // try 2, 4, 10, or no limit? + dispatchPolicyDynamic = 2 // not supported + maxRedoCounterInstage1 = 10000 // try 2, 4, 10, or no limit? + stage2CheckNumber = 10 ) var dispatchPolicy = dispatchPolicyStatic @@ -840,7 +841,11 @@ func (p *ParallelStateProcessor) runConfirmLoop() { // otherwise, do conflict check without WBNB makeup, but we will ignor WBNB's balance conflict. 
// throw these likely conflicted tx back to re-execute startTxIndex := p.mergedTxIndex + 2 // stage 2's will start from the next target merge index - for txIndex := startTxIndex; txIndex < txSize; txIndex++ { + endTxIndex := startTxIndex + stage2CheckNumber + if endTxIndex > (txSize - 1) { + endTxIndex = txSize - 1 + } + for txIndex := startTxIndex; txIndex < endTxIndex; txIndex++ { p.toConfirmTxIndex(txIndex, true) } } From 594eac23e6bc38f1d3ddfe8bf177ec6dc149f82c Mon Sep 17 00:00:00 2001 From: setunapo Date: Fri, 6 May 2022 10:01:48 +0800 Subject: [PATCH 14/18] Stage2 conflict check: read with FromTxIndex skip conflict check if what the SlotDB read was from a fresh txIndex, which has not been merged. --- core/state/statedb.go | 194 ++++++++++++++++++++++++++++++---------- core/state_processor.go | 4 +- 2 files changed, 149 insertions(+), 49 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index f00d587c02..e4b00033aa 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -159,8 +159,16 @@ type ParallelState struct { kvChangesInSlot map[common.Address]StateKeys // value will be kept in dirtiedStateObjectsInSlot // Actions such as SetCode, Suicide will change address's state. // Later call like Exist(), Empty(), HasSuicided() depend on the address's state. 
- addrStateReadsInSlot map[common.Address]bool // true: exist, false: not exist or deleted - addrStateChangesInSlot map[common.Address]bool // true: created, false: deleted + addrStateReadsInSlot map[common.Address]bool // true: exist, false: not exist or deleted + addrStateChangesInSlot map[common.Address]bool // true: created, false: deleted + + nonceReadsInSlotFromTxIndex map[common.Address]int // -1: main db + balanceReadsInSlotFromTxIndex map[common.Address]int // -1: main db + codeReadsInSlotFromTxIndex map[common.Address]int // -1: main db + codeHashReadsInSlotFromTxIndex map[common.Address]int // -1: main db + kvReadsInSlotFromTxIndex map[common.Address]map[common.Hash]int // -1: main db + addrStateReadsInSlotFromTxIndex map[common.Address]int // -1: main db + addrSnapDestructsReadsInSlot map[common.Address]bool // addrSnapDestructsChangesInSlot map[common.Address]struct{} // no use to get from unconfirmed DB for efficiency @@ -1205,6 +1213,13 @@ func (s *StateDB) CopyForSlot() *ParallelStateDB { nonceReadsInSlot: make(map[common.Address]uint64), addrSnapDestructsReadsInSlot: make(map[common.Address]bool), + nonceReadsInSlotFromTxIndex: make(map[common.Address]int), + balanceReadsInSlotFromTxIndex: make(map[common.Address]int), + codeReadsInSlotFromTxIndex: make(map[common.Address]int), + codeHashReadsInSlotFromTxIndex: make(map[common.Address]int), + kvReadsInSlotFromTxIndex: make(map[common.Address]map[common.Hash]int), + addrStateReadsInSlotFromTxIndex: make(map[common.Address]int), + isSlotDB: true, dirtiedStateObjectsInSlot: make(map[common.Address]*StateObject), // stateObjectsPool.Get().(map[common.Address]*StateObject), } @@ -2363,10 +2378,13 @@ func (s *ParallelStateDB) GetOrNewStateObject(addr common.Address) *StateObject if stateObject, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { return stateObject } - stateObject, _ = s.getStateObjectFromUnconfirmedDB(addr) + var fromTxIndex int = -1 + stateObject, dbTxIndex, _ := 
s.getStateObjectFromUnconfirmedDB(addr) if stateObject == nil { stateObject = s.getStateObjectNoSlot(addr) // try to get from base db + } else { + fromTxIndex = dbTxIndex } if stateObject == nil || stateObject.deleted || stateObject.suicided { stateObject = s.createObject(addr) @@ -2374,6 +2392,7 @@ func (s *ParallelStateDB) GetOrNewStateObject(addr common.Address) *StateObject } s.parallel.addrStateReadsInSlot[addr] = exist // true: exist, false: not exist + s.parallel.addrStateReadsInSlotFromTxIndex[addr] = fromTxIndex return stateObject } @@ -2397,14 +2416,16 @@ func (s *ParallelStateDB) Exist(addr common.Address) bool { return exist } // 2.2 Try to get from unconfirmed DB if exist - if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { + if exist, txIndex, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { s.parallel.addrStateReadsInSlot[addr] = exist // update and cache + s.parallel.addrStateReadsInSlotFromTxIndex[addr] = txIndex return exist } // 3.Try to get from main StateDB exist := s.getStateObjectNoSlot(addr) != nil s.parallel.addrStateReadsInSlot[addr] = exist // update and cache + s.parallel.addrStateReadsInSlotFromTxIndex[addr] = -1 return exist } @@ -2436,14 +2457,16 @@ func (s *ParallelStateDB) Empty(addr common.Address) bool { return !exist } // 2.2 Try to get from unconfirmed DB if exist - if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { + if exist, txIndex, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { s.parallel.addrStateReadsInSlot[addr] = exist // update and cache + s.parallel.addrStateReadsInSlotFromTxIndex[addr] = txIndex return !exist } so := s.getStateObjectNoSlot(addr) empty := (so == nil || so.empty()) s.parallel.addrStateReadsInSlot[addr] = !empty // update and cache + s.parallel.addrStateReadsInSlotFromTxIndex[addr] = -1 return empty } @@ -2467,8 +2490,9 @@ func (s *ParallelStateDB) GetBalance(addr common.Address) *big.Int { return balance } // 2.2 Try to get from unconfirmed DB if exist - if balance := 
s.getBalanceFromUnconfirmedDB(addr); balance != nil { + if balance, txIndex := s.getBalanceFromUnconfirmedDB(addr); balance != nil { s.parallel.balanceReadsInSlot[addr] = balance + s.parallel.balanceReadsInSlotFromTxIndex[addr] = txIndex return balance } @@ -2479,6 +2503,7 @@ func (s *ParallelStateDB) GetBalance(addr common.Address) *big.Int { balance = stateObject.Balance() } s.parallel.balanceReadsInSlot[addr] = balance + s.parallel.balanceReadsInSlotFromTxIndex[addr] = -1 return balance } @@ -2506,8 +2531,9 @@ func (s *ParallelStateDB) GetNonce(addr common.Address) uint64 { return nonce } // 2.2 Try to get from unconfirmed DB if exist - if nonce, ok := s.getNonceFromUnconfirmedDB(addr); ok { + if nonce, txIndex, ok := s.getNonceFromUnconfirmedDB(addr); ok { s.parallel.nonceReadsInSlot[addr] = nonce + s.parallel.nonceReadsInSlotFromTxIndex[addr] = txIndex return nonce } @@ -2518,6 +2544,7 @@ func (s *ParallelStateDB) GetNonce(addr common.Address) uint64 { nonce = stateObject.Nonce() } s.parallel.nonceReadsInSlot[addr] = nonce + s.parallel.nonceReadsInSlotFromTxIndex[addr] = -1 return nonce } @@ -2538,8 +2565,9 @@ func (s *ParallelStateDB) GetCode(addr common.Address) []byte { return code } // 2.2 Try to get from unconfirmed DB if exist - if code, ok := s.getCodeFromUnconfirmedDB(addr); ok { + if code, txIndex, ok := s.getCodeFromUnconfirmedDB(addr); ok { s.parallel.codeReadsInSlot[addr] = code + s.parallel.codeReadsInSlotFromTxIndex[addr] = txIndex return code } @@ -2550,6 +2578,7 @@ func (s *ParallelStateDB) GetCode(addr common.Address) []byte { code = stateObject.Code(s.db) } s.parallel.codeReadsInSlot[addr] = code + s.parallel.codeReadsInSlotFromTxIndex[addr] = -1 return code } @@ -2568,8 +2597,9 @@ func (s *ParallelStateDB) GetCodeSize(addr common.Address) int { return len(code) // len(nil) is 0 too } // 2.2 Try to get from unconfirmed DB if exist - if code, ok := s.getCodeFromUnconfirmedDB(addr); ok { + if code, txIndex, ok := 
s.getCodeFromUnconfirmedDB(addr); ok { s.parallel.codeReadsInSlot[addr] = code + s.parallel.codeReadsInSlotFromTxIndex[addr] = txIndex return len(code) // len(nil) is 0 too } @@ -2583,6 +2613,7 @@ func (s *ParallelStateDB) GetCodeSize(addr common.Address) int { codeSize = stateObject.CodeSize(s.db) } s.parallel.codeReadsInSlot[addr] = code + s.parallel.codeReadsInSlotFromTxIndex[addr] = -1 return codeSize } @@ -2605,8 +2636,9 @@ func (s *ParallelStateDB) GetCodeHash(addr common.Address) common.Hash { return codeHash } // 2.2 Try to get from unconfirmed DB if exist - if codeHash, ok := s.getCodeHashFromUnconfirmedDB(addr); ok { + if codeHash, txIndex, ok := s.getCodeHashFromUnconfirmedDB(addr); ok { s.parallel.codeHashReadsInSlot[addr] = codeHash + s.parallel.codeHashReadsInSlotFromTxIndex[addr] = txIndex return codeHash } // 3. Try to get from main StateObejct @@ -2616,6 +2648,7 @@ func (s *ParallelStateDB) GetCodeHash(addr common.Address) common.Hash { codeHash = common.BytesToHash(stateObject.CodeHash()) } s.parallel.codeHashReadsInSlot[addr] = codeHash + s.parallel.codeHashReadsInSlotFromTxIndex[addr] = -1 return codeHash } @@ -2649,11 +2682,13 @@ func (s *ParallelStateDB) GetState(addr common.Address, hash common.Hash) common } } // 2.2 Try to get from unconfirmed DB if exist - if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok { + if val, txIndex, ok := s.getKVFromUnconfirmedDB(addr, hash); ok { if s.parallel.kvReadsInSlot[addr] == nil { s.parallel.kvReadsInSlot[addr] = newStorage(false) + s.parallel.kvReadsInSlotFromTxIndex[addr] = make(map[common.Hash]int) } s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache + s.parallel.kvReadsInSlotFromTxIndex[addr][hash] = txIndex return val } @@ -2665,8 +2700,10 @@ func (s *ParallelStateDB) GetState(addr common.Address, hash common.Hash) common } if s.parallel.kvReadsInSlot[addr] == nil { s.parallel.kvReadsInSlot[addr] = newStorage(false) + s.parallel.kvReadsInSlotFromTxIndex[addr] = 
make(map[common.Hash]int) } s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache + s.parallel.kvReadsInSlotFromTxIndex[addr][hash] = -1 return val } @@ -2683,11 +2720,13 @@ func (s *ParallelStateDB) GetCommittedState(addr common.Address, hash common.Has } } // 2.2 Try to get from unconfirmed DB if exist - if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok { + if val, txIndex, ok := s.getKVFromUnconfirmedDB(addr, hash); ok { if s.parallel.kvReadsInSlot[addr] == nil { s.parallel.kvReadsInSlot[addr] = newStorage(false) + s.parallel.kvReadsInSlotFromTxIndex[addr] = make(map[common.Hash]int) } s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache + s.parallel.kvReadsInSlotFromTxIndex[addr][hash] = txIndex return val } @@ -2699,8 +2738,10 @@ func (s *ParallelStateDB) GetCommittedState(addr common.Address, hash common.Has } if s.parallel.kvReadsInSlot[addr] == nil { s.parallel.kvReadsInSlot[addr] = newStorage(false) + s.parallel.kvReadsInSlotFromTxIndex[addr] = make(map[common.Hash]int) } s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache + s.parallel.kvReadsInSlotFromTxIndex[addr][hash] = -1 return val } @@ -2710,7 +2751,7 @@ func (s *ParallelStateDB) HasSuicided(addr common.Address) bool { return obj.suicided } // 2.Try to get from uncomfirmed - if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { + if exist, _, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { // fixme? 
return !exist } @@ -2940,9 +2981,10 @@ func (s *ParallelStateDB) Suicide(addr common.Address) bool { stateObject = s.parallel.dirtiedStateObjectsInSlot[addr] if stateObject == nil { // 2.Try to get from uncomfirmed, if deleted return false, since the address does not exist - if obj, ok := s.getStateObjectFromUnconfirmedDB(addr); ok { + if obj, txIndex, ok := s.getStateObjectFromUnconfirmedDB(addr); ok { stateObject = obj s.parallel.addrStateReadsInSlot[addr] = !stateObject.deleted // true: exist, false: deleted + s.parallel.addrStateReadsInSlotFromTxIndex[addr] = txIndex if stateObject.deleted { log.Error("Suicide addr alreay deleted in confirmed DB", "txIndex", s.txIndex, "addr", addr) return false @@ -2955,10 +2997,12 @@ func (s *ParallelStateDB) Suicide(addr common.Address) bool { stateObject = s.getStateObjectNoSlot(addr) if stateObject == nil { s.parallel.addrStateReadsInSlot[addr] = false // true: exist, false: deleted + s.parallel.addrStateReadsInSlotFromTxIndex[addr] = -1 log.Error("Suicide addr not exist", "txIndex", s.txIndex, "addr", addr) return false } s.parallel.addrStateReadsInSlot[addr] = true // true: exist, false: deleted + s.parallel.addrStateReadsInSlotFromTxIndex[addr] = -1 } s.journal.append(suicideChange{ @@ -3050,10 +3094,10 @@ func (s *ParallelStateDB) SubRefund(gas uint64) { // fixme: not needed // | BaseTxIndex | Unconfirmed Txs... 
| Current TxIndex | // ------------------------------------------------------- // Access from the unconfirmed DB with range&priority: txIndex -1(previous tx) -> baseTxIndex + 1 -func (s *ParallelStateDB) getBalanceFromUnconfirmedDB(addr common.Address) *big.Int { +func (s *ParallelStateDB) getBalanceFromUnconfirmedDB(addr common.Address) (*big.Int, int) { if addr == s.parallel.systemAddress { // never get systemaddress from unconfirmed DB - return nil + return nil, 0 } for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { @@ -3076,18 +3120,18 @@ func (s *ParallelStateDB) getBalanceFromUnconfirmedDB(addr common.Address) *big. if obj.deleted { balance = common.Big0 } - return balance + return balance, db.txIndex } } } - return nil + return nil, 0 } // Similar to getBalanceFromUnconfirmedDB -func (s *ParallelStateDB) getNonceFromUnconfirmedDB(addr common.Address) (uint64, bool) { +func (s *ParallelStateDB) getNonceFromUnconfirmedDB(addr common.Address) (uint64, int, bool) { if addr == s.parallel.systemAddress { // never get systemaddress from unconfirmed DB - return 0, false + return 0, 0, false } for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { @@ -3118,18 +3162,18 @@ func (s *ParallelStateDB) getNonceFromUnconfirmedDB(addr common.Address) (uint64 if obj.deleted { nonce = 0 } - return nonce, true + return nonce, db.TxIndex(), true } } - return 0, false + return 0, 0, false } // Similar to getBalanceFromUnconfirmedDB // It is not only for code, but also codeHash and codeSize, we return the *StateObject for convienence. 
-func (s *ParallelStateDB) getCodeFromUnconfirmedDB(addr common.Address) ([]byte, bool) { +func (s *ParallelStateDB) getCodeFromUnconfirmedDB(addr common.Address) ([]byte, int, bool) { if addr == s.parallel.systemAddress { // never get systemaddress from unconfirmed DB - return nil, false + return nil, 0, false } for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { @@ -3159,18 +3203,18 @@ func (s *ParallelStateDB) getCodeFromUnconfirmedDB(addr common.Address) ([]byte, if obj.deleted { code = nil } - return code, true + return code, db.txIndex, true } } - return nil, false + return nil, 0, false } // Similar to getCodeFromUnconfirmedDB // but differ when address is deleted or not exist -func (s *ParallelStateDB) getCodeHashFromUnconfirmedDB(addr common.Address) (common.Hash, bool) { +func (s *ParallelStateDB) getCodeHashFromUnconfirmedDB(addr common.Address) (common.Hash, int, bool) { if addr == s.parallel.systemAddress { // never get systemaddress from unconfirmed DB - return common.Hash{}, false + return common.Hash{}, 0, false } for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { @@ -3200,20 +3244,20 @@ func (s *ParallelStateDB) getCodeHashFromUnconfirmedDB(addr common.Address) (com if !obj.deleted { codeHash = common.BytesToHash(obj.CodeHash()) } - return codeHash, true + return codeHash, db.txIndex, true } } - return common.Hash{}, false + return common.Hash{}, 0, false } // Similar to getCodeFromUnconfirmedDB // It is for address state check of: Exist(), Empty() and HasSuicided() // Since the unconfirmed DB should have done Finalise() with `deleteEmptyObjects = true` // If the dirty address is empty or suicided, it will be marked as deleted, so we only need to return `deleted` or not. 
-func (s *ParallelStateDB) getAddrStateFromUnconfirmedDB(addr common.Address) (bool, bool) { +func (s *ParallelStateDB) getAddrStateFromUnconfirmedDB(addr common.Address) (bool, int, bool) { if addr == s.parallel.systemAddress { // never get systemaddress from unconfirmed DB - return false, false + return false, 0, false } // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) @@ -3230,14 +3274,14 @@ func (s *ParallelStateDB) getAddrStateFromUnconfirmedDB(addr common.Address) (bo continue } - return exist, true + return exist, db.txIndex, true } } } - return false, false + return false, 0, false } -func (s *ParallelStateDB) getKVFromUnconfirmedDB(addr common.Address, key common.Hash) (common.Hash, bool) { +func (s *ParallelStateDB) getKVFromUnconfirmedDB(addr common.Address, key common.Hash) (common.Hash, int, bool) { // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { @@ -3245,40 +3289,41 @@ func (s *ParallelStateDB) getKVFromUnconfirmedDB(addr common.Address, key common defer db.wbnbMakeUpLock.RUnlock() if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { // if deleted on merge, can get from main StateDB, ok but fixme: concurrent safe if obj.deleted { - return common.Hash{}, true + return common.Hash{}, db.txIndex, true } if _, ok := db.parallel.kvChangesInSlot[addr]; ok { if val, exist := obj.dirtyStorage.GetValue(key); exist { - return val, true + return val, db.txIndex, true } if val, exist := obj.pendingStorage.GetValue(key); exist { // fixme: can be removed log.Error("Get KV from Unconfirmed StateDB, in pending", "my txIndex", s.txIndex, "DB's txIndex", i, "addr", addr, "key", key, "val", val) - return val, true + return val, db.txIndex, true } } } } } - return common.Hash{}, false + return common.Hash{}, 0, false } -func (s *ParallelStateDB) getStateObjectFromUnconfirmedDB(addr 
common.Address) (*StateObject, bool) { +func (s *ParallelStateDB) getStateObjectFromUnconfirmedDB(addr common.Address) (*StateObject, int, bool) { // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { db.wbnbMakeUpLock.RLock() defer db.wbnbMakeUpLock.RUnlock() if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { // if deleted on merge, can get from main StateDB, ok but fixme: concurrent safe - return obj, true + return obj, db.txIndex, true } } } - return nil, false + return nil, 0, false } -func (s *ParallelStateDB) IsParallelReadsValid(isStage2 bool) bool { +// in stage2, we do unconfirmed conflict detect +func (s *ParallelStateDB) IsParallelReadsValid(isStage2 bool, mergedTxIndex int) bool { slotDB := s if !slotDB.parallel.isSlotDB { log.Error("IsSlotDBReadsValid slotDB should be slot DB", "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) @@ -3292,14 +3337,18 @@ func (s *ParallelStateDB) IsParallelReadsValid(isStage2 bool) bool { } // for nonce for addr, nonceSlot := range slotDB.parallel.nonceReadsInSlot { - nonceMain := mainDB.GetNonce(addr) - if nonceSlot != nonceMain { - if isStage2 { //? 
+ if isStage2 { + readTxIndex := slotDB.parallel.nonceReadsInSlotFromTxIndex[addr] + if readTxIndex > mergedTxIndex { log.Debug("IsSlotDBReadsValid skip nonce check in stage 2", - "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) + "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, + "readTxIndex", readTxIndex, "mergedTxIndex", mergedTxIndex) continue } + } + nonceMain := mainDB.GetNonce(addr) + if nonceSlot != nonceMain { log.Debug("IsSlotDBReadsValid nonce read is invalid", "addr", addr, "nonceSlot", nonceSlot, "nonceMain", nonceMain, "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) @@ -3308,9 +3357,20 @@ func (s *ParallelStateDB) IsParallelReadsValid(isStage2 bool) bool { } // balance for addr, balanceSlot := range slotDB.parallel.balanceReadsInSlot { + if isStage2 { + readTxIndex := slotDB.parallel.balanceReadsInSlotFromTxIndex[addr] + if readTxIndex > mergedTxIndex { + log.Debug("IsSlotDBReadsValid skip balance check in stage 2", + "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, + "readTxIndex", readTxIndex, "mergedTxIndex", mergedTxIndex) + continue + } + } + if addr != s.parallel.systemAddress { // skip balance check for system address balanceMain := mainDB.GetBalance(addr) if balanceSlot.Cmp(balanceMain) != 0 { + if addr == WBNBAddress && slotDB.WBNBMakeUp() { // WBNB balance make up if isStage2 { log.Debug("IsSlotDBReadsValid skip makeup for WBNB in stage 2", @@ -3347,6 +3407,15 @@ func (s *ParallelStateDB) IsParallelReadsValid(isStage2 bool) bool { } // check code for addr, codeSlot := range slotDB.parallel.codeReadsInSlot { + if isStage2 { + readTxIndex := slotDB.parallel.codeReadsInSlotFromTxIndex[addr] + if readTxIndex > mergedTxIndex { + log.Debug("IsSlotDBReadsValid skip code check in stage 2", + "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, + "readTxIndex", readTxIndex, "mergedTxIndex", mergedTxIndex) + continue + } 
+ } codeMain := mainDB.GetCode(addr) if !bytes.Equal(codeSlot, codeMain) { log.Debug("IsSlotDBReadsValid code read is invalid", "addr", addr, @@ -3357,6 +3426,15 @@ func (s *ParallelStateDB) IsParallelReadsValid(isStage2 bool) bool { } // check codeHash for addr, codeHashSlot := range slotDB.parallel.codeHashReadsInSlot { + if isStage2 { + readTxIndex := slotDB.parallel.codeHashReadsInSlotFromTxIndex[addr] + if readTxIndex > mergedTxIndex { + log.Debug("IsSlotDBReadsValid skip codeHash check in stage 2", + "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, + "readTxIndex", readTxIndex, "mergedTxIndex", mergedTxIndex) + continue + } + } codeHashMain := mainDB.GetCodeHash(addr) if !bytes.Equal(codeHashSlot.Bytes(), codeHashMain.Bytes()) { log.Debug("IsSlotDBReadsValid codehash read is invalid", "addr", addr, @@ -3369,6 +3447,15 @@ func (s *ParallelStateDB) IsParallelReadsValid(isStage2 bool) bool { for addr, slotStorage := range slotDB.parallel.kvReadsInSlot { conflict := false slotStorage.Range(func(keySlot, valSlot interface{}) bool { + if isStage2 { + readTxIndex := slotDB.parallel.kvReadsInSlotFromTxIndex[addr][keySlot.(common.Hash)] + if readTxIndex > mergedTxIndex { + log.Debug("IsSlotDBReadsValid skip nonce check in stage 2", + "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, + "readTxIndex", readTxIndex, "mergedTxIndex", mergedTxIndex) + return true // return true, Range will try next KV + } + } valMain := mainDB.GetState(addr, keySlot.(common.Hash)) if !bytes.Equal(valSlot.(common.Hash).Bytes(), valMain.Bytes()) { log.Debug("IsSlotDBReadsValid KV read is invalid", "addr", addr, @@ -3386,11 +3473,21 @@ func (s *ParallelStateDB) IsParallelReadsValid(isStage2 bool) bool { } // addr state check for addr, stateSlot := range slotDB.parallel.addrStateReadsInSlot { + if isStage2 { + readTxIndex := slotDB.parallel.addrStateReadsInSlotFromTxIndex[addr] + if readTxIndex > mergedTxIndex { + log.Debug("IsSlotDBReadsValid skip addr state 
check in stage 2", + "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, + "readTxIndex", readTxIndex, "mergedTxIndex", mergedTxIndex) + continue + } + } stateMain := false // addr not exist if mainDB.getStateObject(addr) != nil { stateMain = true // addr exist in main DB } if stateSlot != stateMain { + // skip addr state check for system address if addr != s.parallel.systemAddress { log.Debug("IsSlotDBReadsValid addrState read invalid(true: exist, false: not exist)", @@ -3401,6 +3498,9 @@ func (s *ParallelStateDB) IsParallelReadsValid(isStage2 bool) bool { } } } + if isStage2 { // stage2 skip snapshot destructs check + return true + } // snapshot destructs check for addr, destructRead := range slotDB.parallel.addrSnapDestructsReadsInSlot { mainObj := mainDB.getStateObject(addr) diff --git a/core/state_processor.go b/core/state_processor.go index bef1ac87c1..601368460d 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -52,7 +52,7 @@ const ( dispatchPolicyStatic = 1 dispatchPolicyDynamic = 2 // not supported maxRedoCounterInstage1 = 10000 // try 2, 4, 10, or no limit? - stage2CheckNumber = 10 + stage2CheckNumber = 20 ) var dispatchPolicy = dispatchPolicyStatic @@ -868,7 +868,7 @@ func (p *ParallelStateProcessor) hasConflict(txResult *ParallelTxResult, isStage } else { // to check if what the slot db read is correct. 
// refDetail := slotDB.UnconfirmedRefList() - if !slotDB.IsParallelReadsValid(isStage2) { + if !slotDB.IsParallelReadsValid(isStage2, p.mergedTxIndex) { return true } } From 1d5c4e6e4c847c1f1a38653fd460d54be0be3380 Mon Sep 17 00:00:00 2001 From: setunapo Date: Fri, 6 May 2022 13:14:16 +0800 Subject: [PATCH 15/18] debug log: systemAddr --- core/state/statedb.go | 7 +++++++ core/state_processor.go | 5 ++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index e4b00033aa..3623286553 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -3531,7 +3531,14 @@ func (s *ParallelStateDB) IsParallelReadsValid(isStage2 bool, mergedTxIndex int) // the other is for AddBalance(GasFee) at the end. // (systemAddressOpsCount > 3) means the transaction tries to access systemAddress, in // this case, we should redo and keep its balance on NewSlotDB() +// for example: +// https://bscscan.com/tx/0xe469f1f948de90e9508f96da59a96ed84b818e71432ca11c5176eb60eb66671b func (s *ParallelStateDB) SystemAddressRedo() bool { + if s.parallel.systemAddressOpsCount > 4 { + log.Info("SystemAddressRedo", "SlotIndex", s.parallel.SlotIndex, + "txIndex", s.txIndex, + "systemAddressOpsCount", s.parallel.systemAddressOpsCount) + } return s.parallel.systemAddressOpsCount > 4 } diff --git a/core/state_processor.go b/core/state_processor.go index 601368460d..92e1a0f525 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -860,7 +860,10 @@ func (p *ParallelStateProcessor) hasConflict(txResult *ParallelTxResult, isStage if txResult.err != nil { return true } else if slotDB.SystemAddressRedo() { - txResult.txReq.systemAddrRedo = true + if !isStage2 { + // for system addr redo, it has to wait until it's turn to keep the system address balance + txResult.txReq.systemAddrRedo = true + } return true } else if slotDB.NeedsRedo() { // if this is any reason that indicates this transaction needs to redo, skip the conflict check From 
e2794a333deaf722aaccac562de73cbe82fa0d3e Mon Sep 17 00:00:00 2001 From: setunapo Date: Fri, 6 May 2022 14:05:52 +0800 Subject: [PATCH 16/18] Stage2 unconfirmed check --- core/state/statedb.go | 155 +++++++++++++++++++--------------------- core/state_processor.go | 25 ++++--- 2 files changed, 88 insertions(+), 92 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index 3623286553..57ec1f1629 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -3321,9 +3321,19 @@ func (s *ParallelStateDB) getStateObjectFromUnconfirmedDB(addr common.Address) ( } return nil, 0, false } +func (s *ParallelStateDB) UpdateUnConfirmDBs(baseTxIndex int, + unconfirmedDBs *sync.Map /*map[int]*ParallelStateDB*/) { + s.parallel.unconfirmedDBInShot = make(map[int]*ParallelStateDB, 100) + for index := baseTxIndex + 1; index < s.txIndex; index++ { + unconfirmedDB, ok := unconfirmedDBs.Load(index) + if ok { + s.parallel.unconfirmedDBInShot[index] = unconfirmedDB.(*ParallelStateDB) + } + } +} // in stage2, we do unconfirmed conflict detect -func (s *ParallelStateDB) IsParallelReadsValid(isStage2 bool, mergedTxIndex int) bool { +func (s *ParallelStateDB) IsParallelReadsValid(isStage2 bool, mergedTxIndex int, unconfirmedDBs *sync.Map) bool { slotDB := s if !slotDB.parallel.isSlotDB { log.Error("IsSlotDBReadsValid slotDB should be slot DB", "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) @@ -3335,18 +3345,21 @@ func (s *ParallelStateDB) IsParallelReadsValid(isStage2 bool, mergedTxIndex int) log.Error("IsSlotDBReadsValid s should be main DB", "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) return false } + if isStage2 { // update slotDB's unconfirmed DB list and try + slotDB.UpdateUnConfirmDBs(mergedTxIndex, unconfirmedDBs) + } // for nonce for addr, nonceSlot := range slotDB.parallel.nonceReadsInSlot { - if isStage2 { - readTxIndex := slotDB.parallel.nonceReadsInSlotFromTxIndex[addr] - if readTxIndex > mergedTxIndex { - 
log.Debug("IsSlotDBReadsValid skip nonce check in stage 2", - "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, - "readTxIndex", readTxIndex, "mergedTxIndex", mergedTxIndex) - continue + if isStage2 { // update slotDB's unconfirmed DB list and try + if nonceUnconfirm, _, ok := slotDB.getNonceFromUnconfirmedDB(addr); ok { + if nonceSlot != nonceUnconfirm { + log.Debug("IsSlotDBReadsValid nonce read is invalid in unconfirmed", "addr", addr, + "nonceSlot", nonceSlot, "nonceUnconfirm", nonceUnconfirm, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } } } - nonceMain := mainDB.GetNonce(addr) if nonceSlot != nonceMain { log.Debug("IsSlotDBReadsValid nonce read is invalid", "addr", addr, @@ -3357,26 +3370,25 @@ func (s *ParallelStateDB) IsParallelReadsValid(isStage2 bool, mergedTxIndex int) } // balance for addr, balanceSlot := range slotDB.parallel.balanceReadsInSlot { - if isStage2 { - readTxIndex := slotDB.parallel.balanceReadsInSlotFromTxIndex[addr] - if readTxIndex > mergedTxIndex { - log.Debug("IsSlotDBReadsValid skip balance check in stage 2", - "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, - "readTxIndex", readTxIndex, "mergedTxIndex", mergedTxIndex) - continue + if isStage2 { // update slotDB's unconfirmed DB list and try + if balanceUnconfirm, _ := slotDB.getBalanceFromUnconfirmedDB(addr); balanceUnconfirm != nil { + if balanceSlot.Cmp(balanceUnconfirm) == 0 { + continue + } + if addr == WBNBAddress && slotDB.WBNBMakeUp() { + log.Debug("IsSlotDBReadsValid skip makeup for WBNB in stage 2", + "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) + continue // stage2 will skip WBNB check, no balance makeup + } + return false } } - if addr != s.parallel.systemAddress { // skip balance check for system address + if addr != slotDB.parallel.systemAddress { // skip balance check for system address balanceMain := mainDB.GetBalance(addr) if 
balanceSlot.Cmp(balanceMain) != 0 { if addr == WBNBAddress && slotDB.WBNBMakeUp() { // WBNB balance make up - if isStage2 { - log.Debug("IsSlotDBReadsValid skip makeup for WBNB in stage 2", - "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) - continue // stage2 will skip WBNB check, no balance makeup - } balanceDelta := new(big.Int).Sub(balanceMain, balanceSlot) slotDB.wbnbMakeUpLock.Lock() slotDB.AddBalance(addr, balanceDelta) // fixme: concurrent not safe, unconfirmed read @@ -3405,55 +3417,21 @@ func (s *ParallelStateDB) IsParallelReadsValid(isStage2 bool, mergedTxIndex int) } } } - // check code - for addr, codeSlot := range slotDB.parallel.codeReadsInSlot { - if isStage2 { - readTxIndex := slotDB.parallel.codeReadsInSlotFromTxIndex[addr] - if readTxIndex > mergedTxIndex { - log.Debug("IsSlotDBReadsValid skip code check in stage 2", - "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, - "readTxIndex", readTxIndex, "mergedTxIndex", mergedTxIndex) - continue - } - } - codeMain := mainDB.GetCode(addr) - if !bytes.Equal(codeSlot, codeMain) { - log.Debug("IsSlotDBReadsValid code read is invalid", "addr", addr, - "len codeSlot", len(codeSlot), "len codeMain", len(codeMain), "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) - return false - } - } - // check codeHash - for addr, codeHashSlot := range slotDB.parallel.codeHashReadsInSlot { - if isStage2 { - readTxIndex := slotDB.parallel.codeHashReadsInSlotFromTxIndex[addr] - if readTxIndex > mergedTxIndex { - log.Debug("IsSlotDBReadsValid skip codeHash check in stage 2", - "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, - "readTxIndex", readTxIndex, "mergedTxIndex", mergedTxIndex) - continue - } - } - codeHashMain := mainDB.GetCodeHash(addr) - if !bytes.Equal(codeHashSlot.Bytes(), codeHashMain.Bytes()) { - log.Debug("IsSlotDBReadsValid codehash read is invalid", "addr", addr, - "codeHashSlot", 
codeHashSlot, "codeHashMain", codeHashMain, "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) - return false - } - } + // check KV for addr, slotStorage := range slotDB.parallel.kvReadsInSlot { conflict := false slotStorage.Range(func(keySlot, valSlot interface{}) bool { - if isStage2 { - readTxIndex := slotDB.parallel.kvReadsInSlotFromTxIndex[addr][keySlot.(common.Hash)] - if readTxIndex > mergedTxIndex { - log.Debug("IsSlotDBReadsValid skip nonce check in stage 2", - "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, - "readTxIndex", readTxIndex, "mergedTxIndex", mergedTxIndex) - return true // return true, Range will try next KV + if isStage2 { // update slotDB's unconfirmed DB list and try + if valUnconfirm, _, ok := slotDB.getKVFromUnconfirmedDB(addr, valSlot.(common.Hash)); ok { + if !bytes.Equal(valSlot.(common.Hash).Bytes(), valUnconfirm.Bytes()) { + log.Debug("IsSlotDBReadsValid nonce read is invalid in unconfirmed", "addr", addr, + "valSlot", valSlot.(common.Hash), "valUnconfirm", valUnconfirm, + "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + conflict = true + return false // return false, Range will be terminated. + } } } valMain := mainDB.GetState(addr, keySlot.(common.Hash)) @@ -3471,25 +3449,39 @@ func (s *ParallelStateDB) IsParallelReadsValid(isStage2 bool, mergedTxIndex int) return false } } + if isStage2 { // stage2 skip check code, or state, since they are likely unchanged. 
+ return true + } + + // check code + for addr, codeSlot := range slotDB.parallel.codeReadsInSlot { + codeMain := mainDB.GetCode(addr) + if !bytes.Equal(codeSlot, codeMain) { + log.Debug("IsSlotDBReadsValid code read is invalid", "addr", addr, + "len codeSlot", len(codeSlot), "len codeMain", len(codeMain), "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + // check codeHash + for addr, codeHashSlot := range slotDB.parallel.codeHashReadsInSlot { + codeHashMain := mainDB.GetCodeHash(addr) + if !bytes.Equal(codeHashSlot.Bytes(), codeHashMain.Bytes()) { + log.Debug("IsSlotDBReadsValid codehash read is invalid", "addr", addr, + "codeHashSlot", codeHashSlot, "codeHashMain", codeHashMain, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } // addr state check for addr, stateSlot := range slotDB.parallel.addrStateReadsInSlot { - if isStage2 { - readTxIndex := slotDB.parallel.addrStateReadsInSlotFromTxIndex[addr] - if readTxIndex > mergedTxIndex { - log.Debug("IsSlotDBReadsValid skip addr state check in stage 2", - "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, - "readTxIndex", readTxIndex, "mergedTxIndex", mergedTxIndex) - continue - } - } stateMain := false // addr not exist if mainDB.getStateObject(addr) != nil { stateMain = true // addr exist in main DB } if stateSlot != stateMain { - // skip addr state check for system address - if addr != s.parallel.systemAddress { + if addr != slotDB.parallel.systemAddress { log.Debug("IsSlotDBReadsValid addrState read invalid(true: exist, false: not exist)", "addr", addr, "stateSlot", stateSlot, "stateMain", stateMain, "SlotIndex", slotDB.parallel.SlotIndex, @@ -3498,9 +3490,6 @@ func (s *ParallelStateDB) IsParallelReadsValid(isStage2 bool, mergedTxIndex int) } } } - if isStage2 { // stage2 skip snapshot destructs check - return true - } // 
snapshot destructs check for addr, destructRead := range slotDB.parallel.addrSnapDestructsReadsInSlot { mainObj := mainDB.getStateObject(addr) @@ -3511,9 +3500,9 @@ func (s *ParallelStateDB) IsParallelReadsValid(isStage2 bool, mergedTxIndex int) "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) return false } - s.snapParallelLock.RLock() // fixme: this lock is not needed + slotDB.snapParallelLock.RLock() // fixme: this lock is not needed _, destructMain := mainDB.snapDestructs[addr] // addr not exist - s.snapParallelLock.RUnlock() + slotDB.snapParallelLock.RUnlock() if destructRead != destructMain { log.Debug("IsSlotDBReadsValid snapshot destructs read invalid", "addr", addr, "destructRead", destructRead, "destructMain", destructMain, diff --git a/core/state_processor.go b/core/state_processor.go index 92e1a0f525..298b70e6fe 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -51,8 +51,9 @@ const ( maxUnitSize = 10 dispatchPolicyStatic = 1 dispatchPolicyDynamic = 2 // not supported - maxRedoCounterInstage1 = 10000 // try 2, 4, 10, or no limit? - stage2CheckNumber = 20 + maxRedoCounterInstage1 = 10000 // try 2, 4, 10, or no limit? 
not needed + stage2CheckNumber = 10 + stage2RedoNumber = 5 ) var dispatchPolicy = dispatchPolicyStatic @@ -94,7 +95,7 @@ type ParallelStateProcessor struct { allTxReqs []*ParallelTxRequest txReqExecuteRecord map[int]int // for each the execute count of each Tx txReqExecuteCount int - confirmInStage2 bool + inConfirmStage2 bool } func NewParallelStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consensus.Engine, parallelNum int, queueSize int) *ParallelStateProcessor { @@ -820,8 +821,8 @@ func (p *ParallelStateProcessor) runConfirmLoop() { newTxMerged = true } txSize := len(p.allTxReqs) - if !p.confirmInStage2 && p.txReqExecuteCount == txSize { - p.confirmInStage2 = true + if !p.inConfirmStage2 && p.txReqExecuteCount == txSize { + p.inConfirmStage2 = true for i := 0; i < txSize; i++ { p.txReqExecuteRecord[txIndex] = 0 // clear it when enter stage2, for redo limit } @@ -833,7 +834,7 @@ func (p *ParallelStateProcessor) runConfirmLoop() { // stage 2,if all tx have been executed at least once, and its result has been recevied. // in Stage 2, we will run check when merge is advanced. - if p.confirmInStage2 { + if p.inConfirmStage2 { // more aggressive tx result confirm, even for these Txs not in turn // now we will be more aggressive: // do conflcit check , as long as tx result is generated, @@ -845,8 +846,14 @@ func (p *ParallelStateProcessor) runConfirmLoop() { if endTxIndex > (txSize - 1) { endTxIndex = txSize - 1 } + conflictNumMark := p.debugConflictRedoNum for txIndex := startTxIndex; txIndex < endTxIndex; txIndex++ { p.toConfirmTxIndex(txIndex, true) + newConflictNum := p.debugConflictRedoNum - conflictNumMark + // if many redo is scheduled, stop now + if newConflictNum >= stage2RedoNumber { + break + } } } @@ -871,7 +878,7 @@ func (p *ParallelStateProcessor) hasConflict(txResult *ParallelTxResult, isStage } else { // to check if what the slot db read is correct. 
// refDetail := slotDB.UnconfirmedRefList() - if !slotDB.IsParallelReadsValid(isStage2, p.mergedTxIndex) { + if !slotDB.IsParallelReadsValid(isStage2, p.mergedTxIndex, p.unconfirmedStateDBs) { return true } } @@ -1044,7 +1051,7 @@ func (p *ParallelStateProcessor) runSlotLoop(slotIndex int, slotType int32) { p.pendingConfirmChan <- result } // switched to the other slot. - if interrupted || p.confirmInStage2 { + if interrupted || p.inConfirmStage2 { continue } // txReq in this Slot have all been executed, try steal one from other slot. @@ -1103,7 +1110,7 @@ func (p *ParallelStateProcessor) resetState(txNum int, statedb *state.StateDB) { } p.mergedTxIndex = -1 p.debugConflictRedoNum = 0 - p.confirmInStage2 = false + p.inConfirmStage2 = false // p.txReqAccountSorted = make(map[common.Address][]*ParallelTxRequest) // fixme: to be reused? statedb.PrepareForParallel() From 5fc920a184dbdc6956cca4af602cd1c2863a02c8 Mon Sep 17 00:00:00 2001 From: setunapo Date: Fri, 6 May 2022 15:21:47 +0800 Subject: [PATCH 17/18] fix WBNB skip --- core/state/statedb.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index 57ec1f1629..bd6a191763 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -3387,8 +3387,12 @@ func (s *ParallelStateDB) IsParallelReadsValid(isStage2 bool, mergedTxIndex int, if addr != slotDB.parallel.systemAddress { // skip balance check for system address balanceMain := mainDB.GetBalance(addr) if balanceSlot.Cmp(balanceMain) != 0 { - if addr == WBNBAddress && slotDB.WBNBMakeUp() { // WBNB balance make up + if isStage2 { + log.Debug("IsSlotDBReadsValid skip makeup for WBNB in stage 2", + "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) + continue // stage2 will skip WBNB check, no balance makeup + } balanceDelta := new(big.Int).Sub(balanceMain, balanceSlot) slotDB.wbnbMakeUpLock.Lock() slotDB.AddBalance(addr, balanceDelta) // fixme: concurrent not safe, unconfirmed read 
From bb32205a30955889a696d05890f68a384837d286 Mon Sep 17 00:00:00 2001 From: setunapo Date: Fri, 6 May 2022 16:30:17 +0800 Subject: [PATCH 18/18] stage2ReservedNum --- core/state/statedb.go | 3 ++- core/state_processor.go | 13 ++++++++++--- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index bd6a191763..b385768bf5 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -3531,8 +3531,9 @@ func (s *ParallelStateDB) SystemAddressRedo() bool { log.Info("SystemAddressRedo", "SlotIndex", s.parallel.SlotIndex, "txIndex", s.txIndex, "systemAddressOpsCount", s.parallel.systemAddressOpsCount) + return true } - return s.parallel.systemAddressOpsCount > 4 + return false } // NeedsRedo returns true if there is any clear reason that we need to redo this transaction diff --git a/core/state_processor.go b/core/state_processor.go index 298b70e6fe..e1aef10630 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -52,8 +52,9 @@ const ( dispatchPolicyStatic = 1 dispatchPolicyDynamic = 2 // not supported maxRedoCounterInstage1 = 10000 // try 2, 4, 10, or no limit? not needed - stage2CheckNumber = 10 - stage2RedoNumber = 5 + stage2CheckNumber = 20 // not fixed, use decrease? + stage2RedoNumber = 8 + stage2ReservedNum = 7 // ? ) var dispatchPolicy = dispatchPolicyStatic @@ -821,7 +822,13 @@ func (p *ParallelStateProcessor) runConfirmLoop() { newTxMerged = true } txSize := len(p.allTxReqs) - if !p.inConfirmStage2 && p.txReqExecuteCount == txSize { + // usually, the last Tx could be the bottleneck, it could be very slow, + // so it is better for us to enter stage 2 a bit earlier + targetStage2Count := txSize + if txSize > 50 { + targetStage2Count = txSize - stage2ReservedNum + } + if !p.inConfirmStage2 && p.txReqExecuteCount == targetStage2Count { p.inConfirmStage2 = true for i := 0; i < txSize; i++ { p.txReqExecuteRecord[txIndex] = 0 // clear it when enter stage2, for redo limit