From dbf78a3072bac386044d158e50d2784f8d4c68cb Mon Sep 17 00:00:00 2001 From: lunarblock <84233204+lunarblock@users.noreply.github.com> Date: Tue, 12 Apr 2022 11:02:02 +0800 Subject: [PATCH 01/10] [R4R] add sync pool for slotdb and remove deepCopy for mergeSlotDB (#1) * add sync pool for slotdb and remove deepCopy for mergeSlotDB * don't panic if there is anything wrong reading state * use map in sequential mode and sync.map in parallel mode * fix the comments --- core/state/dump.go | 2 +- core/state/state_object.go | 189 ++++++++++++++++++++++++++--------- core/state/state_test.go | 53 ++++++---- core/state/statedb.go | 197 +++++++++++++++++++++++++++++++------ core/state_processor.go | 15 +++ 5 files changed, 362 insertions(+), 94 deletions(-) diff --git a/core/state/dump.go b/core/state/dump.go index b25da714fd..55f4c7754d 100644 --- a/core/state/dump.go +++ b/core/state/dump.go @@ -138,7 +138,7 @@ func (s *StateDB) DumpToCollector(c DumpCollector, excludeCode, excludeStorage, account.SecureKey = it.Key } addr := common.BytesToAddress(addrBytes) - obj := newObject(s, addr, data) + obj := newObject(s, s.isParallel, addr, data) if !excludeCode { account.Code = common.Bytes2Hex(obj.Code(s.db)) } diff --git a/core/state/state_object.go b/core/state/state_object.go index 8830a9d0ad..ce8926609a 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -38,9 +38,18 @@ func (c Code) String() string { return string(c) //strings.Join(Disassemble(c), " ") } -type Storage map[common.Hash]common.Hash +type Storage interface { + String() string + GetValue(hash common.Hash) (common.Hash, bool) + StoreValue(hash common.Hash, value common.Hash) + Length() (length int) + Copy() Storage + Range(func(key, value interface{}) bool) +} + +type StorageMap map[common.Hash]common.Hash -func (s Storage) String() (str string) { +func (s StorageMap) String() (str string) { for key, value := range s { str += fmt.Sprintf("%X : %X\n", key, value) } @@ -48,8 +57,8 @@ func (s 
Storage) String() (str string) { return } -func (s Storage) Copy() Storage { - cpy := make(Storage) +func (s StorageMap) Copy() Storage { + cpy := make(StorageMap) for key, value := range s { cpy[key] = value } @@ -57,6 +66,79 @@ func (s Storage) Copy() Storage { return cpy } +func (s StorageMap) GetValue(hash common.Hash) (common.Hash, bool) { + value, ok := s[hash] + return value, ok +} + +func (s StorageMap) StoreValue(hash common.Hash, value common.Hash) { + s[hash] = value +} + +func (s StorageMap) Length() int { + return len(s) +} + +func (s StorageMap) Range(f func(hash, value interface{}) bool) { + for k, v := range s { + result := f(k, v) + if !result { + return + } + } +} + +type StorageSyncMap struct { + sync.Map +} + +func (s *StorageSyncMap) String() (str string) { + s.Range(func(key, value interface{}) bool { + str += fmt.Sprintf("%X : %X\n", key, value) + return true + }) + + return +} + +func (s *StorageSyncMap) GetValue(hash common.Hash) (common.Hash, bool) { + value, ok := s.Load(hash) + if !ok { + return common.Hash{}, ok + } + + return value.(common.Hash), ok +} + +func (s *StorageSyncMap) StoreValue(hash common.Hash, value common.Hash) { + s.Store(hash, value) +} + +func (s *StorageSyncMap) Length() (length int) { + s.Range(func(key, value interface{}) bool { + length++ + return true + }) + return length +} + +func (s *StorageSyncMap) Copy() Storage { + cpy := StorageSyncMap{} + s.Range(func(key, value interface{}) bool { + cpy.Store(key, value) + return true + }) + + return &cpy +} + +func newStorage(isParallel bool) Storage { + if isParallel { + return &StorageSyncMap{} + } + return make(StorageMap) +} + // StateObject represents an Ethereum account which is being modified. 
// // The usage pattern is as follows: @@ -80,12 +162,12 @@ type StateObject struct { trie Trie // storage trie, which becomes non-nil on first access code Code // contract bytecode, which gets set when code is loaded + isParallel bool // isParallel indicates this state object is used in parallel mode sharedOriginStorage *sync.Map // Storage cache of original entries to dedup rewrites, reset for every transaction - originStorage Storage - - pendingStorage Storage // Storage entries that need to be flushed to disk, at the end of an entire block - dirtyStorage Storage // Storage entries that have been modified in the current transaction execution - fakeStorage Storage // Fake storage which constructed by caller for debugging purpose. + originStorage Storage // Storage cache of original entries to dedup rewrites, reset for every transaction + pendingStorage Storage // Storage entries that need to be flushed to disk, at the end of an entire block + dirtyStorage Storage // Storage entries that have been modified in the current transaction execution + fakeStorage Storage // Fake storage which constructed by caller for debugging purpose. // Cache flags. // When an object is marked suicided it will be delete from the trie @@ -113,7 +195,7 @@ type Account struct { } // newObject creates a state object. 
-func newObject(db *StateDB, address common.Address, data Account) *StateObject { +func newObject(db *StateDB, isParallel bool, address common.Address, data Account) *StateObject { if data.Balance == nil { data.Balance = new(big.Int) } @@ -134,10 +216,11 @@ func newObject(db *StateDB, address common.Address, data Account) *StateObject { address: address, addrHash: crypto.Keccak256Hash(address[:]), data: data, + isParallel: isParallel, sharedOriginStorage: storageMap, - originStorage: make(Storage), - pendingStorage: make(Storage), - dirtyStorage: make(Storage), + originStorage: newStorage(isParallel), + dirtyStorage: newStorage(isParallel), + pendingStorage: newStorage(isParallel), } } @@ -193,10 +276,11 @@ func (s *StateObject) getTrie(db Database) Trie { func (s *StateObject) GetState(db Database, key common.Hash) common.Hash { // If the fake storage is set, only lookup the state here(in the debugging mode) if s.fakeStorage != nil { - return s.fakeStorage[key] + fakeValue, _ := s.fakeStorage.GetValue(key) + return fakeValue } // If we have a dirty value for this state entry, return it - value, dirty := s.dirtyStorage[key] + value, dirty := s.dirtyStorage.GetValue(key) if dirty { return value } @@ -205,7 +289,7 @@ func (s *StateObject) GetState(db Database, key common.Hash) common.Hash { } func (s *StateObject) getOriginStorage(key common.Hash) (common.Hash, bool) { - if value, cached := s.originStorage[key]; cached { + if value, cached := s.originStorage.GetValue(key); cached { return value, true } // if L1 cache miss, try to get it from shared pool @@ -214,7 +298,7 @@ func (s *StateObject) getOriginStorage(key common.Hash) (common.Hash, bool) { if !ok { return common.Hash{}, false } - s.originStorage[key] = val.(common.Hash) + s.originStorage.StoreValue(key, val.(common.Hash)) return val.(common.Hash), true } return common.Hash{}, false @@ -224,17 +308,18 @@ func (s *StateObject) setOriginStorage(key common.Hash, value common.Hash) { if s.db.writeOnSharedStorage 
&& s.sharedOriginStorage != nil { s.sharedOriginStorage.Store(key, value) } - s.originStorage[key] = value + s.originStorage.StoreValue(key, value) } // GetCommittedState retrieves a value from the committed account storage trie. func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Hash { // If the fake storage is set, only lookup the state here(in the debugging mode) if s.fakeStorage != nil { - return s.fakeStorage[key] + fakeValue, _ := s.fakeStorage.GetValue(key) + return fakeValue } // If we have a pending write or clean cached, return that - if value, pending := s.pendingStorage[key]; pending { + if value, pending := s.pendingStorage.GetValue(key); pending { return value } @@ -305,7 +390,7 @@ func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Has func (s *StateObject) SetState(db Database, key, value common.Hash) { // If the fake storage is set, put the temporary state update here. if s.fakeStorage != nil { - s.fakeStorage[key] = value + s.fakeStorage.StoreValue(key, value) return } // If the new value is the same as old, don't set @@ -331,35 +416,39 @@ func (s *StateObject) SetState(db Database, key, value common.Hash) { func (s *StateObject) SetStorage(storage map[common.Hash]common.Hash) { // Allocate fake storage if it's nil. if s.fakeStorage == nil { - s.fakeStorage = make(Storage) + s.fakeStorage = newStorage(s.isParallel) } for key, value := range storage { - s.fakeStorage[key] = value + s.fakeStorage.StoreValue(key, value) } // Don't bother journal since this function should only be used for // debugging and the `fake` storage won't be committed to database. } func (s *StateObject) setState(key, value common.Hash) { - s.dirtyStorage[key] = value + s.dirtyStorage.StoreValue(key, value) } // finalise moves all dirty storage slots into the pending area to be hashed or // committed later. It is invoked at the end of every transaction. 
func (s *StateObject) finalise(prefetch bool) { - slotsToPrefetch := make([][]byte, 0, len(s.dirtyStorage)) - for key, value := range s.dirtyStorage { - s.pendingStorage[key] = value - if value != s.originStorage[key] { - slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure + slotsToPrefetch := make([][]byte, 0, s.dirtyStorage.Length()) + s.dirtyStorage.Range(func(key, value interface{}) bool { + s.pendingStorage.StoreValue(key.(common.Hash), value.(common.Hash)) + + originalValue, _ := s.originStorage.GetValue(key.(common.Hash)) + if value.(common.Hash) != originalValue { + originalKey := key.(common.Hash) + slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(originalKey[:])) // Copy needed for closure } - } + return true + }) if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != emptyRoot { s.db.prefetcher.prefetch(s.data.Root, slotsToPrefetch, s.addrHash) } - if len(s.dirtyStorage) > 0 { - s.dirtyStorage = make(Storage) + if s.dirtyStorage.Length() > 0 { + s.dirtyStorage = newStorage(s.isParallel) } } @@ -368,7 +457,7 @@ func (s *StateObject) finalise(prefetch bool) { func (s *StateObject) updateTrie(db Database) Trie { // Make sure all dirty slots are finalized into the pending storage area s.finalise(false) // Don't prefetch any more, pull directly if need be - if len(s.pendingStorage) == 0 { + if s.pendingStorage.Length() == 0 { return s.trie } // Track the amount of time wasted on updating the storage trie @@ -384,20 +473,26 @@ func (s *StateObject) updateTrie(db Database) Trie { // Insert all the pending updates into the trie tr := s.getTrie(db) - usedStorage := make([][]byte, 0, len(s.pendingStorage)) - for key, value := range s.pendingStorage { + usedStorage := make([][]byte, 0, s.pendingStorage.Length()) + s.pendingStorage.Range(func(k, v interface{}) bool { + key := k.(common.Hash) + value := v.(common.Hash) + // Skip noop changes, persist actual changes - if value == 
s.originStorage[key] { - continue + originalValue, _ := s.originStorage.GetValue(k.(common.Hash)) + if v.(common.Hash) == originalValue { + return true } - s.originStorage[key] = value - var v []byte + + s.originStorage.StoreValue(k.(common.Hash), v.(common.Hash)) + + var vs []byte if (value == common.Hash{}) { s.setError(tr.TryDelete(key[:])) } else { // Encoding []byte cannot fail, ok to ignore the error. - v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:])) - s.setError(tr.TryUpdate(key[:], v)) + vs, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:])) + s.setError(tr.TryUpdate(key[:], vs)) } // If state snapshotting is active, cache the data til commit if s.db.snap != nil { @@ -409,16 +504,18 @@ func (s *StateObject) updateTrie(db Database) Trie { s.db.snapStorage[s.address] = storage } } - storage[string(key[:])] = v // v will be nil if value is 0x00 + storage[string(key[:])] = vs // v will be nil if value is 0x00 s.db.snapMux.Unlock() } usedStorage = append(usedStorage, common.CopyBytes(key[:])) // Copy needed for closure - } + return true + }) + if s.db.prefetcher != nil { s.db.prefetcher.used(s.data.Root, usedStorage) } - if len(s.pendingStorage) > 0 { - s.pendingStorage = make(Storage) + if s.pendingStorage.Length() > 0 { + s.pendingStorage = newStorage(s.isParallel) } return tr } @@ -506,7 +603,7 @@ func (s *StateObject) setBalance(amount *big.Int) { func (s *StateObject) ReturnGas(gas *big.Int) {} func (s *StateObject) deepCopy(db *StateDB) *StateObject { - stateObject := newObject(db, s.address, s.data) + stateObject := newObject(db, s.isParallel, s.address, s.data) if s.trie != nil { stateObject.trie = db.db.CopyTrie(s.trie) } diff --git a/core/state/state_test.go b/core/state/state_test.go index 4be9ae8ce3..a8417b13e7 100644 --- a/core/state/state_test.go +++ b/core/state/state_test.go @@ -227,30 +227,47 @@ func compareStateObjects(so0, so1 *StateObject, t *testing.T) { t.Fatalf("Code mismatch: have %v, want %v", so0.code, so1.code) } - if 
len(so1.dirtyStorage) != len(so0.dirtyStorage) { - t.Errorf("Dirty storage size mismatch: have %d, want %d", len(so1.dirtyStorage), len(so0.dirtyStorage)) + if so1.dirtyStorage.Length() != so0.dirtyStorage.Length() { + t.Errorf("Dirty storage size mismatch: have %d, want %d", so1.dirtyStorage.Length(), so0.dirtyStorage.Length()) } - for k, v := range so1.dirtyStorage { - if so0.dirtyStorage[k] != v { - t.Errorf("Dirty storage key %x mismatch: have %v, want %v", k, so0.dirtyStorage[k], v) + + so1.dirtyStorage.Range(func(key, value interface{}) bool { + k, v := key.(common.Hash), value.(common.Hash) + + if tmpV, _ := so0.dirtyStorage.GetValue(k); tmpV != v { + t.Errorf("Dirty storage key %x mismatch: have %v, want %v", k, tmpV.String(), v) } - } - for k, v := range so0.dirtyStorage { - if so1.dirtyStorage[k] != v { + return true + }) + + so0.dirtyStorage.Range(func(key, value interface{}) bool { + k, v := key.(common.Hash), value.(common.Hash) + + if tmpV, _ := so1.dirtyStorage.GetValue(k); tmpV != v { t.Errorf("Dirty storage key %x mismatch: have %v, want none.", k, v) } + return true + }) + + if so1.originStorage.Length() != so0.originStorage.Length() { + t.Errorf("Origin storage size mismatch: have %d, want %d", so1.originStorage.Length(), so0.originStorage.Length()) } - if len(so1.originStorage) != len(so0.originStorage) { - t.Errorf("Origin storage size mismatch: have %d, want %d", len(so1.originStorage), len(so0.originStorage)) - } - for k, v := range so1.originStorage { - if so0.originStorage[k] != v { - t.Errorf("Origin storage key %x mismatch: have %v, want %v", k, so0.originStorage[k], v) + + so1.originStorage.Range(func(key, value interface{}) bool { + k, v := key.(common.Hash), value.(common.Hash) + + if tmpV, _ := so0.originStorage.GetValue(k); tmpV != v { + t.Errorf("Origin storage key %x mismatch: have %v, want %v", k, tmpV, v) } - } - for k, v := range so0.originStorage { - if so1.originStorage[k] != v { + return true + }) + + 
so0.originStorage.Range(func(key, value interface{}) bool { + k, v := key.(common.Hash), value.(common.Hash) + + if tmpV, _ := so1.originStorage.GetValue(k); tmpV != v { t.Errorf("Origin storage key %x mismatch: have %v, want none.", k, v) } - } + return true + }) } diff --git a/core/state/statedb.go b/core/state/statedb.go index debf0c65dd..4c67b6e531 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -153,6 +153,9 @@ type ParallelState struct { systemAddress common.Address systemAddressOpsCount int keepSystemAddressBalance bool + + // we may need to redo for some specific reasons, like we read the wrong state and need to panic in sequential mode in SubRefund + needsRedo bool } // StateDB structs within the ethereum protocol are used to store anything @@ -379,9 +382,9 @@ func (s *StateDB) MergeSlotDB(slotDb *StateDB, slotReceipt *types.Receipt, txInd } else { // addr already in main DB, do merge: balance, KV, code, State(create, suicide) // can not do copy or ownership transfer directly, since dirtyObj could have outdated - // data(may be update within the conflict window) + // data(may be updated within the conflict window) - var newMainObj *StateObject + var newMainObj = mainObj // we don't need to copy the object since the storages are thread safe if _, created := slotDb.parallel.addrStateChangesInSlot[addr]; created { // there are 3 kinds of state change: // 1.Suicide @@ -405,7 +408,6 @@ func (s *StateDB) MergeSlotDB(slotDb *StateDB, slotReceipt *types.Receipt, txInd // deepCopy a temporary *StateObject for safety, since slot could read the address, // dispatch should avoid overwrite the StateObject directly otherwise, it could // crash for: concurrent map iteration and map write - newMainObj = mainObj.deepCopy(s) if _, balanced := slotDb.parallel.balanceChangesInSlot[addr]; balanced { log.Debug("merge state object: Balance", "newMainObj.Balance()", newMainObj.Balance(), @@ -630,6 +632,12 @@ func (s *StateDB) AddRefund(gas uint64) { func (s 
*StateDB) SubRefund(gas uint64) { s.journal.append(refundChange{prev: s.refund}) if gas > s.refund { + if s.isParallel { + // we don't need to panic here if we read the wrong state, we just need to redo this transaction + log.Info(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, s.refund), "tx", s.thash.String()) + s.parallel.needsRedo = true + return + } panic(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, s.refund)) } s.refund -= gas @@ -711,6 +719,11 @@ func (s *StateDB) SystemAddressRedo() bool { return s.parallel.systemAddressOpsCount > 2 } +// NeedsRedo returns true if there is any clear reason that we need to redo this transaction +func (s *StateDB) NeedsRedo() bool { + return s.parallel.needsRedo +} + func (s *StateDB) GetCode(addr common.Address) []byte { if s.parallel.isSlotDB { s.parallel.codeReadsInSlot[addr] = struct{}{} @@ -1158,7 +1171,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject { } } // Insert into the live set - obj := newObject(s, addr, *data) + obj := newObject(s, s.isParallel, addr, *data) s.SetStateObject(obj) return obj } @@ -1199,7 +1212,7 @@ func (s *StateDB) createObject(addr common.Address) (newobj, prev *StateObject) s.snapDestructs[prev.address] = struct{}{} } } - newobj = newObject(s, addr, Account{}) + newobj = newObject(s, s.isParallel, addr, Account{}) newobj.setNonce(0) // sets the object to dirty if prev == nil { s.journal.append(createObjectChange{account: &addr}) @@ -1243,7 +1256,7 @@ func (s *StateDB) ForEachStorage(addr common.Address, cb func(key, value common. 
for it.Next() { key := common.BytesToHash(s.trie.GetKey(it.Key)) - if value, dirty := so.dirtyStorage[key]; dirty { + if value, dirty := so.dirtyStorage.GetValue(key); dirty { if !cb(key, value) { return nil } @@ -1369,40 +1382,166 @@ func (s *StateDB) Copy() *StateDB { return state } -// Copy all the basic fields, initialize the memory ones +var addressStructPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]struct{}, defaultNumOfSlots) }, +} + +var journalPool = sync.Pool{ + New: func() interface{} { + return &journal{ + dirties: make(map[common.Address]int, defaultNumOfSlots), + entries: make([]journalEntry, 0, defaultNumOfSlots), + } + }, +} + +var stateKeysPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]StateKeys, defaultNumOfSlots) }, +} + +var stateObjectsPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]*StateObject, defaultNumOfSlots) }, +} + +var snapAccountPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address][]byte, defaultNumOfSlots) }, +} + +var snapStoragePool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]map[string][]byte, defaultNumOfSlots) }, +} + +var snapStorageValuePool = sync.Pool{ + New: func() interface{} { return make(map[string][]byte, defaultNumOfSlots) }, +} + +var logsPool = sync.Pool{ + New: func() interface{} { return make(map[common.Hash][]*types.Log, defaultNumOfSlots) }, +} + +func (s *StateDB) SlotDBPutSyncPool() { + for key := range s.parallel.stateObjectsSuicidedInSlot { + delete(s.parallel.stateObjectsSuicidedInSlot, key) + } + addressStructPool.Put(s.parallel.stateObjectsSuicidedInSlot) + + for key := range s.parallel.codeReadsInSlot { + delete(s.parallel.codeReadsInSlot, key) + } + addressStructPool.Put(s.parallel.codeReadsInSlot) + + for key := range s.parallel.codeChangesInSlot { + delete(s.parallel.codeChangesInSlot, key) + } + addressStructPool.Put(s.parallel.codeChangesInSlot) + + for key := 
range s.parallel.balanceChangesInSlot { + delete(s.parallel.balanceChangesInSlot, key) + } + addressStructPool.Put(s.parallel.balanceChangesInSlot) + + for key := range s.parallel.balanceReadsInSlot { + delete(s.parallel.balanceReadsInSlot, key) + } + addressStructPool.Put(s.parallel.balanceReadsInSlot) + + for key := range s.parallel.addrStateReadsInSlot { + delete(s.parallel.addrStateReadsInSlot, key) + } + addressStructPool.Put(s.parallel.addrStateReadsInSlot) + + for key := range s.parallel.nonceChangesInSlot { + delete(s.parallel.nonceChangesInSlot, key) + } + addressStructPool.Put(s.parallel.nonceChangesInSlot) + + for key := range s.stateObjectsPending { + delete(s.stateObjectsPending, key) + } + addressStructPool.Put(s.stateObjectsPending) + + for key := range s.stateObjectsDirty { + delete(s.stateObjectsDirty, key) + } + addressStructPool.Put(s.stateObjectsDirty) + + for key := range s.journal.dirties { + delete(s.journal.dirties, key) + } + s.journal.entries = s.journal.entries[:0] + journalPool.Put(s.journal) + + for key := range s.parallel.stateChangesInSlot { + delete(s.parallel.stateChangesInSlot, key) + } + stateKeysPool.Put(s.parallel.stateChangesInSlot) + + for key := range s.parallel.stateReadsInSlot { + delete(s.parallel.stateReadsInSlot, key) + } + stateKeysPool.Put(s.parallel.stateReadsInSlot) + + for key := range s.parallel.dirtiedStateObjectsInSlot { + delete(s.parallel.dirtiedStateObjectsInSlot, key) + } + stateObjectsPool.Put(s.parallel.dirtiedStateObjectsInSlot) + + for key := range s.snapDestructs { + delete(s.snapDestructs, key) + } + addressStructPool.Put(s.snapDestructs) + + for key := range s.snapAccounts { + delete(s.snapAccounts, key) + } + snapAccountPool.Put(s.snapAccounts) + + for key, storage := range s.snapStorage { + for key := range storage { + delete(storage, key) + } + snapStorageValuePool.Put(storage) + delete(s.snapStorage, key) + } + snapStoragePool.Put(s.snapStorage) + + for key := range s.logs { + delete(s.logs, key) + 
} + logsPool.Put(s.logs) +} + +// CopyForSlot copy all the basic fields, initialize the memory ones func (s *StateDB) CopyForSlot() *StateDB { parallel := ParallelState{ // use base(dispatcher) slot db's stateObjects. // It is a SyncMap, only readable to slot, not writable stateObjects: s.parallel.stateObjects, - stateObjectsSuicidedInSlot: make(map[common.Address]struct{}, 10), - codeReadsInSlot: make(map[common.Address]struct{}, defaultNumOfSlots), - codeChangesInSlot: make(map[common.Address]struct{}, 10), - stateChangesInSlot: make(map[common.Address]StateKeys, defaultNumOfSlots), - stateReadsInSlot: make(map[common.Address]StateKeys, defaultNumOfSlots), - balanceChangesInSlot: make(map[common.Address]struct{}, defaultNumOfSlots), - balanceReadsInSlot: make(map[common.Address]struct{}, defaultNumOfSlots), - addrStateReadsInSlot: make(map[common.Address]struct{}, defaultNumOfSlots), - addrStateChangesInSlot: make(map[common.Address]struct{}, 10), - nonceChangesInSlot: make(map[common.Address]struct{}, 10), + stateObjectsSuicidedInSlot: addressStructPool.Get().(map[common.Address]struct{}), + codeReadsInSlot: addressStructPool.Get().(map[common.Address]struct{}), + codeChangesInSlot: addressStructPool.Get().(map[common.Address]struct{}), + stateChangesInSlot: stateKeysPool.Get().(map[common.Address]StateKeys), + stateReadsInSlot: stateKeysPool.Get().(map[common.Address]StateKeys), + balanceChangesInSlot: addressStructPool.Get().(map[common.Address]struct{}), + balanceReadsInSlot: addressStructPool.Get().(map[common.Address]struct{}), + addrStateReadsInSlot: addressStructPool.Get().(map[common.Address]struct{}), + addrStateChangesInSlot: addressStructPool.Get().(map[common.Address]struct{}), + nonceChangesInSlot: addressStructPool.Get().(map[common.Address]struct{}), isSlotDB: true, - dirtiedStateObjectsInSlot: make(map[common.Address]*StateObject, defaultNumOfSlots), + dirtiedStateObjectsInSlot: stateObjectsPool.Get().(map[common.Address]*StateObject), } state := 
&StateDB{ db: s.db, trie: s.db.CopyTrie(s.trie), stateObjects: make(map[common.Address]*StateObject), // replaced by parallel.stateObjects in parallel mode - stateObjectsPending: make(map[common.Address]struct{}, defaultNumOfSlots), - stateObjectsDirty: make(map[common.Address]struct{}, defaultNumOfSlots), + stateObjectsPending: addressStructPool.Get().(map[common.Address]struct{}), + stateObjectsDirty: addressStructPool.Get().(map[common.Address]struct{}), refund: s.refund, // should be 0 - logs: make(map[common.Hash][]*types.Log, defaultNumOfSlots), + logs: logsPool.Get().(map[common.Hash][]*types.Log), logSize: 0, preimages: make(map[common.Hash][]byte, len(s.preimages)), - journal: newJournal(), + journal: journalPool.Get().(*journal), hasher: crypto.NewKeccakState(), - snapDestructs: make(map[common.Address]struct{}), - snapAccounts: make(map[common.Address][]byte), - snapStorage: make(map[common.Address]map[string][]byte), isParallel: true, parallel: parallel, } @@ -1419,18 +1558,18 @@ func (s *StateDB) CopyForSlot() *StateDB { state.snaps = s.snaps state.snap = s.snap // deep copy needed - state.snapDestructs = make(map[common.Address]struct{}) + state.snapDestructs = addressStructPool.Get().(map[common.Address]struct{}) for k, v := range s.snapDestructs { state.snapDestructs[k] = v } // - state.snapAccounts = make(map[common.Address][]byte) + state.snapAccounts = snapAccountPool.Get().(map[common.Address][]byte) for k, v := range s.snapAccounts { state.snapAccounts[k] = v } - state.snapStorage = make(map[common.Address]map[string][]byte) + state.snapStorage = snapStoragePool.Get().(map[common.Address]map[string][]byte) for k, v := range s.snapStorage { - temp := make(map[string][]byte) + temp := snapStorageValuePool.Get().(map[string][]byte) for kk, vv := range v { temp[kk] = vv } diff --git a/core/state_processor.go b/core/state_processor.go index b3aebcab89..4af8b39262 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -75,6 +75,7 @@ 
type ParallelStateProcessor struct { txResultChan chan *ParallelTxResult // to notify dispatcher that a tx is done slotState []*SlotState // idle, or pending messages mergedTxIndex int // the latest finalized tx index + slotDBsToRelease []*state.StateDB debugErrorRedoNum int debugConflictRedoNum int } @@ -591,6 +592,7 @@ func (p *ParallelStateProcessor) dispatchToIdleSlot(statedb *state.StateDB, txRe if len(slot.mergedChangeList) == 0 { // first transaction of a slot, there is no usable SlotDB, have to create one for it. txReq.slotDB = state.NewSlotDB(statedb, consensus.SystemAddress, p.mergedTxIndex, false) + p.slotDBsToRelease = append(p.slotDBsToRelease, txReq.slotDB) } log.Debug("dispatchToIdleSlot", "Slot", i, "txIndex", txReq.txIndex) slot.tailTxReq = txReq @@ -616,6 +618,7 @@ func (p *ParallelStateProcessor) waitUntilNextTxDone(statedb *state.StateDB, gp // the target slot is waiting for new slotDB slotState := p.slotState[result.slotIndex] slotDB := state.NewSlotDB(statedb, consensus.SystemAddress, p.mergedTxIndex, result.keepSystem) + p.slotDBsToRelease = append(p.slotDBsToRelease, slotDB) slotState.slotdbChan <- slotDB continue } @@ -718,6 +721,9 @@ func (p *ParallelStateProcessor) execInSlot(slotIndex int, txReq *ParallelTxRequ if slotDB.SystemAddressRedo() { hasConflict = true systemAddrConflict = true + } else if slotDB.NeedsRedo() { + // if this is any reason that indicates this transaction needs to redo, skip the conflict check + hasConflict = true } else { for index := 0; index < p.parallelNum; index++ { if index == slotIndex { @@ -831,6 +837,15 @@ func (p *ParallelStateProcessor) resetState(txNum int, statedb *state.StateDB) { statedb.PrepareForParallel() + stateDBsToRelease := p.slotDBsToRelease + p.slotDBsToRelease = make([]*state.StateDB, 0, txNum) + + go func() { + for _, slotDB := range stateDBsToRelease { + slotDB.SlotDBPutSyncPool() + } + }() + for _, slot := range p.slotState { slot.tailTxReq = nil slot.mergedChangeList = 
make([]state.SlotChangeList, 0) From 24c53f571f7c219db74ae00d4b83805910645472 Mon Sep 17 00:00:00 2001 From: setunapo Date: Wed, 23 Mar 2022 16:38:45 +0800 Subject: [PATCH 02/10] WIP: Parallel 2.0 streaming pipeline ** dispatch hunry slot ** remove idle dispatch ** static Dispatch --- core/state/statedb.go | 10 + core/state_processor.go | 446 ++++++++++++++++++++++++-------------- core/types/transaction.go | 8 + core/vm/evm.go | 2 +- core/vm/interface.go | 2 + 5 files changed, 308 insertions(+), 160 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index 4c67b6e531..a38dca5aef 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -152,6 +152,7 @@ type ParallelState struct { // needs real system address's balance, the transaction will be marked redo with keepSystemAddressBalance = true systemAddress common.Address systemAddressOpsCount int + nonceIncreased uint64 // create contract keepSystemAddressBalance bool // we may need to redo for some specific reasons, like we read the wrong state and need to panic in sequential mode in SubRefund @@ -689,6 +690,10 @@ func (s *StateDB) BlockHash() common.Hash { return s.bhash } +func (s *StateDB) IsSlotDB() bool { + return s.parallel.isSlotDB +} + // BaseTxIndex returns the tx index that slot db based. 
func (s *StateDB) BaseTxIndex() int { return s.parallel.baseTxIndex @@ -724,6 +729,10 @@ func (s *StateDB) NeedsRedo() bool { return s.parallel.needsRedo } +func (s *StateDB) NonceIncreased() uint64 { + return s.parallel.nonceIncreased +} + func (s *StateDB) GetCode(addr common.Address) []byte { if s.parallel.isSlotDB { s.parallel.codeReadsInSlot[addr] = struct{}{} @@ -945,6 +954,7 @@ func (s *StateDB) NonceChanged(addr common.Address) { if s.parallel.isSlotDB { log.Debug("NonceChanged", "txIndex", s.txIndex, "addr", addr) s.parallel.nonceChangesInSlot[addr] = struct{}{} + s.parallel.nonceIncreased++ } } diff --git a/core/state_processor.go b/core/state_processor.go index 4af8b39262..f98adea519 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -47,6 +47,7 @@ const ( recentTime = 1024 * 3 recentDiffLayerTimeout = 5 farDiffLayerTimeout = 2 + maxUnitSize = 10 ) // StateProcessor is a basic Processor, which takes care of transitioning @@ -70,11 +71,12 @@ func NewStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consen // add for parallel executions type ParallelStateProcessor struct { StateProcessor - parallelNum int // leave a CPU to dispatcher - queueSize int // parallel slot's maximum number of pending Txs - txResultChan chan *ParallelTxResult // to notify dispatcher that a tx is done - slotState []*SlotState // idle, or pending messages - mergedTxIndex int // the latest finalized tx index + parallelNum int // leave a CPU to dispatcher + queueSize int // parallel slot's maximum number of pending Txs + txResultChan chan *ParallelTxResult // to notify dispatcher that a tx is done + // txReqAccountSorted map[common.Address][]*ParallelTxRequest // fixme: *ParallelTxRequest => ParallelTxRequest? 
+ slotState []*SlotState // idle, or pending messages + mergedTxIndex int // the latest finalized tx index slotDBsToRelease []*state.StateDB debugErrorRedoNum int debugConflictRedoNum int @@ -394,11 +396,12 @@ func (p *LightStateProcessor) LightProcess(diffLayer *types.DiffLayer, block *ty } type SlotState struct { - tailTxReq *ParallelTxRequest // tail pending Tx of the slot, should be accessed on dispatcher only. - pendingTxReqChan chan *ParallelTxRequest - pendingTxReqList []*ParallelTxRequest // maintained by dispatcher for dispatch policy - mergedChangeList []state.SlotChangeList - slotdbChan chan *state.StateDB // dispatch will create and send this slotDB to slot + pendingTxReqChan chan struct{} + pendingConfirmChan chan *ParallelTxResult + pendingTxReqList []*ParallelTxRequest // maintained by dispatcher for dispatch policy + mergedChangeList []state.SlotChangeList + slotdbChan chan *state.StateDB // dispatch will create and send this slotDB to slot + // txReqUnits []*ParallelDispatchUnit // only dispatch can access } type ParallelTxResult struct { @@ -409,6 +412,9 @@ type ParallelTxResult struct { txReq *ParallelTxRequest receipt *types.Receipt slotDB *state.StateDB // if updated, it is not equal to txReq.slotDB + gpSlot *GasPool + evm *vm.EVM + result *ExecutionResult } type ParallelTxRequest struct { @@ -435,9 +441,15 @@ func (p *ParallelStateProcessor) init() { for i := 0; i < p.parallelNum; i++ { p.slotState[i] = &SlotState{ - slotdbChan: make(chan *state.StateDB, 1), - pendingTxReqChan: make(chan *ParallelTxRequest, p.queueSize), + slotdbChan: make(chan *state.StateDB, 1), + pendingTxReqChan: make(chan struct{}, 1), + pendingConfirmChan: make(chan *ParallelTxResult, p.queueSize), } + // start the shadow slot first + go func(slotIndex int) { + p.runShadowSlotLoop(slotIndex) // this loop will be permanent live + }(i) + // start the slot's goroutine go func(slotIndex int) { p.runSlotLoop(slotIndex) // this loop will be permanent live @@ -525,6 +537,7 @@ 
func (p *ParallelStateProcessor) hasStateConflict(readDb *state.StateDB, changeL // for parallel execute, we put contracts of same address in a slot, // since these txs probably would have conflicts +/* func (p *ParallelStateProcessor) queueSameToAddress(txReq *ParallelTxRequest) bool { txToAddr := txReq.tx.To() // To() == nil means contract creation, no same To address @@ -532,9 +545,6 @@ func (p *ParallelStateProcessor) queueSameToAddress(txReq *ParallelTxRequest) bo return false } for i, slot := range p.slotState { - if slot.tailTxReq == nil { // this slot is idle - continue - } for _, pending := range slot.pendingTxReqList { // To() == nil means contract creation, skip it. if pending.tx.To() == nil { @@ -544,7 +554,6 @@ func (p *ParallelStateProcessor) queueSameToAddress(txReq *ParallelTxRequest) bo if *txToAddr == *pending.tx.To() { select { case slot.pendingTxReqChan <- txReq: - slot.tailTxReq = txReq slot.pendingTxReqList = append(slot.pendingTxReqList, txReq) log.Debug("queue same To address", "Slot", i, "txIndex", txReq.txIndex) return true @@ -557,21 +566,18 @@ func (p *ParallelStateProcessor) queueSameToAddress(txReq *ParallelTxRequest) bo } return false } - +*/ // for parallel execute, we put contracts of same address in a slot, // since these txs probably would have conflicts +/* func (p *ParallelStateProcessor) queueSameFromAddress(txReq *ParallelTxRequest) bool { txFromAddr := txReq.msg.From() for i, slot := range p.slotState { - if slot.tailTxReq == nil { // this slot is idle - continue - } for _, pending := range slot.pendingTxReqList { // same from address, put it on slot's pending list. 
if txFromAddr == pending.msg.From() { select { case slot.pendingTxReqChan <- txReq: - slot.tailTxReq = txReq slot.pendingTxReqList = append(slot.pendingTxReqList, txReq) log.Debug("queue same From address", "Slot", i, "txIndex", txReq.txIndex) return true @@ -584,26 +590,159 @@ func (p *ParallelStateProcessor) queueSameFromAddress(txReq *ParallelTxRequest) } return false } +*/ +/* +func (p *ParallelStateProcessor) dispatchToHungrySlot(statedb *state.StateDB, txReq *ParallelTxRequest) bool { + var workload int = len(p.slotState[0].pendingTxReqList) + var slotIndex int = 0 + for i, slot := range p.slotState { // can start from index 1 + if len(slot.pendingTxReqList) < workload { + slotIndex = i + workload = len(slot.pendingTxReqList) + } + } + if workload >= p.queueSize { + log.Debug("dispatch no Hungry Slot, all slots are full of task", "queueSize", p.queueSize) + return false + } -// if there is idle slot, dispatch the msg to the first idle slot -func (p *ParallelStateProcessor) dispatchToIdleSlot(statedb *state.StateDB, txReq *ParallelTxRequest) bool { - for i, slot := range p.slotState { - if slot.tailTxReq == nil { - if len(slot.mergedChangeList) == 0 { - // first transaction of a slot, there is no usable SlotDB, have to create one for it. 
- txReq.slotDB = state.NewSlotDB(statedb, consensus.SystemAddress, p.mergedTxIndex, false) - p.slotDBsToRelease = append(p.slotDBsToRelease, txReq.slotDB) + if workload == 0 && txReq.slotDB == nil { + // Create a SlotDB for idle slot to save an IPC channel cost for updateSlotDB + txReq.slotDB = state.NewSlotDB(statedb, consensus.SystemAddress, p.mergedTxIndex, false) + } + + log.Debug("dispatch To Hungry Slot", "slot", slotIndex, "workload", workload, "txIndex", txReq.txIndex) + slot := p.slotState[slotIndex] + select { + case slot.pendingTxReqChan <- txReq: + slot.pendingTxReqList = append(slot.pendingTxReqList, txReq) + return true + default: + log.Error("dispatch To Hungry Slot, but chan <- txReq failed??", "Slot", slotIndex, "txIndex", txReq.txIndex) + break + } + + return false +} +*/ + +// 1.Sliding Window: + +// txReqAccountSorted +// Unit: a slice of *TxReq, with len <= maxParallelUnitSize +// Units should be ordered by TxIndex +// TxReq's TxIndex of a Unit should be within a certain range: ParallelNum * maxParallelUnitSize? + +// Dispatch an Unit once for each slot? +// Unit make policy: +// 1.From +// 2.To... +/* +type ParallelDispatchUnit struct { + startTxIndex int + endTxIndex int + txsSize int + txReqs []*ParallelTxRequest +} +*/ + +// Try best to make the unit full, it is full when: +// ** maxUnitSize reached +// ** tx index range reached +// Avoid to make it full immediately, swicth to next unit when: +// ** full +// ** not full, but the Tx of the same address has exhausted + +// New Unit will be created by batch +// ** first + +// Benefit of StaticDispatch: +// ** try best to make Txs with same From() in same slot +// ** reduce IPC cost by dispatch in Unit + +// 2022.03.25: too complicated, apply simple method first... 
+// ** make sure same From in same slot +// ** try to make it balanced, queue to the most hungry slot for new Address +func (p *ParallelStateProcessor) doStaticDispatch(mainStatedb *state.StateDB, txReqs []*ParallelTxRequest) { + + fromSlotMap := make(map[common.Address]int, 100) + toSlotMap := make(map[common.Address]int, 100) + for _, txReq := range txReqs { + var slotIndex int = -1 + if i, ok := fromSlotMap[txReq.msg.From()]; ok { + // first: same From are all in same slot + slotIndex = i + } else if txReq.msg.To() != nil { + // To Address, with txIndex sorted, could be in different slot. + // fixme: Create will move to hungry slot + if i, ok := toSlotMap[*txReq.msg.To()]; ok { + slotIndex = i } - log.Debug("dispatchToIdleSlot", "Slot", i, "txIndex", txReq.txIndex) - slot.tailTxReq = txReq - slot.pendingTxReqList = append(slot.pendingTxReqList, txReq) - slot.pendingTxReqChan <- txReq - return true } + + // not found, dispatch to most hungry slot + if slotIndex == -1 { + var workload int = len(p.slotState[0].pendingTxReqList) + slotIndex = 0 + for i, slot := range p.slotState { // can start from index 1 + if len(slot.pendingTxReqList) < workload { + slotIndex = i + workload = len(slot.pendingTxReqList) + } + } + } + // update + fromSlotMap[txReq.msg.From()] = slotIndex + if txReq.msg.To() != nil { + toSlotMap[*txReq.msg.To()] = slotIndex + } + + slot := p.slotState[slotIndex] + slot.pendingTxReqList = append(slot.pendingTxReqList, txReq) } - return false } +// get the most hungry slot + +/* + // + unitsInBatch := make([]*ParallelDispatchUnit, p.parallelNum ) + + slotIndex :=0 + for _, txReqs := range p.txReqAccountSorted { + currentUnit := unitsInBatch[slotIndex] + slotIndex := (slotIndex+1) % p.parallelNum + if currentUnit.txsSize >= maxUnitSize { + // current slot's unit is full, try next slot's unit + continue + } + var unit *ParallelDispatchUnit + for _, txReq := range txReqs { + numUnit := len(p.slotState[slotIndex].txReqUnits) + // create a unit for the 
first one + if numUnit == 0 { + unit = &ParallelDispatchUnit{ + startTxIndex: txReq.txIndex, + endTxIndex: txReq.txIndex + txIndexSize, + txsSize: 0, + } + unit.txReqs = append(unit.txReqs, txReq) + continue + } + // + unit = p.slotState[slotIndex].txReqUnits[numUnit-1] + // unit is already full + if unit.txsSize >= maxParallelUnitSize { + + } + } + } + // first: move From() to unit + + allUnit = append(allUnit) + } +*/ + // wait until the next Tx is executed and its result is merged to the main stateDB func (p *ParallelStateProcessor) waitUntilNextTxDone(statedb *state.StateDB, gp *GasPool) *ParallelTxResult { var result *ParallelTxResult @@ -625,7 +764,6 @@ func (p *ParallelStateProcessor) waitUntilNextTxDone(statedb *state.StateDB, gp // ok, the tx result is valid and can be merged break } - if err := gp.SubGas(result.receipt.GasUsed); err != nil { log.Error("gas limit reached", "block", result.txReq.block.Number(), "txIndex", result.txReq.txIndex, "GasUsed", result.receipt.GasUsed, "gp.Gas", gp.Gas()) @@ -635,10 +773,6 @@ func (p *ParallelStateProcessor) waitUntilNextTxDone(statedb *state.StateDB, gp resultTxIndex := result.txReq.txIndex resultSlotState := p.slotState[resultSlotIndex] resultSlotState.pendingTxReqList = resultSlotState.pendingTxReqList[1:] - if resultSlotState.tailTxReq.txIndex == resultTxIndex { - log.Debug("ProcessParallel slot is idle", "Slot", resultSlotIndex) - resultSlotState.tailTxReq = nil - } // Slot's mergedChangeList is produced by dispatcher, while consumed by slot. 
// It is safe, since write and read is in sequential, do write -> notify -> read @@ -657,31 +791,36 @@ func (p *ParallelStateProcessor) waitUntilNextTxDone(statedb *state.StateDB, gp return result } -func (p *ParallelStateProcessor) execInSlot(slotIndex int, txReq *ParallelTxRequest) *ParallelTxResult { - txIndex := txReq.txIndex - tx := txReq.tx +func (p *ParallelStateProcessor) executeInSlot(slotIndex int, txReq *ParallelTxRequest) *ParallelTxResult { slotDB := txReq.slotDB - slotGasLimit := txReq.gasLimit // not accurate, but it is ok for block import. - msg := txReq.msg - block := txReq.block - header := block.Header() - cfg := txReq.vmConfig - bloomProcessor := txReq.bloomProcessor - - blockContext := NewEVMBlockContext(header, p.bc, nil) // can share blockContext within a block for efficiency - vmenv := vm.NewEVM(blockContext, vm.TxContext{}, slotDB, p.config, cfg) + slotDB.Prepare(txReq.tx.Hash(), txReq.block.Hash(), txReq.txIndex) + blockContext := NewEVMBlockContext(txReq.block.Header(), p.bc, nil) // can share blockContext within a block for efficiency + vmenv := vm.NewEVM(blockContext, vm.TxContext{}, slotDB, p.config, txReq.vmConfig) + // gasLimit not accurate, but it is ok for block import. 
+ // each slot would use its own gas pool, and will do gaslimit check later + gpSlot := new(GasPool).AddGas(txReq.gasLimit) - var receipt *types.Receipt - var result *ExecutionResult - var err error - var evm *vm.EVM + evm, result, err := applyTransactionStageExecution(txReq.msg, gpSlot, slotDB, vmenv) + log.Debug("In Slot, Stage Execution done", "Slot", slotIndex, "txIndex", txReq.txIndex, "slotDB.baseTxIndex", slotDB.BaseTxIndex()) - slotDB.Prepare(tx.Hash(), block.Hash(), txIndex) - log.Debug("exec In Slot", "Slot", slotIndex, "txIndex", txIndex, "slotDB.baseTxIndex", slotDB.BaseTxIndex()) + return &ParallelTxResult{ + updateSlotDB: false, + slotIndex: slotIndex, + txReq: txReq, + receipt: nil, // receipt is generated in finalize stage + slotDB: slotDB, + err: err, + gpSlot: gpSlot, + evm: evm, + result: result, + } +} - gpSlot := new(GasPool).AddGas(slotGasLimit) // each slot would use its own gas pool, and will do gaslimit check later - evm, result, err = applyTransactionStageExecution(msg, gpSlot, slotDB, vmenv) - log.Debug("Stage Execution done", "Slot", slotIndex, "txIndex", txIndex, "slotDB.baseTxIndex", slotDB.BaseTxIndex()) +func (p *ParallelStateProcessor) executeInShadowSlot(slotIndex int, txResult *ParallelTxResult) *ParallelTxResult { + txReq := txResult.txReq + txIndex := txReq.txIndex + slotDB := txReq.slotDB + header := txReq.block.Header() // wait until the previous tx is finalized. if txReq.waitTxChan != nil { @@ -689,35 +828,14 @@ func (p *ParallelStateProcessor) execInSlot(slotIndex int, txReq *ParallelTxRequ <-txReq.waitTxChan // close the channel } - // in parallel mode, tx can run into trouble, for example: err="nonce too high" - // in these cases, we will wait and re-run. 
- if err != nil { - p.debugErrorRedoNum++ - log.Debug("Stage Execution err", "Slot", slotIndex, "txIndex", txIndex, - "current slotDB.baseTxIndex", slotDB.BaseTxIndex(), "err", err) - redoResult := &ParallelTxResult{ - updateSlotDB: true, - slotIndex: slotIndex, - txReq: txReq, - receipt: receipt, - err: err, - } - p.txResultChan <- redoResult - slotDB = <-p.slotState[slotIndex].slotdbChan - slotDB.Prepare(tx.Hash(), block.Hash(), txIndex) - log.Debug("Stage Execution get new slotdb to redo", "Slot", slotIndex, - "txIndex", txIndex, "new slotDB.baseTxIndex", slotDB.BaseTxIndex()) - gpSlot = new(GasPool).AddGas(slotGasLimit) - evm, result, err = applyTransactionStageExecution(msg, gpSlot, slotDB, vmenv) - if err != nil { - log.Error("Stage Execution redo, error", err) - } + if txResult.err != nil { + log.Error("executeInShadowSlot should have no error", "err", txResult.err) } // do conflict detect hasConflict := false systemAddrConflict := false - log.Debug("Stage Execution done, do conflict check", "Slot", slotIndex, "txIndex", txIndex) + log.Debug("Shadow Stage Execution done, do conflict check", "Slot", slotIndex, "txIndex", txIndex) if slotDB.SystemAddressRedo() { hasConflict = true systemAddrConflict = true @@ -726,12 +844,11 @@ func (p *ParallelStateProcessor) execInSlot(slotIndex int, txReq *ParallelTxRequ hasConflict = true } else { for index := 0; index < p.parallelNum; index++ { - if index == slotIndex { - continue - } - + // log.Debug("Shadow conflict check", "Slot", slotIndex, "txIndex", txIndex) // check all finalizedDb from current slot's for _, changeList := range p.slotState[index].mergedChangeList { + // log.Debug("Shadow conflict check", "changeList.TxIndex", changeList.TxIndex, + // "slotDB.BaseTxIndex()", slotDB.BaseTxIndex()) if changeList.TxIndex <= slotDB.BaseTxIndex() { continue } @@ -756,73 +873,93 @@ func (p *ParallelStateProcessor) execInSlot(slotIndex int, txReq *ParallelTxRequ updateSlotDB: true, keepSystem: systemAddrConflict, 
slotIndex: slotIndex, - txReq: txReq, - receipt: receipt, - err: err, } p.txResultChan <- redoResult - slotDB = <-p.slotState[slotIndex].slotdbChan - slotDB.Prepare(tx.Hash(), block.Hash(), txIndex) - gpSlot = new(GasPool).AddGas(slotGasLimit) - evm, result, err = applyTransactionStageExecution(msg, gpSlot, slotDB, vmenv) - if err != nil { - log.Error("Stage Execution conflict redo, error", err) + updatedSlotDB := <-p.slotState[slotIndex].slotdbChan + updatedSlotDB.Prepare(txReq.tx.Hash(), txReq.block.Hash(), txIndex) + gpSlot := new(GasPool).AddGas(txReq.gasLimit) + + txResult.slotDB = updatedSlotDB + txResult.gpSlot = gpSlot + + blockContext := NewEVMBlockContext(header, p.bc, nil) // can share blockContext within a block for efficiency + vmenv := vm.NewEVM(blockContext, vm.TxContext{}, updatedSlotDB, p.config, txReq.vmConfig) + txResult.evm, txResult.result, txResult.err = applyTransactionStageExecution(txReq.msg, + gpSlot, updatedSlotDB, vmenv) + + if txResult.err != nil { + log.Error("Stage Execution conflict redo, error", txResult.err) } } // goroutine unsafe operation will be handled from here for safety - gasConsumed := slotGasLimit - gpSlot.Gas() - if gasConsumed != result.UsedGas { + gasConsumed := txReq.gasLimit - txResult.gpSlot.Gas() + if gasConsumed != txResult.result.UsedGas { log.Error("gasConsumed != result.UsedGas mismatch", - "gasConsumed", gasConsumed, "result.UsedGas", result.UsedGas) + "gasConsumed", gasConsumed, "result.UsedGas", txResult.result.UsedGas) } - log.Debug("ok to finalize this TX", - "Slot", slotIndex, "txIndex", txIndex, "result.UsedGas", result.UsedGas, "txReq.usedGas", *txReq.usedGas) + log.Debug("ok to finalize this TX", "Slot", slotIndex, "txIndex", txIndex, + "result.UsedGas", txResult.result.UsedGas, "txReq.usedGas", *txReq.usedGas) + // ok, time to do finalize, stage2 should not be parallel - receipt, err = applyTransactionStageFinalization(evm, result, msg, p.config, slotDB, header, tx, txReq.usedGas, bloomProcessor) + 
txResult.receipt, txResult.err = applyTransactionStageFinalization(txResult.evm, txResult.result, + txReq.msg, p.config, txResult.slotDB, header, + txReq.tx, txReq.usedGas, txReq.bloomProcessor) - if result.Failed() { + if txResult.result.Failed() { // if Tx is reverted, all its state change will be discarded - log.Debug("TX reverted?", "Slot", slotIndex, "txIndex", txIndex, "result.Err", result.Err) - slotDB.RevertSlotDB(msg.From()) + log.Debug("TX reverted?", "Slot", slotIndex, "txIndex", txIndex, + "result.Err", txResult.result.Err) + txResult.slotDB.RevertSlotDB(txReq.msg.From()) } - return &ParallelTxResult{ - updateSlotDB: false, - slotIndex: slotIndex, - txReq: txReq, - receipt: receipt, - slotDB: slotDB, - err: err, - } + txResult.updateSlotDB = false + return txResult } func (p *ParallelStateProcessor) runSlotLoop(slotIndex int) { curSlot := p.slotState[slotIndex] for { // wait for new TxReq - txReq := <-curSlot.pendingTxReqChan + <-curSlot.pendingTxReqChan + // receive a dispatched message - log.Debug("SlotLoop received a new TxReq", "Slot", slotIndex, "txIndex", txReq.txIndex) // SlotDB create rational: // ** for a dispatched tx, // the slot should be idle, it is better to create a new SlotDB, since new Tx is not related to previous Tx // ** for a queued tx, // it is better to create a new SlotDB, since COW is used. 
- if txReq.slotDB == nil { - result := &ParallelTxResult{ - updateSlotDB: true, - slotIndex: slotIndex, - err: nil, + for _, txReq := range curSlot.pendingTxReqList { + log.Debug("SlotLoop received a new TxReq", "Slot", slotIndex, "txIndex", txReq.txIndex) + if txReq.slotDB == nil { + result := &ParallelTxResult{ + updateSlotDB: true, + slotIndex: slotIndex, + err: nil, + } + p.txResultChan <- result + txReq.slotDB = <-curSlot.slotdbChan } - p.txResultChan <- result - txReq.slotDB = <-curSlot.slotdbChan + result := p.executeInSlot(slotIndex, txReq) + curSlot.pendingConfirmChan <- result } - result := p.execInSlot(slotIndex, txReq) - log.Debug("SlotLoop the TxReq is done", "Slot", slotIndex, "err", result.err) - p.txResultChan <- result + } +} + +func (p *ParallelStateProcessor) runShadowSlotLoop(slotIndex int) { + curSlot := p.slotState[slotIndex] + for { + log.Debug("runShadowSlotLoop wait", "slotIndex", slotIndex) + // ParallelTxResult from pendingConfirmChan is not confirmed yet + unconfirmedResult := <-curSlot.pendingConfirmChan + + log.Debug("runShadowSlotLoop to confirm the TxResult from master slot", "Slot", slotIndex, "txIndex", unconfirmedResult.txReq.txIndex) + confirmedResult := p.executeInShadowSlot(slotIndex, unconfirmedResult) + + log.Debug("runShadowSlotLoop the TxReq is done", "Slot", slotIndex, "err", confirmedResult.err) + p.txResultChan <- confirmedResult } } @@ -834,6 +971,7 @@ func (p *ParallelStateProcessor) resetState(txNum int, statedb *state.StateDB) { p.mergedTxIndex = -1 p.debugErrorRedoNum = 0 p.debugConflictRedoNum = 0 + // p.txReqAccountSorted = make(map[common.Address][]*ParallelTxRequest) // fixme: to be reused? 
statedb.PrepareForParallel() @@ -847,7 +985,6 @@ func (p *ParallelStateProcessor) resetState(txNum int, statedb *state.StateDB) { }() for _, slot := range p.slotState { - slot.tailTxReq = nil slot.mergedChangeList = make([]state.SlotChangeList, 0) slot.pendingTxReqList = make([]*ParallelTxRequest, 0) } @@ -872,6 +1009,7 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat signer, _, bloomProcessor := p.preExecute(block, statedb, cfg, true) var waitTxChan, curTxChan chan struct{} + var txReqs []*ParallelTxRequest for i, tx := range block.Transactions() { if isPoSA { if isSystemTx, err := posa.IsSystemTransaction(tx, block.Header()); err != nil { @@ -883,7 +1021,10 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat } // can be moved it into slot for efficiency, but signer is not concurrent safe - msg, err := tx.AsMessage(signer) + // Parallel Execution 1.0&2.0 is for full sync mode, Nonce PreCheck is not necessary + // And since we will do out-of-order execution, the Nonce PreCheck could fail. + // We will disable it and leave it to Parallel 3.0 which is for validator mode + msg, err := tx.AsParallelMessage(signer) if err != nil { return statedb, nil, nil, 0, err } @@ -905,45 +1046,32 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat waitTxChan: waitTxChan, curTxChan: curTxChan, } - - // to optimize the for { for {} } loop code style? it is ok right now. - for { - if p.queueSameFromAddress(txReq) { - break - } - - if p.queueSameToAddress(txReq) { - break - } - // if idle slot available, just dispatch and process next tx. - if p.dispatchToIdleSlot(statedb, txReq) { - break - } - log.Debug("ProcessParallel no slot available, wait", "txIndex", txReq.txIndex) - // no idle slot, wait until a tx is executed and merged. 
- result := p.waitUntilNextTxDone(statedb, gp) - - // update tx result - if result.err != nil { - log.Warn("ProcessParallel a failed tx", "resultSlotIndex", result.slotIndex, - "resultTxIndex", result.txReq.txIndex, "result.err", result.err) - return statedb, nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", result.txReq.txIndex, result.txReq.tx.Hash().Hex(), result.err) - } - - commonTxs = append(commonTxs, result.txReq.tx) - receipts = append(receipts, result.receipt) - } + txReqs = append(txReqs, txReq) + // from := txReq.msg.From() + // p.txReqAccountSorted[from] = append(p.txReqAccountSorted[from], txReq) + // Generate TxReqUnit every 80() transaction? + // if (i + 1) % *(p.parallelNum *10) == 0 { + // p.txReqAccountSorted = make(map[common.Address][]*ParallelTxRequest) // fixme: memory reuse? + // } } + p.doStaticDispatch(statedb, txReqs) + for _, slot := range p.slotState { + slot.pendingTxReqChan <- struct{}{} + } + for { + if len(commonTxs)+len(systemTxs) == txNum { + break + } - // wait until all tx request are done - for len(commonTxs)+len(systemTxs) < txNum { result := p.waitUntilNextTxDone(statedb, gp) + // update tx result if result.err != nil { log.Warn("ProcessParallel a failed tx", "resultSlotIndex", result.slotIndex, "resultTxIndex", result.txReq.txIndex, "result.err", result.err) return statedb, nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", result.txReq.txIndex, result.txReq.tx.Hash().Hex(), result.err) } + commonTxs = append(commonTxs, result.txReq.tx) receipts = append(receipts, result.receipt) } diff --git a/core/types/transaction.go b/core/types/transaction.go index 74c011544b..9a6bebc42e 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -535,6 +535,14 @@ func (tx *Transaction) AsMessage(s Signer) (Message, error) { return msg, err } +// Parallel 1.0&2.0 will skip nonce check, since it is not necessary for sync mode. 
+// Parallel 3.0 will reenable it, nonce check for parallel execution will be designed. +func (tx *Transaction) AsParallelMessage(s Signer) (Message, error) { + msg, err := tx.AsMessage(s) + msg.checkNonce = false + return msg, err +} + func (m Message) From() common.Address { return m.from } func (m Message) To() *common.Address { return m.to } func (m Message) GasPrice() *big.Int { return m.gasPrice } diff --git a/core/vm/evm.go b/core/vm/evm.go index c7c8e0596c..1970f97910 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -475,7 +475,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } nonce := evm.StateDB.GetNonce(caller.Address()) evm.StateDB.SetNonce(caller.Address(), nonce+1) - evm.StateDB.NonceChanged(caller.Address()) + evm.StateDB.NonceChanged(caller.Address()) // fixme: nonce double -- // We add this to the access list _before_ taking a snapshot. Even if the creation fails, // the access-list change should not be rolled back if evm.chainRules.IsBerlin { diff --git a/core/vm/interface.go b/core/vm/interface.go index c3d99aaa76..f424ed9cb9 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -75,6 +75,8 @@ type StateDB interface { AddPreimage(common.Hash, []byte) ForEachStorage(common.Address, func(common.Hash, common.Hash) bool) error + + IsSlotDB() bool } // CallContext provides a basic interface for the EVM calling conventions. 
The EVM From f02c988a5cd23134a8fea2ede847093bd554f4e0 Mon Sep 17 00:00:00 2001 From: setunapo Date: Tue, 29 Mar 2022 22:36:23 +0800 Subject: [PATCH 03/10] WIP: implement unconfirmed db reference ** to reduce conflict rate ** light copy ** conflict detect --- core/blockchain.go | 19 +- core/state/journal.go | 8 +- core/state/state_object.go | 45 +- core/state/statedb.go | 1199 ++++++++++++++++++++++++++++-------- core/state_processor.go | 213 ++----- core/types/transaction.go | 4 +- core/vm/evm.go | 1 - core/vm/interface.go | 3 - 8 files changed, 1059 insertions(+), 433 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 9ed4317b13..417f3a0037 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -2113,14 +2113,17 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er var followupInterrupt uint32 // For diff sync, it may fallback to full sync, so we still do prefetch // parallel mode has a pipeline, similar to this prefetch, to save CPU we disable this prefetch for parallel - if !bc.parallelExecution { - if len(block.Transactions()) >= prefetchTxNumber { - throwaway := statedb.Copy() - go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) { - bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt) - }(time.Now(), block, throwaway, &followupInterrupt) - } - } + /* + // disable prefetch for parallel bugfix + if !bc.parallelExecution { + if len(block.Transactions()) >= prefetchTxNumber { + throwaway := statedb.Copy() + go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) { + bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt) + }(time.Now(), block, throwaway, &followupInterrupt) + } + } + */ //Process block using the parent state as reference point substart := time.Now() if bc.pipeCommit { diff --git a/core/state/journal.go b/core/state/journal.go index b3a2956f75..5afe8886bb 100644 --- 
a/core/state/journal.go +++ b/core/state/journal.go @@ -155,7 +155,13 @@ func (ch createObjectChange) dirtied() *common.Address { } func (ch resetObjectChange) revert(s *StateDB) { - s.SetStateObject(ch.prev) + if s.parallel.isSlotDB { + // ch.prev must be from dirtiedStateObjectsInSlot, put it back + s.parallel.dirtiedStateObjectsInSlot[ch.prev.address] = ch.prev + } else { + // ch.prev was got from main DB, put it back to main DB. + s.SetStateObject(ch.prev) + } if !ch.prevdestruct && s.snap != nil { delete(s.snapDestructs, ch.prev.address) } diff --git a/core/state/state_object.go b/core/state/state_object.go index ce8926609a..7adb5bdbe6 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -197,7 +197,7 @@ type Account struct { // newObject creates a state object. func newObject(db *StateDB, isParallel bool, address common.Address, data Account) *StateObject { if data.Balance == nil { - data.Balance = new(big.Int) + data.Balance = new(big.Int) // todo: why not common.Big0? } if data.CodeHash == nil { data.CodeHash = emptyCodeHash @@ -284,6 +284,7 @@ func (s *StateObject) GetState(db Database, key common.Hash) common.Hash { if dirty { return value } + // Otherwise return the entry's original value return s.GetCommittedState(db, key) } @@ -353,9 +354,12 @@ func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Has // 1) resurrect happened, and new slot values were set -- those should // have been handles via pendingStorage above. // 2) we don't have new values, and can deliver empty response back - if _, destructed := s.db.snapDestructs[s.address]; destructed { + s.db.snapParallelLock.RLock() + if _, destructed := s.db.snapDestructs[s.address]; destructed { // fixme: use sync.Map, instead of RWMutex? 
+ s.db.snapParallelLock.RUnlock() return common.Hash{} } + s.db.snapParallelLock.RUnlock() enc, err = s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key.Bytes())) } // If snapshot unavailable or reading from it failed, load from the database @@ -394,7 +398,14 @@ func (s *StateObject) SetState(db Database, key, value common.Hash) { return } // If the new value is the same as old, don't set - prev := s.GetState(db, key) + // In parallel mode, it has to get from StateDB, in case: + // a.the Slot did not set the key before and try to set it to `val_1` + // b.Unconfirmed DB has set the key to `val_2` + // c.if we use StateObject.GetState, and the key load from the main DB is `val_1` + // this `SetState could be skipped` + // d.Finally, the key's value will be `val_2`, while it should be `val_1` + // such as: https://bscscan.com/txs?block=2491181 + prev := s.db.GetState(s.address, key) if prev == value { return } @@ -404,6 +415,10 @@ func (s *StateObject) SetState(db Database, key, value common.Hash) { key: key, prevalue: prev, }) + if s.db.parallel.isSlotDB { + s.db.parallel.kvChangesInSlot[s.address][key] = struct{}{} // should be moved to here, after `s.db.GetState()` + } + s.setState(key, value) } @@ -484,7 +499,7 @@ func (s *StateObject) updateTrie(db Database) Trie { return true } - s.originStorage.StoreValue(k.(common.Hash), v.(common.Hash)) + s.setOriginStorage(key, value) var vs []byte if (value == common.Hash{}) { @@ -602,6 +617,19 @@ func (s *StateObject) setBalance(amount *big.Int) { // Return the gas back to the origin. 
Used by the Virtual machine or Closures func (s *StateObject) ReturnGas(gas *big.Int) {} +func (s *StateObject) lightCopy(db *StateDB) *StateObject { + stateObject := newObject(db, s.isParallel, s.address, s.data) + if s.trie != nil { + // fixme: no need to copy trie for light copy, since light copied object won't access trie DB + stateObject.trie = db.db.CopyTrie(s.trie) + } + stateObject.code = s.code + stateObject.suicided = false // should be false + stateObject.dirtyCode = s.dirtyCode // it is not used in slot, but keep it is ok + stateObject.deleted = false // should be false + return stateObject +} + func (s *StateObject) deepCopy(db *StateDB) *StateObject { stateObject := newObject(db, s.isParallel, s.address, s.data) if s.trie != nil { @@ -619,9 +647,12 @@ func (s *StateObject) deepCopy(db *StateDB) *StateObject { func (s *StateObject) MergeSlotObject(db Database, dirtyObjs *StateObject, keys StateKeys) { for key := range keys { - // better to do s.GetState(db, key) to load originStorage for this key? - // since originStorage was in dirtyObjs, but it works even originStorage miss the state object. - s.SetState(db, key, dirtyObjs.GetState(db, key)) + // In parallel mode, always GetState by StateDB, not by StateObject directly, + // since it the KV could exist in unconfirmed DB. + // But here, it should be ok, since the KV should be changed and valid in the SlotDB, + // we still do GetState by StateDB, it is not an issue. 
+ val := dirtyObjs.db.GetState(s.address, key) + s.SetState(db, key, val) } } diff --git a/core/state/statedb.go b/core/state/statedb.go index a38dca5aef..f46e6b3556 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -18,6 +18,7 @@ package state import ( + "bytes" "errors" "fmt" "math/big" @@ -94,6 +95,13 @@ func (s *StateDB) loadStateObj(addr common.Address) (*StateObject, bool) { // storeStateObj is the entry for storing state object to stateObjects in StateDB or stateObjects in parallel func (s *StateDB) storeStateObj(addr common.Address, stateObject *StateObject) { if s.isParallel { + // When a state object is stored into s.parallel.stateObjects, + // it belongs to base StateDB, it is confirmed and valid. + if s.parallel.isSlotDB { + // the object could be create in SlotDB, if it got the object from DB and + // update it to the shared `s.parallel.stateObjects`` + stateObject.db = s.parallel.baseStateDB + } s.parallel.stateObjects.Store(addr, stateObject) } else { s.stateObjects[addr] = stateObject @@ -109,21 +117,10 @@ func (s *StateDB) deleteStateObj(addr common.Address) { } } -// For parallel mode only, keep the change list for later conflict detect -type SlotChangeList struct { - TxIndex int - StateObjectSuicided map[common.Address]struct{} - StateChangeSet map[common.Address]StateKeys - BalanceChangeSet map[common.Address]struct{} - CodeChangeSet map[common.Address]struct{} - AddrStateChangeSet map[common.Address]struct{} - NonceChangeSet map[common.Address]struct{} -} - // For parallel mode only type ParallelState struct { - isSlotDB bool // isSlotDB denotes StateDB is used in slot - + isSlotDB bool // isSlotDB denotes StateDB is used in slot + SlotIndex int // fixme: to be removed // stateObjects holds the state objects in the base slot db // the reason for using stateObjects instead of stateObjects on the outside is // we need a thread safe map to hold state objects since there are many slots will read @@ -131,28 +128,37 @@ type 
ParallelState struct { // And we will merge all the changes made by the concurrent slot into it. stateObjects *StateObjectSyncMap - baseTxIndex int // slotDB is created base on this tx index. + baseStateDB *StateDB // for parallel mode, there will be a base StateDB in dispatcher routine. + baseTxIndex int // slotDB is created base on this tx index. dirtiedStateObjectsInSlot map[common.Address]*StateObject - // for conflict check + unconfirmedDBInShot map[int]*StateDB // do unconfirmed reference in same slot. + + // we will record the read detail for conflict check and + // the changed addr or key for object merge, the changed detail can be achieved from the dirty object + nonceChangesInSlot map[common.Address]struct{} + nonceReadsInSlot map[common.Address]uint64 balanceChangesInSlot map[common.Address]struct{} // the address's balance has been changed - balanceReadsInSlot map[common.Address]struct{} // the address's balance has been read and used. - codeReadsInSlot map[common.Address]struct{} - codeChangesInSlot map[common.Address]struct{} - stateReadsInSlot map[common.Address]StateKeys - stateChangesInSlot map[common.Address]StateKeys // no need record value + balanceReadsInSlot map[common.Address]*big.Int // the address's balance has been read and used. 
+ // codeSize can be derived based on code, but codeHash can not directly derived based on code + // - codeSize is 0 for address not exist or empty code + // - codeHash is `common.Hash{}` for address not exist, emptyCodeHash(`Keccak256Hash(nil)`) for empty code + // so we use codeReadsInSlot & codeHashReadsInSlot to keep code and codeHash, codeSize is derived from code + codeReadsInSlot map[common.Address][]byte // empty if address not exist or no code in this address + codeHashReadsInSlot map[common.Address]common.Hash + codeChangesInSlot map[common.Address]struct{} + kvReadsInSlot map[common.Address]Storage + kvChangesInSlot map[common.Address]StateKeys // value will be kept in dirtiedStateObjectsInSlot // Actions such as SetCode, Suicide will change address's state. // Later call like Exist(), Empty(), HasSuicided() depend on the address's state. - addrStateReadsInSlot map[common.Address]struct{} - addrStateChangesInSlot map[common.Address]struct{} - stateObjectsSuicidedInSlot map[common.Address]struct{} - nonceChangesInSlot map[common.Address]struct{} + addrStateReadsInSlot map[common.Address]bool // true: exist, false: not exist or deleted + addrStateChangesInSlot map[common.Address]bool // true: created, false: deleted + // Transaction will pay gas fee to system address. // Parallel execution will clear system address's balance at first, in order to maintain transaction's // gas fee value. 
Normal transaction will access system address twice, otherwise it means the transaction // needs real system address's balance, the transaction will be marked redo with keepSystemAddressBalance = true systemAddress common.Address systemAddressOpsCount int - nonceIncreased uint64 // create contract keepSystemAddressBalance bool // we may need to redo for some specific reasons, like we read the wrong state and need to panic in sequential mode in SubRefund @@ -181,12 +187,13 @@ type StateDB struct { fullProcessed bool pipeCommit bool - snapMux sync.Mutex - snaps *snapshot.Tree - snap snapshot.Snapshot - snapDestructs map[common.Address]struct{} - snapAccounts map[common.Address][]byte - snapStorage map[common.Address]map[string][]byte + snapMux sync.Mutex + snaps *snapshot.Tree + snap snapshot.Snapshot + snapParallelLock sync.RWMutex // for parallel mode, for main StateDB, slot will read snapshot, while processor will write. + snapDestructs map[common.Address]struct{} + snapAccounts map[common.Address][]byte + snapStorage map[common.Address]map[string][]byte // This map holds 'live' objects, which will get modified while processing a state transition. stateObjects map[common.Address]*StateObject @@ -246,13 +253,24 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) // NewSlotDB creates a new State DB based on the provided StateDB. // With parallel, each execution slot would have its own StateDB. 
-func NewSlotDB(db *StateDB, systemAddr common.Address, baseTxIndex int, keepSystem bool) *StateDB { +func NewSlotDB(db *StateDB, systemAddr common.Address, txIndex int, baseTxIndex int, keepSystem bool, + unconfirmedDBs *sync.Map /*map[int]*StateDB*/) *StateDB { slotDB := db.CopyForSlot() + slotDB.txIndex = txIndex slotDB.originalRoot = db.originalRoot + slotDB.parallel.baseStateDB = db slotDB.parallel.baseTxIndex = baseTxIndex slotDB.parallel.systemAddress = systemAddr slotDB.parallel.systemAddressOpsCount = 0 slotDB.parallel.keepSystemAddressBalance = keepSystem + slotDB.storagePool = NewStoragePool() + slotDB.EnableWriteOnSharedStorage() + for index := baseTxIndex + 1; index < slotDB.txIndex; index++ { // txIndex + unconfirmedDB, ok := unconfirmedDBs.Load(index) + if ok { + slotDB.parallel.unconfirmedDBInShot[index] = unconfirmedDB.(*StateDB) + } + } // All transactions will pay gas fee to the systemAddr at the end, this address is // deemed to conflict, we handle it specially, clear it now and set it back to the main @@ -279,13 +297,16 @@ func NewWithSharedPool(root common.Hash, db Database, snaps *snapshot.Tree) (*St func newStateDB(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) { sdb := &StateDB{ - db: db, - originalRoot: root, - snaps: snaps, - stateObjects: make(map[common.Address]*StateObject, defaultNumOfSlots), - parallel: ParallelState{}, + db: db, + originalRoot: root, + snaps: snaps, + stateObjects: make(map[common.Address]*StateObject, defaultNumOfSlots), + parallel: ParallelState{ + SlotIndex: -1, + }, stateObjectsPending: make(map[common.Address]struct{}, defaultNumOfSlots), stateObjectsDirty: make(map[common.Address]struct{}, defaultNumOfSlots), + txIndex: -1, logs: make(map[common.Hash][]*types.Log, defaultNumOfSlots), preimages: make(map[common.Hash][]byte), journal: newJournal(), @@ -306,28 +327,36 @@ func newStateDB(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, return nil, err } sdb.trie = tr + 
sdb.EnableWriteOnSharedStorage() // fixme:remove when s.originStorage[key] is enabled return sdb, nil } func (s *StateDB) getStateObjectFromStateObjects(addr common.Address) (*StateObject, bool) { - if s.parallel.isSlotDB { - obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr] - if ok { - return obj, ok - } - } return s.loadStateObj(addr) } -// RevertSlotDB keep its read list for conflict detect and discard its state changes except its own balance change, -// if the transaction execution is reverted, +// RevertSlotDB keep the Read list for conflict detect, +// discard all state changes except: +// - nonce and balance of from address +// - balance of system address: will be used on merge to update SystemAddress's balance func (s *StateDB) RevertSlotDB(from common.Address) { - s.parallel.stateObjectsSuicidedInSlot = make(map[common.Address]struct{}) - s.parallel.stateChangesInSlot = make(map[common.Address]StateKeys) + s.parallel.kvChangesInSlot = make(map[common.Address]StateKeys) + + // balance := s.parallel.balanceChangesInSlot[from] + s.parallel.nonceChangesInSlot = make(map[common.Address]struct{}) s.parallel.balanceChangesInSlot = make(map[common.Address]struct{}, 1) + s.parallel.addrStateChangesInSlot = make(map[common.Address]bool) // 0: created, 1: deleted + + selfStateObject := s.parallel.dirtiedStateObjectsInSlot[from] + systemAddress := s.parallel.systemAddress + systemStateObject := s.parallel.dirtiedStateObjectsInSlot[systemAddress] + s.parallel.dirtiedStateObjectsInSlot = make(map[common.Address]*StateObject, 2) + // keep these elements + s.parallel.dirtiedStateObjectsInSlot[from] = selfStateObject + s.parallel.dirtiedStateObjectsInSlot[systemAddress] = systemStateObject s.parallel.balanceChangesInSlot[from] = struct{}{} - s.parallel.addrStateChangesInSlot = make(map[common.Address]struct{}) - s.parallel.nonceChangesInSlot = make(map[common.Address]struct{}) + s.parallel.balanceChangesInSlot[systemAddress] = struct{}{} + 
s.parallel.nonceChangesInSlot[from] = struct{}{} } // PrepareForParallel prepares for state db to be used in parallel execution mode. @@ -340,7 +369,7 @@ func (s *StateDB) PrepareForParallel() { // finalized(dirty -> pending) on execution slot, the execution results should be // merged back to the main StateDB. // And it will return and keep the slot's change list for later conflict detect. -func (s *StateDB) MergeSlotDB(slotDb *StateDB, slotReceipt *types.Receipt, txIndex int) SlotChangeList { +func (s *StateDB) MergeSlotDB(slotDb *StateDB, slotReceipt *types.Receipt, txIndex int) { // receipt.Logs use unified log index within a block // align slotDB's log index to the block stateDB's logSize for _, l := range slotReceipt.Logs { @@ -368,25 +397,35 @@ func (s *StateDB) MergeSlotDB(slotDb *StateDB, slotReceipt *types.Receipt, txInd } // stateObjects: KV, balance, nonce... - dirtyObj, ok := slotDb.getStateObjectFromStateObjects(addr) + dirtyObj, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr] if !ok { - log.Error("parallel merge, but dirty object not exist!", "txIndex:", slotDb.txIndex, "addr", addr) + log.Error("parallel merge, but dirty object not exist!", "SlotIndex", slotDb.parallel.SlotIndex, "txIndex:", slotDb.txIndex, "addr", addr) continue } mainObj, exist := s.loadStateObj(addr) - if !exist { + if !exist { // fixme: it is also state change // addr not exist on main DB, do ownership transfer - dirtyObj.db = s - dirtyObj.finalise(true) // true: prefetch on dispatcher - s.storeStateObj(addr, dirtyObj) - delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership + // dirtyObj.db = s + // dirtyObj.finalise(true) // true: prefetch on dispatcher + mainObj = dirtyObj.deepCopy(s) + mainObj.finalise(true) + s.storeStateObj(addr, mainObj) + // fixme: should not delete, would cause unconfirmed DB incorrect? + // delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership, fixme: shared read? 
+ if dirtyObj.deleted { + // remove the addr from snapAccounts&snapStorage only when object is deleted. + // "deleted" is not equal to "snapDestructs", since createObject() will add an addr for + // snapDestructs to destroy previous object, while it will keep the addr in snapAccounts & snapStorage + delete(s.snapAccounts, addr) + delete(s.snapStorage, addr) + } } else { // addr already in main DB, do merge: balance, KV, code, State(create, suicide) // can not do copy or ownership transfer directly, since dirtyObj could have outdated // data(may be updated within the conflict window) var newMainObj = mainObj // we don't need to copy the object since the storages are thread safe - if _, created := slotDb.parallel.addrStateChangesInSlot[addr]; created { + if _, ok := slotDb.parallel.addrStateChangesInSlot[addr]; ok { // there are 3 kinds of state change: // 1.Suicide // 2.Empty Delete // 3.createObject // a.AddBalance,SetState to an unexist or deleted(suicide, empty delete) address. // b.CreateAccount: like DAO the fork, regenerate a account carry its balance without KV // For these state change, do ownership transafer for efficiency: - log.Debug("MergeSlotDB state object merge: addr state change") - dirtyObj.db = s - newMainObj = dirtyObj - delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership + // dirtyObj.db = s + // newMainObj = dirtyObj + newMainObj = dirtyObj.deepCopy(s) + // should not delete, would cause unconfirmed DB incorrect. + // delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership, fixme: shared read? if dirtyObj.deleted { // remove the addr from snapAccounts&snapStorage only when object is deleted. 
// "deleted" is not equal to "snapDestructs", since createObject() will add an addr for @@ -409,24 +449,22 @@ func (s *StateDB) MergeSlotDB(slotDb *StateDB, slotReceipt *types.Receipt, txInd // deepCopy a temporary *StateObject for safety, since slot could read the address, // dispatch should avoid overwrite the StateObject directly otherwise, it could // crash for: concurrent map iteration and map write + if _, balanced := slotDb.parallel.balanceChangesInSlot[addr]; balanced { - log.Debug("merge state object: Balance", - "newMainObj.Balance()", newMainObj.Balance(), - "dirtyObj.Balance()", dirtyObj.Balance()) newMainObj.SetBalance(dirtyObj.Balance()) } if _, coded := slotDb.parallel.codeChangesInSlot[addr]; coded { - log.Debug("merge state object: Code") newMainObj.code = dirtyObj.code newMainObj.data.CodeHash = dirtyObj.data.CodeHash newMainObj.dirtyCode = true } - if keys, stated := slotDb.parallel.stateChangesInSlot[addr]; stated { - log.Debug("merge state object: KV") + if keys, stated := slotDb.parallel.kvChangesInSlot[addr]; stated { newMainObj.MergeSlotObject(s.db, dirtyObj, keys) } - // dirtyObj.Nonce() should not be less than newMainObj - newMainObj.setNonce(dirtyObj.Nonce()) + if _, nonced := slotDb.parallel.nonceChangesInSlot[addr]; nonced { + // dirtyObj.Nonce() should not be less than newMainObj + newMainObj.setNonce(dirtyObj.Nonce()) + } } newMainObj.finalise(true) // true: prefetch on dispatcher // update the object @@ -461,7 +499,9 @@ func (s *StateDB) MergeSlotDB(slotDb *StateDB, slotReceipt *types.Receipt, txInd // One transaction add balance 0 to an empty address, will delete it(delete empty is enabled). 
// While another concurrent transaction could add a none-zero balance to it, make it not empty // We fixed it by add a addr state read record for add balance 0 + s.snapParallelLock.Lock() s.snapDestructs[k] = struct{}{} + s.snapParallelLock.Unlock() } // slotDb.snapAccounts should be empty, comment out and to be deleted later @@ -477,24 +517,14 @@ func (s *StateDB) MergeSlotDB(slotDb *StateDB, slotReceipt *types.Receipt, txInd // s.snapStorage[k] = temp // } } - - // to create a new object to store change list for conflict detect, - // since slot db reuse is disabled, we do not need to do copy. - changeList := SlotChangeList{ - TxIndex: txIndex, - StateObjectSuicided: slotDb.parallel.stateObjectsSuicidedInSlot, - StateChangeSet: slotDb.parallel.stateChangesInSlot, - BalanceChangeSet: slotDb.parallel.balanceChangesInSlot, - CodeChangeSet: slotDb.parallel.codeChangesInSlot, - AddrStateChangeSet: slotDb.parallel.addrStateChangesInSlot, - NonceChangeSet: slotDb.parallel.nonceChangesInSlot, - } - return changeList } func (s *StateDB) EnableWriteOnSharedStorage() { s.writeOnSharedStorage = true } +func (s *StateDB) SetSlotIndex(index int) { + s.parallel.SlotIndex = index +} // StartPrefetcher initializes a new trie prefetcher to pull in nodes from the // state trie concurrently while the state is mutated so that when we reach the @@ -644,40 +674,367 @@ func (s *StateDB) SubRefund(gas uint64) { s.refund -= gas } +// For Parallel Execution Mode, it can be seen as Penetrated Access: +// ------------------------------------------------------- +// | BaseTxIndex | Unconfirmed Txs... 
| Current TxIndex | +// ------------------------------------------------------- +// Access from the unconfirmed DB with range&priority: txIndex -1(previous tx) -> baseTxIndex + 1 +func (s *StateDB) getBalanceFromUnconfirmedDB(addr common.Address) *big.Int { + if addr == s.parallel.systemAddress { + // never get systemaddress from unconfirmed DB + return nil + } + + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + // 1.Refer the state of address, exist or not in dirtiedStateObjectsInSlot + if obj, exist := db.parallel.dirtiedStateObjectsInSlot[addr]; exist { + balanceHit := false + if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { + balanceHit = true + } + if _, exist := db.parallel.balanceChangesInSlot[addr]; exist { // only changed balance is reliable + balanceHit = true + } + if !balanceHit { + continue + } + balance := obj.Balance() + if obj.deleted { + balance = common.Big0 + } + return balance + } + } + } + return nil +} + +// Similar to getBalanceFromUnconfirmedDB +func (s *StateDB) getNonceFromUnconfirmedDB(addr common.Address) (uint64, bool) { + if addr == s.parallel.systemAddress { + // never get systemaddress from unconfirmed DB + return 0, false + } + + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if unconfirmedDb, ok := s.parallel.unconfirmedDBInShot[i]; ok { + nonceHit := false + if _, ok := unconfirmedDb.parallel.addrStateChangesInSlot[addr]; ok { + nonceHit = true + } else if _, ok := unconfirmedDb.parallel.nonceChangesInSlot[addr]; ok { + nonceHit = true + } + if !nonceHit { + // nonce refer not hit, try next unconfirmedDb + continue + } + // nonce hit, return the nonce + obj := unconfirmedDb.parallel.dirtiedStateObjectsInSlot[addr] + if obj == nil { + // could not exist, if it is changed but reverted + // fixme: revert should remove the change record + log.Debug("Get nonce from UnconfirmedDB, changed but object not exist, ", + "txIndex", s.txIndex, 
"referred txIndex", i, "addr", addr) + continue + } + nonce := obj.Nonce() + // deleted object with nonce == 0 + if obj.deleted { + nonce = 0 + } + return nonce, true + } + } + return 0, false +} + +// Similar to getBalanceFromUnconfirmedDB +// It is not only for code, but also codeHash and codeSize, we return the *StateObject for convienence. +func (s *StateDB) getCodeFromUnconfirmedDB(addr common.Address) ([]byte, bool) { + if addr == s.parallel.systemAddress { + // never get systemaddress from unconfirmed DB + return nil, false + } + + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + codeHit := false + if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { + codeHit = true + } + if _, exist := db.parallel.codeChangesInSlot[addr]; exist { + codeHit = true + } + if !codeHit { + // try next unconfirmedDb + continue + } + obj := db.parallel.dirtiedStateObjectsInSlot[addr] + if obj == nil { + // could not exist, if it is changed but reverted + // fixme: revert should remove the change record + log.Debug("Get code from UnconfirmedDB, changed but object not exist, ", + "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) + continue + } + code := obj.Code(s.db) + if obj.deleted { + code = nil + } + return code, true + } + } + return nil, false +} + +// Similar to getCodeFromUnconfirmedDB +// but differ when address is deleted or not exist +func (s *StateDB) getCodeHashFromUnconfirmedDB(addr common.Address) (common.Hash, bool) { + if addr == s.parallel.systemAddress { + // never get systemaddress from unconfirmed DB + return common.Hash{}, false + } + + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + hashHit := false + if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { + hashHit = true + } + if _, exist := db.parallel.codeChangesInSlot[addr]; exist { + hashHit = true + } + if !hashHit { + // try next 
unconfirmedDb + continue + } + + obj := db.parallel.dirtiedStateObjectsInSlot[addr] + if obj == nil { + // could not exist, if it is changed but reverted + // fixme: revert should remove the change record + log.Debug("Get codeHash from UnconfirmedDB, changed but object not exist, ", + "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) + continue + } + codeHash := common.Hash{} + if !obj.deleted { + codeHash = common.BytesToHash(obj.CodeHash()) + } + return codeHash, true + } + } + return common.Hash{}, false +} + +// Similar to getCodeFromUnconfirmedDB +// It is for address state check of: Exist(), Empty() and HasSuicided() +// Since the unconfirmed DB should have done Finalise() with `deleteEmptyObjects = true` +// If the dirty address is empty or suicided, it will be marked as deleted, so we only need to return `deleted` or not. +func (s *StateDB) getAddrStateFromUnconfirmedDB(addr common.Address) (deleted bool, exist bool) { + if addr == s.parallel.systemAddress { + // never get systemaddress from unconfirmed DB + return false, false + } + + // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + if exist, ok := db.parallel.addrStateChangesInSlot[addr]; ok { + if _, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + // could not exist, if it is changed but reverted + // fixme: revert should remove the change record + log.Debug("Get addr State from UnconfirmedDB, changed but object not exist, ", + "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) + continue + } + + return exist, true + } + } + } + return false, false +} + +func (s *StateDB) getKVFromUnconfirmedDB(addr common.Address, key common.Hash) (common.Hash, bool) { + // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := 
s.parallel.unconfirmedDBInShot[i]; ok { + if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { // if deleted on merge, can get from main StateDB, ok but fixme: concurrent safe + if obj.deleted { + return common.Hash{}, true + } + if _, ok := db.parallel.kvChangesInSlot[addr]; ok { + if val, exist := obj.dirtyStorage.GetValue(key); exist { + return val, true + } + if val, exist := obj.pendingStorage.GetValue(key); exist { // fixme: can be removed + log.Error("Get KV from Unconfirmed StateDB, in pending", + "my txIndex", s.txIndex, "DB's txIndex", i, "addr", addr, + "key", key, "val", val) + return val, true + } + } + } + } + } + return common.Hash{}, false +} + +func (s *StateDB) getStateObjectFromUnconfirmedDB(addr common.Address) (*StateObject, bool) { + // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { // if deleted on merge, can get from main StateDB, ok but fixme: concurrent safe + return obj, true + } + } + } + return nil, false +} + // Exist reports whether the given account address exists in the state. // Notably this also returns true for suicided accounts. 
func (s *StateDB) Exist(addr common.Address) bool { - return s.getStateObject(addr) != nil + if s.parallel.isSlotDB { + // 1.Try to get from dirty + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // dirty object should not be deleted, since deleted is only flagged on finalise + // and if it is suicided in contract call, suicide is taken as exist until it is finalised + // todo: add a check here, to be removed later + if obj.deleted || obj.suicided { + log.Error("Exist in dirty, but marked as deleted or suicided", + "txIndex", s.txIndex, "baseTxIndex:", s.parallel.baseTxIndex) + } + return true + } + // 2.Try to get from uncomfirmed & main DB + // 2.1 Already read before + if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok { + return exist + } + // 2.2 Try to get from unconfirmed DB if exist + if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { + s.parallel.addrStateReadsInSlot[addr] = exist // update and cache + return exist + } + } + // 3.Try to get from main StateDB + exist := s.getStateObjectNoSlot(addr) != nil + if s.parallel.isSlotDB { + s.parallel.addrStateReadsInSlot[addr] = exist // update and cache + } + return exist } // Empty returns whether the state object is either non-existent // or empty according to the EIP161 specification (balance = nonce = code = 0) func (s *StateDB) Empty(addr common.Address) bool { - so := s.getStateObject(addr) - return so == nil || so.empty() + if s.parallel.isSlotDB { + // 1.Try to get from dirty + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // dirty object is light copied and fixup on need, + // empty could be wrong, except it is created with this TX + if _, ok := s.parallel.addrStateChangesInSlot[addr]; ok { + return obj.empty() + } + // so we have to check it manually + // empty means: Nonce == 0 && Balance == 0 && CodeHash == emptyCodeHash + if s.GetNonce(addr) != 0 { + return false + } + if s.GetBalance(addr).Sign() != 0 { + return false + } + codeHash := 
s.GetCodeHash(addr) + return bytes.Equal(codeHash.Bytes(), emptyCodeHash) // code is empty, the object is empty + } + // 2.Try to get from uncomfirmed & main DB + // 2.1 Already read before + if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok { + // exist means not empty + return !exist + } + // 2.2 Try to get from unconfirmed DB if exist + if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { + s.parallel.addrStateReadsInSlot[addr] = exist // update and cache + return !exist + } + } + + so := s.getStateObjectNoSlot(addr) + empty := (so == nil || so.empty()) + if s.parallel.isSlotDB { + s.parallel.addrStateReadsInSlot[addr] = !empty // update and cache + } + return empty } // GetBalance retrieves the balance from the given address or 0 if object not found +// GetFrom the dirty list => from unconfirmed DB => get from main stateDB func (s *StateDB) GetBalance(addr common.Address) *big.Int { if s.parallel.isSlotDB { - s.parallel.balanceReadsInSlot[addr] = struct{}{} if addr == s.parallel.systemAddress { s.parallel.systemAddressOpsCount++ } + // 1.Try to get from dirty + if _, ok := s.parallel.balanceChangesInSlot[addr]; ok { + obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot + return obj.Balance() + } + // 2.Try to get from uncomfirmed DB or main DB + // 2.1 Already read before + if balance, ok := s.parallel.balanceReadsInSlot[addr]; ok { + return balance + } + // 2.2 Try to get from unconfirmed DB if exist + if balance := s.getBalanceFromUnconfirmedDB(addr); balance != nil { + s.parallel.balanceReadsInSlot[addr] = balance + return balance + } } - stateObject := s.getStateObject(addr) + // 3. 
Try to get from main StateObject + balance := common.Big0 + stateObject := s.getStateObjectNoSlot(addr) if stateObject != nil { - return stateObject.Balance() + balance = stateObject.Balance() } - return common.Big0 + if s.parallel.isSlotDB { + s.parallel.balanceReadsInSlot[addr] = balance + } + return balance } func (s *StateDB) GetNonce(addr common.Address) uint64 { - stateObject := s.getStateObject(addr) + if s.parallel.isSlotDB { + // 1.Try to get from dirty + if _, ok := s.parallel.nonceChangesInSlot[addr]; ok { + obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist + return obj.Nonce() + } + // 2.Try to get from unconfirmed DB or main DB + // 2.1 Already read before + if nonce, ok := s.parallel.nonceReadsInSlot[addr]; ok { + return nonce + } + // 2.2 Try to get from unconfirmed DB if exist + if nonce, ok := s.getNonceFromUnconfirmedDB(addr); ok { + s.parallel.nonceReadsInSlot[addr] = nonce + return nonce + } + } + // 3.Try to get from main StateDB + var nonce uint64 = 0 + stateObject := s.getStateObjectNoSlot(addr) if stateObject != nil { - return stateObject.Nonce() + nonce = stateObject.Nonce() } - return 0 + if s.parallel.isSlotDB { + s.parallel.nonceReadsInSlot[addr] = nonce + } + + return nonce } // TxIndex returns the current transaction index set by Prepare. @@ -690,29 +1047,95 @@ func (s *StateDB) BlockHash() common.Hash { return s.bhash } -func (s *StateDB) IsSlotDB() bool { - return s.parallel.isSlotDB -} - // BaseTxIndex returns the tx index that slot db based. 
func (s *StateDB) BaseTxIndex() int { return s.parallel.baseTxIndex } -func (s *StateDB) CodeReadsInSlot() map[common.Address]struct{} { - return s.parallel.codeReadsInSlot -} - -func (s *StateDB) AddressReadsInSlot() map[common.Address]struct{} { - return s.parallel.addrStateReadsInSlot -} - -func (s *StateDB) StateReadsInSlot() map[common.Address]StateKeys { - return s.parallel.stateReadsInSlot -} +func (s *StateDB) IsParallelReadsValid() bool { + slotDB := s + if !slotDB.parallel.isSlotDB { + log.Error("IsSlotDBReadsValid slotDB should be slot DB", "txIndex", slotDB.txIndex) + return false + } -func (s *StateDB) BalanceReadsInSlot() map[common.Address]struct{} { - return s.parallel.balanceReadsInSlot + mainDB := slotDB.parallel.baseStateDB + if mainDB.parallel.isSlotDB { + log.Error("IsSlotDBReadsValid s should be main DB", "txIndex", slotDB.txIndex) + return false + } + // for nonce + for addr, nonceSlot := range slotDB.parallel.nonceReadsInSlot { + nonceMain := mainDB.GetNonce(addr) + if nonceSlot != nonceMain { + log.Debug("IsSlotDBReadsValid nonce read is invalid", "addr", addr, + "nonceSlot", nonceSlot, "nonceMain", nonceMain, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + // balance + for addr, balanceSlot := range slotDB.parallel.balanceReadsInSlot { + balanceMain := mainDB.GetBalance(addr) + if balanceSlot.Cmp(balanceMain) != 0 { + log.Debug("IsSlotDBReadsValid balance read is invalid", "addr", addr, + "balanceSlot", balanceSlot, "balanceMain", balanceMain, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + // check code + for addr, codeSlot := range slotDB.parallel.codeReadsInSlot { + codeMain := mainDB.GetCode(addr) + if !bytes.Equal(codeSlot, codeMain) { + log.Debug("IsSlotDBReadsValid code read is invalid", "addr", addr, + "len codeSlot", len(codeSlot), "len codeMain", len(codeMain), + "txIndex", slotDB.txIndex, "baseTxIndex", 
slotDB.parallel.baseTxIndex) + return false + } + } + // check codeHash + for addr, codeHashSlot := range slotDB.parallel.codeHashReadsInSlot { + codeHashMain := mainDB.GetCodeHash(addr) + if !bytes.Equal(codeHashSlot.Bytes(), codeHashMain.Bytes()) { + log.Debug("IsSlotDBReadsValid codehash read is invalid", "addr", addr, + "codeHashSlot", codeHashSlot, "codeHashMain", codeHashMain, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + // check KV + for addr, slotStorage := range slotDB.parallel.kvReadsInSlot { + conflict := false + slotStorage.Range(func(keySlot, valSlot interface{}) bool { + valMain := mainDB.GetState(addr, keySlot.(common.Hash)) + if !bytes.Equal(valSlot.(common.Hash).Bytes(), valMain.Bytes()) { + log.Debug("IsSlotDBReadsValid KV read is invalid", "addr", addr, + "key", keySlot.(common.Hash), "valSlot", valSlot.(common.Hash), "valMain", valMain, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + conflict = true + return false // return false, Range will be terminated. 
+ } + return true // return true, Range will try next KV + }) + if conflict { + return false + } + } + // addr state check + for addr, stateSlot := range slotDB.parallel.addrStateReadsInSlot { + stateMain := false // addr not exist + if mainDB.getStateObjectNoSlot(addr) != nil { + stateMain = true // addr exist in main DB + } + if stateSlot != stateMain { + log.Debug("IsSlotDBReadsValid addrState read invalid(true: exist, false: not exist)", + "addr", addr, "stateSlot", stateSlot, "stateMain", stateMain, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + return true } // For most of the transactions, systemAddressOpsCount should be 2: @@ -729,60 +1152,161 @@ func (s *StateDB) NeedsRedo() bool { return s.parallel.needsRedo } -func (s *StateDB) NonceIncreased() uint64 { - return s.parallel.nonceIncreased -} - func (s *StateDB) GetCode(addr common.Address) []byte { if s.parallel.isSlotDB { - s.parallel.codeReadsInSlot[addr] = struct{}{} + // 1.Try to get from dirty + if _, ok := s.parallel.codeChangesInSlot[addr]; ok { + obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot + code := obj.Code(s.db) + return code + } + // 2.Try to get from uncomfirmed DB or main DB + // 2.1 Already read before + if code, ok := s.parallel.codeReadsInSlot[addr]; ok { + return code + } + // 2.2 Try to get from unconfirmed DB if exist + if code, ok := s.getCodeFromUnconfirmedDB(addr); ok { + s.parallel.codeReadsInSlot[addr] = code + return code + } } - stateObject := s.getStateObject(addr) + // 3. 
Try to get from main StateObejct + stateObject := s.getStateObjectNoSlot(addr) + var code []byte if stateObject != nil { - return stateObject.Code(s.db) + code = stateObject.Code(s.db) } - return nil + if s.parallel.isSlotDB { + s.parallel.codeReadsInSlot[addr] = code + } + return code } func (s *StateDB) GetCodeSize(addr common.Address) int { if s.parallel.isSlotDB { - s.parallel.codeReadsInSlot[addr] = struct{}{} // code size is part of code + // 1.Try to get from dirty + if _, ok := s.parallel.codeChangesInSlot[addr]; ok { + obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot + return obj.CodeSize(s.db) + } + // 2.Try to get from uncomfirmed DB or main DB + // 2.1 Already read before + if code, ok := s.parallel.codeReadsInSlot[addr]; ok { + return len(code) // len(nil) is 0 too + } + // 2.2 Try to get from unconfirmed DB if exist + if code, ok := s.getCodeFromUnconfirmedDB(addr); ok { + s.parallel.codeReadsInSlot[addr] = code + return len(code) // len(nil) is 0 too + } } - stateObject := s.getStateObject(addr) + // 3. 
Try to get from main StateObejct + var codeSize int = 0 + var code []byte + stateObject := s.getStateObjectNoSlot(addr) + if stateObject != nil { - return stateObject.CodeSize(s.db) + code = stateObject.Code(s.db) + codeSize = stateObject.CodeSize(s.db) + } + if s.parallel.isSlotDB { + s.parallel.codeReadsInSlot[addr] = code } - return 0 + return codeSize } +// return value of GetCodeHash: +// - common.Hash{}: the address does not exist +// - emptyCodeHash: the address exist, but code is empty +// - others: the address exist, and code is not empty func (s *StateDB) GetCodeHash(addr common.Address) common.Hash { if s.parallel.isSlotDB { - s.parallel.codeReadsInSlot[addr] = struct{}{} // code hash is part of code + // 1.Try to get from dirty + if _, ok := s.parallel.codeChangesInSlot[addr]; ok { + obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot + return common.BytesToHash(obj.CodeHash()) + } + // 2.Try to get from uncomfirmed DB or main DB + // 2.1 Already read before + if codeHash, ok := s.parallel.codeHashReadsInSlot[addr]; ok { + return codeHash + } + // 2.2 Try to get from unconfirmed DB if exist + if codeHash, ok := s.getCodeHashFromUnconfirmedDB(addr); ok { + s.parallel.codeHashReadsInSlot[addr] = codeHash + return codeHash + } } - stateObject := s.getStateObject(addr) - if stateObject == nil { - return common.Hash{} + // 3. Try to get from main StateObejct + stateObject := s.getStateObjectNoSlot(addr) + codeHash := common.Hash{} + if stateObject != nil { + codeHash = common.BytesToHash(stateObject.CodeHash()) + } + if s.parallel.isSlotDB { + s.parallel.codeHashReadsInSlot[addr] = codeHash } - return common.BytesToHash(stateObject.CodeHash()) + return codeHash } // GetState retrieves a value from the given account's storage trie. 
+// For parallel mode wih, get from the state in order: +// -> self dirty, both Slot & MainProcessor +// -> pending of self: Slot on merge +// -> pending of unconfirmed DB +// -> pending of main StateDB +// -> origin func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { if s.parallel.isSlotDB { - if s.parallel.stateReadsInSlot[addr] == nil { - s.parallel.stateReadsInSlot[addr] = make(map[common.Hash]struct{}, defaultNumOfSlots) + + // 1.Try to get from dirty + if exist, ok := s.parallel.addrStateChangesInSlot[addr]; ok { + if !exist { + return common.Hash{} + } + obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot + return obj.GetState(s.db, hash) + } + if keys, ok := s.parallel.kvChangesInSlot[addr]; ok { + if _, ok := keys[hash]; ok { + obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot + return obj.GetState(s.db, hash) + } + } + // 2.Try to get from uncomfirmed DB or main DB + // 2.1 Already read before + if storage, ok := s.parallel.kvReadsInSlot[addr]; ok { + if val, ok := storage.GetValue(hash); ok { + return val + } + } + // 2.2 Try to get from unconfirmed DB if exist + if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok { + if s.parallel.kvReadsInSlot[addr] == nil { + s.parallel.kvReadsInSlot[addr] = newStorage(false) + } + s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache + return val } - s.parallel.stateReadsInSlot[addr][hash] = struct{}{} } - stateObject := s.getStateObject(addr) + // 3.Get from main StateDB + stateObject := s.getStateObjectNoSlot(addr) + val := common.Hash{} if stateObject != nil { - return stateObject.GetState(s.db, hash) + val = stateObject.GetState(s.db, hash) + } + if s.parallel.isSlotDB { + if s.parallel.kvReadsInSlot[addr] == nil { + s.parallel.kvReadsInSlot[addr] = newStorage(false) + } + s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache } - return common.Hash{} + return 
val } // GetProof returns the Merkle proof for a given account. @@ -825,17 +1349,38 @@ func (s *StateDB) GetStorageProofByHash(a common.Address, key common.Hash) ([][] // GetCommittedState retrieves a value from the given account's committed storage trie. func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash { if s.parallel.isSlotDB { - if s.parallel.stateReadsInSlot[addr] == nil { - s.parallel.stateReadsInSlot[addr] = make(map[common.Hash]struct{}, defaultNumOfSlots) + // 1.No need to get from pending of itself even on merge, since stateobject in SlotDB won't do finalise + // 2.Try to get from uncomfirmed DB or main DB + // KVs in unconfirmed DB can be seen as pending storage + // KVs in main DB are merged from SlotDB and has done finalise() on merge, can be seen as pending storage too. + // 2.1 Already read before + if storage, ok := s.parallel.kvReadsInSlot[addr]; ok { + if val, ok := storage.GetValue(hash); ok { + return val + } + } + // 2.2 Try to get from unconfirmed DB if exist + if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok { + if s.parallel.kvReadsInSlot[addr] == nil { + s.parallel.kvReadsInSlot[addr] = newStorage(false) + } + s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache + return val } - s.parallel.stateReadsInSlot[addr][hash] = struct{}{} } - - stateObject := s.getStateObject(addr) + // 3. Try to get from main DB + stateObject := s.getStateObjectNoSlot(addr) + val := common.Hash{} if stateObject != nil { - return stateObject.GetCommittedState(s.db, hash) + val = stateObject.GetCommittedState(s.db, hash) + } + if s.parallel.isSlotDB { + if s.parallel.kvReadsInSlot[addr] == nil { + s.parallel.kvReadsInSlot[addr] = newStorage(false) + } + s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache } - return common.Hash{} + return val } // Database retrieves the low level database supporting the lower level trie ops. 
@@ -856,7 +1401,20 @@ func (s *StateDB) StorageTrie(addr common.Address) Trie { } func (s *StateDB) HasSuicided(addr common.Address) bool { - stateObject := s.getStateObject(addr) + if s.parallel.isSlotDB { + // 1.Try to get from dirty + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + return obj.suicided + } + // 2.Try to get from uncomfirmed + if deleted, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { + if deleted { + return false + } + return false + } + } + stateObject := s.getStateObjectNoSlot(addr) if stateObject != nil { return stateObject.suicided } @@ -867,30 +1425,62 @@ func (s *StateDB) HasSuicided(addr common.Address) bool { * SETTERS */ +// the source mainObj should be got from the main StateDB +// we have to update its nonce, balance, code if they have updated in the unconfirmed DBs +/* +func (s *StateDB) unconfirmedLightCopy(mainObj *StateObject) *StateObject { + newObj := mainObj.lightCopy(s) // copied nonce, balance, code from base DB + + // do balance fixup only when it exist in unconfirmed DB + if nonce, ok := s.getNonceFromUnconfirmedDB(mainObj.address); ok { + // code got from unconfirmed DB + newObj.setNonce(nonce) + } + + // do balance fixup + if balance := s.getBalanceFromUnconfirmedDB(mainObj.address); balance != nil { + // balance got from unconfirmed DB + newObj.setBalance(balance) + } + // do code fixup + if codeObj, ok := s.getCodeFromUnconfirmedDB(mainObj.address); ok { + newObj.setCode(crypto.Keccak256Hash(codeObj), codeObj) // fixme: to confirm if we should use "codeObj.Code(db)" + newObj.dirtyCode = false // copy does not make the code dirty, + } + return newObj +} +*/ + // AddBalance adds amount to the account associated with addr. 
func (s *StateDB) AddBalance(addr common.Address, amount *big.Int) { - if s.parallel.isSlotDB { - if amount.Sign() != 0 { - s.parallel.balanceChangesInSlot[addr] = struct{}{} - // add balance will perform a read operation first - s.parallel.balanceReadsInSlot[addr] = struct{}{} - } else { - // if amount == 0, no balance change, but there is still an empty check. - // take this empty check as addr state read(create, suicide, empty delete) - s.parallel.addrStateReadsInSlot[addr] = struct{}{} - } - if addr == s.parallel.systemAddress { - s.parallel.systemAddressOpsCount++ - } - } + // if s.parallel.isSlotDB { + // add balance will perform a read operation first + // s.parallel.balanceReadsInSlot[addr] = struct{}{} // fixme: to make the the balance valid, since unconfirmed would refer it. + // if amount.Sign() == 0 { + // if amount == 0, no balance change, but there is still an empty check. + // take this empty check as addr state read(create, suicide, empty delete) + // s.parallel.addrStateReadsInSlot[addr] = struct{}{} + // } + // } stateObject := s.GetOrNewStateObject(addr) if stateObject != nil { if s.parallel.isSlotDB { + if addr == s.parallel.systemAddress { + s.parallel.systemAddressOpsCount++ + } if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.deepCopy(s) + newStateObject := stateObject.lightCopy(s) // light copy from main DB + // do balance fixup from the confirmed DB, it could be more reliable than main DB + if balance := s.getBalanceFromUnconfirmedDB(addr); balance != nil { + newStateObject.setBalance(balance) + } + s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance() // could read from main DB or unconfirmed DB + newStateObject.AddBalance(amount) s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + // if amount.Sign() != 0 { // todo: to reenable it + s.parallel.balanceChangesInSlot[addr] = struct{}{} return } } @@ -900,24 +1490,30 @@ func (s *StateDB) AddBalance(addr common.Address, amount 
*big.Int) { // SubBalance subtracts amount from the account associated with addr. func (s *StateDB) SubBalance(addr common.Address, amount *big.Int) { - if s.parallel.isSlotDB { - if amount.Sign() != 0 { - s.parallel.balanceChangesInSlot[addr] = struct{}{} - // unlike add, sub 0 balance will not touch empty object - s.parallel.balanceReadsInSlot[addr] = struct{}{} - } - if addr == s.parallel.systemAddress { - s.parallel.systemAddressOpsCount++ - } - } - + // if s.parallel.isSlotDB { + // if amount.Sign() != 0 { + // unlike add, sub 0 balance will not touch empty object + // s.parallel.balanceReadsInSlot[addr] = struct{}{} + // } + // } stateObject := s.GetOrNewStateObject(addr) if stateObject != nil { if s.parallel.isSlotDB { + if addr == s.parallel.systemAddress { + s.parallel.systemAddressOpsCount++ + } if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.deepCopy(s) + newStateObject := stateObject.lightCopy(s) // light copy from main DB + // do balance fixup from the confirmed DB, it could be more reliable than main DB + if balance := s.getBalanceFromUnconfirmedDB(addr); balance != nil { + newStateObject.setBalance(balance) + } + s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance() newStateObject.SubBalance(amount) s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + + // if amount.Sign() != 0 { // todo: to reenable it + s.parallel.balanceChangesInSlot[addr] = struct{}{} return } } @@ -929,13 +1525,12 @@ func (s *StateDB) SetBalance(addr common.Address, amount *big.Int) { stateObject := s.GetOrNewStateObject(addr) if stateObject != nil { if s.parallel.isSlotDB { - s.parallel.balanceChangesInSlot[addr] = struct{}{} if addr == s.parallel.systemAddress { s.parallel.systemAddressOpsCount++ } - + s.parallel.balanceChangesInSlot[addr] = struct{}{} if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.deepCopy(s) + newStateObject := stateObject.lightCopy(s) 
newStateObject.SetBalance(amount) s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject return @@ -945,25 +1540,13 @@ func (s *StateDB) SetBalance(addr common.Address, amount *big.Int) { } } -// Generally sender's nonce will be increased by 1 for each transaction -// But if the contract tries to create a new contract, its nonce will be advanced -// for each opCreate or opCreate2. Nonce is key to transaction execution, once it is -// changed for contract created, the concurrent transaction will be marked invalid if -// they accessed the address. -func (s *StateDB) NonceChanged(addr common.Address) { - if s.parallel.isSlotDB { - log.Debug("NonceChanged", "txIndex", s.txIndex, "addr", addr) - s.parallel.nonceChangesInSlot[addr] = struct{}{} - s.parallel.nonceIncreased++ - } -} - func (s *StateDB) SetNonce(addr common.Address, nonce uint64) { stateObject := s.GetOrNewStateObject(addr) if stateObject != nil { if s.parallel.isSlotDB { + s.parallel.nonceChangesInSlot[addr] = struct{}{} if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.deepCopy(s) + newStateObject := stateObject.lightCopy(s) newStateObject.SetNonce(nonce) s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject return @@ -976,41 +1559,44 @@ func (s *StateDB) SetNonce(addr common.Address, nonce uint64) { func (s *StateDB) SetCode(addr common.Address, code []byte) { stateObject := s.GetOrNewStateObject(addr) if stateObject != nil { + codeHash := crypto.Keccak256Hash(code) if s.parallel.isSlotDB { s.parallel.codeChangesInSlot[addr] = struct{}{} - if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.deepCopy(s) - newStateObject.SetCode(crypto.Keccak256Hash(code), code) + newStateObject := stateObject.lightCopy(s) + newStateObject.SetCode(codeHash, code) s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject return } } - stateObject.SetCode(crypto.Keccak256Hash(code), code) + stateObject.SetCode(codeHash, code) } 
} func (s *StateDB) SetState(addr common.Address, key, value common.Hash) { - stateObject := s.GetOrNewStateObject(addr) + stateObject := s.GetOrNewStateObject(addr) // attention: if StateObject's lightCopy, its storage is only a part of the full storage, if stateObject != nil { if s.parallel.isSlotDB { if s.parallel.baseTxIndex+1 == s.txIndex { // we check if state is unchanged // only when current transaction is the next transaction to be committed - if stateObject.GetState(s.db, key) == value { + // fixme: there is a bug, block: 14,962,284, + // stateObject is in dirty (light copy), but the key is in mainStateDB + // stateObject dirty -> committed, will skip mainStateDB dirty + if s.GetState(addr, key) == value { log.Debug("Skip set same state", "baseTxIndex", s.parallel.baseTxIndex, - "txIndex", s.txIndex) + "txIndex", s.txIndex, "addr", addr, + "key", key, "value", value) return } } - if s.parallel.stateChangesInSlot[addr] == nil { - s.parallel.stateChangesInSlot[addr] = make(StateKeys, defaultNumOfSlots) + if s.parallel.kvChangesInSlot[addr] == nil { + s.parallel.kvChangesInSlot[addr] = make(StateKeys) // make(Storage, defaultNumOfSlots) } - s.parallel.stateChangesInSlot[addr][key] = struct{}{} if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.deepCopy(s) + newStateObject := stateObject.lightCopy(s) newStateObject.SetState(s.db, key, value) s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject return @@ -1023,7 +1609,7 @@ func (s *StateDB) SetState(addr common.Address, key, value common.Hash) { // SetStorage replaces the entire storage for the specified account with given // storage. This function should only be used for debugging. func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) { - stateObject := s.GetOrNewStateObject(addr) + stateObject := s.GetOrNewStateObject(addr) // fixme: parallel mode? 
if stateObject != nil { stateObject.SetStorage(storage) } @@ -1035,26 +1621,45 @@ func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common // The account's state object is still available until the state is committed, // getStateObject will return a non-nil account after Suicide. func (s *StateDB) Suicide(addr common.Address) bool { - stateObject := s.getStateObject(addr) + var stateObject *StateObject + if s.parallel.isSlotDB { + // 1.Try to get from dirty, it could be suicided inside of contract call + stateObject = s.parallel.dirtiedStateObjectsInSlot[addr] + // 2.Try to get from uncomfirmed, if deleted return false, since the address does not exist + if stateObject == nil { + if deleted, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { + if deleted { + return false + } + } + } + } + // 3.Try to get from main StateDB + if stateObject == nil { + stateObject = s.getStateObjectNoSlot(addr) + } if stateObject == nil { return false } s.journal.append(suicideChange{ account: &addr, - prev: stateObject.suicided, + prev: stateObject.suicided, // todo: must be false? 
prevbalance: new(big.Int).Set(stateObject.Balance()), }) if s.parallel.isSlotDB { - s.parallel.stateObjectsSuicidedInSlot[addr] = struct{}{} - s.parallel.addrStateChangesInSlot[addr] = struct{}{} if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { // do copy-on-write for suicide "write" - newStateObject := stateObject.deepCopy(s) + newStateObject := stateObject.lightCopy(s) newStateObject.markSuicided() newStateObject.data.Balance = new(big.Int) s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist any more, + s.parallel.nonceChangesInSlot[addr] = struct{}{} + s.parallel.balanceChangesInSlot[addr] = struct{}{} + s.parallel.codeChangesInSlot[addr] = struct{}{} + // s.parallel.kvChangesInSlot[addr] = make(StateKeys) // all key changes are discarded return true } } @@ -1105,9 +1710,20 @@ func (s *StateDB) deleteStateObject(obj *StateObject) { // getStateObject retrieves a state object given by the address, returning nil if // the object is not found or was deleted in this execution context. If you need // to differentiate between non-existent/just-deleted, use getDeletedStateObject. +// fixme: avoid getStateObjectNoSlot, may be we define a new struct SlotDB which inherit StateDB +func (s *StateDB) getStateObjectNoSlot(addr common.Address) *StateObject { + if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted { + return obj + } + return nil +} + +// for parallel execution mode, try to get dirty StateObject in slot first. 
func (s *StateDB) getStateObject(addr common.Address) *StateObject { if s.parallel.isSlotDB { - s.parallel.addrStateReadsInSlot[addr] = struct{}{} + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + return obj + } } if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted { @@ -1187,38 +1803,66 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject { } func (s *StateDB) SetStateObject(object *StateObject) { - if s.parallel.isSlotDB { - s.parallel.dirtiedStateObjectsInSlot[object.Address()] = object - } else { - s.storeStateObj(object.Address(), object) - } + s.storeStateObj(object.Address(), object) } // GetOrNewStateObject retrieves a state object or create a new state object if nil. +// dirtyInSlot -> Unconfirmed DB -> main DB -> snapshot, no? create one func (s *StateDB) GetOrNewStateObject(addr common.Address) *StateObject { - stateObject := s.getStateObject(addr) + var stateObject *StateObject = nil + exist := true + if s.parallel.isSlotDB { + if stateObject, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + return stateObject + } + stateObject, _ = s.getStateObjectFromUnconfirmedDB(addr) + } + if stateObject == nil { - stateObject, _ = s.createObject(addr) + stateObject = s.getStateObjectNoSlot(addr) + } + if stateObject == nil || stateObject.deleted { + stateObject = s.createObject(addr) + exist = false + } + + if s.parallel.isSlotDB { + s.parallel.addrStateReadsInSlot[addr] = exist // true: exist, false: not exist } return stateObject } // createObject creates a new state object. If there is an existing account with // the given address, it is overwritten and returned as the second return value. 
-func (s *StateDB) createObject(addr common.Address) (newobj, prev *StateObject) { + +// prev is used for CreateAccount to get its balance +// Parallel mode: +// if prev in dirty: revert is ok +// if prev in unconfirmed DB: addr state read record, revert should not put it back +// if prev in main DB: addr state read record, revert should not put it back +// if pre no exist: addr state read record, + +// `prev` is used to handle revert, to recover with the `prev` object +// In Parallel mode, we only need to recover to `prev` in SlotDB, +// a.if it is not in SlotDB, `revert` will remove it from the SlotDB +// b.if it is exist in SlotDB, `revert` will recover to the `prev` in SlotDB +// c.as `snapDestructs` it is the same +func (s *StateDB) createObject(addr common.Address) (newobj *StateObject) { + var prev *StateObject = nil if s.parallel.isSlotDB { - s.parallel.addrStateReadsInSlot[addr] = struct{}{} // will try to get the previous object. - s.parallel.addrStateChangesInSlot[addr] = struct{}{} + // do not get from unconfirmed DB, since it will has problem on revert + prev = s.parallel.dirtiedStateObjectsInSlot[addr] + } else { + prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that! } - prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that! - var prevdestruct bool + if s.snap != nil && prev != nil { _, prevdestruct = s.snapDestructs[prev.address] if !prevdestruct { - // createObject for deleted object will destroy the previous trie node first - // and update the trie tree with the new object on block commit. + // To destroy the previous trie node first and update the trie tree + // with the new object on block commit. 
s.snapDestructs[prev.address] = struct{}{} } } @@ -1229,11 +1873,19 @@ func (s *StateDB) createObject(addr common.Address) (newobj, prev *StateObject) } else { s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct}) } - s.SetStateObject(newobj) - if prev != nil && !prev.deleted { - return newobj, prev + + if s.parallel.isSlotDB { + s.parallel.dirtiedStateObjectsInSlot[addr] = newobj + s.parallel.addrStateChangesInSlot[addr] = true // the object is created + s.parallel.nonceChangesInSlot[addr] = struct{}{} + s.parallel.balanceChangesInSlot[addr] = struct{}{} + s.parallel.codeChangesInSlot[addr] = struct{}{} + // notice: all the KVs are cleared if any + s.parallel.kvChangesInSlot[addr] = make(StateKeys) + } else { + s.SetStateObject(newobj) } - return newobj, nil + return newobj } // CreateAccount explicitly creates a state object. If a state object with the address @@ -1247,14 +1899,12 @@ func (s *StateDB) createObject(addr common.Address) (newobj, prev *StateObject) // // Carrying over the balance ensures that Ether doesn't disappear. 
func (s *StateDB) CreateAccount(addr common.Address) { - newObj, prev := s.createObject(addr) - if prev != nil { - newObj.setBalance(prev.data.Balance) - } - if s.parallel.isSlotDB { - s.parallel.balanceReadsInSlot[addr] = struct{}{} // read the balance of previous object - s.parallel.dirtiedStateObjectsInSlot[addr] = newObj - } + // no matter it is got from dirty, unconfirmed or main DB + // if addr not exist, preBalance will be common.Big0, it is same as new(big.Int) which + // is the value newObject(), + preBalance := s.GetBalance(addr) // parallel balance read will be recorded inside of GetBalance + newObj := s.createObject(addr) + newObj.setBalance(new(big.Int).Set(preBalance)) // new big.Int for newObj } func (s *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error { @@ -1413,6 +2063,10 @@ var stateObjectsPool = sync.Pool{ New: func() interface{} { return make(map[common.Address]*StateObject, defaultNumOfSlots) }, } +var balancePool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]*big.Int, defaultNumOfSlots) }, +} + var snapAccountPool = sync.Pool{ New: func() interface{} { return make(map[common.Address][]byte, defaultNumOfSlots) }, } @@ -1430,25 +2084,20 @@ var logsPool = sync.Pool{ } func (s *StateDB) SlotDBPutSyncPool() { - for key := range s.parallel.stateObjectsSuicidedInSlot { - delete(s.parallel.stateObjectsSuicidedInSlot, key) - } - addressStructPool.Put(s.parallel.stateObjectsSuicidedInSlot) - for key := range s.parallel.codeReadsInSlot { delete(s.parallel.codeReadsInSlot, key) } addressStructPool.Put(s.parallel.codeReadsInSlot) - for key := range s.parallel.codeChangesInSlot { - delete(s.parallel.codeChangesInSlot, key) - } - addressStructPool.Put(s.parallel.codeChangesInSlot) + // for key := range s.parallel.codeChangesInSlot { + // delete(s.parallel.codeChangesInSlot, key) + // } + // addressStructPool.Put(s.parallel.codeChangesInSlot) for key := range s.parallel.balanceChangesInSlot { 
delete(s.parallel.balanceChangesInSlot, key) } - addressStructPool.Put(s.parallel.balanceChangesInSlot) + balancePool.Put(s.parallel.balanceChangesInSlot) for key := range s.parallel.balanceReadsInSlot { delete(s.parallel.balanceReadsInSlot, key) @@ -1481,15 +2130,15 @@ func (s *StateDB) SlotDBPutSyncPool() { s.journal.entries = s.journal.entries[:0] journalPool.Put(s.journal) - for key := range s.parallel.stateChangesInSlot { - delete(s.parallel.stateChangesInSlot, key) - } - stateKeysPool.Put(s.parallel.stateChangesInSlot) + // for key := range s.parallel.kvChangesInSlot { + // delete(s.parallel.kvChangesInSlot, key) + //} + //stateKeysPool.Put(s.parallel.kvChangesInSlot) - for key := range s.parallel.stateReadsInSlot { - delete(s.parallel.stateReadsInSlot, key) + for key := range s.parallel.kvReadsInSlot { + delete(s.parallel.kvReadsInSlot, key) } - stateKeysPool.Put(s.parallel.stateReadsInSlot) + stateKeysPool.Put(s.parallel.kvReadsInSlot) for key := range s.parallel.dirtiedStateObjectsInSlot { delete(s.parallel.dirtiedStateObjectsInSlot, key) @@ -1526,27 +2175,31 @@ func (s *StateDB) CopyForSlot() *StateDB { parallel := ParallelState{ // use base(dispatcher) slot db's stateObjects. 
// It is a SyncMap, only readable to slot, not writable - stateObjects: s.parallel.stateObjects, - stateObjectsSuicidedInSlot: addressStructPool.Get().(map[common.Address]struct{}), - codeReadsInSlot: addressStructPool.Get().(map[common.Address]struct{}), - codeChangesInSlot: addressStructPool.Get().(map[common.Address]struct{}), - stateChangesInSlot: stateKeysPool.Get().(map[common.Address]StateKeys), - stateReadsInSlot: stateKeysPool.Get().(map[common.Address]StateKeys), - balanceChangesInSlot: addressStructPool.Get().(map[common.Address]struct{}), - balanceReadsInSlot: addressStructPool.Get().(map[common.Address]struct{}), - addrStateReadsInSlot: addressStructPool.Get().(map[common.Address]struct{}), - addrStateChangesInSlot: addressStructPool.Get().(map[common.Address]struct{}), - nonceChangesInSlot: addressStructPool.Get().(map[common.Address]struct{}), - isSlotDB: true, - dirtiedStateObjectsInSlot: stateObjectsPool.Get().(map[common.Address]*StateObject), + stateObjects: s.parallel.stateObjects, + unconfirmedDBInShot: make(map[int]*StateDB, 100), + + codeReadsInSlot: make(map[common.Address][]byte, 10), // addressStructPool.Get().(map[common.Address]struct{}), + codeHashReadsInSlot: make(map[common.Address]common.Hash), + codeChangesInSlot: make(map[common.Address]struct{}), + kvChangesInSlot: make(map[common.Address]StateKeys), // stateKeysPool.Get().(map[common.Address]StateKeys), + kvReadsInSlot: make(map[common.Address]Storage, 100), // stateKeysPool.Get().(map[common.Address]Storage), + balanceChangesInSlot: make(map[common.Address]struct{}, 10), // balancePool.Get().(map[common.Address]struct{}, 10), + balanceReadsInSlot: make(map[common.Address]*big.Int), // addressStructPool.Get().(map[common.Address]struct{}), + addrStateReadsInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), + addrStateChangesInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), + 
nonceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + nonceReadsInSlot: make(map[common.Address]uint64), + + isSlotDB: true, + dirtiedStateObjectsInSlot: stateObjectsPool.Get().(map[common.Address]*StateObject), } state := &StateDB{ db: s.db, trie: s.db.CopyTrie(s.trie), stateObjects: make(map[common.Address]*StateObject), // replaced by parallel.stateObjects in parallel mode - stateObjectsPending: addressStructPool.Get().(map[common.Address]struct{}), - stateObjectsDirty: addressStructPool.Get().(map[common.Address]struct{}), - refund: s.refund, // should be 0 + stateObjectsPending: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + stateObjectsDirty: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + refund: s.refund, // should be 0 logs: logsPool.Get().(map[common.Hash][]*types.Log), logSize: 0, preimages: make(map[common.Hash][]byte, len(s.preimages)), @@ -1568,10 +2221,12 @@ func (s *StateDB) CopyForSlot() *StateDB { state.snaps = s.snaps state.snap = s.snap // deep copy needed - state.snapDestructs = addressStructPool.Get().(map[common.Address]struct{}) + state.snapDestructs = make(map[common.Address]struct{}) // addressStructPool.Get().(map[common.Address]struct{}) + s.snapParallelLock.RLock() for k, v := range s.snapDestructs { state.snapDestructs[k] = v } + s.snapParallelLock.RUnlock() // state.snapAccounts = snapAccountPool.Get().(map[common.Address][]byte) for k, v := range s.snapAccounts { @@ -1635,10 +2290,22 @@ func (s *StateDB) WaitPipeVerification() error { // Finalise finalises the state by removing the s destructed objects and clears // the journal as well as the refunds. Finalise, however, will not push any updates // into the tries just yet. Only IntermediateRoot or Commit will do that. 
-func (s *StateDB) Finalise(deleteEmptyObjects bool) { +func (s *StateDB) Finalise(deleteEmptyObjects bool) { // fixme: concurrent safe... addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties)) for addr := range s.journal.dirties { - obj, exist := s.getStateObjectFromStateObjects(addr) + var obj *StateObject + var exist bool + if s.parallel.isSlotDB { + obj = s.parallel.dirtiedStateObjectsInSlot[addr] + if obj != nil { + exist = true + } else { + log.Error("StateDB Finalise dirty addr not in dirtiedStateObjectsInSlot", + "addr", addr) + } + } else { + obj, exist = s.getStateObjectFromStateObjects(addr) + } if !exist { // ripeMD is 'touched' at block 1714175, in tx 0x1237f737031e40bcde4a8b7e717b2d15e3ecadfe49bb1bbc71ee9deb09c6fcf2 // That tx goes out of gas, and although the notion of 'touched' does not exist there, the @@ -1650,7 +2317,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { } if obj.suicided || (deleteEmptyObjects && obj.empty()) { if s.parallel.isSlotDB { - s.parallel.addrStateChangesInSlot[addr] = struct{}{} // empty an StateObject is a state change + s.parallel.addrStateChangesInSlot[addr] = false // false: deleted } obj.deleted = true diff --git a/core/state_processor.go b/core/state_processor.go index f98adea519..9e6da7fee1 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -399,9 +399,9 @@ type SlotState struct { pendingTxReqChan chan struct{} pendingConfirmChan chan *ParallelTxResult pendingTxReqList []*ParallelTxRequest // maintained by dispatcher for dispatch policy - mergedChangeList []state.SlotChangeList - slotdbChan chan *state.StateDB // dispatch will create and send this slotDB to slot - // txReqUnits []*ParallelDispatchUnit // only dispatch can access + slotdbChan chan *state.StateDB // dispatch will create and send this slotDB to slot + // txReqUnits []*ParallelDispatchUnit // only dispatch can accesssd + unconfirmedStateDBs *sync.Map // [int]*state.StateDB // fixme: concurrent safe, not use 
sync.Map? } type ParallelTxResult struct { @@ -457,84 +457,6 @@ func (p *ParallelStateProcessor) init() { } } -// conflict check uses conflict window, it will check all state changes from (cfWindowStart + 1) -// to the previous Tx, if any state in readDb is updated in changeList, then it is conflicted -func (p *ParallelStateProcessor) hasStateConflict(readDb *state.StateDB, changeList state.SlotChangeList) bool { - // check KV change - reads := readDb.StateReadsInSlot() - writes := changeList.StateChangeSet - for readAddr, readKeys := range reads { - if _, exist := changeList.AddrStateChangeSet[readAddr]; exist { - log.Debug("conflict: read addr changed state", "addr", readAddr) - return true - } - if writeKeys, ok := writes[readAddr]; ok { - // readAddr exist - for writeKey := range writeKeys { - // same addr and same key, mark conflicted - if _, ok := readKeys[writeKey]; ok { - log.Debug("conflict: state conflict", "addr", readAddr, "key", writeKey) - return true - } - } - } - } - // check balance change - balanceReads := readDb.BalanceReadsInSlot() - balanceWrite := changeList.BalanceChangeSet - for readAddr := range balanceReads { - if _, exist := changeList.AddrStateChangeSet[readAddr]; exist { - // SystemAddress is special, SystemAddressRedo() is prepared for it. 
- // Since txIndex = 0 will create StateObject for SystemAddress, skip its state change check - if readAddr != consensus.SystemAddress { - log.Debug("conflict: read addr changed balance", "addr", readAddr) - return true - } - } - if _, ok := balanceWrite[readAddr]; ok { - if readAddr != consensus.SystemAddress { - log.Debug("conflict: balance conflict", "addr", readAddr) - return true - } - } - } - - // check code change - codeReads := readDb.CodeReadsInSlot() - codeWrite := changeList.CodeChangeSet - for readAddr := range codeReads { - if _, exist := changeList.AddrStateChangeSet[readAddr]; exist { - log.Debug("conflict: read addr changed code", "addr", readAddr) - return true - } - if _, ok := codeWrite[readAddr]; ok { - log.Debug("conflict: code conflict", "addr", readAddr) - return true - } - } - - // check address state change: create, suicide... - addrReads := readDb.AddressReadsInSlot() - addrWrite := changeList.AddrStateChangeSet - nonceWrite := changeList.NonceChangeSet - for readAddr := range addrReads { - if _, ok := addrWrite[readAddr]; ok { - // SystemAddress is special, SystemAddressRedo() is prepared for it. 
- // Since txIndex = 0 will create StateObject for SystemAddress, skip its state change check - if readAddr != consensus.SystemAddress { - log.Debug("conflict: address state conflict", "addr", readAddr) - return true - } - } - if _, ok := nonceWrite[readAddr]; ok { - log.Debug("conflict: address nonce conflict", "addr", readAddr) - return true - } - } - - return false -} - // for parallel execute, we put contracts of same address in a slot, // since these txs probably would have conflicts /* @@ -756,7 +678,9 @@ func (p *ParallelStateProcessor) waitUntilNextTxDone(statedb *state.StateDB, gp if result.updateSlotDB { // the target slot is waiting for new slotDB slotState := p.slotState[result.slotIndex] - slotDB := state.NewSlotDB(statedb, consensus.SystemAddress, p.mergedTxIndex, result.keepSystem) + slotDB := state.NewSlotDB(statedb, consensus.SystemAddress, result.txReq.txIndex, + p.mergedTxIndex, result.keepSystem, slotState.unconfirmedStateDBs) + slotDB.SetSlotIndex(result.slotIndex) p.slotDBsToRelease = append(p.slotDBsToRelease, slotDB) slotState.slotdbChan <- slotDB continue @@ -773,12 +697,7 @@ func (p *ParallelStateProcessor) waitUntilNextTxDone(statedb *state.StateDB, gp resultTxIndex := result.txReq.txIndex resultSlotState := p.slotState[resultSlotIndex] resultSlotState.pendingTxReqList = resultSlotState.pendingTxReqList[1:] - - // Slot's mergedChangeList is produced by dispatcher, while consumed by slot. - // It is safe, since write and read is in sequential, do write -> notify -> read - // It is not good, but work right now. 
- changeList := statedb.MergeSlotDB(result.slotDB, result.receipt, resultTxIndex) - resultSlotState.mergedChangeList = append(resultSlotState.mergedChangeList, changeList) + statedb.MergeSlotDB(result.slotDB, result.receipt, resultTxIndex) if resultTxIndex != p.mergedTxIndex+1 { log.Error("ProcessParallel tx result out of order", "resultTxIndex", resultTxIndex, @@ -798,10 +717,33 @@ func (p *ParallelStateProcessor) executeInSlot(slotIndex int, txReq *ParallelTxR vmenv := vm.NewEVM(blockContext, vm.TxContext{}, slotDB, p.config, txReq.vmConfig) // gasLimit not accurate, but it is ok for block import. // each slot would use its own gas pool, and will do gaslimit check later - gpSlot := new(GasPool).AddGas(txReq.gasLimit) + gpSlot := new(GasPool).AddGas(txReq.gasLimit) // block.GasLimit() evm, result, err := applyTransactionStageExecution(txReq.msg, gpSlot, slotDB, vmenv) - log.Debug("In Slot, Stage Execution done", "Slot", slotIndex, "txIndex", txReq.txIndex, "slotDB.baseTxIndex", slotDB.BaseTxIndex()) + + if err != nil { + // the error could be caused by unconfirmed balance reference, + // the balance could insufficient to pay its gas limit, which cause it preCheck.buyGas() failed + // redo could solve it. + log.Warn("In slot execution error", "error", err) + return &ParallelTxResult{ + updateSlotDB: false, + slotIndex: slotIndex, + txReq: txReq, + receipt: nil, // receipt is generated in finalize stage + slotDB: slotDB, + err: err, + gpSlot: gpSlot, + evm: evm, + result: result, + } + } + + if result.Failed() { + // if Tx is reverted, all its state change will be discarded + slotDB.RevertSlotDB(txReq.msg.From()) + } + slotDB.Finalise(true) // Finalise could write s.parallel.addrStateChangesInSlot[addr], keep Read and Write in same routine to avoid crash return &ParallelTxResult{ updateSlotDB: false, @@ -824,45 +766,28 @@ func (p *ParallelStateProcessor) executeInShadowSlot(slotIndex int, txResult *Pa // wait until the previous tx is finalized. 
if txReq.waitTxChan != nil { - log.Debug("Stage wait previous Tx done", "Slot", slotIndex, "txIndex", txIndex) <-txReq.waitTxChan // close the channel } - if txResult.err != nil { - log.Error("executeInShadowSlot should have no error", "err", txResult.err) - } - // do conflict detect hasConflict := false systemAddrConflict := false - log.Debug("Shadow Stage Execution done, do conflict check", "Slot", slotIndex, "txIndex", txIndex) - if slotDB.SystemAddressRedo() { + if txResult.err != nil { + log.Debug("redo, since in slot execute failed", "err", txResult.err) + hasConflict = true + } else if slotDB.SystemAddressRedo() { + log.Debug("Stage Execution conflict for SystemAddressRedo", "Slot", slotIndex, + "txIndex", txIndex) hasConflict = true systemAddrConflict = true } else if slotDB.NeedsRedo() { // if this is any reason that indicates this transaction needs to redo, skip the conflict check hasConflict = true } else { - for index := 0; index < p.parallelNum; index++ { - // log.Debug("Shadow conflict check", "Slot", slotIndex, "txIndex", txIndex) - // check all finalizedDb from current slot's - for _, changeList := range p.slotState[index].mergedChangeList { - // log.Debug("Shadow conflict check", "changeList.TxIndex", changeList.TxIndex, - // "slotDB.BaseTxIndex()", slotDB.BaseTxIndex()) - if changeList.TxIndex <= slotDB.BaseTxIndex() { - continue - } - if p.hasStateConflict(slotDB, changeList) { - log.Debug("Stage Execution conflict", "Slot", slotIndex, - "txIndex", txIndex, " conflict slot", index, "slotDB.baseTxIndex", slotDB.BaseTxIndex(), - "conflict txIndex", changeList.TxIndex) - hasConflict = true - break - } - } - if hasConflict { - break - } + // to check if what the slot db read is correct. 
+ // refDetail := slotDB.UnconfirmedRefList() + if !slotDB.IsParallelReadsValid() { + hasConflict = true } } @@ -873,6 +798,7 @@ func (p *ParallelStateProcessor) executeInShadowSlot(slotIndex int, txResult *Pa updateSlotDB: true, keepSystem: systemAddrConflict, slotIndex: slotIndex, + txReq: txReq, } p.txResultChan <- redoResult updatedSlotDB := <-p.slotState[slotIndex].slotdbChan @@ -890,6 +816,13 @@ func (p *ParallelStateProcessor) executeInShadowSlot(slotIndex int, txResult *Pa if txResult.err != nil { log.Error("Stage Execution conflict redo, error", txResult.err) } + + if txResult.result.Failed() { + // if Tx is reverted, all its state change will be discarded + log.Debug("TX reverted?", "Slot", slotIndex, "txIndex", txIndex, + "result.Err", txResult.result.Err) + txResult.slotDB.RevertSlotDB(txReq.msg.From()) + } } // goroutine unsafe operation will be handled from here for safety @@ -899,21 +832,11 @@ func (p *ParallelStateProcessor) executeInShadowSlot(slotIndex int, txResult *Pa "gasConsumed", gasConsumed, "result.UsedGas", txResult.result.UsedGas) } - log.Debug("ok to finalize this TX", "Slot", slotIndex, "txIndex", txIndex, - "result.UsedGas", txResult.result.UsedGas, "txReq.usedGas", *txReq.usedGas) - // ok, time to do finalize, stage2 should not be parallel txResult.receipt, txResult.err = applyTransactionStageFinalization(txResult.evm, txResult.result, txReq.msg, p.config, txResult.slotDB, header, txReq.tx, txReq.usedGas, txReq.bloomProcessor) - if txResult.result.Failed() { - // if Tx is reverted, all its state change will be discarded - log.Debug("TX reverted?", "Slot", slotIndex, "txIndex", txIndex, - "result.Err", txResult.result.Err) - txResult.slotDB.RevertSlotDB(txReq.msg.From()) - } - txResult.updateSlotDB = false return txResult } @@ -932,17 +855,18 @@ func (p *ParallelStateProcessor) runSlotLoop(slotIndex int) { // ** for a queued tx, // it is better to create a new SlotDB, since COW is used. 
for _, txReq := range curSlot.pendingTxReqList { - log.Debug("SlotLoop received a new TxReq", "Slot", slotIndex, "txIndex", txReq.txIndex) if txReq.slotDB == nil { result := &ParallelTxResult{ updateSlotDB: true, slotIndex: slotIndex, err: nil, + txReq: txReq, } p.txResultChan <- result txReq.slotDB = <-curSlot.slotdbChan } result := p.executeInSlot(slotIndex, txReq) + curSlot.unconfirmedStateDBs.Store(txReq.txIndex, txReq.slotDB) curSlot.pendingConfirmChan <- result } } @@ -951,14 +875,9 @@ func (p *ParallelStateProcessor) runSlotLoop(slotIndex int) { func (p *ParallelStateProcessor) runShadowSlotLoop(slotIndex int) { curSlot := p.slotState[slotIndex] for { - log.Debug("runShadowSlotLoop wait", "slotIndex", slotIndex) // ParallelTxResult from pendingConfirmChan is not confirmed yet unconfirmedResult := <-curSlot.pendingConfirmChan - - log.Debug("runShadowSlotLoop to confirm the TxResult from master slot", "Slot", slotIndex, "txIndex", unconfirmedResult.txReq.txIndex) confirmedResult := p.executeInShadowSlot(slotIndex, unconfirmedResult) - - log.Debug("runShadowSlotLoop the TxReq is done", "Slot", slotIndex, "err", confirmedResult.err) p.txResultChan <- confirmedResult } } @@ -975,18 +894,18 @@ func (p *ParallelStateProcessor) resetState(txNum int, statedb *state.StateDB) { statedb.PrepareForParallel() - stateDBsToRelease := p.slotDBsToRelease p.slotDBsToRelease = make([]*state.StateDB, 0, txNum) - - go func() { - for _, slotDB := range stateDBsToRelease { - slotDB.SlotDBPutSyncPool() - } - }() - + /* + stateDBsToRelease := p.slotDBsToRelease + go func() { + for _, slotDB := range stateDBsToRelease { + slotDB.SlotDBPutSyncPool() + } + }() + */ for _, slot := range p.slotState { - slot.mergedChangeList = make([]state.SlotChangeList, 0) slot.pendingTxReqList = make([]*ParallelTxRequest, 0) + slot.unconfirmedStateDBs = new(sync.Map) // make(map[int]*state.StateDB), fixme: resue not new? 
} } @@ -997,6 +916,7 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat header = block.Header() gp = new(GasPool).AddGas(block.GasLimit()) ) + log.Info("ProcessParallel", "block", header.Number) var receipts = make([]*types.Receipt, 0) txNum := len(block.Transactions()) p.resetState(txNum, statedb) @@ -1024,7 +944,7 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat // Parallel Execution 1.0&2.0 is for full sync mode, Nonce PreCheck is not necessary // And since we will do out-of-order execution, the Nonce PreCheck could fail. // We will disable it and leave it to Parallel 3.0 which is for validator mode - msg, err := tx.AsParallelMessage(signer) + msg, err := tx.AsMessageNoNonceCheck(signer) if err != nil { return statedb, nil, nil, 0, err } @@ -1037,7 +957,7 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat txIndex: i, tx: tx, slotDB: nil, - gasLimit: gp.Gas(), + gasLimit: block.GasLimit(), // gp.Gas(). 
msg: msg, block: block, vmConfig: cfg, @@ -1067,7 +987,7 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat // update tx result if result.err != nil { - log.Warn("ProcessParallel a failed tx", "resultSlotIndex", result.slotIndex, + log.Error("ProcessParallel a failed tx", "resultSlotIndex", result.slotIndex, "resultTxIndex", result.txReq.txIndex, "result.err", result.err) return statedb, nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", result.txReq.txIndex, result.txReq.tx.Hash().Hex(), result.err) } @@ -1145,6 +1065,9 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg ) var receipts = make([]*types.Receipt, 0) txNum := len(block.Transactions()) + if txNum > 0 { + log.Info("Process", "block", header.Number, "txNum", txNum) + } commonTxs := make([]*types.Transaction, 0, txNum) // Iterate over and process the individual transactions posa, isPoSA := p.engine.(consensus.PoSA) diff --git a/core/types/transaction.go b/core/types/transaction.go index 9a6bebc42e..821c43a157 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -536,8 +536,8 @@ func (tx *Transaction) AsMessage(s Signer) (Message, error) { } // Parallel 1.0&2.0 will skip nonce check, since it is not necessary for sync mode. -// Parallel 3.0 will reenable it, nonce check for parallel execution will be designed. -func (tx *Transaction) AsParallelMessage(s Signer) (Message, error) { +// Parallel 3.0 will reenable it, nonce check for parallel execution will be designed then. 
+func (tx *Transaction) AsMessageNoNonceCheck(s Signer) (Message, error) { msg, err := tx.AsMessage(s) msg.checkNonce = false return msg, err diff --git a/core/vm/evm.go b/core/vm/evm.go index 1970f97910..53e2e8797b 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -475,7 +475,6 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } nonce := evm.StateDB.GetNonce(caller.Address()) evm.StateDB.SetNonce(caller.Address(), nonce+1) - evm.StateDB.NonceChanged(caller.Address()) // fixme: nonce double -- // We add this to the access list _before_ taking a snapshot. Even if the creation fails, // the access-list change should not be rolled back if evm.chainRules.IsBerlin { diff --git a/core/vm/interface.go b/core/vm/interface.go index f424ed9cb9..ad9b05d666 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -31,7 +31,6 @@ type StateDB interface { AddBalance(common.Address, *big.Int) GetBalance(common.Address) *big.Int - NonceChanged(common.Address) GetNonce(common.Address) uint64 SetNonce(common.Address, uint64) @@ -75,8 +74,6 @@ type StateDB interface { AddPreimage(common.Hash, []byte) ForEachStorage(common.Address, func(common.Hash, common.Hash) bool) error - - IsSlotDB() bool } // CallContext provides a basic interface for the EVM calling conventions. 
The EVM From d3c8d8bc685e803a954f7daa3cd4cb50745c9cf6 Mon Sep 17 00:00:00 2001 From: setunapo Date: Wed, 20 Apr 2022 15:30:03 +0800 Subject: [PATCH 04/10] reenable sequential prefetch & disable sync.pool --- core/blockchain.go | 19 +++++------ core/state/statedb.go | 73 ++++++++++++++++++++++--------------------- 2 files changed, 45 insertions(+), 47 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 417f3a0037..9ed4317b13 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -2113,17 +2113,14 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er var followupInterrupt uint32 // For diff sync, it may fallback to full sync, so we still do prefetch // parallel mode has a pipeline, similar to this prefetch, to save CPU we disable this prefetch for parallel - /* - // disable prefetch for parallel bugfix - if !bc.parallelExecution { - if len(block.Transactions()) >= prefetchTxNumber { - throwaway := statedb.Copy() - go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) { - bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt) - }(time.Now(), block, throwaway, &followupInterrupt) - } - } - */ + if !bc.parallelExecution { + if len(block.Transactions()) >= prefetchTxNumber { + throwaway := statedb.Copy() + go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) { + bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt) + }(time.Now(), block, throwaway, &followupInterrupt) + } + } //Process block using the parent state as reference point substart := time.Now() if bc.pipeCommit { diff --git a/core/state/statedb.go b/core/state/statedb.go index f46e6b3556..9263e46fd8 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -2042,6 +2042,7 @@ func (s *StateDB) Copy() *StateDB { return state } +/* var addressStructPool = sync.Pool{ New: func() interface{} { return 
make(map[common.Address]struct{}, defaultNumOfSlots) }, } @@ -2084,30 +2085,30 @@ var logsPool = sync.Pool{ } func (s *StateDB) SlotDBPutSyncPool() { - for key := range s.parallel.codeReadsInSlot { - delete(s.parallel.codeReadsInSlot, key) - } - addressStructPool.Put(s.parallel.codeReadsInSlot) + // for key := range s.parallel.codeReadsInSlot { + // delete(s.parallel.codeReadsInSlot, key) + //} + //addressStructPool.Put(s.parallel.codeReadsInSlot) - // for key := range s.parallel.codeChangesInSlot { - // delete(s.parallel.codeChangesInSlot, key) - // } - // addressStructPool.Put(s.parallel.codeChangesInSlot) + for key := range s.parallel.codeChangesInSlot { + delete(s.parallel.codeChangesInSlot, key) + } + addressStructPool.Put(s.parallel.codeChangesInSlot) for key := range s.parallel.balanceChangesInSlot { delete(s.parallel.balanceChangesInSlot, key) } - balancePool.Put(s.parallel.balanceChangesInSlot) + addressStructPool.Put(s.parallel.balanceChangesInSlot) for key := range s.parallel.balanceReadsInSlot { delete(s.parallel.balanceReadsInSlot, key) } - addressStructPool.Put(s.parallel.balanceReadsInSlot) + balancePool.Put(s.parallel.balanceReadsInSlot) - for key := range s.parallel.addrStateReadsInSlot { - delete(s.parallel.addrStateReadsInSlot, key) - } - addressStructPool.Put(s.parallel.addrStateReadsInSlot) + // for key := range s.parallel.addrStateReadsInSlot { + // delete(s.parallel.addrStateReadsInSlot, key) + // } + // addressStructPool.Put(s.parallel.addrStateReadsInSlot) for key := range s.parallel.nonceChangesInSlot { delete(s.parallel.nonceChangesInSlot, key) @@ -2130,15 +2131,15 @@ func (s *StateDB) SlotDBPutSyncPool() { s.journal.entries = s.journal.entries[:0] journalPool.Put(s.journal) - // for key := range s.parallel.kvChangesInSlot { - // delete(s.parallel.kvChangesInSlot, key) - //} - //stateKeysPool.Put(s.parallel.kvChangesInSlot) - - for key := range s.parallel.kvReadsInSlot { - delete(s.parallel.kvReadsInSlot, key) + for key := range 
s.parallel.kvChangesInSlot { + delete(s.parallel.kvChangesInSlot, key) } - stateKeysPool.Put(s.parallel.kvReadsInSlot) + stateKeysPool.Put(s.parallel.kvChangesInSlot) + + // for key := range s.parallel.kvReadsInSlot { + // delete(s.parallel.kvReadsInSlot, key) + // } + // stateKeysPool.Put(s.parallel.kvReadsInSlot) for key := range s.parallel.dirtiedStateObjectsInSlot { delete(s.parallel.dirtiedStateObjectsInSlot, key) @@ -2169,7 +2170,7 @@ func (s *StateDB) SlotDBPutSyncPool() { } logsPool.Put(s.logs) } - +*/ // CopyForSlot copy all the basic fields, initialize the memory ones func (s *StateDB) CopyForSlot() *StateDB { parallel := ParallelState{ @@ -2180,10 +2181,10 @@ func (s *StateDB) CopyForSlot() *StateDB { codeReadsInSlot: make(map[common.Address][]byte, 10), // addressStructPool.Get().(map[common.Address]struct{}), codeHashReadsInSlot: make(map[common.Address]common.Hash), - codeChangesInSlot: make(map[common.Address]struct{}), + codeChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), kvChangesInSlot: make(map[common.Address]StateKeys), // stateKeysPool.Get().(map[common.Address]StateKeys), kvReadsInSlot: make(map[common.Address]Storage, 100), // stateKeysPool.Get().(map[common.Address]Storage), - balanceChangesInSlot: make(map[common.Address]struct{}, 10), // balancePool.Get().(map[common.Address]struct{}, 10), + balanceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), balanceReadsInSlot: make(map[common.Address]*big.Int), // addressStructPool.Get().(map[common.Address]struct{}), addrStateReadsInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), addrStateChangesInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), @@ -2191,19 +2192,19 @@ func (s *StateDB) CopyForSlot() *StateDB { nonceReadsInSlot: make(map[common.Address]uint64), isSlotDB: true, - 
dirtiedStateObjectsInSlot: stateObjectsPool.Get().(map[common.Address]*StateObject), + dirtiedStateObjectsInSlot: make(map[common.Address]*StateObject), // stateObjectsPool.Get().(map[common.Address]*StateObject), } state := &StateDB{ db: s.db, trie: s.db.CopyTrie(s.trie), - stateObjects: make(map[common.Address]*StateObject), // replaced by parallel.stateObjects in parallel mode - stateObjectsPending: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), - stateObjectsDirty: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), - refund: s.refund, // should be 0 - logs: logsPool.Get().(map[common.Hash][]*types.Log), + stateObjects: make(map[common.Address]*StateObject), // replaced by parallel.stateObjects in parallel mode + stateObjectsPending: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + stateObjectsDirty: make(map[common.Address]struct{}), //addressStructPool.Get().(map[common.Address]struct{}), + refund: s.refund, // should be 0 + logs: make(map[common.Hash][]*types.Log, defaultNumOfSlots), // logsPool.Get().(map[common.Hash][]*types.Log), logSize: 0, preimages: make(map[common.Hash][]byte, len(s.preimages)), - journal: journalPool.Get().(*journal), + journal: newJournal(), // journalPool.Get().(*journal), hasher: crypto.NewKeccakState(), isParallel: true, parallel: parallel, @@ -2221,20 +2222,20 @@ func (s *StateDB) CopyForSlot() *StateDB { state.snaps = s.snaps state.snap = s.snap // deep copy needed - state.snapDestructs = make(map[common.Address]struct{}) // addressStructPool.Get().(map[common.Address]struct{}) + state.snapDestructs = make(map[common.Address]struct{}) //addressStructPool.Get().(map[common.Address]struct{}) s.snapParallelLock.RLock() for k, v := range s.snapDestructs { state.snapDestructs[k] = v } s.snapParallelLock.RUnlock() // - state.snapAccounts = snapAccountPool.Get().(map[common.Address][]byte) + 
state.snapAccounts = make(map[common.Address][]byte) // snapAccountPool.Get().(map[common.Address][]byte) for k, v := range s.snapAccounts { state.snapAccounts[k] = v } - state.snapStorage = snapStoragePool.Get().(map[common.Address]map[string][]byte) + state.snapStorage = make(map[common.Address]map[string][]byte) // snapStoragePool.Get().(map[common.Address]map[string][]byte) for k, v := range s.snapStorage { - temp := snapStorageValuePool.Get().(map[string][]byte) + temp := make(map[string][]byte) // snapStorageValuePool.Get().(map[string][]byte) for kk, vv := range v { temp[kk] = vv } From 1bf23041b5c184b7e42e5a4beb172d79d00052c4 Mon Sep 17 00:00:00 2001 From: setunapo Date: Thu, 21 Apr 2022 20:44:33 +0800 Subject: [PATCH 05/10] Fix: suicide, concurrent store, CreateAccount, revert... balance systemaddr, nonce, code, suicide bad previous balance, nonce, code... addrSnapDestructsReadsInSlot skip system address balance check --- core/state/journal.go | 5 + core/state/state_object.go | 15 +- core/state/statedb.go | 300 ++++++++++++++++++++++++++----------- 3 files changed, 229 insertions(+), 91 deletions(-) diff --git a/core/state/journal.go b/core/state/journal.go index 5afe8886bb..96655d007d 100644 --- a/core/state/journal.go +++ b/core/state/journal.go @@ -144,6 +144,11 @@ type ( func (ch createObjectChange) revert(s *StateDB) { if s.parallel.isSlotDB { delete(s.parallel.dirtiedStateObjectsInSlot, *ch.account) + delete(s.parallel.addrStateChangesInSlot, *ch.account) + delete(s.parallel.nonceChangesInSlot, *ch.account) + delete(s.parallel.balanceChangesInSlot, *ch.account) + delete(s.parallel.codeChangesInSlot, *ch.account) + delete(s.parallel.kvChangesInSlot, *ch.account) } else { s.deleteStateObj(*ch.account) } diff --git a/core/state/state_object.go b/core/state/state_object.go index 7adb5bdbe6..6a1154b06d 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -183,6 +183,10 @@ type StateObject struct { // empty returns whether the 
account is considered empty. func (s *StateObject) empty() bool { return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, emptyCodeHash) + + // in parallel mode, we should never get raw nonce, balance, codeHash any more, + // since it could be invalid, if the element was read from unconfirmed DB or base DB + // return s.db.GetNonce(s.address) == 0 && s.db.GetBalance(s.address).Sign() == 0 && bytes.Equal(s.db.GetCodeHash(s.address).Bytes(), emptyCodeHash) } // Account is the Ethereum consensus representation of accounts. @@ -591,6 +595,7 @@ func (s *StateObject) AddBalance(amount *big.Int) { return } s.SetBalance(new(big.Int).Add(s.Balance(), amount)) + // s.SetBalance(new(big.Int).Add(s.db.GetBalance(s.address), amount)) } // SubBalance removes amount from s's balance. @@ -600,12 +605,15 @@ func (s *StateObject) SubBalance(amount *big.Int) { return } s.SetBalance(new(big.Int).Sub(s.Balance(), amount)) + // s.SetBalance(new(big.Int).Sub(s.db.GetBalance(s.address), amount)) } func (s *StateObject) SetBalance(amount *big.Int) { + // prevBalance := new(big.Int).Set(s.db.GetBalance(s.address)) s.db.journal.append(balanceChange{ account: &s.address, - prev: new(big.Int).Set(s.data.Balance), + prev: new(big.Int).Set(s.data.Balance), // prevBalance, + // prev: prevBalance, }) s.setBalance(amount) } @@ -699,7 +707,7 @@ func (s *StateObject) CodeSize(db Database) int { } func (s *StateObject) SetCode(codeHash common.Hash, code []byte) { - prevcode := s.Code(s.db.db) + prevcode := s.db.GetCode(s.address) s.db.journal.append(codeChange{ account: &s.address, prevhash: s.CodeHash(), @@ -715,9 +723,10 @@ func (s *StateObject) setCode(codeHash common.Hash, code []byte) { } func (s *StateObject) SetNonce(nonce uint64) { + prevNonce := s.db.GetNonce(s.address) s.db.journal.append(nonceChange{ account: &s.address, - prev: s.data.Nonce, + prev: prevNonce, }) s.setNonce(nonce) } diff --git a/core/state/statedb.go b/core/state/statedb.go index 
9263e46fd8..dd1752bcbc 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -101,8 +101,16 @@ func (s *StateDB) storeStateObj(addr common.Address, stateObject *StateObject) { // the object could be create in SlotDB, if it got the object from DB and // update it to the shared `s.parallel.stateObjects`` stateObject.db = s.parallel.baseStateDB + stateObject.db.storeParallelLock.Lock() + if _, ok := s.parallel.stateObjects.Load(addr); !ok { + s.parallel.stateObjects.Store(addr, stateObject) + } + stateObject.db.storeParallelLock.Unlock() + } else { + stateObject.db.storeParallelLock.Lock() + s.parallel.stateObjects.Store(addr, stateObject) + stateObject.db.storeParallelLock.Unlock() } - s.parallel.stateObjects.Store(addr, stateObject) } else { s.stateObjects[addr] = stateObject } @@ -150,8 +158,10 @@ type ParallelState struct { kvChangesInSlot map[common.Address]StateKeys // value will be kept in dirtiedStateObjectsInSlot // Actions such as SetCode, Suicide will change address's state. // Later call like Exist(), Empty(), HasSuicided() depend on the address's state. - addrStateReadsInSlot map[common.Address]bool // true: exist, false: not exist or deleted - addrStateChangesInSlot map[common.Address]bool // true: created, false: deleted + addrStateReadsInSlot map[common.Address]bool // true: exist, false: not exist or deleted + addrStateChangesInSlot map[common.Address]bool // true: created, false: deleted + addrSnapDestructsReadsInSlot map[common.Address]bool + // addrSnapDestructsChangesInSlot map[common.Address]struct{} // no use to get from unconfirmed DB for efficiency // Transaction will pay gas fee to system address. 
// Parallel execution will clear system address's balance at first, in order to maintain transaction's @@ -187,13 +197,14 @@ type StateDB struct { fullProcessed bool pipeCommit bool - snapMux sync.Mutex - snaps *snapshot.Tree - snap snapshot.Snapshot - snapParallelLock sync.RWMutex // for parallel mode, for main StateDB, slot will read snapshot, while processor will write. - snapDestructs map[common.Address]struct{} - snapAccounts map[common.Address][]byte - snapStorage map[common.Address]map[string][]byte + snapMux sync.Mutex + snaps *snapshot.Tree + snap snapshot.Snapshot + storeParallelLock sync.RWMutex + snapParallelLock sync.RWMutex // for parallel mode, for main StateDB, slot will read snapshot, while processor will write. + snapDestructs map[common.Address]struct{} + snapAccounts map[common.Address][]byte + snapStorage map[common.Address]map[string][]byte // This map holds 'live' objects, which will get modified while processing a state transition. stateObjects map[common.Address]*StateObject @@ -832,7 +843,7 @@ func (s *StateDB) getCodeHashFromUnconfirmedDB(addr common.Address) (common.Hash // It is for address state check of: Exist(), Empty() and HasSuicided() // Since the unconfirmed DB should have done Finalise() with `deleteEmptyObjects = true` // If the dirty address is empty or suicided, it will be marked as deleted, so we only need to return `deleted` or not. 
-func (s *StateDB) getAddrStateFromUnconfirmedDB(addr common.Address) (deleted bool, exist bool) { +func (s *StateDB) getAddrStateFromUnconfirmedDB(addr common.Address) (bool, bool) { if addr == s.parallel.systemAddress { // never get systemaddress from unconfirmed DB return false, false @@ -980,8 +991,13 @@ func (s *StateDB) GetBalance(addr common.Address) *big.Int { } // 1.Try to get from dirty if _, ok := s.parallel.balanceChangesInSlot[addr]; ok { - obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot - return obj.Balance() + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on balance fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup balance based on unconfirmed DB or main DB + balance := obj.Balance() + log.Debug("GetBalance in dirty", "txIndex", s.txIndex, "addr", addr, "balance", balance) + return balance + } } // 2.Try to get from uncomfirmed DB or main DB // 2.1 Already read before @@ -1010,8 +1026,13 @@ func (s *StateDB) GetNonce(addr common.Address) uint64 { if s.parallel.isSlotDB { // 1.Try to get from dirty if _, ok := s.parallel.nonceChangesInSlot[addr]; ok { - obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist - return obj.Nonce() + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on nonce fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup nonce based on unconfirmed DB or main DB + nonce := obj.Nonce() + log.Debug("GetNonce in dirty", "txIndex", s.txIndex, "addr", addr, "nonce", nonce) + return nonce + } } // 2.Try to get from uncomfirmed DB or main DB // 2.1 Already read before @@ -1055,41 +1076,43 @@ func (s *StateDB) BaseTxIndex() int { func (s *StateDB) IsParallelReadsValid() bool { slotDB := s if !slotDB.parallel.isSlotDB { - log.Error("IsSlotDBReadsValid slotDB should be slot DB", "txIndex", slotDB.txIndex) + log.Error("IsSlotDBReadsValid slotDB should be slot DB", "SlotIndex", 
slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) return false } mainDB := slotDB.parallel.baseStateDB if mainDB.parallel.isSlotDB { - log.Error("IsSlotDBReadsValid s should be main DB", "txIndex", slotDB.txIndex) + log.Error("IsSlotDBReadsValid s should be main DB", "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) return false } // for nonce for addr, nonceSlot := range slotDB.parallel.nonceReadsInSlot { nonceMain := mainDB.GetNonce(addr) if nonceSlot != nonceMain { - log.Debug("IsSlotDBReadsValid nonce read is invalid", "addr", addr, - "nonceSlot", nonceSlot, "nonceMain", nonceMain, + log.Info("IsSlotDBReadsValid nonce read is invalid", "addr", addr, + "nonceSlot", nonceSlot, "nonceMain", nonceMain, "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) return false } } // balance for addr, balanceSlot := range slotDB.parallel.balanceReadsInSlot { - balanceMain := mainDB.GetBalance(addr) - if balanceSlot.Cmp(balanceMain) != 0 { - log.Debug("IsSlotDBReadsValid balance read is invalid", "addr", addr, - "balanceSlot", balanceSlot, "balanceMain", balanceMain, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) - return false + if addr != s.parallel.systemAddress { // skip balance check for system address + balanceMain := mainDB.GetBalance(addr) + if balanceSlot.Cmp(balanceMain) != 0 { + log.Info("IsSlotDBReadsValid balance read is invalid", "addr", addr, + "balanceSlot", balanceSlot, "balanceMain", balanceMain, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } } } // check code for addr, codeSlot := range slotDB.parallel.codeReadsInSlot { codeMain := mainDB.GetCode(addr) if !bytes.Equal(codeSlot, codeMain) { - log.Debug("IsSlotDBReadsValid code read is invalid", "addr", addr, - "len codeSlot", len(codeSlot), "len codeMain", len(codeMain), + log.Info("IsSlotDBReadsValid code read is 
invalid", "addr", addr, + "len codeSlot", len(codeSlot), "len codeMain", len(codeMain), "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) return false } @@ -1098,8 +1121,8 @@ func (s *StateDB) IsParallelReadsValid() bool { for addr, codeHashSlot := range slotDB.parallel.codeHashReadsInSlot { codeHashMain := mainDB.GetCodeHash(addr) if !bytes.Equal(codeHashSlot.Bytes(), codeHashMain.Bytes()) { - log.Debug("IsSlotDBReadsValid codehash read is invalid", "addr", addr, - "codeHashSlot", codeHashSlot, "codeHashMain", codeHashMain, + log.Info("IsSlotDBReadsValid codehash read is invalid", "addr", addr, + "codeHashSlot", codeHashSlot, "codeHashMain", codeHashMain, "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) return false } @@ -1110,8 +1133,9 @@ func (s *StateDB) IsParallelReadsValid() bool { slotStorage.Range(func(keySlot, valSlot interface{}) bool { valMain := mainDB.GetState(addr, keySlot.(common.Hash)) if !bytes.Equal(valSlot.(common.Hash).Bytes(), valMain.Bytes()) { - log.Debug("IsSlotDBReadsValid KV read is invalid", "addr", addr, - "key", keySlot.(common.Hash), "valSlot", valSlot.(common.Hash), "valMain", valMain, + log.Info("IsSlotDBReadsValid KV read is invalid", "addr", addr, + "key", keySlot.(common.Hash), "valSlot", valSlot.(common.Hash), + "valMain", valMain, "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) conflict = true return false // return false, Range will be terminated. 
@@ -1129,22 +1153,47 @@ func (s *StateDB) IsParallelReadsValid() bool { stateMain = true // addr exist in main DB } if stateSlot != stateMain { - log.Debug("IsSlotDBReadsValid addrState read invalid(true: exist, false: not exist)", - "addr", addr, "stateSlot", stateSlot, "stateMain", stateMain, + // skip addr state check for system address + if addr != s.parallel.systemAddress { + log.Info("IsSlotDBReadsValid addrState read invalid(true: exist, false: not exist)", + "addr", addr, "stateSlot", stateSlot, "stateMain", stateMain, + "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + } + // snapshot destructs check + + for addr, destructRead := range slotDB.parallel.addrSnapDestructsReadsInSlot { + mainObj := mainDB.getStateObjectNoSlot(addr) + if mainObj == nil { + log.Info("IsSlotDBReadsValid snapshot destructs read invalid, address should exist", + "addr", addr, "destruct", destructRead, + "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + _, destructMain := mainDB.snapDestructs[addr] // addr not exist + if destructRead != destructMain { + log.Info("IsSlotDBReadsValid snapshot destructs read invalid", + "addr", addr, "destructRead", destructRead, "destructMain", destructMain, + "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) return false } } + return true } -// For most of the transactions, systemAddressOpsCount should be 2: +// For most of the transactions, systemAddressOpsCount should be 3: // one for SetBalance(0) on NewSlotDB() // the other is for AddBalance(GasFee) at the end. 
-// (systemAddressOpsCount > 2) means the transaction tries to access systemAddress, in +// (systemAddressOpsCount > 3) means the transaction tries to access systemAddress, in // this case, we should redo and keep its balance on NewSlotDB() func (s *StateDB) SystemAddressRedo() bool { - return s.parallel.systemAddressOpsCount > 2 + return s.parallel.systemAddressOpsCount > 3 } // NeedsRedo returns true if there is any clear reason that we need to redo this transaction @@ -1156,9 +1205,12 @@ func (s *StateDB) GetCode(addr common.Address) []byte { if s.parallel.isSlotDB { // 1.Try to get from dirty if _, ok := s.parallel.codeChangesInSlot[addr]; ok { - obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot - code := obj.Code(s.db) - return code + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on code fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup code based on unconfirmed DB or main DB + code := obj.Code(s.db) + return code + } } // 2.Try to get from uncomfirmed DB or main DB // 2.1 Already read before @@ -1188,8 +1240,11 @@ func (s *StateDB) GetCodeSize(addr common.Address) int { if s.parallel.isSlotDB { // 1.Try to get from dirty if _, ok := s.parallel.codeChangesInSlot[addr]; ok { - obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot - return obj.CodeSize(s.db) + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on code fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup code based on unconfirmed DB or main DB + return obj.CodeSize(s.db) + } } // 2.Try to get from uncomfirmed DB or main DB // 2.1 Already read before @@ -1226,8 +1281,11 @@ func (s *StateDB) GetCodeHash(addr common.Address) common.Hash { if s.parallel.isSlotDB { // 1.Try to get from dirty if _, ok := s.parallel.codeChangesInSlot[addr]; ok { - obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in 
dirtiedStateObjectsInSlot - return common.BytesToHash(obj.CodeHash()) + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on code fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup balance based on unconfirmed DB or main DB + return common.BytesToHash(obj.CodeHash()) + } } // 2.Try to get from uncomfirmed DB or main DB // 2.1 Already read before @@ -1407,11 +1465,8 @@ func (s *StateDB) HasSuicided(addr common.Address) bool { return obj.suicided } // 2.Try to get from uncomfirmed - if deleted, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { - if deleted { - return false - } - return false + if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { + return !exist } } stateObject := s.getStateObjectNoSlot(addr) @@ -1469,22 +1524,32 @@ func (s *StateDB) AddBalance(addr common.Address, amount *big.Int) { if addr == s.parallel.systemAddress { s.parallel.systemAddressOpsCount++ } + // if amount.Sign() != 0 { // todo: to reenable it if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { newStateObject := stateObject.lightCopy(s) // light copy from main DB // do balance fixup from the confirmed DB, it could be more reliable than main DB - if balance := s.getBalanceFromUnconfirmedDB(addr); balance != nil { - newStateObject.setBalance(balance) - } - s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance() // could read from main DB or unconfirmed DB - + balance := s.GetBalance(addr) + newStateObject.setBalance(balance) + // s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance() // could read from main DB or unconfirmed DB newStateObject.AddBalance(amount) s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - // if amount.Sign() != 0 { // todo: to reenable it s.parallel.balanceChangesInSlot[addr] = struct{}{} return } + // already dirty, make sure the balance if fixed up + // if stateObject.Balance() + if addr != s.parallel.systemAddress { + if stateObject.Balance().Cmp(s.GetBalance(addr)) != 0 { + 
log.Warn("AddBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr, + "stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", s.GetBalance(addr)) + stateObject.setBalance(s.GetBalance(addr)) + } + } } stateObject.AddBalance(amount) + if s.parallel.isSlotDB { + s.parallel.balanceChangesInSlot[addr] = struct{}{} + } } } @@ -1502,22 +1567,34 @@ func (s *StateDB) SubBalance(addr common.Address, amount *big.Int) { if addr == s.parallel.systemAddress { s.parallel.systemAddressOpsCount++ } + + // if amount.Sign() != 0 { // todo: to reenable it if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { newStateObject := stateObject.lightCopy(s) // light copy from main DB // do balance fixup from the confirmed DB, it could be more reliable than main DB - if balance := s.getBalanceFromUnconfirmedDB(addr); balance != nil { - newStateObject.setBalance(balance) - } - s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance() + balance := s.GetBalance(addr) + newStateObject.setBalance(balance) + // s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance() newStateObject.SubBalance(amount) - s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - - // if amount.Sign() != 0 { // todo: to reenable it s.parallel.balanceChangesInSlot[addr] = struct{}{} + s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject return } + // already dirty, make sure the balance if fixed + // if stateObject.Balance() + if addr != s.parallel.systemAddress { + if stateObject.Balance().Cmp(s.GetBalance(addr)) != 0 { + log.Warn("SubBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr, + "stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", s.GetBalance(addr)) + stateObject.setBalance(s.GetBalance(addr)) + } + } } stateObject.SubBalance(amount) + if s.parallel.isSlotDB { + s.parallel.balanceChangesInSlot[addr] = struct{}{} + } + } } @@ -1528,15 +1605,23 @@ func (s *StateDB) SetBalance(addr common.Address, 
amount *big.Int) { if addr == s.parallel.systemAddress { s.parallel.systemAddressOpsCount++ } - s.parallel.balanceChangesInSlot[addr] = struct{}{} if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { newStateObject := stateObject.lightCopy(s) + // update balance for revert, in case child contract is reverted, + // it should revert to the previous balance + balance := s.GetBalance(addr) + newStateObject.setBalance(balance) newStateObject.SetBalance(amount) + s.parallel.balanceChangesInSlot[addr] = struct{}{} s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject return } + } stateObject.SetBalance(amount) + if s.parallel.isSlotDB { + s.parallel.balanceChangesInSlot[addr] = struct{}{} + } } } @@ -1544,15 +1629,22 @@ func (s *StateDB) SetNonce(addr common.Address, nonce uint64) { stateObject := s.GetOrNewStateObject(addr) if stateObject != nil { if s.parallel.isSlotDB { - s.parallel.nonceChangesInSlot[addr] = struct{}{} if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { newStateObject := stateObject.lightCopy(s) + noncePre := s.GetNonce(addr) + newStateObject.setNonce(noncePre) // nonce fixup newStateObject.SetNonce(nonce) + s.parallel.nonceChangesInSlot[addr] = struct{}{} s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject return } + noncePre := s.GetNonce(addr) + stateObject.setNonce(noncePre) // nonce fixup } stateObject.SetNonce(nonce) + if s.parallel.isSlotDB { + s.parallel.nonceChangesInSlot[addr] = struct{}{} + } } } @@ -1561,15 +1653,25 @@ func (s *StateDB) SetCode(addr common.Address, code []byte) { stateObject := s.GetOrNewStateObject(addr) if stateObject != nil { codeHash := crypto.Keccak256Hash(code) if s.parallel.isSlotDB { - s.parallel.codeChangesInSlot[addr] = struct{}{} if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { newStateObject := stateObject.lightCopy(s) + codePre := s.GetCode(addr) // code fixup + codeHashPre := crypto.Keccak256Hash(codePre) + newStateObject.setCode(codeHashPre, codePre) + newStateObject.SetCode(codeHash, code) 
s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + s.parallel.codeChangesInSlot[addr] = struct{}{} return } + codePre := s.GetCode(addr) // code fixup + codeHashPre := crypto.Keccak256Hash(codePre) + stateObject.setCode(codeHashPre, codePre) } stateObject.SetCode(codeHash, code) + if s.parallel.isSlotDB { + s.parallel.codeChangesInSlot[addr] = struct{}{} + } } } @@ -1601,6 +1703,7 @@ func (s *StateDB) SetState(addr common.Address, key, value common.Hash) { s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject return } + // do State Update } stateObject.SetState(s.db, key, value) } @@ -1625,27 +1728,36 @@ func (s *StateDB) Suicide(addr common.Address) bool { if s.parallel.isSlotDB { // 1.Try to get from dirty, it could be suicided inside of contract call stateObject = s.parallel.dirtiedStateObjectsInSlot[addr] - // 2.Try to get from uncomfirmed, if deleted return false, since the address does not exist if stateObject == nil { - if deleted, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { - if deleted { + if _, ok := s.parallel.addrStateReadsInSlot[addr]; !ok { + log.Info("Suicide addr not in dirty", "txIndex", s.txIndex, "addr", addr) + } + // 2.Try to get from uncomfirmed, if deleted return false, since the address does not exist + if obj, ok := s.getStateObjectFromUnconfirmedDB(addr); ok { + stateObject = obj + s.parallel.addrStateReadsInSlot[addr] = !stateObject.deleted // true: exist, false: deleted + if stateObject.deleted { + log.Error("Suicide addr alreay deleted in confirmed DB", "txIndex", s.txIndex, "addr", addr) return false } } } } - // 3.Try to get from main StateDB if stateObject == nil { + // 3.Try to get from main StateDB stateObject = s.getStateObjectNoSlot(addr) - } - if stateObject == nil { - return false + if stateObject == nil { + s.parallel.addrStateReadsInSlot[addr] = false // true: exist, false: deleted + log.Error("Suicide addr not exist", "txIndex", s.txIndex, "addr", addr) + return false + } + 
s.parallel.addrStateReadsInSlot[addr] = true // true: exist, false: deleted } s.journal.append(suicideChange{ account: &addr, prev: stateObject.suicided, // todo: must be false? - prevbalance: new(big.Int).Set(stateObject.Balance()), + prevbalance: new(big.Int).Set(s.GetBalance(addr)), }) if s.parallel.isSlotDB { @@ -1656,12 +1768,15 @@ func (s *StateDB) Suicide(addr common.Address) bool { newStateObject.data.Balance = new(big.Int) s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist any more, - s.parallel.nonceChangesInSlot[addr] = struct{}{} + // s.parallel.nonceChangesInSlot[addr] = struct{}{} s.parallel.balanceChangesInSlot[addr] = struct{}{} s.parallel.codeChangesInSlot[addr] = struct{}{} // s.parallel.kvChangesInSlot[addr] = make(StateKeys) // all key changes are discarded return true } + s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist any more, + s.parallel.balanceChangesInSlot[addr] = struct{}{} + s.parallel.codeChangesInSlot[addr] = struct{}{} } stateObject.markSuicided() @@ -1797,6 +1912,10 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject { } } // Insert into the live set + // if obj, ok := s.loadStateObj(addr); ok { + // fixme: concurrent not safe, merge could update it... 
+ // return obj + //} obj := newObject(s, s.isParallel, addr, *data) s.SetStateObject(obj) return obj @@ -1821,7 +1940,7 @@ func (s *StateDB) GetOrNewStateObject(addr common.Address) *StateObject { if stateObject == nil { stateObject = s.getStateObjectNoSlot(addr) } - if stateObject == nil || stateObject.deleted { + if stateObject == nil || stateObject.deleted || stateObject.suicided { stateObject = s.createObject(addr) exist = false } @@ -1859,11 +1978,15 @@ func (s *StateDB) createObject(addr common.Address) (newobj *StateObject) { var prevdestruct bool if s.snap != nil && prev != nil { - _, prevdestruct = s.snapDestructs[prev.address] + _, prevdestruct = s.snapDestructs[prev.address] // fixme, record the snapshot read for create Account + if s.parallel.isSlotDB { + s.parallel.addrSnapDestructsReadsInSlot[addr] = prevdestruct + } if !prevdestruct { // To destroy the previous trie node first and update the trie tree // with the new object on block commit. s.snapDestructs[prev.address] = struct{}{} + } } newobj = newObject(s, s.isParallel, addr, Account{}) @@ -1875,8 +1998,8 @@ func (s *StateDB) createObject(addr common.Address) (newobj *StateObject) { } if s.parallel.isSlotDB { - s.parallel.dirtiedStateObjectsInSlot[addr] = newobj - s.parallel.addrStateChangesInSlot[addr] = true // the object is created + // s.parallel.dirtiedStateObjectsInSlot[addr] = newobj // would change the behavior of AddBalance... 
+ s.parallel.addrStateChangesInSlot[addr] = true // the object is created + s.parallel.nonceChangesInSlot[addr] = struct{}{} + s.parallel.balanceChangesInSlot[addr] = struct{}{} + s.parallel.codeChangesInSlot[addr] = struct{}{} @@ -2179,17 +2302,18 @@ func (s *StateDB) CopyForSlot() *StateDB { stateObjects: s.parallel.stateObjects, unconfirmedDBInShot: make(map[int]*StateDB, 100), - codeReadsInSlot: make(map[common.Address][]byte, 10), // addressStructPool.Get().(map[common.Address]struct{}), - codeHashReadsInSlot: make(map[common.Address]common.Hash), - codeChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), - kvChangesInSlot: make(map[common.Address]StateKeys), // stateKeysPool.Get().(map[common.Address]StateKeys), - kvReadsInSlot: make(map[common.Address]Storage, 100), // stateKeysPool.Get().(map[common.Address]Storage), - balanceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), - balanceReadsInSlot: make(map[common.Address]*big.Int), // addressStructPool.Get().(map[common.Address]struct{}), - addrStateReadsInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), - addrStateChangesInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), - nonceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), - nonceReadsInSlot: make(map[common.Address]uint64), + codeReadsInSlot: make(map[common.Address][]byte, 10), // addressStructPool.Get().(map[common.Address]struct{}), + codeHashReadsInSlot: make(map[common.Address]common.Hash), + codeChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + kvChangesInSlot: make(map[common.Address]StateKeys), // stateKeysPool.Get().(map[common.Address]StateKeys), + kvReadsInSlot: make(map[common.Address]Storage, 100), // 
stateKeysPool.Get().(map[common.Address]Storage), + balanceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + balanceReadsInSlot: make(map[common.Address]*big.Int), // addressStructPool.Get().(map[common.Address]struct{}), + addrStateReadsInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), + addrStateChangesInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), + nonceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + nonceReadsInSlot: make(map[common.Address]uint64), + addrSnapDestructsReadsInSlot: make(map[common.Address]bool), isSlotDB: true, dirtiedStateObjectsInSlot: make(map[common.Address]*StateObject), // stateObjectsPool.Get().(map[common.Address]*StateObject), From be5e6ebc313fd329c48f9c4b3c3a3d96cf0e45d1 Mon Sep 17 00:00:00 2001 From: setunapo Date: Tue, 26 Apr 2022 13:44:37 +0800 Subject: [PATCH 06/10] 0426: code improve --- core/state/statedb.go | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index dd1752bcbc..86c951ed7b 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -994,9 +994,7 @@ func (s *StateDB) GetBalance(addr common.Address) *big.Int { if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { // on balance fixup, addr may not exist in dirtiedStateObjectsInSlot // we intend to fixup balance based on unconfirmed DB or main DB - balance := obj.Balance() - log.Debug("GetBalance in dirty", "txIndex", s.txIndex, "addr", addr, "balance", balance) - return balance + return obj.Balance() } } // 2.Try to get from uncomfirmed DB or main DB @@ -1029,9 +1027,7 @@ func (s *StateDB) GetNonce(addr common.Address) uint64 { if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { // on nonce fixup, addr may not exist in dirtiedStateObjectsInSlot // we 
intend to fixup nonce based on unconfirmed DB or main DB - nonce := obj.Nonce() - log.Debug("GetNonce in dirty", "txIndex", s.txIndex, "addr", addr, "nonce", nonce) - return nonce + return obj.Nonce() } } // 2.Try to get from uncomfirmed DB or main DB @@ -1729,9 +1725,6 @@ func (s *StateDB) Suicide(addr common.Address) bool { // 1.Try to get from dirty, it could be suicided inside of contract call stateObject = s.parallel.dirtiedStateObjectsInSlot[addr] if stateObject == nil { - if _, ok := s.parallel.addrStateReadsInSlot[addr]; !ok { - log.Info("Suicide addr not in dirty", "txIndex", s.txIndex, "addr", addr) - } // 2.Try to get from uncomfirmed, if deleted return false, since the address does not exist if obj, ok := s.getStateObjectFromUnconfirmedDB(addr); ok { stateObject = obj From bbf13718b9693a705414f4b660ae706457f0eaa1 Mon Sep 17 00:00:00 2001 From: setunapo Date: Tue, 26 Apr 2022 18:34:56 +0800 Subject: [PATCH 07/10] Empty() and log level --- core/state/state_object.go | 53 ++++++++++++++++++++++++++++++++------ core/state/statedb.go | 22 ++++++++-------- 2 files changed, 56 insertions(+), 19 deletions(-) diff --git a/core/state/state_object.go b/core/state/state_object.go index 6a1154b06d..b442954efe 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -182,11 +182,51 @@ type StateObject struct { // empty returns whether the account is considered empty. 
func (s *StateObject) empty() bool { - return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, emptyCodeHash) - - // in parallel mode, we should never get raw nonce, balance, codeHash any more, - // since it could be invalid, if the element was read from unconfirmed DB or base DB + // return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, emptyCodeHash) + + // 0426, leave some notation, empty() works so far + // empty() has 3 use cases: + // 1.StateDB.Empty(), to empty check + // A: It is ok, we have handled it in Empty(), to make sure nonce, balance, codeHash are solid + // 2:AddBalance 0, empty check for touch event + // empty() will add a touch event. + // if we misjudge it, the touch event could be lost, which make address not deleted. // fixme + // 3.Finalise(), to do empty delete + // the address should be dirtied or touched + // if it nonce dirtied, it is ok, since nonce is monotonically increasing, won't be zero + // if balance is dirtied, balance could be zero, we should refer solid nonce & codeHash // fixme + // if codeHash is dirtied, it is ok, since code will not be updated. + // if suicide, it is ok + // if object is new created, it is ok + // if CreateAccout, recreate the address, it is ok. + + // Slot 0 tx 0: AddBalance(100) to addr_1, => addr_1: balance = 100, nonce = 0, code is empty + // Slot 1 tx 1: addr_1 Transfer 99.9979 with GasFee 0.0021, => addr_1: balance = 0, nonce = 1, code is empty + // notice: balance transfer cost 21,000 gas, with gasPrice = 100Gwei, GasFee will be 0.0021 + // Slot 0 tx 2: add balance 0 to addr_1(empty check for touch event), + // the object was lightCopied from tx 0, + + // in parallel mode, we should not check empty by raw nonce, balance, codeHash any more, + // since it could be invalid. + // e.g., AddBalance() to an address, we will do lightCopy to get a new StateObject, we did balance fixup to + // make sure object's Balance is reliable. 
But we did not fixup nonce or code, we only do nonce or codehash + // fixup on need, that's when we wanna to update the nonce or codehash. + // So nonce, blance + // Before the block is processed, addr_1 account: nonce = 0, emptyCodeHash, balance = 100 + // Slot 0 tx 0: no access to addr_1 + // Slot 1 tx 1: sub balance 100, it is empty and deleted + // Slot 0 tx 2: GetNonce, lightCopy based on main DB(balance = 100) , not empty // return s.db.GetNonce(s.address) == 0 && s.db.GetBalance(s.address).Sign() == 0 && bytes.Equal(s.db.GetCodeHash(s.address).Bytes(), emptyCodeHash) + + if s.db.GetBalance(s.address).Sign() != 0 { // check balance first, since it is most likely not zero + return false + } + if s.db.GetNonce(s.address) != 0 { + return false + } + codeHash := s.db.GetCodeHash(s.address) + return bytes.Equal(codeHash.Bytes(), emptyCodeHash) // code is empty, the object is empty + } // Account is the Ethereum consensus representation of accounts. @@ -288,7 +328,6 @@ func (s *StateObject) GetState(db Database, key common.Hash) common.Hash { if dirty { return value } - // Otherwise return the entry's original value return s.GetCommittedState(db, key) } @@ -658,9 +697,7 @@ func (s *StateObject) MergeSlotObject(db Database, dirtyObjs *StateObject, keys // In parallel mode, always GetState by StateDB, not by StateObject directly, // since it the KV could exist in unconfirmed DB. // But here, it should be ok, since the KV should be changed and valid in the SlotDB, - // we still do GetState by StateDB, it is not an issue. 
- val := dirtyObjs.db.GetState(s.address, key) - s.SetState(db, key, val) + s.SetState(db, key, dirtyObjs.GetState(db, key)) } } diff --git a/core/state/statedb.go b/core/state/statedb.go index 86c951ed7b..4e852cdbd8 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -952,10 +952,10 @@ func (s *StateDB) Empty(addr common.Address) bool { } // so we have to check it manually // empty means: Nonce == 0 && Balance == 0 && CodeHash == emptyCodeHash - if s.GetNonce(addr) != 0 { + if s.GetBalance(addr).Sign() != 0 { // check balance first, since it is most likely not zero return false } - if s.GetBalance(addr).Sign() != 0 { + if s.GetNonce(addr) != 0 { return false } codeHash := s.GetCodeHash(addr) @@ -1085,7 +1085,7 @@ func (s *StateDB) IsParallelReadsValid() bool { for addr, nonceSlot := range slotDB.parallel.nonceReadsInSlot { nonceMain := mainDB.GetNonce(addr) if nonceSlot != nonceMain { - log.Info("IsSlotDBReadsValid nonce read is invalid", "addr", addr, + log.Debug("IsSlotDBReadsValid nonce read is invalid", "addr", addr, "nonceSlot", nonceSlot, "nonceMain", nonceMain, "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) return false @@ -1096,7 +1096,7 @@ func (s *StateDB) IsParallelReadsValid() bool { if addr != s.parallel.systemAddress { // skip balance check for system address balanceMain := mainDB.GetBalance(addr) if balanceSlot.Cmp(balanceMain) != 0 { - log.Info("IsSlotDBReadsValid balance read is invalid", "addr", addr, + log.Debug("IsSlotDBReadsValid balance read is invalid", "addr", addr, "balanceSlot", balanceSlot, "balanceMain", balanceMain, "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) return false @@ -1107,7 +1107,7 @@ func (s *StateDB) IsParallelReadsValid() bool { for addr, codeSlot := range slotDB.parallel.codeReadsInSlot { codeMain := mainDB.GetCode(addr) if !bytes.Equal(codeSlot, codeMain) { - 
log.Info("IsSlotDBReadsValid code read is invalid", "addr", addr, + log.Debug("IsSlotDBReadsValid code read is invalid", "addr", addr, "len codeSlot", len(codeSlot), "len codeMain", len(codeMain), "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) return false @@ -1117,7 +1117,7 @@ func (s *StateDB) IsParallelReadsValid() bool { for addr, codeHashSlot := range slotDB.parallel.codeHashReadsInSlot { codeHashMain := mainDB.GetCodeHash(addr) if !bytes.Equal(codeHashSlot.Bytes(), codeHashMain.Bytes()) { - log.Info("IsSlotDBReadsValid codehash read is invalid", "addr", addr, + log.Debug("IsSlotDBReadsValid codehash read is invalid", "addr", addr, "codeHashSlot", codeHashSlot, "codeHashMain", codeHashMain, "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) return false @@ -1129,7 +1129,7 @@ func (s *StateDB) IsParallelReadsValid() bool { slotStorage.Range(func(keySlot, valSlot interface{}) bool { valMain := mainDB.GetState(addr, keySlot.(common.Hash)) if !bytes.Equal(valSlot.(common.Hash).Bytes(), valMain.Bytes()) { - log.Info("IsSlotDBReadsValid KV read is invalid", "addr", addr, + log.Debug("IsSlotDBReadsValid KV read is invalid", "addr", addr, "key", keySlot.(common.Hash), "valSlot", valSlot.(common.Hash), "valMain", valMain, "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) @@ -1151,7 +1151,7 @@ func (s *StateDB) IsParallelReadsValid() bool { if stateSlot != stateMain { // skip addr state check for system address if addr != s.parallel.systemAddress { - log.Info("IsSlotDBReadsValid addrState read invalid(true: exist, false: not exist)", + log.Debug("IsSlotDBReadsValid addrState read invalid(true: exist, false: not exist)", "addr", addr, "stateSlot", stateSlot, "stateMain", stateMain, "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", 
slotDB.parallel.baseTxIndex) @@ -1164,7 +1164,7 @@ func (s *StateDB) IsParallelReadsValid() bool { for addr, destructRead := range slotDB.parallel.addrSnapDestructsReadsInSlot { mainObj := mainDB.getStateObjectNoSlot(addr) if mainObj == nil { - log.Info("IsSlotDBReadsValid snapshot destructs read invalid, address should exist", + log.Debug("IsSlotDBReadsValid snapshot destructs read invalid, address should exist", "addr", addr, "destruct", destructRead, "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) @@ -1172,7 +1172,7 @@ func (s *StateDB) IsParallelReadsValid() bool { } _, destructMain := mainDB.snapDestructs[addr] // addr not exist if destructRead != destructMain { - log.Info("IsSlotDBReadsValid snapshot destructs read invalid", + log.Debug("IsSlotDBReadsValid snapshot destructs read invalid", "addr", addr, "destructRead", destructRead, "destructMain", destructMain, "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) @@ -1189,7 +1189,7 @@ func (s *StateDB) IsParallelReadsValid() bool { // (systemAddressOpsCount > 3) means the transaction tries to access systemAddress, in // this case, we should redo and keep its balance on NewSlotDB() func (s *StateDB) SystemAddressRedo() bool { - return s.parallel.systemAddressOpsCount > 3 + return s.parallel.systemAddressOpsCount > 4 } // NeedsRedo returns true if there is any clear reason that we need to redo this transaction From 9bcd4c95f7a4d18e6fa109353670dd34cec86737 Mon Sep 17 00:00:00 2001 From: setunapo Date: Thu, 21 Apr 2022 11:54:34 +0800 Subject: [PATCH 08/10] implement ParallelStateDB ** add ParallelStateDB ** remove isSlotDB in StateDB ** state.StateDBer ** remove getStateObjectNoSlot ... 
--- core/state/interface.go | 82 + core/state/journal.go | 52 +- core/state/state_object.go | 22 +- core/state/statedb.go | 4026 +++++++++++++++++++----------------- core/state_processor.go | 22 +- 5 files changed, 2289 insertions(+), 1915 deletions(-) create mode 100644 core/state/interface.go diff --git a/core/state/interface.go b/core/state/interface.go new file mode 100644 index 0000000000..2362ac828b --- /dev/null +++ b/core/state/interface.go @@ -0,0 +1,82 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package state + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +// StateDBer is copied from vm/interface.go +// It is used by StateObject & Journal right now, to abstract StateDB & ParallelStateDB +type StateDBer interface { + getBaseStateDB() *StateDB + getStateObject(common.Address) *StateObject // only accessible for journal + storeStateObj(common.Address, *StateObject) // only accessible for journal + + CreateAccount(common.Address) + + SubBalance(common.Address, *big.Int) + AddBalance(common.Address, *big.Int) + GetBalance(common.Address) *big.Int + + GetNonce(common.Address) uint64 + SetNonce(common.Address, uint64) + + GetCodeHash(common.Address) common.Hash + GetCode(common.Address) []byte + SetCode(common.Address, []byte) + GetCodeSize(common.Address) int + + AddRefund(uint64) + SubRefund(uint64) + GetRefund() uint64 + + GetCommittedState(common.Address, common.Hash) common.Hash + GetState(common.Address, common.Hash) common.Hash + SetState(common.Address, common.Hash, common.Hash) + + Suicide(common.Address) bool + HasSuicided(common.Address) bool + + // Exist reports whether the given account exists in state. + // Notably this should also return true for suicided accounts. + Exist(common.Address) bool + // Empty returns whether the given account is empty. Empty + // is defined according to EIP161 (balance = nonce = code = 0). + Empty(common.Address) bool + + PrepareAccessList(sender common.Address, dest *common.Address, precompiles []common.Address, txAccesses types.AccessList) + AddressInAccessList(addr common.Address) bool + SlotInAccessList(addr common.Address, slot common.Hash) (addressOk bool, slotOk bool) + // AddAddressToAccessList adds the given address to the access list. This operation is safe to perform + // even if the feature/fork is not active yet + AddAddressToAccessList(addr common.Address) + // AddSlotToAccessList adds the given (address,slot) to the access list. 
This operation is safe to perform + // even if the feature/fork is not active yet + AddSlotToAccessList(addr common.Address, slot common.Hash) + + RevertToSnapshot(int) + Snapshot() int + + AddLog(*types.Log) + AddPreimage(common.Hash, []byte) + + ForEachStorage(common.Address, func(common.Hash, common.Hash) bool) error +} diff --git a/core/state/journal.go b/core/state/journal.go index 96655d007d..e267205688 100644 --- a/core/state/journal.go +++ b/core/state/journal.go @@ -26,7 +26,7 @@ import ( // reverted on demand. type journalEntry interface { // revert undoes the changes introduced by this journal entry. - revert(*StateDB) + revert(StateDBer) // dirtied returns the Ethereum address modified by this journal entry. dirtied() *common.Address @@ -58,10 +58,10 @@ func (j *journal) append(entry journalEntry) { // revert undoes a batch of journalled modifications along with any reverted // dirty handling too. -func (j *journal) revert(statedb *StateDB, snapshot int) { +func (j *journal) revert(dber StateDBer, snapshot int) { for i := len(j.entries) - 1; i >= snapshot; i-- { // Undo the changes made by the operation - j.entries[i].revert(statedb) + j.entries[i].revert(dber) // Drop any dirty tracking induced by the change if addr := j.entries[i].dirtied(); addr != nil { @@ -141,7 +141,8 @@ type ( } ) -func (ch createObjectChange) revert(s *StateDB) { +func (ch createObjectChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() if s.parallel.isSlotDB { delete(s.parallel.dirtiedStateObjectsInSlot, *ch.account) delete(s.parallel.addrStateChangesInSlot, *ch.account) @@ -159,13 +160,14 @@ func (ch createObjectChange) dirtied() *common.Address { return ch.account } -func (ch resetObjectChange) revert(s *StateDB) { +func (ch resetObjectChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() if s.parallel.isSlotDB { // ch.prev must be from dirtiedStateObjectsInSlot, put it back s.parallel.dirtiedStateObjectsInSlot[ch.prev.address] = ch.prev } else { // ch.prev 
was got from main DB, put it back to main DB. - s.SetStateObject(ch.prev) + s.storeStateObj(ch.prev.address, ch.prev) } if !ch.prevdestruct && s.snap != nil { delete(s.snapDestructs, ch.prev.address) @@ -176,8 +178,8 @@ func (ch resetObjectChange) dirtied() *common.Address { return nil } -func (ch suicideChange) revert(s *StateDB) { - obj := s.getStateObject(*ch.account) +func (ch suicideChange) revert(dber StateDBer) { + obj := dber.getStateObject(*ch.account) if obj != nil { obj.suicided = ch.prev obj.setBalance(ch.prevbalance) @@ -190,46 +192,47 @@ func (ch suicideChange) dirtied() *common.Address { var ripemd = common.HexToAddress("0000000000000000000000000000000000000003") -func (ch touchChange) revert(s *StateDB) { +func (ch touchChange) revert(dber StateDBer) { } func (ch touchChange) dirtied() *common.Address { return ch.account } -func (ch balanceChange) revert(s *StateDB) { - s.getStateObject(*ch.account).setBalance(ch.prev) +func (ch balanceChange) revert(dber StateDBer) { + dber.getStateObject(*ch.account).setBalance(ch.prev) } func (ch balanceChange) dirtied() *common.Address { return ch.account } -func (ch nonceChange) revert(s *StateDB) { - s.getStateObject(*ch.account).setNonce(ch.prev) +func (ch nonceChange) revert(dber StateDBer) { + dber.getStateObject(*ch.account).setNonce(ch.prev) } func (ch nonceChange) dirtied() *common.Address { return ch.account } -func (ch codeChange) revert(s *StateDB) { - s.getStateObject(*ch.account).setCode(common.BytesToHash(ch.prevhash), ch.prevcode) +func (ch codeChange) revert(dber StateDBer) { + dber.getStateObject(*ch.account).setCode(common.BytesToHash(ch.prevhash), ch.prevcode) } func (ch codeChange) dirtied() *common.Address { return ch.account } -func (ch storageChange) revert(s *StateDB) { - s.getStateObject(*ch.account).setState(ch.key, ch.prevalue) +func (ch storageChange) revert(dber StateDBer) { + dber.getStateObject(*ch.account).setState(ch.key, ch.prevalue) } func (ch storageChange) dirtied() 
*common.Address { return ch.account } -func (ch refundChange) revert(s *StateDB) { +func (ch refundChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() s.refund = ch.prev } @@ -237,7 +240,9 @@ func (ch refundChange) dirtied() *common.Address { return nil } -func (ch addLogChange) revert(s *StateDB) { +func (ch addLogChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() + logs := s.logs[ch.txhash] if len(logs) == 1 { delete(s.logs, ch.txhash) @@ -251,7 +256,8 @@ func (ch addLogChange) dirtied() *common.Address { return nil } -func (ch addPreimageChange) revert(s *StateDB) { +func (ch addPreimageChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() delete(s.preimages, ch.hash) } @@ -259,7 +265,8 @@ func (ch addPreimageChange) dirtied() *common.Address { return nil } -func (ch accessListAddAccountChange) revert(s *StateDB) { +func (ch accessListAddAccountChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() /* One important invariant here, is that whenever a (addr, slot) is added, if the addr is not already present, the add causes two journal entries: @@ -278,7 +285,8 @@ func (ch accessListAddAccountChange) dirtied() *common.Address { return nil } -func (ch accessListAddSlotChange) revert(s *StateDB) { +func (ch accessListAddSlotChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() if s.accessList != nil { s.accessList.DeleteSlot(*ch.address, *ch.slot) } diff --git a/core/state/state_object.go b/core/state/state_object.go index b442954efe..b516db042a 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -150,6 +150,7 @@ type StateObject struct { addrHash common.Hash // hash of ethereum address of the account data Account db *StateDB + dbItf StateDBer // DB error. 
// State objects are used by the consensus core and VM which are @@ -218,13 +219,13 @@ func (s *StateObject) empty() bool { // Slot 0 tx 2: GetNonce, lightCopy based on main DB(balance = 100) , not empty // return s.db.GetNonce(s.address) == 0 && s.db.GetBalance(s.address).Sign() == 0 && bytes.Equal(s.db.GetCodeHash(s.address).Bytes(), emptyCodeHash) - if s.db.GetBalance(s.address).Sign() != 0 { // check balance first, since it is most likely not zero + if s.dbItf.GetBalance(s.address).Sign() != 0 { // check balance first, since it is most likely not zero return false } - if s.db.GetNonce(s.address) != 0 { + if s.dbItf.GetNonce(s.address) != 0 { return false } - codeHash := s.db.GetCodeHash(s.address) + codeHash := s.dbItf.GetCodeHash(s.address) return bytes.Equal(codeHash.Bytes(), emptyCodeHash) // code is empty, the object is empty } @@ -239,7 +240,8 @@ type Account struct { } // newObject creates a state object. -func newObject(db *StateDB, isParallel bool, address common.Address, data Account) *StateObject { +func newObject(dbItf StateDBer, isParallel bool, address common.Address, data Account) *StateObject { + db := dbItf.getBaseStateDB() if data.Balance == nil { data.Balance = new(big.Int) // todo: why not common.Big0? 
} @@ -257,6 +259,7 @@ func newObject(db *StateDB, isParallel bool, address common.Address, data Accoun return &StateObject{ db: db, + dbItf: dbItf, address: address, addrHash: crypto.Keccak256Hash(address[:]), data: data, @@ -448,7 +451,7 @@ func (s *StateObject) SetState(db Database, key, value common.Hash) { // this `SetState could be skipped` // d.Finally, the key's value will be `val_2`, while it should be `val_1` // such as: https://bscscan.com/txs?block=2491181 - prev := s.db.GetState(s.address, key) + prev := s.dbItf.GetState(s.address, key) // fixme: if it is for journal, may not necessary, we can remove this change record if prev == value { return } @@ -634,7 +637,6 @@ func (s *StateObject) AddBalance(amount *big.Int) { return } s.SetBalance(new(big.Int).Add(s.Balance(), amount)) - // s.SetBalance(new(big.Int).Add(s.db.GetBalance(s.address), amount)) } // SubBalance removes amount from s's balance. @@ -644,11 +646,9 @@ func (s *StateObject) SubBalance(amount *big.Int) { return } s.SetBalance(new(big.Int).Sub(s.Balance(), amount)) - // s.SetBalance(new(big.Int).Sub(s.db.GetBalance(s.address), amount)) } func (s *StateObject) SetBalance(amount *big.Int) { - // prevBalance := new(big.Int).Set(s.db.GetBalance(s.address)) s.db.journal.append(balanceChange{ account: &s.address, prev: new(big.Int).Set(s.data.Balance), // prevBalance, @@ -664,7 +664,7 @@ func (s *StateObject) setBalance(amount *big.Int) { // Return the gas back to the origin. 
Used by the Virtual machine or Closures func (s *StateObject) ReturnGas(gas *big.Int) {} -func (s *StateObject) lightCopy(db *StateDB) *StateObject { +func (s *StateObject) lightCopy(db *ParallelStateDB) *StateObject { stateObject := newObject(db, s.isParallel, s.address, s.data) if s.trie != nil { // fixme: no need to copy trie for light copy, since light copied object won't access trie DB @@ -744,7 +744,7 @@ func (s *StateObject) CodeSize(db Database) int { } func (s *StateObject) SetCode(codeHash common.Hash, code []byte) { - prevcode := s.db.GetCode(s.address) + prevcode := s.dbItf.GetCode(s.address) s.db.journal.append(codeChange{ account: &s.address, prevhash: s.CodeHash(), @@ -760,7 +760,7 @@ func (s *StateObject) setCode(codeHash common.Hash, code []byte) { } func (s *StateObject) SetNonce(nonce uint64) { - prevNonce := s.db.GetNonce(s.address) + prevNonce := s.dbItf.GetNonce(s.address) s.db.journal.append(nonceChange{ account: &s.address, prev: prevNonce, diff --git a/core/state/statedb.go b/core/state/statedb.go index 4e852cdbd8..217bdd6a1b 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -97,20 +97,9 @@ func (s *StateDB) storeStateObj(addr common.Address, stateObject *StateObject) { if s.isParallel { // When a state object is stored into s.parallel.stateObjects, // it belongs to base StateDB, it is confirmed and valid. 
- if s.parallel.isSlotDB { - // the object could be create in SlotDB, if it got the object from DB and - // update it to the shared `s.parallel.stateObjects`` - stateObject.db = s.parallel.baseStateDB - stateObject.db.storeParallelLock.Lock() - if _, ok := s.parallel.stateObjects.Load(addr); !ok { - s.parallel.stateObjects.Store(addr, stateObject) - } - stateObject.db.storeParallelLock.Unlock() - } else { - stateObject.db.storeParallelLock.Lock() - s.parallel.stateObjects.Store(addr, stateObject) - stateObject.db.storeParallelLock.Unlock() - } + stateObject.db.storeParallelLock.Lock() + s.parallel.stateObjects.Store(addr, stateObject) + stateObject.db.storeParallelLock.Unlock() } else { s.stateObjects[addr] = stateObject } @@ -127,7 +116,7 @@ func (s *StateDB) deleteStateObj(addr common.Address) { // For parallel mode only type ParallelState struct { - isSlotDB bool // isSlotDB denotes StateDB is used in slot + isSlotDB bool // denotes StateDB is used in slot, we will try to remove it SlotIndex int // fixme: to be removed // stateObjects holds the state objects in the base slot db // the reason for using stateObjects instead of stateObjects on the outside is @@ -139,7 +128,7 @@ type ParallelState struct { baseStateDB *StateDB // for parallel mode, there will be a base StateDB in dispatcher routine. baseTxIndex int // slotDB is created base on this tx index. dirtiedStateObjectsInSlot map[common.Address]*StateObject - unconfirmedDBInShot map[int]*StateDB // do unconfirmed reference in same slot. + unconfirmedDBInShot map[int]*ParallelStateDB // do unconfirmed reference in same slot. // we will record the read detail for conflict check and // the changed addr or key for object merge, the changed detail can be acheived from the dirty object @@ -262,40 +251,6 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) return newStateDB(root, db, snaps) } -// NewSlotDB creates a new State DB based on the provided StateDB. 
-// With parallel, each execution slot would have its own StateDB. -func NewSlotDB(db *StateDB, systemAddr common.Address, txIndex int, baseTxIndex int, keepSystem bool, - unconfirmedDBs *sync.Map /*map[int]*StateDB*/) *StateDB { - slotDB := db.CopyForSlot() - slotDB.txIndex = txIndex - slotDB.originalRoot = db.originalRoot - slotDB.parallel.baseStateDB = db - slotDB.parallel.baseTxIndex = baseTxIndex - slotDB.parallel.systemAddress = systemAddr - slotDB.parallel.systemAddressOpsCount = 0 - slotDB.parallel.keepSystemAddressBalance = keepSystem - slotDB.storagePool = NewStoragePool() - slotDB.EnableWriteOnSharedStorage() - for index := baseTxIndex + 1; index < slotDB.txIndex; index++ { // txIndex - unconfirmedDB, ok := unconfirmedDBs.Load(index) - if ok { - slotDB.parallel.unconfirmedDBInShot[index] = unconfirmedDB.(*StateDB) - } - } - - // All transactions will pay gas fee to the systemAddr at the end, this address is - // deemed to conflict, we handle it specially, clear it now and set it back to the main - // StateDB later; - // But there are transactions that will try to read systemAddr's balance, such as: - // https://bscscan.com/tx/0xcd69755be1d2f55af259441ff5ee2f312830b8539899e82488a21e85bc121a2a. - // It will trigger transaction redo and keepSystem will be marked as true. 
- if !keepSystem { - slotDB.SetBalance(systemAddr, big.NewInt(0)) - } - - return slotDB -} - // NewWithSharedPool creates a new state with sharedStorge on layer 1.5 func NewWithSharedPool(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) { statedb, err := newStateDB(root, db, snaps) @@ -342,200 +297,17 @@ func newStateDB(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, return sdb, nil } -func (s *StateDB) getStateObjectFromStateObjects(addr common.Address) (*StateObject, bool) { - return s.loadStateObj(addr) -} - -// RevertSlotDB keep the Read list for conflict detect, -// discard all state changes except: -// - nonce and balance of from address -// - balance of system address: will be used on merge to update SystemAddress's balance -func (s *StateDB) RevertSlotDB(from common.Address) { - s.parallel.kvChangesInSlot = make(map[common.Address]StateKeys) - - // balance := s.parallel.balanceChangesInSlot[from] - s.parallel.nonceChangesInSlot = make(map[common.Address]struct{}) - s.parallel.balanceChangesInSlot = make(map[common.Address]struct{}, 1) - s.parallel.addrStateChangesInSlot = make(map[common.Address]bool) // 0: created, 1: deleted - - selfStateObject := s.parallel.dirtiedStateObjectsInSlot[from] - systemAddress := s.parallel.systemAddress - systemStateObject := s.parallel.dirtiedStateObjectsInSlot[systemAddress] - s.parallel.dirtiedStateObjectsInSlot = make(map[common.Address]*StateObject, 2) - // keep these elements - s.parallel.dirtiedStateObjectsInSlot[from] = selfStateObject - s.parallel.dirtiedStateObjectsInSlot[systemAddress] = systemStateObject - s.parallel.balanceChangesInSlot[from] = struct{}{} - s.parallel.balanceChangesInSlot[systemAddress] = struct{}{} - s.parallel.nonceChangesInSlot[from] = struct{}{} -} - -// PrepareForParallel prepares for state db to be used in parallel execution mode. 
-func (s *StateDB) PrepareForParallel() { - s.isParallel = true - s.parallel.stateObjects = &StateObjectSyncMap{} +func (s *StateDB) getBaseStateDB() *StateDB { + return s } -// MergeSlotDB is for Parallel execution mode, when the transaction has been -// finalized(dirty -> pending) on execution slot, the execution results should be -// merged back to the main StateDB. -// And it will return and keep the slot's change list for later conflict detect. -func (s *StateDB) MergeSlotDB(slotDb *StateDB, slotReceipt *types.Receipt, txIndex int) { - // receipt.Logs use unified log index within a block - // align slotDB's log index to the block stateDB's logSize - for _, l := range slotReceipt.Logs { - l.Index += s.logSize - } - s.logSize += slotDb.logSize - - // before merge, pay the gas fee first: AddBalance to consensus.SystemAddress - systemAddress := slotDb.parallel.systemAddress - if slotDb.parallel.keepSystemAddressBalance { - s.SetBalance(systemAddress, slotDb.GetBalance(systemAddress)) - } else { - s.AddBalance(systemAddress, slotDb.GetBalance(systemAddress)) - } - - // only merge dirty objects - addressesToPrefetch := make([][]byte, 0, len(slotDb.stateObjectsDirty)) - for addr := range slotDb.stateObjectsDirty { - if _, exist := s.stateObjectsDirty[addr]; !exist { - s.stateObjectsDirty[addr] = struct{}{} - } - // system address is EOA account, it should have no storage change - if addr == systemAddress { - continue - } - - // stateObjects: KV, balance, nonce... 
- dirtyObj, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr] - if !ok { - log.Error("parallel merge, but dirty object not exist!", "SlotIndex", slotDb.parallel.SlotIndex, "txIndex:", slotDb.txIndex, "addr", addr) - continue - } - mainObj, exist := s.loadStateObj(addr) - if !exist { // fixme: it is also state change - // addr not exist on main DB, do ownership transfer - // dirtyObj.db = s - // dirtyObj.finalise(true) // true: prefetch on dispatcher - mainObj = dirtyObj.deepCopy(s) - mainObj.finalise(true) - s.storeStateObj(addr, mainObj) - // fixme: should not delete, would cause unconfirmed DB incorrect? - // delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership, fixme: shared read? - if dirtyObj.deleted { - // remove the addr from snapAccounts&snapStorage only when object is deleted. - // "deleted" is not equal to "snapDestructs", since createObject() will add an addr for - // snapDestructs to destroy previous object, while it will keep the addr in snapAccounts & snapAccounts - delete(s.snapAccounts, addr) - delete(s.snapStorage, addr) - } - } else { - // addr already in main DB, do merge: balance, KV, code, State(create, suicide) - // can not do copy or ownership transfer directly, since dirtyObj could have outdated - // data(may be updated within the conflict window) - - var newMainObj = mainObj // we don't need to copy the object since the storages are thread safe - if _, ok := slotDb.parallel.addrStateChangesInSlot[addr]; ok { - // there are 3 kinds of state change: - // 1.Suicide - // 2.Empty Delete - // 3.createObject - // a.AddBalance,SetState to an unexist or deleted(suicide, empty delete) address. - // b.CreateAccount: like DAO the fork, regenerate a account carry its balance without KV - // For these state change, do ownership transafer for efficiency: - // dirtyObj.db = s - // newMainObj = dirtyObj - newMainObj = dirtyObj.deepCopy(s) - // should not delete, would cause unconfirmed DB incorrect. 
- // delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership, fixme: shared read? - if dirtyObj.deleted { - // remove the addr from snapAccounts&snapStorage only when object is deleted. - // "deleted" is not equal to "snapDestructs", since createObject() will add an addr for - // snapDestructs to destroy previous object, while it will keep the addr in snapAccounts & snapAccounts - delete(s.snapAccounts, addr) - delete(s.snapStorage, addr) - } - } else { - // deepCopy a temporary *StateObject for safety, since slot could read the address, - // dispatch should avoid overwrite the StateObject directly otherwise, it could - // crash for: concurrent map iteration and map write - - if _, balanced := slotDb.parallel.balanceChangesInSlot[addr]; balanced { - newMainObj.SetBalance(dirtyObj.Balance()) - } - if _, coded := slotDb.parallel.codeChangesInSlot[addr]; coded { - newMainObj.code = dirtyObj.code - newMainObj.data.CodeHash = dirtyObj.data.CodeHash - newMainObj.dirtyCode = true - } - if keys, stated := slotDb.parallel.kvChangesInSlot[addr]; stated { - newMainObj.MergeSlotObject(s.db, dirtyObj, keys) - } - if _, nonced := slotDb.parallel.nonceChangesInSlot[addr]; nonced { - // dirtyObj.Nonce() should not be less than newMainObj - newMainObj.setNonce(dirtyObj.Nonce()) - } - } - newMainObj.finalise(true) // true: prefetch on dispatcher - // update the object - s.storeStateObj(addr, newMainObj) - } - addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure - } - - if s.prefetcher != nil && len(addressesToPrefetch) > 0 { - s.prefetcher.prefetch(s.originalRoot, addressesToPrefetch, emptyAddr) // prefetch for trie node of account - } - - for addr := range slotDb.stateObjectsPending { - if _, exist := s.stateObjectsPending[addr]; !exist { - s.stateObjectsPending[addr] = struct{}{} - } - } - - // slotDb.logs: logs will be kept in receipts, no need to do merge - - for hash, preimage := range slotDb.preimages { - 
s.preimages[hash] = preimage - } - if s.accessList != nil { - // fixme: accessList is not enabled yet, but it should use merge rather than overwrite Copy - s.accessList = slotDb.accessList.Copy() - } - - if slotDb.snaps != nil { - for k := range slotDb.snapDestructs { - // There could be a race condition for parallel transaction execution - // One transaction add balance 0 to an empty address, will delete it(delete empty is enabled). - // While another concurrent transaction could add a none-zero balance to it, make it not empty - // We fixed it by add a addr state read record for add balance 0 - s.snapParallelLock.Lock() - s.snapDestructs[k] = struct{}{} - s.snapParallelLock.Unlock() - } - - // slotDb.snapAccounts should be empty, comment out and to be deleted later - // for k, v := range slotDb.snapAccounts { - // s.snapAccounts[k] = v - // } - // slotDb.snapStorage should be empty, comment out and to be deleted later - // for k, v := range slotDb.snapStorage { - // temp := make(map[string][]byte) - // for kk, vv := range v { - // temp[kk] = vv - // } - // s.snapStorage[k] = temp - // } - } +func (s *StateDB) getStateObjectFromStateObjects(addr common.Address) (*StateObject, bool) { + return s.loadStateObj(addr) } func (s *StateDB) EnableWriteOnSharedStorage() { s.writeOnSharedStorage = true } -func (s *StateDB) SetSlotIndex(index int) { - s.parallel.SlotIndex = index -} // StartPrefetcher initializes a new trie prefetcher to pull in nodes from the // state trie concurrently while the state is mutated so that when we reach the @@ -674,1462 +446,806 @@ func (s *StateDB) AddRefund(gas uint64) { func (s *StateDB) SubRefund(gas uint64) { s.journal.append(refundChange{prev: s.refund}) if gas > s.refund { - if s.isParallel { - // we don't need to panic here if we read the wrong state, we just need to redo this transaction - log.Info(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, s.refund), "tx", s.thash.String()) - s.parallel.needsRedo = true - 
return - } panic(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, s.refund)) } s.refund -= gas } -// For Parallel Execution Mode, it can be seen as Penetrated Access: -// ------------------------------------------------------- -// | BaseTxIndex | Unconfirmed Txs... | Current TxIndex | -// ------------------------------------------------------- -// Access from the unconfirmed DB with range&priority: txIndex -1(previous tx) -> baseTxIndex + 1 -func (s *StateDB) getBalanceFromUnconfirmedDB(addr common.Address) *big.Int { - if addr == s.parallel.systemAddress { - // never get systemaddress from unconfirmed DB - return nil +// Exist reports whether the given account address exists in the state. +// Notably this also returns true for suicided accounts. +func (s *StateDB) Exist(addr common.Address) bool { + log.Debug("StateDB Exist", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + // 3.Try to get from main StateDB + exist := s.getStateObject(addr) != nil + return exist +} + +// Empty returns whether the state object is either non-existent +// or empty according to the EIP161 specification (balance = nonce = code = 0) +func (s *StateDB) Empty(addr common.Address) bool { + log.Debug("StateDB Empty", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + so := s.getStateObject(addr) + empty := (so == nil || so.empty()) + return empty +} + +// GetBalance retrieves the balance from the given address or 0 if object not found +// GetFrom the dirty list => from unconfirmed DB => get from main stateDB +func (s *StateDB) GetBalance(addr common.Address) *big.Int { + log.Debug("StateDB GetBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + if s.parallel.SlotIndex != -1 { + log.Debug("StateDB GetBalance in slot", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + } + balance := common.Big0 + stateObject := s.getStateObject(addr) + if stateObject != nil { + balance = stateObject.Balance() } + return balance +} - 
for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { - if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { - // 1.Refer the state of address, exist or not in dirtiedStateObjectsInSlot - if obj, exist := db.parallel.dirtiedStateObjectsInSlot[addr]; exist { - balanceHit := false - if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { - balanceHit = true - } - if _, exist := db.parallel.balanceChangesInSlot[addr]; exist { // only changed balance is reliable - balanceHit = true - } - if !balanceHit { - continue - } - balance := obj.Balance() - if obj.deleted { - balance = common.Big0 - } - return balance - } - } +func (s *StateDB) GetNonce(addr common.Address) uint64 { + log.Debug("StateDB GetNonce", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + var nonce uint64 = 0 + stateObject := s.getStateObject(addr) + if stateObject != nil { + nonce = stateObject.Nonce() } - return nil + + return nonce } -// Similar to getBalanceFromUnconfirmedDB -func (s *StateDB) getNonceFromUnconfirmedDB(addr common.Address) (uint64, bool) { - if addr == s.parallel.systemAddress { - // never get systemaddress from unconfirmed DB - return 0, false - } +// TxIndex returns the current transaction index set by Prepare. 
+func (s *StateDB) TxIndex() int { + return s.txIndex +} - for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { - if unconfirmedDb, ok := s.parallel.unconfirmedDBInShot[i]; ok { - nonceHit := false - if _, ok := unconfirmedDb.parallel.addrStateChangesInSlot[addr]; ok { - nonceHit = true - } else if _, ok := unconfirmedDb.parallel.nonceChangesInSlot[addr]; ok { - nonceHit = true - } - if !nonceHit { - // nonce refer not hit, try next unconfirmedDb - continue - } - // nonce hit, return the nonce - obj := unconfirmedDb.parallel.dirtiedStateObjectsInSlot[addr] - if obj == nil { - // could not exist, if it is changed but reverted - // fixme: revert should remove the change record - log.Debug("Get nonce from UnconfirmedDB, changed but object not exist, ", - "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) - continue - } - nonce := obj.Nonce() - // deleted object with nonce == 0 - if obj.deleted { - nonce = 0 - } - return nonce, true - } - } - return 0, false +// BlockHash returns the current block hash set by Prepare. +func (s *StateDB) BlockHash() common.Hash { + return s.bhash } -// Similar to getBalanceFromUnconfirmedDB -// It is not only for code, but also codeHash and codeSize, we return the *StateObject for convienence. -func (s *StateDB) getCodeFromUnconfirmedDB(addr common.Address) ([]byte, bool) { - if addr == s.parallel.systemAddress { - // never get systemaddress from unconfirmed DB - return nil, false - } +// BaseTxIndex returns the tx index that slot db based. 
+func (s *StateDB) BaseTxIndex() int { + return s.parallel.baseTxIndex +} - for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { - if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { - codeHit := false - if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { - codeHit = true - } - if _, exist := db.parallel.codeChangesInSlot[addr]; exist { - codeHit = true - } - if !codeHit { - // try next unconfirmedDb - continue - } - obj := db.parallel.dirtiedStateObjectsInSlot[addr] - if obj == nil { - // could not exist, if it is changed but reverted - // fixme: revert should remove the change record - log.Debug("Get code from UnconfirmedDB, changed but object not exist, ", - "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) - continue - } - code := obj.Code(s.db) - if obj.deleted { - code = nil - } - return code, true - } +func (s *StateDB) GetCode(addr common.Address) []byte { + log.Debug("StateDB GetCode", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + stateObject := s.getStateObject(addr) + var code []byte + if stateObject != nil { + code = stateObject.Code(s.db) } - return nil, false + return code } -// Similar to getCodeFromUnconfirmedDB -// but differ when address is deleted or not exist -func (s *StateDB) getCodeHashFromUnconfirmedDB(addr common.Address) (common.Hash, bool) { - if addr == s.parallel.systemAddress { - // never get systemaddress from unconfirmed DB - return common.Hash{}, false +func (s *StateDB) GetCodeSize(addr common.Address) int { + log.Debug("StateDB GetCodeSize", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + var codeSize int = 0 + stateObject := s.getStateObject(addr) + if stateObject != nil { + codeSize = stateObject.CodeSize(s.db) } + return codeSize +} - for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { - if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { - hashHit := false - if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { - hashHit = true - } - if _, exist := 
db.parallel.codeChangesInSlot[addr]; exist { - hashHit = true - } - if !hashHit { - // try next unconfirmedDb - continue - } +// return value of GetCodeHash: +// - common.Hash{}: the address does not exist +// - emptyCodeHash: the address exist, but code is empty +// - others: the address exist, and code is not empty +func (s *StateDB) GetCodeHash(addr common.Address) common.Hash { + log.Debug("StateDB GetCodeHash", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) - obj := db.parallel.dirtiedStateObjectsInSlot[addr] - if obj == nil { - // could not exist, if it is changed but reverted - // fixme: revert should remove the change record - log.Debug("Get codeHash from UnconfirmedDB, changed but object not exist, ", - "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) - continue - } - codeHash := common.Hash{} - if !obj.deleted { - codeHash = common.BytesToHash(obj.CodeHash()) - } - return codeHash, true - } + stateObject := s.getStateObject(addr) + codeHash := common.Hash{} + if stateObject != nil { + codeHash = common.BytesToHash(stateObject.CodeHash()) } - return common.Hash{}, false + return codeHash } -// Similar to getCodeFromUnconfirmedDB -// It is for address state check of: Exist(), Empty() and HasSuicided() -// Since the unconfirmed DB should have done Finalise() with `deleteEmptyObjects = true` -// If the dirty address is empty or suicided, it will be marked as deleted, so we only need to return `deleted` or not. -func (s *StateDB) getAddrStateFromUnconfirmedDB(addr common.Address) (bool, bool) { - if addr == s.parallel.systemAddress { - // never get systemaddress from unconfirmed DB - return false, false +// GetState retrieves a value from the given account's storage trie. 
+func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { + log.Debug("StateDB GetState", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + stateObject := s.getStateObject(addr) + val := common.Hash{} + if stateObject != nil { + val = stateObject.GetState(s.db, hash) } + return val +} - // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) - for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { - if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { - if exist, ok := db.parallel.addrStateChangesInSlot[addr]; ok { - if _, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - // could not exist, if it is changed but reverted - // fixme: revert should remove the change record - log.Debug("Get addr State from UnconfirmedDB, changed but object not exist, ", - "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) - continue - } +// GetProof returns the Merkle proof for a given account. +func (s *StateDB) GetProof(addr common.Address) ([][]byte, error) { + return s.GetProofByHash(crypto.Keccak256Hash(addr.Bytes())) +} - return exist, true - } - } +// GetProofByHash returns the Merkle proof for a given account. 
+func (s *StateDB) GetProofByHash(addrHash common.Hash) ([][]byte, error) { + var proof proofList + if _, err := s.Trie(); err != nil { + return nil, err } - return false, false + err := s.trie.Prove(addrHash[:], 0, &proof) + return proof, err } -func (s *StateDB) getKVFromUnconfirmedDB(addr common.Address, key common.Hash) (common.Hash, bool) { - // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) - for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { - if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { - if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { // if deleted on merge, can get from main StateDB, ok but fixme: concurrent safe - if obj.deleted { - return common.Hash{}, true - } - if _, ok := db.parallel.kvChangesInSlot[addr]; ok { - if val, exist := obj.dirtyStorage.GetValue(key); exist { - return val, true - } - if val, exist := obj.pendingStorage.GetValue(key); exist { // fixme: can be removed - log.Error("Get KV from Unconfirmed StateDB, in pending", - "my txIndex", s.txIndex, "DB's txIndex", i, "addr", addr, - "key", key, "val", val) - return val, true - } - } - } - } +// GetStorageProof returns the Merkle proof for given storage slot. 
+func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, error) { + var proof proofList + trie := s.StorageTrie(a) + if trie == nil { + return proof, errors.New("storage trie for requested address does not exist") } - return common.Hash{}, false + err := trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof) + return proof, err } -func (s *StateDB) getStateObjectFromUnconfirmedDB(addr common.Address) (*StateObject, bool) { - // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) - for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { - if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { - if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { // if deleted on merge, can get from main StateDB, ok but fixme: concurrent safe - return obj, true - } - } +// GetStorageProofByHash returns the Merkle proof for given storage slot. +func (s *StateDB) GetStorageProofByHash(a common.Address, key common.Hash) ([][]byte, error) { + var proof proofList + trie := s.StorageTrie(a) + if trie == nil { + return proof, errors.New("storage trie for requested address does not exist") } - return nil, false + err := trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof) + return proof, err } -// Exist reports whether the given account address exists in the state. -// Notably this also returns true for suicided accounts. 
-func (s *StateDB) Exist(addr common.Address) bool { - if s.parallel.isSlotDB { - // 1.Try to get from dirty - if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - // dirty object should not be deleted, since deleted is only flagged on finalise - // and if it is suicided in contract call, suicide is taken as exist until it is finalised - // todo: add a check here, to be removed later - if obj.deleted || obj.suicided { - log.Error("Exist in dirty, but marked as deleted or suicided", - "txIndex", s.txIndex, "baseTxIndex:", s.parallel.baseTxIndex) - } - return true - } - // 2.Try to get from uncomfirmed & main DB - // 2.1 Already read before - if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok { - return exist - } - // 2.2 Try to get from unconfirmed DB if exist - if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { - s.parallel.addrStateReadsInSlot[addr] = exist // update and cache - return exist - } - } - // 3.Try to get from main StateDB - exist := s.getStateObjectNoSlot(addr) != nil - if s.parallel.isSlotDB { - s.parallel.addrStateReadsInSlot[addr] = exist // update and cache +// GetCommittedState retrieves a value from the given account's committed storage trie. 
+func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash { + log.Debug("StateDB GetCommittedState", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + stateObject := s.getStateObject(addr) + val := common.Hash{} + if stateObject != nil { + val = stateObject.GetCommittedState(s.db, hash) } - return exist + return val } -// Empty returns whether the state object is either non-existent -// or empty according to the EIP161 specification (balance = nonce = code = 0) -func (s *StateDB) Empty(addr common.Address) bool { - if s.parallel.isSlotDB { - // 1.Try to get from dirty - if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - // dirty object is light copied and fixup on need, - // empty could be wrong, except it is created with this TX - if _, ok := s.parallel.addrStateChangesInSlot[addr]; ok { - return obj.empty() - } - // so we have to check it manually - // empty means: Nonce == 0 && Balance == 0 && CodeHash == emptyCodeHash - if s.GetBalance(addr).Sign() != 0 { // check balance first, since it is most likely not zero - return false - } - if s.GetNonce(addr) != 0 { - return false - } - codeHash := s.GetCodeHash(addr) - return bytes.Equal(codeHash.Bytes(), emptyCodeHash) // code is empty, the object is empty - } - // 2.Try to get from uncomfirmed & main DB - // 2.1 Already read before - if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok { - // exist means not empty - return !exist - } - // 2.2 Try to get from unconfirmed DB if exist - if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { - s.parallel.addrStateReadsInSlot[addr] = exist // update and cache - return !exist - } - } +// Database retrieves the low level database supporting the lower level trie ops. 
+func (s *StateDB) Database() Database { + return s.db +} - so := s.getStateObjectNoSlot(addr) - empty := (so == nil || so.empty()) - if s.parallel.isSlotDB { - s.parallel.addrStateReadsInSlot[addr] = !empty // update and cache +// StorageTrie returns the storage trie of an account. +// The return value is a copy and is nil for non-existent accounts. +func (s *StateDB) StorageTrie(addr common.Address) Trie { + stateObject := s.getStateObject(addr) + if stateObject == nil { + return nil } - return empty + cpy := stateObject.deepCopy(s) + cpy.updateTrie(s.db) + return cpy.getTrie(s.db) } -// GetBalance retrieves the balance from the given address or 0 if object not found -// GetFrom the dirty list => from unconfirmed DB => get from main stateDB -func (s *StateDB) GetBalance(addr common.Address) *big.Int { - if s.parallel.isSlotDB { - if addr == s.parallel.systemAddress { - s.parallel.systemAddressOpsCount++ - } - // 1.Try to get from dirty - if _, ok := s.parallel.balanceChangesInSlot[addr]; ok { - if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - // on balance fixup, addr may not exist in dirtiedStateObjectsInSlot - // we intend to fixup balance based on unconfirmed DB or main DB - return obj.Balance() - } - } - // 2.Try to get from uncomfirmed DB or main DB - // 2.1 Already read before - if balance, ok := s.parallel.balanceReadsInSlot[addr]; ok { - return balance - } - // 2.2 Try to get from unconfirmed DB if exist - if balance := s.getBalanceFromUnconfirmedDB(addr); balance != nil { - s.parallel.balanceReadsInSlot[addr] = balance - return balance - } - } - // 3. 
Try to get from main StateObejct - balance := common.Big0 - stateObject := s.getStateObjectNoSlot(addr) +func (s *StateDB) HasSuicided(addr common.Address) bool { + stateObject := s.getStateObject(addr) if stateObject != nil { - balance = stateObject.Balance() - } - if s.parallel.isSlotDB { - s.parallel.balanceReadsInSlot[addr] = balance + return stateObject.suicided } - return balance + return false } -func (s *StateDB) GetNonce(addr common.Address) uint64 { - if s.parallel.isSlotDB { - // 1.Try to get from dirty - if _, ok := s.parallel.nonceChangesInSlot[addr]; ok { - if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - // on nonce fixup, addr may not exist in dirtiedStateObjectsInSlot - // we intend to fixup nonce based on unconfirmed DB or main DB - return obj.Nonce() - } - } - // 2.Try to get from uncomfirmed DB or main DB - // 2.1 Already read before - if nonce, ok := s.parallel.nonceReadsInSlot[addr]; ok { - return nonce - } - // 2.2 Try to get from unconfirmed DB if exist - if nonce, ok := s.getNonceFromUnconfirmedDB(addr); ok { - s.parallel.nonceReadsInSlot[addr] = nonce - return nonce - } - } - // 3.Try to get from main StateDB - var nonce uint64 = 0 - stateObject := s.getStateObjectNoSlot(addr) +/* + * SETTERS + */ + +// AddBalance adds amount to the account associated with addr. +func (s *StateDB) AddBalance(addr common.Address, amount *big.Int) { + log.Debug("StateDB AddBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + stateObject := s.GetOrNewStateObject(addr) if stateObject != nil { - nonce = stateObject.Nonce() - } - if s.parallel.isSlotDB { - s.parallel.nonceReadsInSlot[addr] = nonce + stateObject.AddBalance(amount) } - - return nonce } -// TxIndex returns the current transaction index set by Prepare. -func (s *StateDB) TxIndex() int { - return s.txIndex -} +// SubBalance subtracts amount from the account associated with addr. 
+func (s *StateDB) SubBalance(addr common.Address, amount *big.Int) { + log.Debug("StateDB SubBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) -// BlockHash returns the current block hash set by Prepare. -func (s *StateDB) BlockHash() common.Hash { - return s.bhash + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + stateObject.SubBalance(amount) + } } -// BaseTxIndex returns the tx index that slot db based. -func (s *StateDB) BaseTxIndex() int { - return s.parallel.baseTxIndex -} +func (s *StateDB) SetBalance(addr common.Address, amount *big.Int) { + log.Debug("StateDB SetBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) -func (s *StateDB) IsParallelReadsValid() bool { - slotDB := s - if !slotDB.parallel.isSlotDB { - log.Error("IsSlotDBReadsValid slotDB should be slot DB", "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) - return false + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + stateObject.SetBalance(amount) } +} - mainDB := slotDB.parallel.baseStateDB - if mainDB.parallel.isSlotDB { - log.Error("IsSlotDBReadsValid s should be main DB", "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) - return false - } - // for nonce - for addr, nonceSlot := range slotDB.parallel.nonceReadsInSlot { - nonceMain := mainDB.GetNonce(addr) - if nonceSlot != nonceMain { - log.Debug("IsSlotDBReadsValid nonce read is invalid", "addr", addr, - "nonceSlot", nonceSlot, "nonceMain", nonceMain, "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) - return false - } - } - // balance - for addr, balanceSlot := range slotDB.parallel.balanceReadsInSlot { - if addr != s.parallel.systemAddress { // skip balance check for system address - balanceMain := mainDB.GetBalance(addr) - if balanceSlot.Cmp(balanceMain) != 0 { - log.Debug("IsSlotDBReadsValid balance read is invalid", "addr", addr, - "balanceSlot", 
balanceSlot, "balanceMain", balanceMain, "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) - return false - } - } - } - // check code - for addr, codeSlot := range slotDB.parallel.codeReadsInSlot { - codeMain := mainDB.GetCode(addr) - if !bytes.Equal(codeSlot, codeMain) { - log.Debug("IsSlotDBReadsValid code read is invalid", "addr", addr, - "len codeSlot", len(codeSlot), "len codeMain", len(codeMain), "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) - return false - } - } - // check codeHash - for addr, codeHashSlot := range slotDB.parallel.codeHashReadsInSlot { - codeHashMain := mainDB.GetCodeHash(addr) - if !bytes.Equal(codeHashSlot.Bytes(), codeHashMain.Bytes()) { - log.Debug("IsSlotDBReadsValid codehash read is invalid", "addr", addr, - "codeHashSlot", codeHashSlot, "codeHashMain", codeHashMain, "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) - return false - } - } - // check KV - for addr, slotStorage := range slotDB.parallel.kvReadsInSlot { - conflict := false - slotStorage.Range(func(keySlot, valSlot interface{}) bool { - valMain := mainDB.GetState(addr, keySlot.(common.Hash)) - if !bytes.Equal(valSlot.(common.Hash).Bytes(), valMain.Bytes()) { - log.Debug("IsSlotDBReadsValid KV read is invalid", "addr", addr, - "key", keySlot.(common.Hash), "valSlot", valSlot.(common.Hash), - "valMain", valMain, "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) - conflict = true - return false // return false, Range will be terminated. 
- } - return true // return true, Range will try next KV - }) - if conflict { - return false - } - } - // addr state check - for addr, stateSlot := range slotDB.parallel.addrStateReadsInSlot { - stateMain := false // addr not exist - if mainDB.getStateObjectNoSlot(addr) != nil { - stateMain = true // addr exist in main DB - } - if stateSlot != stateMain { - // skip addr state check for system address - if addr != s.parallel.systemAddress { - log.Debug("IsSlotDBReadsValid addrState read invalid(true: exist, false: not exist)", - "addr", addr, "stateSlot", stateSlot, "stateMain", stateMain, - "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) - return false - } - } - } - // snapshot destructs check +func (s *StateDB) SetNonce(addr common.Address, nonce uint64) { + log.Debug("StateDB SetNonce", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) - for addr, destructRead := range slotDB.parallel.addrSnapDestructsReadsInSlot { - mainObj := mainDB.getStateObjectNoSlot(addr) - if mainObj == nil { - log.Debug("IsSlotDBReadsValid snapshot destructs read invalid, address should exist", - "addr", addr, "destruct", destructRead, - "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) - return false - } - _, destructMain := mainDB.snapDestructs[addr] // addr not exist - if destructRead != destructMain { - log.Debug("IsSlotDBReadsValid snapshot destructs read invalid", - "addr", addr, "destructRead", destructRead, "destructMain", destructMain, - "SlotIndex", slotDB.parallel.SlotIndex, - "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) - return false - } + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + stateObject.SetNonce(nonce) } - - return true } -// For most of the transactions, systemAddressOpsCount should be 3: -// one for SetBalance(0) on NewSlotDB() -// the other is for AddBalance(GasFee) at the end. 
-// (systemAddressOpsCount > 3) means the transaction tries to access systemAddress, in -// this case, we should redo and keep its balance on NewSlotDB() -func (s *StateDB) SystemAddressRedo() bool { - return s.parallel.systemAddressOpsCount > 4 -} +func (s *StateDB) SetCode(addr common.Address, code []byte) { + log.Debug("StateDB SetCode", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) -// NeedsRedo returns true if there is any clear reason that we need to redo this transaction -func (s *StateDB) NeedsRedo() bool { - return s.parallel.needsRedo + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + codeHash := crypto.Keccak256Hash(code) + stateObject.SetCode(codeHash, code) + } } -func (s *StateDB) GetCode(addr common.Address) []byte { - if s.parallel.isSlotDB { - // 1.Try to get from dirty - if _, ok := s.parallel.codeChangesInSlot[addr]; ok { - if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - // on code fixup, addr may not exist in dirtiedStateObjectsInSlot - // we intend to fixup code based on unconfirmed DB or main DB - code := obj.Code(s.db) - return code - } - } - // 2.Try to get from uncomfirmed DB or main DB - // 2.1 Already read before - if code, ok := s.parallel.codeReadsInSlot[addr]; ok { - return code - } - // 2.2 Try to get from unconfirmed DB if exist - if code, ok := s.getCodeFromUnconfirmedDB(addr); ok { - s.parallel.codeReadsInSlot[addr] = code - return code - } - } +func (s *StateDB) SetState(addr common.Address, key, value common.Hash) { + log.Debug("StateDB SetState", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) - // 3. 
Try to get from main StateObejct - stateObject := s.getStateObjectNoSlot(addr) - var code []byte + stateObject := s.GetOrNewStateObject(addr) if stateObject != nil { - code = stateObject.Code(s.db) - } - if s.parallel.isSlotDB { - s.parallel.codeReadsInSlot[addr] = code + stateObject.SetState(s.db, key, value) } - return code } -func (s *StateDB) GetCodeSize(addr common.Address) int { - if s.parallel.isSlotDB { - // 1.Try to get from dirty - if _, ok := s.parallel.codeChangesInSlot[addr]; ok { - if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - // on code fixup, addr may not exist in dirtiedStateObjectsInSlot - // we intend to fixup code based on unconfirmed DB or main DB - return obj.CodeSize(s.db) - } - } - // 2.Try to get from uncomfirmed DB or main DB - // 2.1 Already read before - if code, ok := s.parallel.codeReadsInSlot[addr]; ok { - return len(code) // len(nil) is 0 too - } - // 2.2 Try to get from unconfirmed DB if exist - if code, ok := s.getCodeFromUnconfirmedDB(addr); ok { - s.parallel.codeReadsInSlot[addr] = code - return len(code) // len(nil) is 0 too - } - } - - // 3. Try to get from main StateObejct - var codeSize int = 0 - var code []byte - stateObject := s.getStateObjectNoSlot(addr) - +// SetStorage replaces the entire storage for the specified account with given +// storage. This function should only be used for debugging. +func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) { + stateObject := s.GetOrNewStateObject(addr) // fixme: parallel mode? 
if stateObject != nil { - code = stateObject.Code(s.db) - codeSize = stateObject.CodeSize(s.db) - } - if s.parallel.isSlotDB { - s.parallel.codeReadsInSlot[addr] = code + stateObject.SetStorage(storage) } - return codeSize } -// return value of GetCodeHash: -// - common.Hash{}: the address does not exist -// - emptyCodeHash: the address exist, but code is empty -// - others: the address exist, and code is not empty -func (s *StateDB) GetCodeHash(addr common.Address) common.Hash { - if s.parallel.isSlotDB { - // 1.Try to get from dirty - if _, ok := s.parallel.codeChangesInSlot[addr]; ok { - if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - // on code fixup, addr may not exist in dirtiedStateObjectsInSlot - // we intend to fixup balance based on unconfirmed DB or main DB - return common.BytesToHash(obj.CodeHash()) - } - } - // 2.Try to get from uncomfirmed DB or main DB - // 2.1 Already read before - if codeHash, ok := s.parallel.codeHashReadsInSlot[addr]; ok { - return codeHash - } - // 2.2 Try to get from unconfirmed DB if exist - if codeHash, ok := s.getCodeHashFromUnconfirmedDB(addr); ok { - s.parallel.codeHashReadsInSlot[addr] = codeHash - return codeHash +// Suicide marks the given account as suicided. +// This clears the account balance. +// +// The account's state object is still available until the state is committed, +// getStateObject will return a non-nil account after Suicide. +func (s *StateDB) Suicide(addr common.Address) bool { + log.Debug("StateDB Suicide", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + var stateObject *StateObject + if stateObject == nil { + // 3.Try to get from main StateDB + stateObject = s.getStateObject(addr) + if stateObject == nil { + s.parallel.addrStateReadsInSlot[addr] = false // true: exist, false: deleted + log.Error("Suicide addr not exist", "txIndex", s.txIndex, "addr", addr) + return false } + s.parallel.addrStateReadsInSlot[addr] = true // true: exist, false: deleted } - // 3. 
Try to get from main StateObejct - stateObject := s.getStateObjectNoSlot(addr) - codeHash := common.Hash{} - if stateObject != nil { - codeHash = common.BytesToHash(stateObject.CodeHash()) - } - if s.parallel.isSlotDB { - s.parallel.codeHashReadsInSlot[addr] = codeHash - } - return codeHash + s.journal.append(suicideChange{ + account: &addr, + prev: stateObject.suicided, // todo: must be false? + prevbalance: new(big.Int).Set(s.GetBalance(addr)), + }) + + stateObject.markSuicided() + stateObject.data.Balance = new(big.Int) + return true } -// GetState retrieves a value from the given account's storage trie. -// For parallel mode wih, get from the state in order: -// -> self dirty, both Slot & MainProcessor -// -> pending of self: Slot on merge -// -> pending of unconfirmed DB -// -> pending of main StateDB -// -> origin -func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { - if s.parallel.isSlotDB { +// +// Setting, updating & deleting state object methods. +// - // 1.Try to get from dirty - if exist, ok := s.parallel.addrStateChangesInSlot[addr]; ok { - if !exist { - return common.Hash{} - } - obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot - return obj.GetState(s.db, hash) - } - if keys, ok := s.parallel.kvChangesInSlot[addr]; ok { - if _, ok := keys[hash]; ok { - obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot - return obj.GetState(s.db, hash) - } - } - // 2.Try to get from uncomfirmed DB or main DB - // 2.1 Already read before - if storage, ok := s.parallel.kvReadsInSlot[addr]; ok { - if val, ok := storage.GetValue(hash); ok { - return val - } - } - // 2.2 Try to get from unconfirmed DB if exist - if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok { - if s.parallel.kvReadsInSlot[addr] == nil { - s.parallel.kvReadsInSlot[addr] = newStorage(false) - } - s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache - return val 
- } - } +// updateStateObject writes the given object to the trie. +func (s *StateDB) updateStateObject(obj *StateObject) { + log.Debug("StateDB updateStateObject", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) - // 3.Get from main StateDB - stateObject := s.getStateObjectNoSlot(addr) - val := common.Hash{} - if stateObject != nil { - val = stateObject.GetState(s.db, hash) + // Track the amount of time wasted on updating the account from the trie + if metrics.EnabledExpensive { + defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now()) } - if s.parallel.isSlotDB { - if s.parallel.kvReadsInSlot[addr] == nil { - s.parallel.kvReadsInSlot[addr] = newStorage(false) + // Encode the account and update the account trie + addr := obj.Address() + data := obj.encodeData + var err error + if data == nil { + data, err = rlp.EncodeToBytes(obj) + if err != nil { + panic(fmt.Errorf("can't encode object at %x: %v", addr[:], err)) } - s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache } - return val -} - -// GetProof returns the Merkle proof for a given account. -func (s *StateDB) GetProof(addr common.Address) ([][]byte, error) { - return s.GetProofByHash(crypto.Keccak256Hash(addr.Bytes())) -} - -// GetProofByHash returns the Merkle proof for a given account. -func (s *StateDB) GetProofByHash(addrHash common.Hash) ([][]byte, error) { - var proof proofList - if _, err := s.Trie(); err != nil { - return nil, err + if err = s.trie.TryUpdate(addr[:], data); err != nil { + s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err)) } - err := s.trie.Prove(addrHash[:], 0, &proof) - return proof, err } -// GetStorageProof returns the Merkle proof for given storage slot. 
-func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, error) { - var proof proofList - trie := s.StorageTrie(a) - if trie == nil { - return proof, errors.New("storage trie for requested address does not exist") +// deleteStateObject removes the given object from the state trie. +func (s *StateDB) deleteStateObject(obj *StateObject) { + // Track the amount of time wasted on deleting the account from the trie + if metrics.EnabledExpensive { + defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now()) + } + // Delete the account from the trie + addr := obj.Address() + if err := s.trie.TryDelete(addr[:]); err != nil { + s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err)) } - err := trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof) - return proof, err } -// GetStorageProofByHash returns the Merkle proof for given storage slot. -func (s *StateDB) GetStorageProofByHash(a common.Address, key common.Hash) ([][]byte, error) { - var proof proofList - trie := s.StorageTrie(a) - if trie == nil { - return proof, errors.New("storage trie for requested address does not exist") +// getStateObject retrieves a state object given by the address, returning nil if +// the object is not found or was deleted in this execution context. If you need +// to differentiate between non-existent/just-deleted, use getDeletedStateObject. +func (s *StateDB) getStateObject(addr common.Address) *StateObject { + if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted { + return obj } - err := trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof) - return proof, err + return nil } -// GetCommittedState retrieves a value from the given account's committed storage trie. 
-func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash { - if s.parallel.isSlotDB { - // 1.No need to get from pending of itself even on merge, since stateobject in SlotDB won't do finalise - // 2.Try to get from uncomfirmed DB or main DB - // KVs in unconfirmed DB can be seen as pending storage - // KVs in main DB are merged from SlotDB and has done finalise() on merge, can be seen as pending storage too. - // 2.1 Already read before - if storage, ok := s.parallel.kvReadsInSlot[addr]; ok { - if val, ok := storage.GetValue(hash); ok { - return val - } - } - // 2.2 Try to get from unconfirmed DB if exist - if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok { - if s.parallel.kvReadsInSlot[addr] == nil { - s.parallel.kvReadsInSlot[addr] = newStorage(false) - } - s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache - return val +func (s *StateDB) getStateObjectFromSnapshotOrTrie(addr common.Address) (data *Account, ok bool) { + var err error + // If no live objects are available, attempt to use snapshots + if s.snap != nil { + if metrics.EnabledExpensive { + defer func(start time.Time) { s.SnapshotAccountReads += time.Since(start) }(time.Now()) + } + var acc *snapshot.Account + if acc, err = s.snap.Account(crypto.HashData(s.hasher, addr.Bytes())); err == nil { + if acc == nil { + return nil, false + } + data = &Account{ + Nonce: acc.Nonce, + Balance: acc.Balance, + CodeHash: acc.CodeHash, + Root: common.BytesToHash(acc.Root), + } + if len(data.CodeHash) == 0 { + data.CodeHash = emptyCodeHash + } + if data.Root == (common.Hash{}) { + data.Root = emptyRoot + } } } - // 3. 
Try to get from main DB - stateObject := s.getStateObjectNoSlot(addr) - val := common.Hash{} - if stateObject != nil { - val = stateObject.GetCommittedState(s.db, hash) - } - if s.parallel.isSlotDB { - if s.parallel.kvReadsInSlot[addr] == nil { - s.parallel.kvReadsInSlot[addr] = newStorage(false) + // If snapshot unavailable or reading from it failed, load from the database + if s.snap == nil || err != nil { + if s.trie == nil { + tr, err := s.db.OpenTrie(s.originalRoot) + if err != nil { + s.setError(fmt.Errorf("failed to open trie tree")) + return nil, false + } + s.trie = tr + } + if metrics.EnabledExpensive { + defer func(start time.Time) { s.AccountReads += time.Since(start) }(time.Now()) + } + enc, err := s.trie.TryGet(addr.Bytes()) + if err != nil { + s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %v", addr.Bytes(), err)) + return nil, false + } + if len(enc) == 0 { + return nil, false + } + data = new(Account) + if err := rlp.DecodeBytes(enc, data); err != nil { + log.Error("Failed to decode state object", "addr", addr, "err", err) + return nil, false } - s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache } - return val -} - -// Database retrieves the low level database supporting the lower level trie ops. -func (s *StateDB) Database() Database { - return s.db + return data, true } -// StorageTrie returns the storage trie of an account. -// The return value is a copy and is nil for non-existent accounts. -func (s *StateDB) StorageTrie(addr common.Address) Trie { - stateObject := s.getStateObject(addr) - if stateObject == nil { +// getDeletedStateObject is similar to getStateObject, but instead of returning +// nil for a deleted state object, it returns the actual object with the deleted +// flag set. This is needed by the state journal to revert to the correct s- +// destructed object instead of wiping all knowledge about the state object. 
+func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject { + // Prefer live objects if any is available + if obj, _ := s.getStateObjectFromStateObjects(addr); obj != nil { + return obj + } + data, ok := s.getStateObjectFromSnapshotOrTrie(addr) + if !ok { return nil } - cpy := stateObject.deepCopy(s) - cpy.updateTrie(s.db) - return cpy.getTrie(s.db) + // Insert into the live set + // if obj, ok := s.loadStateObj(addr); ok { + // fixme: concurrent not safe, merge could update it... + // return obj + //} + obj := newObject(s, s.isParallel, addr, *data) + s.storeStateObj(addr, obj) + return obj } -func (s *StateDB) HasSuicided(addr common.Address) bool { - if s.parallel.isSlotDB { - // 1.Try to get from dirty - if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - return obj.suicided - } - // 2.Try to get from uncomfirmed - if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { - return !exist - } +// func (s *StateDB) SetStateObject(object *StateObject) { +// s.storeStateObj(object.Address(), object) +// } + +// GetOrNewStateObject retrieves a state object or create a new state object if nil. +// dirtyInSlot -> Unconfirmed DB -> main DB -> snapshot, no? create one +func (s *StateDB) GetOrNewStateObject(addr common.Address) *StateObject { + var stateObject *StateObject = nil + if stateObject == nil { + stateObject = s.getStateObject(addr) } - stateObject := s.getStateObjectNoSlot(addr) - if stateObject != nil { - return stateObject.suicided + if stateObject == nil || stateObject.deleted || stateObject.suicided { + stateObject = s.createObject(addr) } - return false + return stateObject } -/* - * SETTERS - */ +// createObject creates a new state object. If there is an existing account with +// the given address, it is overwritten and returned as the second return value. 
-// the source mainObj should be got from the main StateDB -// we have to update its nonce, balance, code if they have updated in the unconfirmed DBs -/* -func (s *StateDB) unconfirmedLightCopy(mainObj *StateObject) *StateObject { - newObj := mainObj.lightCopy(s) // copied nonce, balance, code from base DB +// prev is used for CreateAccount to get its balance +// Parallel mode: +// if prev in dirty: revert is ok +// if prev in unconfirmed DB: addr state read record, revert should not put it back +// if prev in main DB: addr state read record, revert should not put it back +// if pre no exist: addr state read record, - // do balance fixup only when it exist in unconfirmed DB - if nonce, ok := s.getNonceFromUnconfirmedDB(mainObj.address); ok { - // code got from unconfirmed DB - newObj.setNonce(nonce) - } +// `prev` is used to handle revert, to recover with the `prev` object +// In Parallel mode, we only need to recover to `prev` in SlotDB, +// a.if it is not in SlotDB, `revert` will remove it from the SlotDB +// b.if it is exist in SlotDB, `revert` will recover to the `prev` in SlotDB +// c.as `snapDestructs` it is the same +func (s *StateDB) createObject(addr common.Address) (newobj *StateObject) { + prev := s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that! + var prevdestruct bool - // do balance fixup - if balance := s.getBalanceFromUnconfirmedDB(mainObj.address); balance != nil { - // balance got from unconfirmed DB - newObj.setBalance(balance) + if s.snap != nil && prev != nil { + _, prevdestruct = s.snapDestructs[prev.address] + if !prevdestruct { + // To destroy the previous trie node first and update the trie tree + // with the new object on block commit. 
+ s.snapDestructs[prev.address] = struct{}{} + } } - // do code fixup - if codeObj, ok := s.getCodeFromUnconfirmedDB(mainObj.address); ok { - newObj.setCode(crypto.Keccak256Hash(codeObj), codeObj) // fixme: to confirm if we should use "codeObj.Code(db)" - newObj.dirtyCode = false // copy does not make the code dirty, + newobj = newObject(s, s.isParallel, addr, Account{}) + newobj.setNonce(0) // sets the object to dirty + if prev == nil { + s.journal.append(createObjectChange{account: &addr}) + } else { + s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct}) } - return newObj + + s.storeStateObj(addr, newobj) + return newobj } -*/ -// AddBalance adds amount to the account associated with addr. -func (s *StateDB) AddBalance(addr common.Address, amount *big.Int) { - // if s.parallel.isSlotDB { - // add balance will perform a read operation first - // s.parallel.balanceReadsInSlot[addr] = struct{}{} // fixme: to make the the balance valid, since unconfirmed would refer it. - // if amount.Sign() == 0 { - // if amount == 0, no balance change, but there is still an empty check. - // take this empty check as addr state read(create, suicide, empty delete) - // s.parallel.addrStateReadsInSlot[addr] = struct{}{} - // } - // } +// CreateAccount explicitly creates a state object. If a state object with the address +// already exists the balance is carried over to the new account. +// +// CreateAccount is called during the EVM CREATE operation. The situation might arise that +// a contract does the following: +// +// 1. sends funds to sha(account ++ (nonce + 1)) +// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1) +// +// Carrying over the balance ensures that Ether doesn't disappear. 
+func (s *StateDB) CreateAccount(addr common.Address) { + // no matter it is got from dirty, unconfirmed or main DB + // if addr not exist, preBalance will be common.Big0, it is same as new(big.Int) which + // is the value newObject(), + preBalance := s.GetBalance(addr) + newObj := s.createObject(addr) + newObj.setBalance(new(big.Int).Set(preBalance)) // new big.Int for newObj +} - stateObject := s.GetOrNewStateObject(addr) - if stateObject != nil { - if s.parallel.isSlotDB { - if addr == s.parallel.systemAddress { - s.parallel.systemAddressOpsCount++ - } - // if amount.Sign() != 0 { // todo: to reenable it - if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.lightCopy(s) // light copy from main DB - // do balance fixup from the confirmed DB, it could be more reliable than main DB - balance := s.GetBalance(addr) - newStateObject.setBalance(balance) - // s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance() // could read from main DB or unconfirmed DB - newStateObject.AddBalance(amount) - s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - s.parallel.balanceChangesInSlot[addr] = struct{}{} - return - } - // already dirty, make sure the balance if fixed up - // if stateObject.Balance() - if addr != s.parallel.systemAddress { - if stateObject.Balance().Cmp(s.GetBalance(addr)) != 0 { - log.Warn("AddBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr, - "stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", s.GetBalance(addr)) - stateObject.setBalance(s.GetBalance(addr)) - } +func (s *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error { + so := s.getStateObject(addr) + if so == nil { + return nil + } + it := trie.NewIterator(so.getTrie(s.db).NodeIterator(nil)) + + for it.Next() { + key := common.BytesToHash(s.trie.GetKey(it.Key)) + if value, dirty := so.dirtyStorage.GetValue(key); dirty { + if !cb(key, value) { + return nil } + 
continue } - stateObject.AddBalance(amount) - if s.parallel.isSlotDB { - s.parallel.balanceChangesInSlot[addr] = struct{}{} - } - } -} -// SubBalance subtracts amount from the account associated with addr. -func (s *StateDB) SubBalance(addr common.Address, amount *big.Int) { - // if s.parallel.isSlotDB { - // if amount.Sign() != 0 { - // unlike add, sub 0 balance will not touch empty object - // s.parallel.balanceReadsInSlot[addr] = struct{}{} - // } - // } - stateObject := s.GetOrNewStateObject(addr) - if stateObject != nil { - if s.parallel.isSlotDB { - if addr == s.parallel.systemAddress { - s.parallel.systemAddressOpsCount++ - } - - // if amount.Sign() != 0 { // todo: to reenable it - if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.lightCopy(s) // light copy from main DB - // do balance fixup from the confirmed DB, it could be more reliable than main DB - balance := s.GetBalance(addr) - newStateObject.setBalance(balance) - // s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance() - newStateObject.SubBalance(amount) - s.parallel.balanceChangesInSlot[addr] = struct{}{} - s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - return + if len(it.Value) > 0 { + _, content, _, err := rlp.Split(it.Value) + if err != nil { + return err } - // already dirty, make sure the balance if fixed - // if stateObject.Balance() - if addr != s.parallel.systemAddress { - if stateObject.Balance().Cmp(s.GetBalance(addr)) != 0 { - log.Warn("SubBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr, - "stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", s.GetBalance(addr)) - stateObject.setBalance(s.GetBalance(addr)) - } + if !cb(key, common.BytesToHash(content)) { + return nil } } - stateObject.SubBalance(amount) - if s.parallel.isSlotDB { - s.parallel.balanceChangesInSlot[addr] = struct{}{} - } - } + return nil } -func (s *StateDB) SetBalance(addr common.Address, amount *big.Int) { 
- stateObject := s.GetOrNewStateObject(addr) - if stateObject != nil { - if s.parallel.isSlotDB { - if addr == s.parallel.systemAddress { - s.parallel.systemAddressOpsCount++ - } - if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.lightCopy(s) - // update balance for revert, in case child contract is revertted, - // it should revert to the previous balance - balance := s.GetBalance(addr) - newStateObject.setBalance(balance) - newStateObject.SetBalance(amount) - s.parallel.balanceChangesInSlot[addr] = struct{}{} - s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - return - } +// Copy creates a deep, independent copy of the state. +// Snapshots of the copied state cannot be applied to the copy. +func (s *StateDB) Copy() *StateDB { + // Copy all the basic fields, initialize the memory ones + state := &StateDB{ + db: s.db, + trie: s.db.CopyTrie(s.trie), + stateObjects: make(map[common.Address]*StateObject, len(s.journal.dirties)), + stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)), + stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)), + storagePool: s.storagePool, + refund: s.refund, + logs: make(map[common.Hash][]*types.Log, len(s.logs)), + logSize: s.logSize, + preimages: make(map[common.Hash][]byte, len(s.preimages)), + journal: newJournal(), + hasher: crypto.NewKeccakState(), + parallel: ParallelState{}, + } + // Copy the dirty states, logs, and preimages + for addr := range s.journal.dirties { + // As documented [here](https://github.com/ethereum/go-ethereum/pull/16485#issuecomment-380438527), + // and in the Finalise-method, there is a case where an object is in the journal but not + // in the stateObjects: OOG after touch on ripeMD prior to Byzantium. 
Thus, we need to check for + // nil + if object, exist := s.getStateObjectFromStateObjects(addr); exist { + // Even though the original object is dirty, we are not copying the journal, + // so we need to make sure that anyside effect the journal would have caused + // during a commit (or similar op) is already applied to the copy. + state.storeStateObj(addr, object.deepCopy(state)) + state.stateObjectsDirty[addr] = struct{}{} // Mark the copy dirty to force internal (code/state) commits + state.stateObjectsPending[addr] = struct{}{} // Mark the copy pending to force external (account) commits } - stateObject.SetBalance(amount) - if s.parallel.isSlotDB { - s.parallel.balanceChangesInSlot[addr] = struct{}{} + } + // Above, we don't copy the actual journal. This means that if the copy is copied, the + // loop above will be a no-op, since the copy's journal is empty. + // Thus, here we iterate over stateObjects, to enable copies of copies + for addr := range s.stateObjectsPending { + if _, exist := state.getStateObjectFromStateObjects(addr); !exist { + object, _ := s.getStateObjectFromStateObjects(addr) + state.storeStateObj(addr, object.deepCopy(state)) } + state.stateObjectsPending[addr] = struct{}{} } -} - -func (s *StateDB) SetNonce(addr common.Address, nonce uint64) { - stateObject := s.GetOrNewStateObject(addr) - if stateObject != nil { - if s.parallel.isSlotDB { - if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.lightCopy(s) - noncePre := s.GetNonce(addr) - newStateObject.setNonce(noncePre) // nonce fixup - newStateObject.SetNonce(nonce) - s.parallel.nonceChangesInSlot[addr] = struct{}{} - s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - return - } - noncePre := s.GetNonce(addr) - stateObject.setNonce(noncePre) // nonce fixup + for addr := range s.stateObjectsDirty { + if _, exist := state.getStateObjectFromStateObjects(addr); !exist { + object, _ := s.getStateObjectFromStateObjects(addr) + 
state.storeStateObj(addr, object.deepCopy(state)) } - stateObject.SetNonce(nonce) - if s.parallel.isSlotDB { - s.parallel.nonceChangesInSlot[addr] = struct{}{} + state.stateObjectsDirty[addr] = struct{}{} + } + for hash, logs := range s.logs { + cpy := make([]*types.Log, len(logs)) + for i, l := range logs { + cpy[i] = new(types.Log) + *cpy[i] = *l } + state.logs[hash] = cpy + } + for hash, preimage := range s.preimages { + state.preimages[hash] = preimage + } + // Do we need to copy the access list? In practice: No. At the start of a + // transaction, the access list is empty. In practice, we only ever copy state + // _between_ transactions/blocks, never in the middle of a transaction. + // However, it doesn't cost us much to copy an empty list, so we do it anyway + // to not blow up if we ever decide copy it in the middle of a transaction + if s.accessList != nil { + state.accessList = s.accessList.Copy() } -} -func (s *StateDB) SetCode(addr common.Address, code []byte) { - stateObject := s.GetOrNewStateObject(addr) - if stateObject != nil { - codeHash := crypto.Keccak256Hash(code) - if s.parallel.isSlotDB { - if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.lightCopy(s) - codePre := s.GetCode(addr) // code fixup - codeHashPre := crypto.Keccak256Hash(codePre) - newStateObject.setCode(codeHashPre, codePre) - - newStateObject.SetCode(codeHash, code) - s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - s.parallel.codeChangesInSlot[addr] = struct{}{} - return - } - codePre := s.GetCode(addr) // code fixup - codeHashPre := crypto.Keccak256Hash(codePre) - stateObject.setCode(codeHashPre, codePre) + // If there's a prefetcher running, make an inactive copy of it that can + // only access data but does not actively preload (since the user will not + // know that they need to explicitly terminate an active copy). 
+ if s.prefetcher != nil { + state.prefetcher = s.prefetcher.copy() + } + if s.snaps != nil { + // In order for the miner to be able to use and make additions + // to the snapshot tree, we need to copy that aswell. + // Otherwise, any block mined by ourselves will cause gaps in the tree, + // and force the miner to operate trie-backed only + state.snaps = s.snaps + state.snap = s.snap + // deep copy needed + state.snapDestructs = make(map[common.Address]struct{}) + for k, v := range s.snapDestructs { + state.snapDestructs[k] = v } - stateObject.SetCode(codeHash, code) - if s.parallel.isSlotDB { - s.parallel.codeChangesInSlot[addr] = struct{}{} + state.snapAccounts = make(map[common.Address][]byte) + for k, v := range s.snapAccounts { + state.snapAccounts[k] = v } - } -} - -func (s *StateDB) SetState(addr common.Address, key, value common.Hash) { - stateObject := s.GetOrNewStateObject(addr) // attention: if StateObject's lightCopy, its storage is only a part of the full storage, - if stateObject != nil { - if s.parallel.isSlotDB { - if s.parallel.baseTxIndex+1 == s.txIndex { - // we check if state is unchanged - // only when current transaction is the next transaction to be committed - // fixme: there is a bug, block: 14,962,284, - // stateObject is in dirty (light copy), but the key is in mainStateDB - // stateObject dirty -> committed, will skip mainStateDB dirty - if s.GetState(addr, key) == value { - log.Debug("Skip set same state", "baseTxIndex", s.parallel.baseTxIndex, - "txIndex", s.txIndex, "addr", addr, - "key", key, "value", value) - return - } - } - - if s.parallel.kvChangesInSlot[addr] == nil { - s.parallel.kvChangesInSlot[addr] = make(StateKeys) // make(Storage, defaultNumOfSlots) - } - - if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - newStateObject := stateObject.lightCopy(s) - newStateObject.SetState(s.db, key, value) - s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - return + state.snapStorage = 
make(map[common.Address]map[string][]byte) + for k, v := range s.snapStorage { + temp := make(map[string][]byte) + for kk, vv := range v { + temp[kk] = vv } - // do State Update + state.snapStorage[k] = temp } - stateObject.SetState(s.db, key, value) } + return state } -// SetStorage replaces the entire storage for the specified account with given -// storage. This function should only be used for debugging. -func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) { - stateObject := s.GetOrNewStateObject(addr) // fixme: parallel mode? - if stateObject != nil { - stateObject.SetStorage(storage) - } +/* +var addressStructPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]struct{}, defaultNumOfSlots) }, } -// Suicide marks the given account as suicided. -// This clears the account balance. -// -// The account's state object is still available until the state is committed, -// getStateObject will return a non-nil account after Suicide. 
-func (s *StateDB) Suicide(addr common.Address) bool { - var stateObject *StateObject - if s.parallel.isSlotDB { - // 1.Try to get from dirty, it could be suicided inside of contract call - stateObject = s.parallel.dirtiedStateObjectsInSlot[addr] - if stateObject == nil { - // 2.Try to get from uncomfirmed, if deleted return false, since the address does not exist - if obj, ok := s.getStateObjectFromUnconfirmedDB(addr); ok { - stateObject = obj - s.parallel.addrStateReadsInSlot[addr] = !stateObject.deleted // true: exist, false: deleted - if stateObject.deleted { - log.Error("Suicide addr alreay deleted in confirmed DB", "txIndex", s.txIndex, "addr", addr) - return false - } - } - } - } - if stateObject == nil { - // 3.Try to get from main StateDB - stateObject = s.getStateObjectNoSlot(addr) - if stateObject == nil { - s.parallel.addrStateReadsInSlot[addr] = false // true: exist, false: deleted - log.Error("Suicide addr not exist", "txIndex", s.txIndex, "addr", addr) - return false +var journalPool = sync.Pool{ + New: func() interface{} { + return &journal{ + dirties: make(map[common.Address]int, defaultNumOfSlots), + entries: make([]journalEntry, 0, defaultNumOfSlots), } - s.parallel.addrStateReadsInSlot[addr] = true // true: exist, false: deleted - } + }, +} - s.journal.append(suicideChange{ - account: &addr, - prev: stateObject.suicided, // todo: must be false? 
- prevbalance: new(big.Int).Set(s.GetBalance(addr)), - }) +var stateKeysPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]StateKeys, defaultNumOfSlots) }, +} - if s.parallel.isSlotDB { - if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { - // do copy-on-write for suicide "write" - newStateObject := stateObject.lightCopy(s) - newStateObject.markSuicided() - newStateObject.data.Balance = new(big.Int) - s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject - s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist any more, - // s.parallel.nonceChangesInSlot[addr] = struct{}{} - s.parallel.balanceChangesInSlot[addr] = struct{}{} - s.parallel.codeChangesInSlot[addr] = struct{}{} - // s.parallel.kvChangesInSlot[addr] = make(StateKeys) // all key changes are discarded - return true - } - s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist any more, - s.parallel.balanceChangesInSlot[addr] = struct{}{} - s.parallel.codeChangesInSlot[addr] = struct{}{} - } +var stateObjectsPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]*StateObject, defaultNumOfSlots) }, +} - stateObject.markSuicided() - stateObject.data.Balance = new(big.Int) - return true +var balancePool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]*big.Int, defaultNumOfSlots) }, } -// -// Setting, updating & deleting state object methods. -// +var snapAccountPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address][]byte, defaultNumOfSlots) }, +} -// updateStateObject writes the given object to the trie. 
-func (s *StateDB) updateStateObject(obj *StateObject) { - // Track the amount of time wasted on updating the account from the trie - if metrics.EnabledExpensive { - defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now()) - } - // Encode the account and update the account trie - addr := obj.Address() - data := obj.encodeData - var err error - if data == nil { - data, err = rlp.EncodeToBytes(obj) - if err != nil { - panic(fmt.Errorf("can't encode object at %x: %v", addr[:], err)) - } - } - if err = s.trie.TryUpdate(addr[:], data); err != nil { - s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err)) - } +var snapStoragePool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]map[string][]byte, defaultNumOfSlots) }, } -// deleteStateObject removes the given object from the state trie. -func (s *StateDB) deleteStateObject(obj *StateObject) { - // Track the amount of time wasted on deleting the account from the trie - if metrics.EnabledExpensive { - defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now()) - } - // Delete the account from the trie - addr := obj.Address() - if err := s.trie.TryDelete(addr[:]); err != nil { - s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err)) - } +var snapStorageValuePool = sync.Pool{ + New: func() interface{} { return make(map[string][]byte, defaultNumOfSlots) }, } -// getStateObject retrieves a state object given by the address, returning nil if -// the object is not found or was deleted in this execution context. If you need -// to differentiate between non-existent/just-deleted, use getDeletedStateObject. 
-// fixme: avoid getStateObjectNoSlot, may be we define a new struct SlotDB which inherit StateDB -func (s *StateDB) getStateObjectNoSlot(addr common.Address) *StateObject { - if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted { - return obj - } - return nil +var logsPool = sync.Pool{ + New: func() interface{} { return make(map[common.Hash][]*types.Log, defaultNumOfSlots) }, } -// for parallel execution mode, try to get dirty StateObject in slot first. -func (s *StateDB) getStateObject(addr common.Address) *StateObject { - if s.parallel.isSlotDB { - if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - return obj - } +func (s *StateDB) SlotDBPutSyncPool() { + // for key := range s.parallel.codeReadsInSlot { + // delete(s.parallel.codeReadsInSlot, key) + //} + //addressStructPool.Put(s.parallel.codeReadsInSlot) + + for key := range s.parallel.codeChangesInSlot { + delete(s.parallel.codeChangesInSlot, key) } + addressStructPool.Put(s.parallel.codeChangesInSlot) - if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted { - return obj + for key := range s.parallel.balanceChangesInSlot { + delete(s.parallel.balanceChangesInSlot, key) } - return nil -} + addressStructPool.Put(s.parallel.balanceChangesInSlot) -// getDeletedStateObject is similar to getStateObject, but instead of returning -// nil for a deleted state object, it returns the actual object with the deleted -// flag set. This is needed by the state journal to revert to the correct s- -// destructed object instead of wiping all knowledge about the state object. 
-func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject { - // Prefer live objects if any is available - if obj, _ := s.getStateObjectFromStateObjects(addr); obj != nil { - return obj + for key := range s.parallel.balanceReadsInSlot { + delete(s.parallel.balanceReadsInSlot, key) } - // If no live objects are available, attempt to use snapshots - var ( - data *Account - err error - ) - if s.snap != nil { - if metrics.EnabledExpensive { - defer func(start time.Time) { s.SnapshotAccountReads += time.Since(start) }(time.Now()) - } - var acc *snapshot.Account - if acc, err = s.snap.Account(crypto.HashData(s.hasher, addr.Bytes())); err == nil { - if acc == nil { - return nil - } - data = &Account{ - Nonce: acc.Nonce, - Balance: acc.Balance, - CodeHash: acc.CodeHash, - Root: common.BytesToHash(acc.Root), - } - if len(data.CodeHash) == 0 { - data.CodeHash = emptyCodeHash - } - if data.Root == (common.Hash{}) { - data.Root = emptyRoot - } - } + balancePool.Put(s.parallel.balanceReadsInSlot) + + // for key := range s.parallel.addrStateReadsInSlot { + // delete(s.parallel.addrStateReadsInSlot, key) + // } + // addressStructPool.Put(s.parallel.addrStateReadsInSlot) + + for key := range s.parallel.nonceChangesInSlot { + delete(s.parallel.nonceChangesInSlot, key) } - // If snapshot unavailable or reading from it failed, load from the database - if s.snap == nil || err != nil { - if s.trie == nil { - tr, err := s.db.OpenTrie(s.originalRoot) - if err != nil { - s.setError(fmt.Errorf("failed to open trie tree")) - return nil - } - s.trie = tr - } - if metrics.EnabledExpensive { - defer func(start time.Time) { s.AccountReads += time.Since(start) }(time.Now()) - } - enc, err := s.trie.TryGet(addr.Bytes()) - if err != nil { - s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %v", addr.Bytes(), err)) - return nil - } - if len(enc) == 0 { - return nil - } - data = new(Account) - if err := rlp.DecodeBytes(enc, data); err != nil { - log.Error("Failed to decode 
state object", "addr", addr, "err", err) - return nil - } + addressStructPool.Put(s.parallel.nonceChangesInSlot) + + for key := range s.stateObjectsPending { + delete(s.stateObjectsPending, key) } - // Insert into the live set - // if obj, ok := s.loadStateObj(addr); ok { - // fixme: concurrent not safe, merge could update it... - // return obj - //} - obj := newObject(s, s.isParallel, addr, *data) - s.SetStateObject(obj) - return obj -} + addressStructPool.Put(s.stateObjectsPending) -func (s *StateDB) SetStateObject(object *StateObject) { - s.storeStateObj(object.Address(), object) -} + for key := range s.stateObjectsDirty { + delete(s.stateObjectsDirty, key) + } + addressStructPool.Put(s.stateObjectsDirty) -// GetOrNewStateObject retrieves a state object or create a new state object if nil. -// dirtyInSlot -> Unconfirmed DB -> main DB -> snapshot, no? create one -func (s *StateDB) GetOrNewStateObject(addr common.Address) *StateObject { - var stateObject *StateObject = nil - exist := true - if s.parallel.isSlotDB { - if stateObject, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { - return stateObject - } - stateObject, _ = s.getStateObjectFromUnconfirmedDB(addr) + for key := range s.journal.dirties { + delete(s.journal.dirties, key) } + s.journal.entries = s.journal.entries[:0] + journalPool.Put(s.journal) - if stateObject == nil { - stateObject = s.getStateObjectNoSlot(addr) + for key := range s.parallel.kvChangesInSlot { + delete(s.parallel.kvChangesInSlot, key) } - if stateObject == nil || stateObject.deleted || stateObject.suicided { - stateObject = s.createObject(addr) - exist = false + stateKeysPool.Put(s.parallel.kvChangesInSlot) + + // for key := range s.parallel.kvReadsInSlot { + // delete(s.parallel.kvReadsInSlot, key) + // } + // stateKeysPool.Put(s.parallel.kvReadsInSlot) + + for key := range s.parallel.dirtiedStateObjectsInSlot { + delete(s.parallel.dirtiedStateObjectsInSlot, key) } + stateObjectsPool.Put(s.parallel.dirtiedStateObjectsInSlot) - 
if s.parallel.isSlotDB { - s.parallel.addrStateReadsInSlot[addr] = exist // true: exist, false: not exist + for key := range s.snapDestructs { + delete(s.snapDestructs, key) } - return stateObject -} + addressStructPool.Put(s.snapDestructs) -// createObject creates a new state object. If there is an existing account with -// the given address, it is overwritten and returned as the second return value. - -// prev is used for CreateAccount to get its balance -// Parallel mode: -// if prev in dirty: revert is ok -// if prev in unconfirmed DB: addr state read record, revert should not put it back -// if prev in main DB: addr state read record, revert should not put it back -// if pre no exist: addr state read record, - -// `prev` is used to handle revert, to recover with the `prev` object -// In Parallel mode, we only need to recover to `prev` in SlotDB, -// a.if it is not in SlotDB, `revert` will remove it from the SlotDB -// b.if it is exist in SlotDB, `revert` will recover to the `prev` in SlotDB -// c.as `snapDestructs` it is the same -func (s *StateDB) createObject(addr common.Address) (newobj *StateObject) { - var prev *StateObject = nil - if s.parallel.isSlotDB { - // do not get from unconfirmed DB, since it will has problem on revert - prev = s.parallel.dirtiedStateObjectsInSlot[addr] - } else { - prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that! - } - - var prevdestruct bool - - if s.snap != nil && prev != nil { - _, prevdestruct = s.snapDestructs[prev.address] // fixme, record the snapshot read for create Account - if s.parallel.isSlotDB { - s.parallel.addrSnapDestructsReadsInSlot[addr] = prevdestruct - } - if !prevdestruct { - // To destroy the previous trie node first and update the trie tree - // with the new object on block commit. 
- s.snapDestructs[prev.address] = struct{}{} + for key := range s.snapAccounts { + delete(s.snapAccounts, key) + } + snapAccountPool.Put(s.snapAccounts) + for key, storage := range s.snapStorage { + for key := range storage { + delete(storage, key) } + snapStorageValuePool.Put(storage) + delete(s.snapStorage, key) } - newobj = newObject(s, s.isParallel, addr, Account{}) - newobj.setNonce(0) // sets the object to dirty - if prev == nil { - s.journal.append(createObjectChange{account: &addr}) - } else { - s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct}) - } - - if s.parallel.isSlotDB { - // s.parallel.dirtiedStateObjectsInSlot[addr] = newobj // would change the bahavior of AddBalance... - s.parallel.addrStateChangesInSlot[addr] = true // the object sis created - s.parallel.nonceChangesInSlot[addr] = struct{}{} - s.parallel.balanceChangesInSlot[addr] = struct{}{} - s.parallel.codeChangesInSlot[addr] = struct{}{} - // notice: all the KVs are cleared if any - s.parallel.kvChangesInSlot[addr] = make(StateKeys) - } else { - s.SetStateObject(newobj) - } - return newobj -} - -// CreateAccount explicitly creates a state object. If a state object with the address -// already exists the balance is carried over to the new account. -// -// CreateAccount is called during the EVM CREATE operation. The situation might arise that -// a contract does the following: -// -// 1. sends funds to sha(account ++ (nonce + 1)) -// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1) -// -// Carrying over the balance ensures that Ether doesn't disappear. 
-func (s *StateDB) CreateAccount(addr common.Address) { - // no matter it is got from dirty, unconfirmed or main DB - // if addr not exist, preBalance will be common.Big0, it is same as new(big.Int) which - // is the value newObject(), - preBalance := s.GetBalance(addr) // parallel balance read will be recorded inside of GetBalance - newObj := s.createObject(addr) - newObj.setBalance(new(big.Int).Set(preBalance)) // new big.Int for newObj -} - -func (s *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error { - so := s.getStateObject(addr) - if so == nil { - return nil - } - it := trie.NewIterator(so.getTrie(s.db).NodeIterator(nil)) - - for it.Next() { - key := common.BytesToHash(s.trie.GetKey(it.Key)) - if value, dirty := so.dirtyStorage.GetValue(key); dirty { - if !cb(key, value) { - return nil - } - continue - } + snapStoragePool.Put(s.snapStorage) - if len(it.Value) > 0 { - _, content, _, err := rlp.Split(it.Value) - if err != nil { - return err - } - if !cb(key, common.BytesToHash(content)) { - return nil - } - } + for key := range s.logs { + delete(s.logs, key) } - return nil + logsPool.Put(s.logs) } +*/ +// CopyForSlot copy all the basic fields, initialize the memory ones +func (s *StateDB) CopyForSlot() *ParallelStateDB { + parallel := ParallelState{ + // use base(dispatcher) slot db's stateObjects. + // It is a SyncMap, only readable to slot, not writable + stateObjects: s.parallel.stateObjects, + unconfirmedDBInShot: make(map[int]*ParallelStateDB, 100), -// Copy creates a deep, independent copy of the state. -// Snapshots of the copied state cannot be applied to the copy. 
-func (s *StateDB) Copy() *StateDB { - // Copy all the basic fields, initialize the memory ones - state := &StateDB{ - db: s.db, - trie: s.db.CopyTrie(s.trie), - stateObjects: make(map[common.Address]*StateObject, len(s.journal.dirties)), - stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)), - stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)), - storagePool: s.storagePool, - refund: s.refund, - logs: make(map[common.Hash][]*types.Log, len(s.logs)), - logSize: s.logSize, - preimages: make(map[common.Hash][]byte, len(s.preimages)), - journal: newJournal(), - hasher: crypto.NewKeccakState(), - parallel: ParallelState{}, - } - // Copy the dirty states, logs, and preimages - for addr := range s.journal.dirties { - // As documented [here](https://github.com/ethereum/go-ethereum/pull/16485#issuecomment-380438527), - // and in the Finalise-method, there is a case where an object is in the journal but not - // in the stateObjects: OOG after touch on ripeMD prior to Byzantium. Thus, we need to check for - // nil - if object, exist := s.getStateObjectFromStateObjects(addr); exist { - // Even though the original object is dirty, we are not copying the journal, - // so we need to make sure that anyside effect the journal would have caused - // during a commit (or similar op) is already applied to the copy. 
- state.storeStateObj(addr, object.deepCopy(state)) + codeReadsInSlot: make(map[common.Address][]byte, 10), // addressStructPool.Get().(map[common.Address]struct{}), + codeHashReadsInSlot: make(map[common.Address]common.Hash), + codeChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + kvChangesInSlot: make(map[common.Address]StateKeys), // stateKeysPool.Get().(map[common.Address]StateKeys), + kvReadsInSlot: make(map[common.Address]Storage, 100), // stateKeysPool.Get().(map[common.Address]Storage), + balanceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + balanceReadsInSlot: make(map[common.Address]*big.Int), // addressStructPool.Get().(map[common.Address]struct{}), + addrStateReadsInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), + addrStateChangesInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), + nonceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + nonceReadsInSlot: make(map[common.Address]uint64), + addrSnapDestructsReadsInSlot: make(map[common.Address]bool), - state.stateObjectsDirty[addr] = struct{}{} // Mark the copy dirty to force internal (code/state) commits - state.stateObjectsPending[addr] = struct{}{} // Mark the copy pending to force external (account) commits - } - } - // Above, we don't copy the actual journal. This means that if the copy is copied, the - // loop above will be a no-op, since the copy's journal is empty. 
- // Thus, here we iterate over stateObjects, to enable copies of copies - for addr := range s.stateObjectsPending { - if _, exist := state.getStateObjectFromStateObjects(addr); !exist { - object, _ := s.getStateObjectFromStateObjects(addr) - state.storeStateObj(addr, object.deepCopy(state)) - } - state.stateObjectsPending[addr] = struct{}{} - } - for addr := range s.stateObjectsDirty { - if _, exist := state.getStateObjectFromStateObjects(addr); !exist { - object, _ := s.getStateObjectFromStateObjects(addr) - state.storeStateObj(addr, object.deepCopy(state)) - } - state.stateObjectsDirty[addr] = struct{}{} + isSlotDB: true, + dirtiedStateObjectsInSlot: make(map[common.Address]*StateObject), // stateObjectsPool.Get().(map[common.Address]*StateObject), } - for hash, logs := range s.logs { - cpy := make([]*types.Log, len(logs)) - for i, l := range logs { - cpy[i] = new(types.Log) - *cpy[i] = *l - } - state.logs[hash] = cpy + state := &ParallelStateDB{ + StateDB{ + db: s.db, + trie: s.db.CopyTrie(s.trie), + stateObjects: make(map[common.Address]*StateObject), // replaced by parallel.stateObjects in parallel mode + stateObjectsPending: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), + stateObjectsDirty: make(map[common.Address]struct{}), //addressStructPool.Get().(map[common.Address]struct{}), + refund: s.refund, // should be 0 + logs: make(map[common.Hash][]*types.Log, defaultNumOfSlots), // logsPool.Get().(map[common.Hash][]*types.Log), + logSize: 0, + preimages: make(map[common.Hash][]byte, len(s.preimages)), + journal: newJournal(), // journalPool.Get().(*journal), + hasher: crypto.NewKeccakState(), + isParallel: true, + parallel: parallel, + }, } for hash, preimage := range s.preimages { state.preimages[hash] = preimage } - // Do we need to copy the access list? In practice: No. At the start of a - // transaction, the access list is empty. 
In practice, we only ever copy state - // _between_ transactions/blocks, never in the middle of a transaction. - // However, it doesn't cost us much to copy an empty list, so we do it anyway - // to not blow up if we ever decide copy it in the middle of a transaction - if s.accessList != nil { - state.accessList = s.accessList.Copy() - } - // If there's a prefetcher running, make an inactive copy of it that can - // only access data but does not actively preload (since the user will not - // know that they need to explicitly terminate an active copy). - if s.prefetcher != nil { - state.prefetcher = s.prefetcher.copy() - } if s.snaps != nil { // In order for the miner to be able to use and make additions // to the snapshot tree, we need to copy that aswell. @@ -2138,271 +1254,71 @@ func (s *StateDB) Copy() *StateDB { state.snaps = s.snaps state.snap = s.snap // deep copy needed - state.snapDestructs = make(map[common.Address]struct{}) + state.snapDestructs = make(map[common.Address]struct{}) //addressStructPool.Get().(map[common.Address]struct{}) + s.snapParallelLock.RLock() for k, v := range s.snapDestructs { state.snapDestructs[k] = v } - state.snapAccounts = make(map[common.Address][]byte) + s.snapParallelLock.RUnlock() + // + state.snapAccounts = make(map[common.Address][]byte) // snapAccountPool.Get().(map[common.Address][]byte) for k, v := range s.snapAccounts { state.snapAccounts[k] = v } - state.snapStorage = make(map[common.Address]map[string][]byte) + state.snapStorage = make(map[common.Address]map[string][]byte) // snapStoragePool.Get().(map[common.Address]map[string][]byte) for k, v := range s.snapStorage { - temp := make(map[string][]byte) + temp := make(map[string][]byte) // snapStorageValuePool.Get().(map[string][]byte) for kk, vv := range v { temp[kk] = vv } state.snapStorage[k] = temp } + // trie prefetch should be done by dispacther on StateObject Merge, + // disable it in parallel slot + // state.prefetcher = s.prefetcher } - return state -} -/* 
-var addressStructPool = sync.Pool{ - New: func() interface{} { return make(map[common.Address]struct{}, defaultNumOfSlots) }, + return state } -var journalPool = sync.Pool{ - New: func() interface{} { - return &journal{ - dirties: make(map[common.Address]int, defaultNumOfSlots), - entries: make([]journalEntry, 0, defaultNumOfSlots), - } - }, +// Snapshot returns an identifier for the current revision of the state. +func (s *StateDB) Snapshot() int { + id := s.nextRevisionId + s.nextRevisionId++ + s.validRevisions = append(s.validRevisions, revision{id, s.journal.length()}) + return id } -var stateKeysPool = sync.Pool{ - New: func() interface{} { return make(map[common.Address]StateKeys, defaultNumOfSlots) }, -} +// RevertToSnapshot reverts all state changes made since the given revision. +func (s *StateDB) RevertToSnapshot(revid int) { + // Find the snapshot in the stack of valid snapshots. + idx := sort.Search(len(s.validRevisions), func(i int) bool { + return s.validRevisions[i].id >= revid + }) + if idx == len(s.validRevisions) || s.validRevisions[idx].id != revid { + panic(fmt.Errorf("revision id %v cannot be reverted", revid)) + } + snapshot := s.validRevisions[idx].journalIndex -var stateObjectsPool = sync.Pool{ - New: func() interface{} { return make(map[common.Address]*StateObject, defaultNumOfSlots) }, + // Replay the journal to undo changes and remove invalidated snapshots + s.journal.revert(s, snapshot) + s.validRevisions = s.validRevisions[:idx] } -var balancePool = sync.Pool{ - New: func() interface{} { return make(map[common.Address]*big.Int, defaultNumOfSlots) }, +// GetRefund returns the current value of the refund counter. 
+func (s *StateDB) GetRefund() uint64 { + return s.refund } -var snapAccountPool = sync.Pool{ - New: func() interface{} { return make(map[common.Address][]byte, defaultNumOfSlots) }, -} - -var snapStoragePool = sync.Pool{ - New: func() interface{} { return make(map[common.Address]map[string][]byte, defaultNumOfSlots) }, -} - -var snapStorageValuePool = sync.Pool{ - New: func() interface{} { return make(map[string][]byte, defaultNumOfSlots) }, -} - -var logsPool = sync.Pool{ - New: func() interface{} { return make(map[common.Hash][]*types.Log, defaultNumOfSlots) }, -} - -func (s *StateDB) SlotDBPutSyncPool() { - // for key := range s.parallel.codeReadsInSlot { - // delete(s.parallel.codeReadsInSlot, key) - //} - //addressStructPool.Put(s.parallel.codeReadsInSlot) - - for key := range s.parallel.codeChangesInSlot { - delete(s.parallel.codeChangesInSlot, key) - } - addressStructPool.Put(s.parallel.codeChangesInSlot) - - for key := range s.parallel.balanceChangesInSlot { - delete(s.parallel.balanceChangesInSlot, key) - } - addressStructPool.Put(s.parallel.balanceChangesInSlot) - - for key := range s.parallel.balanceReadsInSlot { - delete(s.parallel.balanceReadsInSlot, key) - } - balancePool.Put(s.parallel.balanceReadsInSlot) - - // for key := range s.parallel.addrStateReadsInSlot { - // delete(s.parallel.addrStateReadsInSlot, key) - // } - // addressStructPool.Put(s.parallel.addrStateReadsInSlot) - - for key := range s.parallel.nonceChangesInSlot { - delete(s.parallel.nonceChangesInSlot, key) - } - addressStructPool.Put(s.parallel.nonceChangesInSlot) - - for key := range s.stateObjectsPending { - delete(s.stateObjectsPending, key) - } - addressStructPool.Put(s.stateObjectsPending) - - for key := range s.stateObjectsDirty { - delete(s.stateObjectsDirty, key) - } - addressStructPool.Put(s.stateObjectsDirty) - - for key := range s.journal.dirties { - delete(s.journal.dirties, key) - } - s.journal.entries = s.journal.entries[:0] - journalPool.Put(s.journal) - - for key := 
range s.parallel.kvChangesInSlot { - delete(s.parallel.kvChangesInSlot, key) - } - stateKeysPool.Put(s.parallel.kvChangesInSlot) - - // for key := range s.parallel.kvReadsInSlot { - // delete(s.parallel.kvReadsInSlot, key) - // } - // stateKeysPool.Put(s.parallel.kvReadsInSlot) - - for key := range s.parallel.dirtiedStateObjectsInSlot { - delete(s.parallel.dirtiedStateObjectsInSlot, key) - } - stateObjectsPool.Put(s.parallel.dirtiedStateObjectsInSlot) - - for key := range s.snapDestructs { - delete(s.snapDestructs, key) - } - addressStructPool.Put(s.snapDestructs) - - for key := range s.snapAccounts { - delete(s.snapAccounts, key) - } - snapAccountPool.Put(s.snapAccounts) - - for key, storage := range s.snapStorage { - for key := range storage { - delete(storage, key) - } - snapStorageValuePool.Put(storage) - delete(s.snapStorage, key) - } - snapStoragePool.Put(s.snapStorage) - - for key := range s.logs { - delete(s.logs, key) - } - logsPool.Put(s.logs) -} -*/ -// CopyForSlot copy all the basic fields, initialize the memory ones -func (s *StateDB) CopyForSlot() *StateDB { - parallel := ParallelState{ - // use base(dispatcher) slot db's stateObjects. 
- // It is a SyncMap, only readable to slot, not writable - stateObjects: s.parallel.stateObjects, - unconfirmedDBInShot: make(map[int]*StateDB, 100), - - codeReadsInSlot: make(map[common.Address][]byte, 10), // addressStructPool.Get().(map[common.Address]struct{}), - codeHashReadsInSlot: make(map[common.Address]common.Hash), - codeChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), - kvChangesInSlot: make(map[common.Address]StateKeys), // stateKeysPool.Get().(map[common.Address]StateKeys), - kvReadsInSlot: make(map[common.Address]Storage, 100), // stateKeysPool.Get().(map[common.Address]Storage), - balanceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), - balanceReadsInSlot: make(map[common.Address]*big.Int), // addressStructPool.Get().(map[common.Address]struct{}), - addrStateReadsInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), - addrStateChangesInSlot: make(map[common.Address]bool), // addressStructPool.Get().(map[common.Address]struct{}), - nonceChangesInSlot: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), - nonceReadsInSlot: make(map[common.Address]uint64), - addrSnapDestructsReadsInSlot: make(map[common.Address]bool), - - isSlotDB: true, - dirtiedStateObjectsInSlot: make(map[common.Address]*StateObject), // stateObjectsPool.Get().(map[common.Address]*StateObject), - } - state := &StateDB{ - db: s.db, - trie: s.db.CopyTrie(s.trie), - stateObjects: make(map[common.Address]*StateObject), // replaced by parallel.stateObjects in parallel mode - stateObjectsPending: make(map[common.Address]struct{}), // addressStructPool.Get().(map[common.Address]struct{}), - stateObjectsDirty: make(map[common.Address]struct{}), //addressStructPool.Get().(map[common.Address]struct{}), - refund: s.refund, // should be 0 - logs: make(map[common.Hash][]*types.Log, 
defaultNumOfSlots), // logsPool.Get().(map[common.Hash][]*types.Log), - logSize: 0, - preimages: make(map[common.Hash][]byte, len(s.preimages)), - journal: newJournal(), // journalPool.Get().(*journal), - hasher: crypto.NewKeccakState(), - isParallel: true, - parallel: parallel, - } - - for hash, preimage := range s.preimages { - state.preimages[hash] = preimage - } - - if s.snaps != nil { - // In order for the miner to be able to use and make additions - // to the snapshot tree, we need to copy that aswell. - // Otherwise, any block mined by ourselves will cause gaps in the tree, - // and force the miner to operate trie-backed only - state.snaps = s.snaps - state.snap = s.snap - // deep copy needed - state.snapDestructs = make(map[common.Address]struct{}) //addressStructPool.Get().(map[common.Address]struct{}) - s.snapParallelLock.RLock() - for k, v := range s.snapDestructs { - state.snapDestructs[k] = v - } - s.snapParallelLock.RUnlock() - // - state.snapAccounts = make(map[common.Address][]byte) // snapAccountPool.Get().(map[common.Address][]byte) - for k, v := range s.snapAccounts { - state.snapAccounts[k] = v - } - state.snapStorage = make(map[common.Address]map[string][]byte) // snapStoragePool.Get().(map[common.Address]map[string][]byte) - for k, v := range s.snapStorage { - temp := make(map[string][]byte) // snapStorageValuePool.Get().(map[string][]byte) - for kk, vv := range v { - temp[kk] = vv - } - state.snapStorage[k] = temp - } - // trie prefetch should be done by dispacther on StateObject Merge, - // disable it in parallel slot - // state.prefetcher = s.prefetcher - } - return state -} - -// Snapshot returns an identifier for the current revision of the state. -func (s *StateDB) Snapshot() int { - id := s.nextRevisionId - s.nextRevisionId++ - s.validRevisions = append(s.validRevisions, revision{id, s.journal.length()}) - return id -} - -// RevertToSnapshot reverts all state changes made since the given revision. 
-func (s *StateDB) RevertToSnapshot(revid int) { - // Find the snapshot in the stack of valid snapshots. - idx := sort.Search(len(s.validRevisions), func(i int) bool { - return s.validRevisions[i].id >= revid - }) - if idx == len(s.validRevisions) || s.validRevisions[idx].id != revid { - panic(fmt.Errorf("revision id %v cannot be reverted", revid)) - } - snapshot := s.validRevisions[idx].journalIndex - - // Replay the journal to undo changes and remove invalidated snapshots - s.journal.revert(s, snapshot) - s.validRevisions = s.validRevisions[:idx] -} - -// GetRefund returns the current value of the refund counter. -func (s *StateDB) GetRefund() uint64 { - return s.refund -} - -// GetRefund returns the current value of the refund counter. -func (s *StateDB) WaitPipeVerification() error { - // We need wait for the parent trie to commit - if s.snap != nil { - if valid := s.snap.WaitAndGetVerifyRes(); !valid { - return fmt.Errorf("verification on parent snap failed") - } - } - return nil +// GetRefund returns the current value of the refund counter. 
+func (s *StateDB) WaitPipeVerification() error { + // We need wait for the parent trie to commit + if s.snap != nil { + if valid := s.snap.WaitAndGetVerifyRes(); !valid { + return fmt.Errorf("verification on parent snap failed") + } + } + return nil } // Finalise finalises the state by removing the s destructed objects and clears @@ -2955,149 +1871,1515 @@ func (s *StateDB) Commit(failPostCommitFunc func(), postCommitFuncs ...func() er return common.Hash{}, nil, r } } - root := s.stateRoot - if s.pipeCommit { - root = s.expectedRoot - } - - return root, diffLayer, nil + root := s.stateRoot + if s.pipeCommit { + root = s.expectedRoot + } + + return root, diffLayer, nil +} + +func (s *StateDB) DiffLayerToSnap(diffLayer *types.DiffLayer) (map[common.Address]struct{}, map[common.Address][]byte, map[common.Address]map[string][]byte, error) { + snapDestructs := make(map[common.Address]struct{}) + snapAccounts := make(map[common.Address][]byte) + snapStorage := make(map[common.Address]map[string][]byte) + + for _, des := range diffLayer.Destructs { + snapDestructs[des] = struct{}{} + } + for _, account := range diffLayer.Accounts { + snapAccounts[account.Account] = account.Blob + } + for _, storage := range diffLayer.Storages { + // should never happen + if len(storage.Keys) != len(storage.Vals) { + return nil, nil, nil, errors.New("invalid diffLayer: length of keys and values mismatch") + } + snapStorage[storage.Account] = make(map[string][]byte, len(storage.Keys)) + n := len(storage.Keys) + for i := 0; i < n; i++ { + snapStorage[storage.Account][storage.Keys[i]] = storage.Vals[i] + } + } + return snapDestructs, snapAccounts, snapStorage, nil +} + +func (s *StateDB) SnapToDiffLayer() ([]common.Address, []types.DiffAccount, []types.DiffStorage) { + destructs := make([]common.Address, 0, len(s.snapDestructs)) + for account := range s.snapDestructs { + destructs = append(destructs, account) + } + accounts := make([]types.DiffAccount, 0, len(s.snapAccounts)) + for 
accountHash, account := range s.snapAccounts { + accounts = append(accounts, types.DiffAccount{ + Account: accountHash, + Blob: account, + }) + } + storages := make([]types.DiffStorage, 0, len(s.snapStorage)) + for accountHash, storage := range s.snapStorage { + keys := make([]string, 0, len(storage)) + values := make([][]byte, 0, len(storage)) + for k, v := range storage { + keys = append(keys, k) + values = append(values, v) + } + storages = append(storages, types.DiffStorage{ + Account: accountHash, + Keys: keys, + Vals: values, + }) + } + return destructs, accounts, storages +} + +// PrepareAccessList handles the preparatory steps for executing a state transition with +// regards to both EIP-2929 and EIP-2930: +// +// - Add sender to access list (2929) +// - Add destination to access list (2929) +// - Add precompiles to access list (2929) +// - Add the contents of the optional tx access list (2930) +// +// This method should only be called if Yolov3/Berlin/2929+2930 is applicable at the current number. 
+func (s *StateDB) PrepareAccessList(sender common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) { + s.AddAddressToAccessList(sender) + if dst != nil { + s.AddAddressToAccessList(*dst) + // If it's a create-tx, the destination will be added inside evm.create + } + for _, addr := range precompiles { + s.AddAddressToAccessList(addr) + } + for _, el := range list { + s.AddAddressToAccessList(el.Address) + for _, key := range el.StorageKeys { + s.AddSlotToAccessList(el.Address, key) + } + } +} + +// AddAddressToAccessList adds the given address to the access list +func (s *StateDB) AddAddressToAccessList(addr common.Address) { + if s.accessList == nil { + s.accessList = newAccessList() + } + if s.accessList.AddAddress(addr) { + s.journal.append(accessListAddAccountChange{&addr}) + } +} + +// AddSlotToAccessList adds the given (address, slot)-tuple to the access list +func (s *StateDB) AddSlotToAccessList(addr common.Address, slot common.Hash) { + if s.accessList == nil { + s.accessList = newAccessList() + } + addrMod, slotMod := s.accessList.AddSlot(addr, slot) + if addrMod { + // In practice, this should not happen, since there is no way to enter the + // scope of 'address' without having the 'address' become already added + // to the access list (via call-variant, create, etc). + // Better safe than sorry, though + s.journal.append(accessListAddAccountChange{&addr}) + } + if slotMod { + s.journal.append(accessListAddSlotChange{ + address: &addr, + slot: &slot, + }) + } +} + +// AddressInAccessList returns true if the given address is in the access list. +func (s *StateDB) AddressInAccessList(addr common.Address) bool { + if s.accessList == nil { + return false + } + return s.accessList.ContainsAddress(addr) +} + +// SlotInAccessList returns true if the given (address, slot)-tuple is in the access list. 
+func (s *StateDB) SlotInAccessList(addr common.Address, slot common.Hash) (addressPresent bool, slotPresent bool) { + if s.accessList == nil { + return false, false + } + return s.accessList.Contains(addr, slot) +} + +func (s *StateDB) GetDirtyAccounts() []common.Address { + accounts := make([]common.Address, 0, len(s.stateObjectsDirty)) + for account := range s.stateObjectsDirty { + accounts = append(accounts, account) + } + return accounts +} + +func (s *StateDB) GetStorage(address common.Address) *sync.Map { + return s.storagePool.getStorage(address) +} + +// PrepareForParallel prepares for state db to be used in parallel execution mode. +func (s *StateDB) PrepareForParallel() { + s.isParallel = true + s.parallel.stateObjects = &StateObjectSyncMap{} +} + +// MergeSlotDB is for Parallel execution mode, when the transaction has been +// finalized(dirty -> pending) on execution slot, the execution results should be +// merged back to the main StateDB. +// And it will return and keep the slot's change list for later conflict detect. 
+func (s *StateDB) MergeSlotDB(slotDb *ParallelStateDB, slotReceipt *types.Receipt, txIndex int) { + // receipt.Logs use unified log index within a block + // align slotDB's log index to the block stateDB's logSize + for _, l := range slotReceipt.Logs { + l.Index += s.logSize + } + s.logSize += slotDb.logSize + + // before merge, pay the gas fee first: AddBalance to consensus.SystemAddress + systemAddress := slotDb.parallel.systemAddress + if slotDb.parallel.keepSystemAddressBalance { + s.SetBalance(systemAddress, slotDb.GetBalance(systemAddress)) + } else { + s.AddBalance(systemAddress, slotDb.GetBalance(systemAddress)) + } + + // only merge dirty objects + addressesToPrefetch := make([][]byte, 0, len(slotDb.stateObjectsDirty)) + for addr := range slotDb.stateObjectsDirty { + if _, exist := s.stateObjectsDirty[addr]; !exist { + s.stateObjectsDirty[addr] = struct{}{} + } + // system address is EOA account, it should have no storage change + if addr == systemAddress { + continue + } + + // stateObjects: KV, balance, nonce... + dirtyObj, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr] + if !ok { + log.Error("parallel merge, but dirty object not exist!", "SlotIndex", slotDb.parallel.SlotIndex, "txIndex:", slotDb.txIndex, "addr", addr) + continue + } + mainObj, exist := s.loadStateObj(addr) + if !exist { // fixme: it is also state change + // addr not exist on main DB, do ownership transfer + // dirtyObj.db = s + // dirtyObj.finalise(true) // true: prefetch on dispatcher + mainObj = dirtyObj.deepCopy(s) + mainObj.finalise(true) + s.storeStateObj(addr, mainObj) + // fixme: should not delete, would cause unconfirmed DB incorrect? + // delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership, fixme: shared read? + if dirtyObj.deleted { + // remove the addr from snapAccounts&snapStorage only when object is deleted. 
+ // "deleted" is not equal to "snapDestructs", since createObject() will add an addr for + // snapDestructs to destroy previous object, while it will keep the addr in snapAccounts & snapStorage + delete(s.snapAccounts, addr) + delete(s.snapStorage, addr) + } + } else { + // addr already in main DB, do merge: balance, KV, code, State(create, suicide) + // can not do copy or ownership transfer directly, since dirtyObj could have outdated + // data(may be updated within the conflict window) + + var newMainObj = mainObj // we don't need to copy the object since the storages are thread safe + if _, ok := slotDb.parallel.addrStateChangesInSlot[addr]; ok { + // there are 3 kinds of state change: + // 1.Suicide + // 2.Empty Delete + // 3.createObject + // a.AddBalance,SetState to a nonexistent or deleted(suicide, empty delete) address. + // b.CreateAccount: like the DAO fork, regenerate an account carrying its balance without KV + // For these state changes, do ownership transfer for efficiency: + // dirtyObj.db = s + // newMainObj = dirtyObj + newMainObj = dirtyObj.deepCopy(s) + // should not delete, would cause unconfirmed DB incorrect. + // delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership, fixme: shared read? + if dirtyObj.deleted { + // remove the addr from snapAccounts&snapStorage only when object is deleted.
+ // "deleted" is not equal to "snapDestructs", since createObject() will add an addr for + // snapDestructs to destroy previous object, while it will keep the addr in snapAccounts & snapAccounts + delete(s.snapAccounts, addr) + delete(s.snapStorage, addr) + } + } else { + // deepCopy a temporary *StateObject for safety, since slot could read the address, + // dispatch should avoid overwrite the StateObject directly otherwise, it could + // crash for: concurrent map iteration and map write + + if _, balanced := slotDb.parallel.balanceChangesInSlot[addr]; balanced { + newMainObj.SetBalance(dirtyObj.Balance()) + } + if _, coded := slotDb.parallel.codeChangesInSlot[addr]; coded { + newMainObj.code = dirtyObj.code + newMainObj.data.CodeHash = dirtyObj.data.CodeHash + newMainObj.dirtyCode = true + } + if keys, stated := slotDb.parallel.kvChangesInSlot[addr]; stated { + newMainObj.MergeSlotObject(s.db, dirtyObj, keys) + } + if _, nonced := slotDb.parallel.nonceChangesInSlot[addr]; nonced { + // dirtyObj.Nonce() should not be less than newMainObj + newMainObj.setNonce(dirtyObj.Nonce()) + } + } + newMainObj.finalise(true) // true: prefetch on dispatcher + // update the object + s.storeStateObj(addr, newMainObj) + } + addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure + } + + if s.prefetcher != nil && len(addressesToPrefetch) > 0 { + s.prefetcher.prefetch(s.originalRoot, addressesToPrefetch, emptyAddr) // prefetch for trie node of account + } + + for addr := range slotDb.stateObjectsPending { + if _, exist := s.stateObjectsPending[addr]; !exist { + s.stateObjectsPending[addr] = struct{}{} + } + } + + // slotDb.logs: logs will be kept in receipts, no need to do merge + + for hash, preimage := range slotDb.preimages { + s.preimages[hash] = preimage + } + if s.accessList != nil { + // fixme: accessList is not enabled yet, but it should use merge rather than overwrite Copy + s.accessList = slotDb.accessList.Copy() + } + + if 
slotDb.snaps != nil { + for k := range slotDb.snapDestructs { + // There could be a race condition for parallel transaction execution + // One transaction add balance 0 to an empty address, will delete it(delete empty is enabled). + // While another concurrent transaction could add a none-zero balance to it, make it not empty + // We fixed it by add a addr state read record for add balance 0 + s.snapParallelLock.Lock() + s.snapDestructs[k] = struct{}{} + s.snapParallelLock.Unlock() + } + + // slotDb.snapAccounts should be empty, comment out and to be deleted later + // for k, v := range slotDb.snapAccounts { + // s.snapAccounts[k] = v + // } + // slotDb.snapStorage should be empty, comment out and to be deleted later + // for k, v := range slotDb.snapStorage { + // temp := make(map[string][]byte) + // for kk, vv := range v { + // temp[kk] = vv + // } + // s.snapStorage[k] = temp + // } + } +} + +type ParallelStateDB struct { + StateDB +} + +// NewSlotDB creates a new State DB based on the provided StateDB. +// With parallel, each execution slot would have its own StateDB. 
+func NewSlotDB(db *StateDB, systemAddr common.Address, txIndex int, baseTxIndex int, keepSystem bool, + unconfirmedDBs *sync.Map /*map[int]*ParallelStateDB*/) *ParallelStateDB { + slotDB := db.CopyForSlot() + slotDB.txIndex = txIndex + slotDB.originalRoot = db.originalRoot + slotDB.parallel.baseStateDB = db + slotDB.parallel.baseTxIndex = baseTxIndex + slotDB.parallel.systemAddress = systemAddr + slotDB.parallel.systemAddressOpsCount = 0 + slotDB.parallel.keepSystemAddressBalance = keepSystem + slotDB.storagePool = NewStoragePool() + slotDB.EnableWriteOnSharedStorage() + for index := baseTxIndex + 1; index < slotDB.txIndex; index++ { // txIndex + unconfirmedDB, ok := unconfirmedDBs.Load(index) + if ok { + slotDB.parallel.unconfirmedDBInShot[index] = unconfirmedDB.(*ParallelStateDB) + } + } + + // All transactions will pay gas fee to the systemAddr at the end, this address is + // deemed to conflict, we handle it specially, clear it now and set it back to the main + // StateDB later; + // But there are transactions that will try to read systemAddr's balance, such as: + // https://bscscan.com/tx/0xcd69755be1d2f55af259441ff5ee2f312830b8539899e82488a21e85bc121a2a. + // It will trigger transaction redo and keepSystem will be marked as true. 
+ if !keepSystem { + slotDB.SetBalance(systemAddr, big.NewInt(0)) + } + + return slotDB +} + +// RevertSlotDB keep the Read list for conflict detect, +// discard all state changes except: +// - nonce and balance of from address +// - balance of system address: will be used on merge to update SystemAddress's balance +func (s *ParallelStateDB) RevertSlotDB(from common.Address) { + s.parallel.kvChangesInSlot = make(map[common.Address]StateKeys) + + // balance := s.parallel.balanceChangesInSlot[from] + s.parallel.nonceChangesInSlot = make(map[common.Address]struct{}) + s.parallel.balanceChangesInSlot = make(map[common.Address]struct{}, 1) + s.parallel.addrStateChangesInSlot = make(map[common.Address]bool) // 0: created, 1: deleted + + selfStateObject := s.parallel.dirtiedStateObjectsInSlot[from] + systemAddress := s.parallel.systemAddress + systemStateObject := s.parallel.dirtiedStateObjectsInSlot[systemAddress] + s.parallel.dirtiedStateObjectsInSlot = make(map[common.Address]*StateObject, 2) + // keep these elements + s.parallel.dirtiedStateObjectsInSlot[from] = selfStateObject + s.parallel.dirtiedStateObjectsInSlot[systemAddress] = systemStateObject + s.parallel.balanceChangesInSlot[from] = struct{}{} + s.parallel.balanceChangesInSlot[systemAddress] = struct{}{} + s.parallel.nonceChangesInSlot[from] = struct{}{} +} + +func (s *ParallelStateDB) getBaseStateDB() *StateDB { + return &s.StateDB +} + +func (s *ParallelStateDB) SetSlotIndex(index int) { + s.parallel.SlotIndex = index +} + +// for parallel execution mode, try to get dirty StateObject in slot first. +// it is mainly used by journal revert right now. 
+func (s *ParallelStateDB) getStateObject(addr common.Address) *StateObject { + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + return obj + } + // can not call s.StateDB.getStateObject(), since `newObject` need ParallelStateDB as the interface + return s.getStateObjectNoSlot(addr) +} + +func (s *ParallelStateDB) storeStateObj(addr common.Address, stateObject *StateObject) { + // When a state object is stored into s.parallel.stateObjects, + // it belongs to base StateDB, it is confirmed and valid. + stateObject.db = s.parallel.baseStateDB + stateObject.dbItf = s.parallel.baseStateDB + // the object could be create in SlotDB, if it got the object from DB and + // update it to the shared `s.parallel.stateObjects`` + stateObject.db.storeParallelLock.Lock() + if _, ok := s.parallel.stateObjects.Load(addr); !ok { + s.parallel.stateObjects.Store(addr, stateObject) + } + stateObject.db.storeParallelLock.Unlock() +} + +func (s *ParallelStateDB) getStateObjectNoSlot(addr common.Address) *StateObject { + if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted { + return obj + } + return nil +} + +// createObject creates a new state object. If there is an existing account with +// the given address, it is overwritten and returned as the second return value. 
+
+ // prev is used for CreateAccount to get its balance
+ // Parallel mode:
+ // if prev in dirty: revert is ok
+ // if prev in unconfirmed DB: addr state read record, revert should not put it back
+ // if prev in main DB: addr state read record, revert should not put it back
+ // if prev does not exist: addr state read record,
+
+ // `prev` is used to handle revert, to recover with the `prev` object
+ // In Parallel mode, we only need to recover to `prev` in SlotDB,
+ // a.if it is not in SlotDB, `revert` will remove it from the SlotDB
+ // b.if it exists in SlotDB, `revert` will recover to the `prev` in SlotDB
+ // c.as `snapDestructs` it is the same
+ func (s *ParallelStateDB) createObject(addr common.Address) (newobj *StateObject) {
+ log.Debug("ParallelStateDB createObject", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex())
+
+ // do not get from unconfirmed DB, since it will have problems on revert
+ prev := s.parallel.dirtiedStateObjectsInSlot[addr]
+
+ var prevdestruct bool
+
+ if s.snap != nil && prev != nil {
+ _, prevdestruct = s.snapDestructs[prev.address] // fixme, record the snapshot read for create Account
+ s.parallel.addrSnapDestructsReadsInSlot[addr] = prevdestruct
+ if !prevdestruct {
+ // To destroy the previous trie node first and update the trie tree
+ // with the new object on block commit.
+ s.snapDestructs[prev.address] = struct{}{}
+ }
+ }
+ newobj = newObject(s, s.isParallel, addr, Account{})
+ newobj.setNonce(0) // sets the object to dirty
+ if prev == nil {
+ s.journal.append(createObjectChange{account: &addr})
+ } else {
+ s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct})
+ }
+
+ // s.parallel.dirtiedStateObjectsInSlot[addr] = newobj // would change the behavior of AddBalance...
+ s.parallel.addrStateChangesInSlot[addr] = true // the object is created
+ s.parallel.nonceChangesInSlot[addr] = struct{}{}
+ s.parallel.balanceChangesInSlot[addr] = struct{}{}
+ s.parallel.codeChangesInSlot[addr] = struct{}{}
+ // notice: all the KVs are cleared if any
+ s.parallel.kvChangesInSlot[addr] = make(StateKeys)
+ return newobj
+ }
+
+ // getDeletedStateObject is similar to getStateObject, but instead of returning
+ // nil for a deleted state object, it returns the actual object with the deleted
+ // flag set. This is needed by the state journal to revert to the correct self-
+ // destructed object instead of wiping all knowledge about the state object.
+ func (s *ParallelStateDB) getDeletedStateObject(addr common.Address) *StateObject {
+ // Prefer live objects if any is available
+ if obj, _ := s.getStateObjectFromStateObjects(addr); obj != nil {
+ return obj
+ }
+ data, ok := s.getStateObjectFromSnapshotOrTrie(addr)
+ if !ok {
+ return nil
+ }
+ // Insert into the live set
+ // if obj, ok := s.loadStateObj(addr); ok {
+ // fixme: concurrent not safe, merge could update it...
+ // return obj
+ // }
+ // this is why we have to use a separate getDeletedStateObject for ParallelStateDB
+ // `s` has to be the ParallelStateDB
+ obj := newObject(s, s.isParallel, addr, *data)
+ s.storeStateObj(addr, obj)
+ // s.SetStateObject(obj)
+ return obj
+ }
+
+ // GetOrNewStateObject retrieves a state object or creates a new state object if nil.
+ // dirtyInSlot -> Unconfirmed DB -> main DB -> snapshot, no?
create one +func (s *ParallelStateDB) GetOrNewStateObject(addr common.Address) *StateObject { + log.Debug("ParallelStateDB GetOrNewStateObject", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + var stateObject *StateObject = nil + exist := true + if stateObject, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + return stateObject + } + stateObject, _ = s.getStateObjectFromUnconfirmedDB(addr) + + if stateObject == nil { + stateObject = s.getStateObjectNoSlot(addr) // try to get from base db + } + if stateObject == nil || stateObject.deleted || stateObject.suicided { + stateObject = s.createObject(addr) + exist = false + } + + s.parallel.addrStateReadsInSlot[addr] = exist // true: exist, false: not exist + return stateObject +} + +// Exist reports whether the given account address exists in the state. +// Notably this also returns true for suicided accounts. +func (s *ParallelStateDB) Exist(addr common.Address) bool { + log.Debug("ParallelStateDB Exist", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + // 1.Try to get from dirty + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // dirty object should not be deleted, since deleted is only flagged on finalise + // and if it is suicided in contract call, suicide is taken as exist until it is finalised + // todo: add a check here, to be removed later + if obj.deleted || obj.suicided { + log.Error("Exist in dirty, but marked as deleted or suicided", + "txIndex", s.txIndex, "baseTxIndex:", s.parallel.baseTxIndex) + } + return true + } + // 2.Try to get from uncomfirmed & main DB + // 2.1 Already read before + if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok { + return exist + } + // 2.2 Try to get from unconfirmed DB if exist + if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { + s.parallel.addrStateReadsInSlot[addr] = exist // update and cache + return exist + } + + // 3.Try to get from main StateDB + exist := s.getStateObjectNoSlot(addr) != nil + 
s.parallel.addrStateReadsInSlot[addr] = exist // update and cache + return exist +} + +// Empty returns whether the state object is either non-existent +// or empty according to the EIP161 specification (balance = nonce = code = 0) +func (s *ParallelStateDB) Empty(addr common.Address) bool { + log.Debug("ParallelStateDB Empty", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + // 1.Try to get from dirty + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // dirty object is light copied and fixup on need, + // empty could be wrong, except it is created with this TX + if _, ok := s.parallel.addrStateChangesInSlot[addr]; ok { + return obj.empty() + } + // so we have to check it manually + // empty means: Nonce == 0 && Balance == 0 && CodeHash == emptyCodeHash + if s.GetBalance(addr).Sign() != 0 { // check balance first, since it is most likely not zero + return false + } + if s.GetNonce(addr) != 0 { + return false + } + codeHash := s.GetCodeHash(addr) + return bytes.Equal(codeHash.Bytes(), emptyCodeHash) // code is empty, the object is empty + } + // 2.Try to get from uncomfirmed & main DB + // 2.1 Already read before + if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok { + // exist means not empty + return !exist + } + // 2.2 Try to get from unconfirmed DB if exist + if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { + s.parallel.addrStateReadsInSlot[addr] = exist // update and cache + return !exist + } + + so := s.getStateObjectNoSlot(addr) + empty := (so == nil || so.empty()) + s.parallel.addrStateReadsInSlot[addr] = !empty // update and cache + return empty +} + +// GetBalance retrieves the balance from the given address or 0 if object not found +// GetFrom the dirty list => from unconfirmed DB => get from main stateDB +func (s *ParallelStateDB) GetBalance(addr common.Address) *big.Int { + log.Debug("ParallelStateDB GetBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + if addr == 
s.parallel.systemAddress { + s.parallel.systemAddressOpsCount++ + } + // 1.Try to get from dirty + if _, ok := s.parallel.balanceChangesInSlot[addr]; ok { + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on balance fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup balance based on unconfirmed DB or main DB + return obj.Balance() + } + } + // 2.Try to get from uncomfirmed DB or main DB + // 2.1 Already read before + if balance, ok := s.parallel.balanceReadsInSlot[addr]; ok { + return balance + } + // 2.2 Try to get from unconfirmed DB if exist + if balance := s.getBalanceFromUnconfirmedDB(addr); balance != nil { + s.parallel.balanceReadsInSlot[addr] = balance + return balance + } + + // 3. Try to get from main StateObejct + balance := common.Big0 + stateObject := s.getStateObjectNoSlot(addr) + if stateObject != nil { + balance = stateObject.Balance() + } + s.parallel.balanceReadsInSlot[addr] = balance + return balance +} + +func (s *ParallelStateDB) GetNonce(addr common.Address) uint64 { + log.Debug("ParallelStateDB GetNonce", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + // 1.Try to get from dirty + if _, ok := s.parallel.nonceChangesInSlot[addr]; ok { + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on nonce fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup nonce based on unconfirmed DB or main DB + return obj.Nonce() + } + } + // 2.Try to get from uncomfirmed DB or main DB + // 2.1 Already read before + if nonce, ok := s.parallel.nonceReadsInSlot[addr]; ok { + return nonce + } + // 2.2 Try to get from unconfirmed DB if exist + if nonce, ok := s.getNonceFromUnconfirmedDB(addr); ok { + s.parallel.nonceReadsInSlot[addr] = nonce + return nonce + } + + // 3.Try to get from main StateDB + var nonce uint64 = 0 + stateObject := s.getStateObjectNoSlot(addr) + if stateObject != nil { + nonce = stateObject.Nonce() + } + s.parallel.nonceReadsInSlot[addr] = 
nonce + + return nonce +} + +func (s *ParallelStateDB) GetCode(addr common.Address) []byte { + log.Debug("ParallelStateDB GetCode", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + // 1.Try to get from dirty + if _, ok := s.parallel.codeChangesInSlot[addr]; ok { + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on code fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup code based on unconfirmed DB or main DB + code := obj.Code(s.db) + return code + } + } + // 2.Try to get from uncomfirmed DB or main DB + // 2.1 Already read before + if code, ok := s.parallel.codeReadsInSlot[addr]; ok { + return code + } + // 2.2 Try to get from unconfirmed DB if exist + if code, ok := s.getCodeFromUnconfirmedDB(addr); ok { + s.parallel.codeReadsInSlot[addr] = code + return code + } + + // 3. Try to get from main StateObejct + stateObject := s.getStateObjectNoSlot(addr) + var code []byte + if stateObject != nil { + code = stateObject.Code(s.db) + } + s.parallel.codeReadsInSlot[addr] = code + return code +} + +func (s *ParallelStateDB) GetCodeSize(addr common.Address) int { + log.Debug("ParallelStateDB GetCodeSize", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + // 1.Try to get from dirty + if _, ok := s.parallel.codeChangesInSlot[addr]; ok { + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on code fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup code based on unconfirmed DB or main DB + return obj.CodeSize(s.db) + } + } + // 2.Try to get from uncomfirmed DB or main DB + // 2.1 Already read before + if code, ok := s.parallel.codeReadsInSlot[addr]; ok { + return len(code) // len(nil) is 0 too + } + // 2.2 Try to get from unconfirmed DB if exist + if code, ok := s.getCodeFromUnconfirmedDB(addr); ok { + s.parallel.codeReadsInSlot[addr] = code + return len(code) // len(nil) is 0 too + } + + // 3. 
Try to get from main StateObejct + var codeSize int = 0 + var code []byte + stateObject := s.getStateObjectNoSlot(addr) + + if stateObject != nil { + code = stateObject.Code(s.db) + codeSize = stateObject.CodeSize(s.db) + } + s.parallel.codeReadsInSlot[addr] = code + return codeSize +} + +// return value of GetCodeHash: +// - common.Hash{}: the address does not exist +// - emptyCodeHash: the address exist, but code is empty +// - others: the address exist, and code is not empty +func (s *ParallelStateDB) GetCodeHash(addr common.Address) common.Hash { + log.Debug("ParallelStateDB GetCodeHash", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + // 1.Try to get from dirty + if _, ok := s.parallel.codeChangesInSlot[addr]; ok { + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on code fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup balance based on unconfirmed DB or main DB + return common.BytesToHash(obj.CodeHash()) + } + } + // 2.Try to get from uncomfirmed DB or main DB + // 2.1 Already read before + if codeHash, ok := s.parallel.codeHashReadsInSlot[addr]; ok { + return codeHash + } + // 2.2 Try to get from unconfirmed DB if exist + if codeHash, ok := s.getCodeHashFromUnconfirmedDB(addr); ok { + s.parallel.codeHashReadsInSlot[addr] = codeHash + return codeHash + } + // 3. Try to get from main StateObejct + stateObject := s.getStateObjectNoSlot(addr) + codeHash := common.Hash{} + if stateObject != nil { + codeHash = common.BytesToHash(stateObject.CodeHash()) + } + s.parallel.codeHashReadsInSlot[addr] = codeHash + return codeHash +} + +// GetState retrieves a value from the given account's storage trie. 
+// For parallel mode wih, get from the state in order: +// -> self dirty, both Slot & MainProcessor +// -> pending of self: Slot on merge +// -> pending of unconfirmed DB +// -> pending of main StateDB +// -> origin +func (s *ParallelStateDB) GetState(addr common.Address, hash common.Hash) common.Hash { + log.Debug("ParallelStateDB GetState", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + // 1.Try to get from dirty + if exist, ok := s.parallel.addrStateChangesInSlot[addr]; ok { + if !exist { + return common.Hash{} + } + obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot + return obj.GetState(s.db, hash) + } + if keys, ok := s.parallel.kvChangesInSlot[addr]; ok { + if _, ok := keys[hash]; ok { + obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot + return obj.GetState(s.db, hash) + } + } + // 2.Try to get from uncomfirmed DB or main DB + // 2.1 Already read before + if storage, ok := s.parallel.kvReadsInSlot[addr]; ok { + if val, ok := storage.GetValue(hash); ok { + return val + } + } + // 2.2 Try to get from unconfirmed DB if exist + if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok { + if s.parallel.kvReadsInSlot[addr] == nil { + s.parallel.kvReadsInSlot[addr] = newStorage(false) + } + s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache + return val + } + + // 3.Get from main StateDB + stateObject := s.getStateObjectNoSlot(addr) + val := common.Hash{} + if stateObject != nil { + val = stateObject.GetState(s.db, hash) + } + if s.parallel.kvReadsInSlot[addr] == nil { + s.parallel.kvReadsInSlot[addr] = newStorage(false) + } + s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache + return val +} + +// GetCommittedState retrieves a value from the given account's committed storage trie. 
+func (s *ParallelStateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash { + log.Debug("ParallelStateDB GetCommittedState", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + // 1.No need to get from pending of itself even on merge, since stateobject in SlotDB won't do finalise + // 2.Try to get from uncomfirmed DB or main DB + // KVs in unconfirmed DB can be seen as pending storage + // KVs in main DB are merged from SlotDB and has done finalise() on merge, can be seen as pending storage too. + // 2.1 Already read before + if storage, ok := s.parallel.kvReadsInSlot[addr]; ok { + if val, ok := storage.GetValue(hash); ok { + return val + } + } + // 2.2 Try to get from unconfirmed DB if exist + if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok { + if s.parallel.kvReadsInSlot[addr] == nil { + s.parallel.kvReadsInSlot[addr] = newStorage(false) + } + s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache + return val + } + + // 3. Try to get from main DB + stateObject := s.getStateObjectNoSlot(addr) + val := common.Hash{} + if stateObject != nil { + val = stateObject.GetCommittedState(s.db, hash) + } + if s.parallel.kvReadsInSlot[addr] == nil { + s.parallel.kvReadsInSlot[addr] = newStorage(false) + } + s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache + return val +} + +func (s *ParallelStateDB) HasSuicided(addr common.Address) bool { + log.Debug("ParallelStateDB HasSuicided", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + // 1.Try to get from dirty + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + return obj.suicided + } + // 2.Try to get from uncomfirmed + if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { + return !exist + } + + stateObject := s.getStateObjectNoSlot(addr) + if stateObject != nil { + return stateObject.suicided + } + return false +} + +// AddBalance adds amount to the account associated with addr. 
+func (s *ParallelStateDB) AddBalance(addr common.Address, amount *big.Int) { + // add balance will perform a read operation first + // s.parallel.balanceReadsInSlot[addr] = struct{}{} // fixme: to make the the balance valid, since unconfirmed would refer it. + // if amount.Sign() == 0 { + // if amount == 0, no balance change, but there is still an empty check. + // take this empty check as addr state read(create, suicide, empty delete) + // s.parallel.addrStateReadsInSlot[addr] = struct{}{} + // } + log.Debug("ParallelStateDB AddBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + if addr == s.parallel.systemAddress { + s.parallel.systemAddressOpsCount++ + } + // if amount.Sign() != 0 { // todo: to reenable it + if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + newStateObject := stateObject.lightCopy(s) // light copy from main DB + // do balance fixup from the confirmed DB, it could be more reliable than main DB + balance := s.GetBalance(addr) + newStateObject.setBalance(balance) + // s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance() // could read from main DB or unconfirmed DB + newStateObject.AddBalance(amount) + s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + s.parallel.balanceChangesInSlot[addr] = struct{}{} + return + } + // already dirty, make sure the balance if fixed up + // if stateObject.Balance() + if addr != s.parallel.systemAddress { + if stateObject.Balance().Cmp(s.GetBalance(addr)) != 0 { + log.Warn("AddBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr, + "stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", s.GetBalance(addr)) + stateObject.setBalance(s.GetBalance(addr)) + } + } + + stateObject.AddBalance(amount) + s.parallel.balanceChangesInSlot[addr] = struct{}{} + } +} + +// SubBalance subtracts amount from the account associated with addr. 
+func (s *ParallelStateDB) SubBalance(addr common.Address, amount *big.Int) { + // if amount.Sign() != 0 { + // unlike add, sub 0 balance will not touch empty object + // s.parallel.balanceReadsInSlot[addr] = struct{}{} + // } + log.Debug("ParallelStateDB SubBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + if addr == s.parallel.systemAddress { + s.parallel.systemAddressOpsCount++ + } + + // if amount.Sign() != 0 { // todo: to reenable it + if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + newStateObject := stateObject.lightCopy(s) // light copy from main DB + // do balance fixup from the confirmed DB, it could be more reliable than main DB + balance := s.GetBalance(addr) + newStateObject.setBalance(balance) + // s.parallel.balanceReadsInSlot[addr] = newStateObject.Balance() + newStateObject.SubBalance(amount) + s.parallel.balanceChangesInSlot[addr] = struct{}{} + s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + return + } + // already dirty, make sure the balance if fixed + // if stateObject.Balance() + if addr != s.parallel.systemAddress { + if stateObject.Balance().Cmp(s.GetBalance(addr)) != 0 { + log.Warn("SubBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr, + "stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", s.GetBalance(addr)) + stateObject.setBalance(s.GetBalance(addr)) + } + } + + stateObject.SubBalance(amount) + s.parallel.balanceChangesInSlot[addr] = struct{}{} + } +} + +func (s *ParallelStateDB) SetBalance(addr common.Address, amount *big.Int) { + log.Debug("ParallelStateDB SetBalance", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + if addr == s.parallel.systemAddress { + s.parallel.systemAddressOpsCount++ + } + if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + newStateObject := 
stateObject.lightCopy(s) + // update balance for revert, in case child contract is revertted, + // it should revert to the previous balance + balance := s.GetBalance(addr) + newStateObject.setBalance(balance) + newStateObject.SetBalance(amount) + s.parallel.balanceChangesInSlot[addr] = struct{}{} + s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + return + } + + stateObject.SetBalance(amount) + s.parallel.balanceChangesInSlot[addr] = struct{}{} + } +} + +func (s *ParallelStateDB) SetNonce(addr common.Address, nonce uint64) { + log.Debug("ParallelStateDB SetNonce", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + newStateObject := stateObject.lightCopy(s) + noncePre := s.GetNonce(addr) + newStateObject.setNonce(noncePre) // nonce fixup + newStateObject.SetNonce(nonce) + s.parallel.nonceChangesInSlot[addr] = struct{}{} + s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + return + } + noncePre := s.GetNonce(addr) + stateObject.setNonce(noncePre) // nonce fixup + + stateObject.SetNonce(nonce) + s.parallel.nonceChangesInSlot[addr] = struct{}{} + } +} + +func (s *ParallelStateDB) SetCode(addr common.Address, code []byte) { + log.Debug("ParallelStateDB SetCode", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + codeHash := crypto.Keccak256Hash(code) + if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + newStateObject := stateObject.lightCopy(s) + codePre := s.GetCode(addr) // code fixup + codeHashPre := crypto.Keccak256Hash(codePre) + newStateObject.setCode(codeHashPre, codePre) + + newStateObject.SetCode(codeHash, code) + s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + s.parallel.codeChangesInSlot[addr] = struct{}{} + return + } + codePre := s.GetCode(addr) // code fixup + codeHashPre := 
crypto.Keccak256Hash(codePre) + stateObject.setCode(codeHashPre, codePre) + + stateObject.SetCode(codeHash, code) + s.parallel.codeChangesInSlot[addr] = struct{}{} + } +} + +func (s *ParallelStateDB) SetState(addr common.Address, key, value common.Hash) { + log.Debug("ParallelStateDB SetState", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + stateObject := s.GetOrNewStateObject(addr) // attention: if StateObject's lightCopy, its storage is only a part of the full storage, + if stateObject != nil { + if s.parallel.baseTxIndex+1 == s.txIndex { + // we check if state is unchanged + // only when current transaction is the next transaction to be committed + // fixme: there is a bug, block: 14,962,284, + // stateObject is in dirty (light copy), but the key is in mainStateDB + // stateObject dirty -> committed, will skip mainStateDB dirty + if s.GetState(addr, key) == value { + log.Debug("Skip set same state", "baseTxIndex", s.parallel.baseTxIndex, + "txIndex", s.txIndex, "addr", addr, + "key", key, "value", value) + return + } + } + + if s.parallel.kvChangesInSlot[addr] == nil { + s.parallel.kvChangesInSlot[addr] = make(StateKeys) // make(Storage, defaultNumOfSlots) + } + + if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + newStateObject := stateObject.lightCopy(s) + newStateObject.SetState(s.db, key, value) + s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + return + } + // do State Update + stateObject.SetState(s.db, key, value) + } +} + +// Suicide marks the given account as suicided. +// This clears the account balance. +// +// The account's state object is still available until the state is committed, +// getStateObject will return a non-nil account after Suicide. 
+func (s *ParallelStateDB) Suicide(addr common.Address) bool { + log.Debug("ParallelStateDB Suicide", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) + + var stateObject *StateObject + // 1.Try to get from dirty, it could be suicided inside of contract call + stateObject = s.parallel.dirtiedStateObjectsInSlot[addr] + if stateObject == nil { + // 2.Try to get from uncomfirmed, if deleted return false, since the address does not exist + if obj, ok := s.getStateObjectFromUnconfirmedDB(addr); ok { + stateObject = obj + s.parallel.addrStateReadsInSlot[addr] = !stateObject.deleted // true: exist, false: deleted + if stateObject.deleted { + log.Error("Suicide addr alreay deleted in confirmed DB", "txIndex", s.txIndex, "addr", addr) + return false + } + } + } + + if stateObject == nil { + // 3.Try to get from main StateDB + stateObject = s.getStateObjectNoSlot(addr) + if stateObject == nil { + s.parallel.addrStateReadsInSlot[addr] = false // true: exist, false: deleted + log.Error("Suicide addr not exist", "txIndex", s.txIndex, "addr", addr) + return false + } + s.parallel.addrStateReadsInSlot[addr] = true // true: exist, false: deleted + } + + s.journal.append(suicideChange{ + account: &addr, + prev: stateObject.suicided, // todo: must be false? 
+ prevbalance: new(big.Int).Set(s.GetBalance(addr)), + }) + + if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + // do copy-on-write for suicide "write" + newStateObject := stateObject.lightCopy(s) + newStateObject.markSuicided() + newStateObject.data.Balance = new(big.Int) + s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist any more, + // s.parallel.nonceChangesInSlot[addr] = struct{}{} + s.parallel.balanceChangesInSlot[addr] = struct{}{} + s.parallel.codeChangesInSlot[addr] = struct{}{} + // s.parallel.kvChangesInSlot[addr] = make(StateKeys) // all key changes are discarded + return true + } + s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist any more, + s.parallel.balanceChangesInSlot[addr] = struct{}{} + s.parallel.codeChangesInSlot[addr] = struct{}{} + + stateObject.markSuicided() + stateObject.data.Balance = new(big.Int) + return true +} + +// CreateAccount explicitly creates a state object. If a state object with the address +// already exists the balance is carried over to the new account. +// +// CreateAccount is called during the EVM CREATE operation. The situation might arise that +// a contract does the following: +// +// 1. sends funds to sha(account ++ (nonce + 1)) +// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1) +// +// Carrying over the balance ensures that Ether doesn't disappear. 
+func (s *ParallelStateDB) CreateAccount(addr common.Address) { + // no matter it is got from dirty, unconfirmed or main DB + // if addr not exist, preBalance will be common.Big0, it is same as new(big.Int) which + // is the value newObject(), + preBalance := s.GetBalance(addr) // parallel balance read will be recorded inside of GetBalance + newObj := s.createObject(addr) + newObj.setBalance(new(big.Int).Set(preBalance)) // new big.Int for newObj +} + +// RevertToSnapshot reverts all state changes made since the given revision. +func (s *ParallelStateDB) RevertToSnapshot(revid int) { + // Find the snapshot in the stack of valid snapshots. + idx := sort.Search(len(s.validRevisions), func(i int) bool { + return s.validRevisions[i].id >= revid + }) + if idx == len(s.validRevisions) || s.validRevisions[idx].id != revid { + panic(fmt.Errorf("revision id %v cannot be reverted", revid)) + } + snapshot := s.validRevisions[idx].journalIndex + + // Replay the journal to undo changes and remove invalidated snapshots + s.journal.revert(s, snapshot) + s.validRevisions = s.validRevisions[:idx] +} + +// AddRefund adds gas to the refund counter +// journal.append will use ParallelState for revert +func (s *ParallelStateDB) AddRefund(gas uint64) { // fixme: not needed + s.journal.append(refundChange{prev: s.refund}) + s.refund += gas +} + +// SubRefund removes gas from the refund counter. 
+// This method will panic if the refund counter goes below zero +func (s *ParallelStateDB) SubRefund(gas uint64) { // fixme: not needed + s.journal.append(refundChange{prev: s.refund}) + if gas > s.refund { + // we don't need to panic here if we read the wrong state in parallelm mode + // we just need to redo this transaction + log.Info(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, s.refund), "tx", s.thash.String()) + s.parallel.needsRedo = true + return + } + s.refund -= gas +} + +// For Parallel Execution Mode, it can be seen as Penetrated Access: +// ------------------------------------------------------- +// | BaseTxIndex | Unconfirmed Txs... | Current TxIndex | +// ------------------------------------------------------- +// Access from the unconfirmed DB with range&priority: txIndex -1(previous tx) -> baseTxIndex + 1 +func (s *ParallelStateDB) getBalanceFromUnconfirmedDB(addr common.Address) *big.Int { + if addr == s.parallel.systemAddress { + // never get systemaddress from unconfirmed DB + return nil + } + + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + // 1.Refer the state of address, exist or not in dirtiedStateObjectsInSlot + if obj, exist := db.parallel.dirtiedStateObjectsInSlot[addr]; exist { + balanceHit := false + if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { + balanceHit = true + } + if _, exist := db.parallel.balanceChangesInSlot[addr]; exist { // only changed balance is reliable + balanceHit = true + } + if !balanceHit { + continue + } + balance := obj.Balance() + if obj.deleted { + balance = common.Big0 + } + return balance + } + } + } + return nil +} + +// Similar to getBalanceFromUnconfirmedDB +func (s *ParallelStateDB) getNonceFromUnconfirmedDB(addr common.Address) (uint64, bool) { + if addr == s.parallel.systemAddress { + // never get systemaddress from unconfirmed DB + return 0, false + } + + for i := s.txIndex - 1; i > 
s.parallel.baseTxIndex; i-- { + if unconfirmedDb, ok := s.parallel.unconfirmedDBInShot[i]; ok { + nonceHit := false + if _, ok := unconfirmedDb.parallel.addrStateChangesInSlot[addr]; ok { + nonceHit = true + } else if _, ok := unconfirmedDb.parallel.nonceChangesInSlot[addr]; ok { + nonceHit = true + } + if !nonceHit { + // nonce refer not hit, try next unconfirmedDb + continue + } + // nonce hit, return the nonce + obj := unconfirmedDb.parallel.dirtiedStateObjectsInSlot[addr] + if obj == nil { + // could not exist, if it is changed but reverted + // fixme: revert should remove the change record + log.Debug("Get nonce from UnconfirmedDB, changed but object not exist, ", + "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) + continue + } + nonce := obj.Nonce() + // deleted object with nonce == 0 + if obj.deleted { + nonce = 0 + } + return nonce, true + } + } + return 0, false +} + +// Similar to getBalanceFromUnconfirmedDB +// It is not only for code, but also codeHash and codeSize, we return the *StateObject for convienence. 
+func (s *ParallelStateDB) getCodeFromUnconfirmedDB(addr common.Address) ([]byte, bool) { + if addr == s.parallel.systemAddress { + // never get systemaddress from unconfirmed DB + return nil, false + } + + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + codeHit := false + if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { + codeHit = true + } + if _, exist := db.parallel.codeChangesInSlot[addr]; exist { + codeHit = true + } + if !codeHit { + // try next unconfirmedDb + continue + } + obj := db.parallel.dirtiedStateObjectsInSlot[addr] + if obj == nil { + // could not exist, if it is changed but reverted + // fixme: revert should remove the change record + log.Debug("Get code from UnconfirmedDB, changed but object not exist, ", + "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) + continue + } + code := obj.Code(s.db) + if obj.deleted { + code = nil + } + return code, true + } + } + return nil, false +} + +// Similar to getCodeFromUnconfirmedDB +// but differ when address is deleted or not exist +func (s *ParallelStateDB) getCodeHashFromUnconfirmedDB(addr common.Address) (common.Hash, bool) { + if addr == s.parallel.systemAddress { + // never get systemaddress from unconfirmed DB + return common.Hash{}, false + } + + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + hashHit := false + if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { + hashHit = true + } + if _, exist := db.parallel.codeChangesInSlot[addr]; exist { + hashHit = true + } + if !hashHit { + // try next unconfirmedDb + continue + } + + obj := db.parallel.dirtiedStateObjectsInSlot[addr] + if obj == nil { + // could not exist, if it is changed but reverted + // fixme: revert should remove the change record + log.Debug("Get codeHash from UnconfirmedDB, changed but object not exist, ", + "txIndex", s.txIndex, "referred txIndex", i, "addr", 
addr) + continue + } + codeHash := common.Hash{} + if !obj.deleted { + codeHash = common.BytesToHash(obj.CodeHash()) + } + return codeHash, true + } + } + return common.Hash{}, false +} + +// Similar to getCodeFromUnconfirmedDB +// It is for address state check of: Exist(), Empty() and HasSuicided() +// Since the unconfirmed DB should have done Finalise() with `deleteEmptyObjects = true` +// If the dirty address is empty or suicided, it will be marked as deleted, so we only need to return `deleted` or not. +func (s *ParallelStateDB) getAddrStateFromUnconfirmedDB(addr common.Address) (bool, bool) { + if addr == s.parallel.systemAddress { + // never get systemaddress from unconfirmed DB + return false, false + } + + // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + if exist, ok := db.parallel.addrStateChangesInSlot[addr]; ok { + if _, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + // could not exist, if it is changed but reverted + // fixme: revert should remove the change record + log.Debug("Get addr State from UnconfirmedDB, changed but object not exist, ", + "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) + continue + } + + return exist, true + } + } + } + return false, false } -func (s *StateDB) DiffLayerToSnap(diffLayer *types.DiffLayer) (map[common.Address]struct{}, map[common.Address][]byte, map[common.Address]map[string][]byte, error) { - snapDestructs := make(map[common.Address]struct{}) - snapAccounts := make(map[common.Address][]byte) - snapStorage := make(map[common.Address]map[string][]byte) - - for _, des := range diffLayer.Destructs { - snapDestructs[des] = struct{}{} - } - for _, account := range diffLayer.Accounts { - snapAccounts[account.Account] = account.Blob - } - for _, storage := range diffLayer.Storages { - // should never happen - if len(storage.Keys) != len(storage.Vals) { - 
return nil, nil, nil, errors.New("invalid diffLayer: length of keys and values mismatch") - } - snapStorage[storage.Account] = make(map[string][]byte, len(storage.Keys)) - n := len(storage.Keys) - for i := 0; i < n; i++ { - snapStorage[storage.Account][storage.Keys[i]] = storage.Vals[i] +func (s *ParallelStateDB) getKVFromUnconfirmedDB(addr common.Address, key common.Hash) (common.Hash, bool) { + // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { // if deleted on merge, can get from main StateDB, ok but fixme: concurrent safe + if obj.deleted { + return common.Hash{}, true + } + if _, ok := db.parallel.kvChangesInSlot[addr]; ok { + if val, exist := obj.dirtyStorage.GetValue(key); exist { + return val, true + } + if val, exist := obj.pendingStorage.GetValue(key); exist { // fixme: can be removed + log.Error("Get KV from Unconfirmed StateDB, in pending", + "my txIndex", s.txIndex, "DB's txIndex", i, "addr", addr, + "key", key, "val", val) + return val, true + } + } + } } } - return snapDestructs, snapAccounts, snapStorage, nil + return common.Hash{}, false } -func (s *StateDB) SnapToDiffLayer() ([]common.Address, []types.DiffAccount, []types.DiffStorage) { - destructs := make([]common.Address, 0, len(s.snapDestructs)) - for account := range s.snapDestructs { - destructs = append(destructs, account) - } - accounts := make([]types.DiffAccount, 0, len(s.snapAccounts)) - for accountHash, account := range s.snapAccounts { - accounts = append(accounts, types.DiffAccount{ - Account: accountHash, - Blob: account, - }) - } - storages := make([]types.DiffStorage, 0, len(s.snapStorage)) - for accountHash, storage := range s.snapStorage { - keys := make([]string, 0, len(storage)) - values := make([][]byte, 0, len(storage)) - for k, v := range storage { - keys = append(keys, 
k) - values = append(values, v) +func (s *ParallelStateDB) getStateObjectFromUnconfirmedDB(addr common.Address) (*StateObject, bool) { + // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) + for i := s.txIndex - 1; i > s.parallel.baseTxIndex; i-- { + if db, ok := s.parallel.unconfirmedDBInShot[i]; ok { + if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { // if deleted on merge, can get from main StateDB, ok but fixme: concurrent safe + return obj, true + } } - storages = append(storages, types.DiffStorage{ - Account: accountHash, - Keys: keys, - Vals: values, - }) } - return destructs, accounts, storages + return nil, false } -// PrepareAccessList handles the preparatory steps for executing a state transition with -// regards to both EIP-2929 and EIP-2930: -// -// - Add sender to access list (2929) -// - Add destination to access list (2929) -// - Add precompiles to access list (2929) -// - Add the contents of the optional tx access list (2930) -// -// This method should only be called if Yolov3/Berlin/2929+2930 is applicable at the current number. 
-func (s *StateDB) PrepareAccessList(sender common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) { - s.AddAddressToAccessList(sender) - if dst != nil { - s.AddAddressToAccessList(*dst) - // If it's a create-tx, the destination will be added inside evm.create +func (s *ParallelStateDB) IsParallelReadsValid() bool { + slotDB := s + if !slotDB.parallel.isSlotDB { + log.Error("IsSlotDBReadsValid slotDB should be slot DB", "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) + return false } - for _, addr := range precompiles { - s.AddAddressToAccessList(addr) + + mainDB := slotDB.parallel.baseStateDB + if mainDB.parallel.isSlotDB { + log.Error("IsSlotDBReadsValid s should be main DB", "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) + return false } - for _, el := range list { - s.AddAddressToAccessList(el.Address) - for _, key := range el.StorageKeys { - s.AddSlotToAccessList(el.Address, key) + // for nonce + for addr, nonceSlot := range slotDB.parallel.nonceReadsInSlot { + nonceMain := mainDB.GetNonce(addr) + if nonceSlot != nonceMain { + log.Debug("IsSlotDBReadsValid nonce read is invalid", "addr", addr, + "nonceSlot", nonceSlot, "nonceMain", nonceMain, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false } } -} - -// AddAddressToAccessList adds the given address to the access list -func (s *StateDB) AddAddressToAccessList(addr common.Address) { - if s.accessList == nil { - s.accessList = newAccessList() - } - if s.accessList.AddAddress(addr) { - s.journal.append(accessListAddAccountChange{&addr}) + // balance + for addr, balanceSlot := range slotDB.parallel.balanceReadsInSlot { + if addr != s.parallel.systemAddress { // skip balance check for system address + balanceMain := mainDB.GetBalance(addr) + if balanceSlot.Cmp(balanceMain) != 0 { + log.Debug("IsSlotDBReadsValid balance read is invalid", "addr", addr, + 
"balanceSlot", balanceSlot, "balanceMain", balanceMain, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } } -} - -// AddSlotToAccessList adds the given (address, slot)-tuple to the access list -func (s *StateDB) AddSlotToAccessList(addr common.Address, slot common.Hash) { - if s.accessList == nil { - s.accessList = newAccessList() + // check code + for addr, codeSlot := range slotDB.parallel.codeReadsInSlot { + codeMain := mainDB.GetCode(addr) + if !bytes.Equal(codeSlot, codeMain) { + log.Debug("IsSlotDBReadsValid code read is invalid", "addr", addr, + "len codeSlot", len(codeSlot), "len codeMain", len(codeMain), "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } } - addrMod, slotMod := s.accessList.AddSlot(addr, slot) - if addrMod { - // In practice, this should not happen, since there is no way to enter the - // scope of 'address' without having the 'address' become already added - // to the access list (via call-variant, create, etc). 
- // Better safe than sorry, though - s.journal.append(accessListAddAccountChange{&addr}) + // check codeHash + for addr, codeHashSlot := range slotDB.parallel.codeHashReadsInSlot { + codeHashMain := mainDB.GetCodeHash(addr) + if !bytes.Equal(codeHashSlot.Bytes(), codeHashMain.Bytes()) { + log.Debug("IsSlotDBReadsValid codehash read is invalid", "addr", addr, + "codeHashSlot", codeHashSlot, "codeHashMain", codeHashMain, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } } - if slotMod { - s.journal.append(accessListAddSlotChange{ - address: &addr, - slot: &slot, + // check KV + for addr, slotStorage := range slotDB.parallel.kvReadsInSlot { + conflict := false + slotStorage.Range(func(keySlot, valSlot interface{}) bool { + valMain := mainDB.GetState(addr, keySlot.(common.Hash)) + if !bytes.Equal(valSlot.(common.Hash).Bytes(), valMain.Bytes()) { + log.Debug("IsSlotDBReadsValid KV read is invalid", "addr", addr, + "key", keySlot.(common.Hash), "valSlot", valSlot.(common.Hash), + "valMain", valMain, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + conflict = true + return false // return false, Range will be terminated. + } + return true // return true, Range will try next KV }) + if conflict { + return false + } } -} - -// AddressInAccessList returns true if the given address is in the access list. 
-func (s *StateDB) AddressInAccessList(addr common.Address) bool { - if s.accessList == nil { - return false + // addr state check + for addr, stateSlot := range slotDB.parallel.addrStateReadsInSlot { + stateMain := false // addr not exist + if mainDB.getStateObject(addr) != nil { + stateMain = true // addr exist in main DB + } + if stateSlot != stateMain { + // skip addr state check for system address + if addr != s.parallel.systemAddress { + log.Debug("IsSlotDBReadsValid addrState read invalid(true: exist, false: not exist)", + "addr", addr, "stateSlot", stateSlot, "stateMain", stateMain, + "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } } - return s.accessList.ContainsAddress(addr) -} + // snapshot destructs check -// SlotInAccessList returns true if the given (address, slot)-tuple is in the access list. -func (s *StateDB) SlotInAccessList(addr common.Address, slot common.Hash) (addressPresent bool, slotPresent bool) { - if s.accessList == nil { - return false, false + for addr, destructRead := range slotDB.parallel.addrSnapDestructsReadsInSlot { + mainObj := mainDB.getStateObject(addr) + if mainObj == nil { + log.Debug("IsSlotDBReadsValid snapshot destructs read invalid, address should exist", + "addr", addr, "destruct", destructRead, + "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + _, destructMain := mainDB.snapDestructs[addr] // addr not exist + if destructRead != destructMain { + log.Debug("IsSlotDBReadsValid snapshot destructs read invalid", + "addr", addr, "destructRead", destructRead, "destructMain", destructMain, + "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } } - return s.accessList.Contains(addr, slot) + + return true } -func (s *StateDB) GetDirtyAccounts() []common.Address { - accounts := 
make([]common.Address, 0, len(s.stateObjectsDirty)) - for account := range s.stateObjectsDirty { - accounts = append(accounts, account) - } - return accounts +// For most of the transactions, systemAddressOpsCount should be 3: +// one for SetBalance(0) on NewSlotDB() +// the other is for AddBalance(GasFee) at the end. +// (systemAddressOpsCount > 3) means the transaction tries to access systemAddress, in +// this case, we should redo and keep its balance on NewSlotDB() +func (s *ParallelStateDB) SystemAddressRedo() bool { + return s.parallel.systemAddressOpsCount > 4 } -func (s *StateDB) GetStorage(address common.Address) *sync.Map { - return s.storagePool.getStorage(address) +// NeedsRedo returns true if there is any clear reason that we need to redo this transaction +func (s *ParallelStateDB) NeedsRedo() bool { + return s.parallel.needsRedo } diff --git a/core/state_processor.go b/core/state_processor.go index 9e6da7fee1..4dde8c064a 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -77,7 +77,7 @@ type ParallelStateProcessor struct { // txReqAccountSorted map[common.Address][]*ParallelTxRequest // fixme: *ParallelTxRequest => ParallelTxRequest? 
slotState []*SlotState // idle, or pending messages mergedTxIndex int // the latest finalized tx index - slotDBsToRelease []*state.StateDB + slotDBsToRelease []*state.ParallelStateDB debugErrorRedoNum int debugConflictRedoNum int } @@ -398,8 +398,8 @@ func (p *LightStateProcessor) LightProcess(diffLayer *types.DiffLayer, block *ty type SlotState struct { pendingTxReqChan chan struct{} pendingConfirmChan chan *ParallelTxResult - pendingTxReqList []*ParallelTxRequest // maintained by dispatcher for dispatch policy - slotdbChan chan *state.StateDB // dispatch will create and send this slotDB to slot + pendingTxReqList []*ParallelTxRequest // maintained by dispatcher for dispatch policy + slotdbChan chan *state.ParallelStateDB // dispatch will create and send this slotDB to slot // txReqUnits []*ParallelDispatchUnit // only dispatch can accesssd unconfirmedStateDBs *sync.Map // [int]*state.StateDB // fixme: concurrent safe, not use sync.Map? } @@ -411,7 +411,7 @@ type ParallelTxResult struct { err error // to describe error message? 
txReq *ParallelTxRequest receipt *types.Receipt - slotDB *state.StateDB // if updated, it is not equal to txReq.slotDB + slotDB *state.ParallelStateDB // if updated, it is not equal to txReq.slotDB gpSlot *GasPool evm *vm.EVM result *ExecutionResult @@ -420,7 +420,7 @@ type ParallelTxResult struct { type ParallelTxRequest struct { txIndex int tx *types.Transaction - slotDB *state.StateDB + slotDB *state.ParallelStateDB gasLimit uint64 msg types.Message block *types.Block @@ -441,7 +441,7 @@ func (p *ParallelStateProcessor) init() { for i := 0; i < p.parallelNum; i++ { p.slotState[i] = &SlotState{ - slotdbChan: make(chan *state.StateDB, 1), + slotdbChan: make(chan *state.ParallelStateDB, 1), pendingTxReqChan: make(chan struct{}, 1), pendingConfirmChan: make(chan *ParallelTxResult, p.queueSize), } @@ -894,7 +894,7 @@ func (p *ParallelStateProcessor) resetState(txNum int, statedb *state.StateDB) { statedb.PrepareForParallel() - p.slotDBsToRelease = make([]*state.StateDB, 0, txNum) + p.slotDBsToRelease = make([]*state.ParallelStateDB, 0, txNum) /* stateDBsToRelease := p.slotDBsToRelease go func() { @@ -916,10 +916,12 @@ func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.Stat header = block.Header() gp = new(GasPool).AddGas(block.GasLimit()) ) - log.Info("ProcessParallel", "block", header.Number) var receipts = make([]*types.Receipt, 0) txNum := len(block.Transactions()) p.resetState(txNum, statedb) + if txNum > 0 { + log.Info("ProcessParallel", "block", header.Number) + } // Iterate over and process the individual transactions posa, isPoSA := p.engine.(consensus.PoSA) @@ -1150,7 +1152,7 @@ func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainCon return receipt, err } -func applyTransactionStageExecution(msg types.Message, gp *GasPool, statedb *state.StateDB, evm *vm.EVM) (*vm.EVM, *ExecutionResult, error) { +func applyTransactionStageExecution(msg types.Message, gp *GasPool, statedb *state.ParallelStateDB, evm *vm.EVM) 
(*vm.EVM, *ExecutionResult, error) { // Create a new context to be used in the EVM environment. txContext := NewEVMTxContext(msg) evm.Reset(txContext, statedb) @@ -1164,7 +1166,7 @@ func applyTransactionStageExecution(msg types.Message, gp *GasPool, statedb *sta return evm, result, err } -func applyTransactionStageFinalization(evm *vm.EVM, result *ExecutionResult, msg types.Message, config *params.ChainConfig, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, receiptProcessors ...ReceiptProcessor) (*types.Receipt, error) { +func applyTransactionStageFinalization(evm *vm.EVM, result *ExecutionResult, msg types.Message, config *params.ChainConfig, statedb *state.ParallelStateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, receiptProcessors ...ReceiptProcessor) (*types.Receipt, error) { // Update the state with pending changes. var root []byte if config.IsByzantium(header.Number) { From 53c836f688e7dc11e08893af20542941f72a0aa5 Mon Sep 17 00:00:00 2001 From: setunapo Date: Fri, 29 Apr 2022 21:57:44 +0800 Subject: [PATCH 09/10] 0429: universal unconfirmed DB --- core/state_processor.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/core/state_processor.go b/core/state_processor.go index 4dde8c064a..3398d9f32b 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -80,6 +80,7 @@ type ParallelStateProcessor struct { slotDBsToRelease []*state.ParallelStateDB debugErrorRedoNum int debugConflictRedoNum int + unconfirmedStateDBs *sync.Map // [int]*state.StateDB // fixme: concurrent safe, not use sync.Map? 
} func NewParallelStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consensus.Engine, parallelNum int, queueSize int) *ParallelStateProcessor { @@ -401,7 +402,7 @@ type SlotState struct { pendingTxReqList []*ParallelTxRequest // maintained by dispatcher for dispatch policy slotdbChan chan *state.ParallelStateDB // dispatch will create and send this slotDB to slot // txReqUnits []*ParallelDispatchUnit // only dispatch can accesssd - unconfirmedStateDBs *sync.Map // [int]*state.StateDB // fixme: concurrent safe, not use sync.Map? + // unconfirmedStateDBs *sync.Map // [int]*state.StateDB // fixme: concurrent safe, not use sync.Map? } type ParallelTxResult struct { @@ -679,7 +680,7 @@ func (p *ParallelStateProcessor) waitUntilNextTxDone(statedb *state.StateDB, gp // the target slot is waiting for new slotDB slotState := p.slotState[result.slotIndex] slotDB := state.NewSlotDB(statedb, consensus.SystemAddress, result.txReq.txIndex, - p.mergedTxIndex, result.keepSystem, slotState.unconfirmedStateDBs) + p.mergedTxIndex, result.keepSystem, p.unconfirmedStateDBs) slotDB.SetSlotIndex(result.slotIndex) p.slotDBsToRelease = append(p.slotDBsToRelease, slotDB) slotState.slotdbChan <- slotDB @@ -866,7 +867,7 @@ func (p *ParallelStateProcessor) runSlotLoop(slotIndex int) { txReq.slotDB = <-curSlot.slotdbChan } result := p.executeInSlot(slotIndex, txReq) - curSlot.unconfirmedStateDBs.Store(txReq.txIndex, txReq.slotDB) + p.unconfirmedStateDBs.Store(txReq.txIndex, txReq.slotDB) curSlot.pendingConfirmChan <- result } } @@ -905,8 +906,9 @@ func (p *ParallelStateProcessor) resetState(txNum int, statedb *state.StateDB) { */ for _, slot := range p.slotState { slot.pendingTxReqList = make([]*ParallelTxRequest, 0) - slot.unconfirmedStateDBs = new(sync.Map) // make(map[int]*state.StateDB), fixme: resue not new? + // slot.unconfirmedStateDBs = new(sync.Map) // make(map[int]*state.StateDB), fixme: resue not new? 
} + p.unconfirmedStateDBs = new(sync.Map) // make(map[int]*state.StateDB), fixme: resue not new? } // Implement BEP-130: Parallel Transaction Execution. From c2c988c8b6657b0e3c17f028fc4702981804262f Mon Sep 17 00:00:00 2001 From: lunarblock Date: Fri, 29 Apr 2022 20:10:23 +0800 Subject: [PATCH 10/10] fix the comments --- core/state/statedb.go | 68 +++++++++++++++++++++---------------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/core/state/statedb.go b/core/state/statedb.go index 217bdd6a1b..0f8e189640 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -2263,7 +2263,7 @@ func (s *ParallelStateDB) storeStateObj(addr common.Address, stateObject *StateO // it belongs to base StateDB, it is confirmed and valid. stateObject.db = s.parallel.baseStateDB stateObject.dbItf = s.parallel.baseStateDB - // the object could be create in SlotDB, if it got the object from DB and + // the object could be created in SlotDB, if it got the object from DB and // update it to the shared `s.parallel.stateObjects`` stateObject.db.storeParallelLock.Lock() if _, ok := s.parallel.stateObjects.Load(addr); !ok { @@ -2291,13 +2291,13 @@ func (s *ParallelStateDB) getStateObjectNoSlot(addr common.Address) *StateObject // `prev` is used to handle revert, to recover with the `prev` object // In Parallel mode, we only need to recover to `prev` in SlotDB, -// a.if it is not in SlotDB, `revert` will remove it from the SlotDB -// b.if it is exist in SlotDB, `revert` will recover to the `prev` in SlotDB +// a.if it doesn't exist in SlotDB, `revert` will remove it from the SlotDB +// b.if it exists in SlotDB, `revert` will recover to the `prev` in SlotDB // c.as `snapDestructs` it is the same func (s *ParallelStateDB) createObject(addr common.Address) (newobj *StateObject) { log.Debug("ParallelStateDB createObject", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) - // do not get from unconfirmed DB, since it will has problem on revert + // do not get 
from unconfirmed DB, since it will have problem on revert prev := s.parallel.dirtiedStateObjectsInSlot[addr] var prevdestruct bool @@ -2347,7 +2347,7 @@ func (s *ParallelStateDB) getDeletedStateObject(addr common.Address) *StateObjec // fixme: concurrent not safe, merge could update it... // return obj // } - // this is why we have to use a seperate getDeletedStateObject for ParallelStateDB + // this is why we have to use a separate getDeletedStateObject for ParallelStateDB // `s` has to be the ParallelStateDB obj := newObject(s, s.isParallel, addr, *data) s.storeStateObj(addr, obj) @@ -2394,7 +2394,7 @@ func (s *ParallelStateDB) Exist(addr common.Address) bool { } return true } - // 2.Try to get from uncomfirmed & main DB + // 2.Try to get from unconfirmed & main DB // 2.1 Already read before if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok { return exist @@ -2433,7 +2433,7 @@ func (s *ParallelStateDB) Empty(addr common.Address) bool { codeHash := s.GetCodeHash(addr) return bytes.Equal(codeHash.Bytes(), emptyCodeHash) // code is empty, the object is empty } - // 2.Try to get from uncomfirmed & main DB + // 2.Try to get from unconfirmed & main DB // 2.1 Already read before if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok { // exist means not empty @@ -2466,7 +2466,7 @@ func (s *ParallelStateDB) GetBalance(addr common.Address) *big.Int { return obj.Balance() } } - // 2.Try to get from uncomfirmed DB or main DB + // 2.Try to get from unconfirmed DB or main DB // 2.1 Already read before if balance, ok := s.parallel.balanceReadsInSlot[addr]; ok { return balance @@ -2477,7 +2477,7 @@ func (s *ParallelStateDB) GetBalance(addr common.Address) *big.Int { return balance } - // 3. Try to get from main StateObejct + // 3. 
Try to get from main StateObject balance := common.Big0 stateObject := s.getStateObjectNoSlot(addr) if stateObject != nil { @@ -2497,7 +2497,7 @@ func (s *ParallelStateDB) GetNonce(addr common.Address) uint64 { return obj.Nonce() } } - // 2.Try to get from uncomfirmed DB or main DB + // 2.Try to get from unconfirmed DB or main DB // 2.1 Already read before if nonce, ok := s.parallel.nonceReadsInSlot[addr]; ok { return nonce @@ -2530,7 +2530,7 @@ func (s *ParallelStateDB) GetCode(addr common.Address) []byte { return code } } - // 2.Try to get from uncomfirmed DB or main DB + // 2.Try to get from unconfirmed DB or main DB // 2.1 Already read before if code, ok := s.parallel.codeReadsInSlot[addr]; ok { return code @@ -2541,7 +2541,7 @@ func (s *ParallelStateDB) GetCode(addr common.Address) []byte { return code } - // 3. Try to get from main StateObejct + // 3. Try to get from main StateObject stateObject := s.getStateObjectNoSlot(addr) var code []byte if stateObject != nil { @@ -2562,7 +2562,7 @@ func (s *ParallelStateDB) GetCodeSize(addr common.Address) int { return obj.CodeSize(s.db) } } - // 2.Try to get from uncomfirmed DB or main DB + // 2.Try to get from unconfirmed DB or main DB // 2.1 Already read before if code, ok := s.parallel.codeReadsInSlot[addr]; ok { return len(code) // len(nil) is 0 too @@ -2573,7 +2573,7 @@ func (s *ParallelStateDB) GetCodeSize(addr common.Address) int { return len(code) // len(nil) is 0 too } - // 3. Try to get from main StateObejct + // 3. 
Try to get from main StateObject var codeSize int = 0 var code []byte stateObject := s.getStateObjectNoSlot(addr) @@ -2586,7 +2586,7 @@ func (s *ParallelStateDB) GetCodeSize(addr common.Address) int { return codeSize } -// return value of GetCodeHash: +// GetCodeHash returns value of GetCodeHash: // - common.Hash{}: the address does not exist // - emptyCodeHash: the address exist, but code is empty // - others: the address exist, and code is not empty @@ -2601,7 +2601,7 @@ func (s *ParallelStateDB) GetCodeHash(addr common.Address) common.Hash { return common.BytesToHash(obj.CodeHash()) } } - // 2.Try to get from uncomfirmed DB or main DB + // 2.Try to get from unconfirmed DB or main DB // 2.1 Already read before if codeHash, ok := s.parallel.codeHashReadsInSlot[addr]; ok { return codeHash @@ -2611,7 +2611,7 @@ func (s *ParallelStateDB) GetCodeHash(addr common.Address) common.Hash { s.parallel.codeHashReadsInSlot[addr] = codeHash return codeHash } - // 3. Try to get from main StateObejct + // 3. Try to get from main StateObject stateObject := s.getStateObjectNoSlot(addr) codeHash := common.Hash{} if stateObject != nil { @@ -2645,7 +2645,7 @@ func (s *ParallelStateDB) GetState(addr common.Address, hash common.Hash) common return obj.GetState(s.db, hash) } } - // 2.Try to get from uncomfirmed DB or main DB + // 2.Try to get from unconfirmed DB or main DB // 2.1 Already read before if storage, ok := s.parallel.kvReadsInSlot[addr]; ok { if val, ok := storage.GetValue(hash); ok { @@ -2677,8 +2677,8 @@ func (s *ParallelStateDB) GetState(addr common.Address, hash common.Hash) common // GetCommittedState retrieves a value from the given account's committed storage trie. 
func (s *ParallelStateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash { log.Debug("ParallelStateDB GetCommittedState", "SlotxIndex", s.parallel.SlotIndex, "txIndex", s.TxIndex()) - // 1.No need to get from pending of itself even on merge, since stateobject in SlotDB won't do finalise - // 2.Try to get from uncomfirmed DB or main DB + // 1.No need to get from pending of itself even on merge, since state object in SlotDB won't do finalise + // 2.Try to get from unconfirmed DB or main DB // KVs in unconfirmed DB can be seen as pending storage // KVs in main DB are merged from SlotDB and has done finalise() on merge, can be seen as pending storage too. // 2.1 Already read before @@ -2715,7 +2715,7 @@ func (s *ParallelStateDB) HasSuicided(addr common.Address) bool { if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { return obj.suicided } - // 2.Try to get from uncomfirmed + // 2.Try to get from unconfirmed if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { return !exist } @@ -2756,7 +2756,7 @@ func (s *ParallelStateDB) AddBalance(addr common.Address, amount *big.Int) { return } // already dirty, make sure the balance if fixed up - // if stateObject.Balance() + // if stateObject.Balance() // todo: fix the comment if addr != s.parallel.systemAddress { if stateObject.Balance().Cmp(s.GetBalance(addr)) != 0 { log.Warn("AddBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr, @@ -2797,7 +2797,7 @@ func (s *ParallelStateDB) SubBalance(addr common.Address, amount *big.Int) { return } // already dirty, make sure the balance if fixed - // if stateObject.Balance() + // if stateObject.Balance() // todo: fix the comment if addr != s.parallel.systemAddress { if stateObject.Balance().Cmp(s.GetBalance(addr)) != 0 { log.Warn("SubBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr, @@ -2821,7 +2821,7 @@ func (s *ParallelStateDB) SetBalance(addr common.Address, amount *big.Int) { } if _, 
ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { newStateObject := stateObject.lightCopy(s) - // update balance for revert, in case child contract is revertted, + // update balance for revert, in case child contract is reverted, // it should revert to the previous balance balance := s.GetBalance(addr) newStateObject.setBalance(balance) @@ -2930,12 +2930,12 @@ func (s *ParallelStateDB) Suicide(addr common.Address) bool { // 1.Try to get from dirty, it could be suicided inside of contract call stateObject = s.parallel.dirtiedStateObjectsInSlot[addr] if stateObject == nil { - // 2.Try to get from uncomfirmed, if deleted return false, since the address does not exist + // 2.Try to get from unconfirmed, if deleted return false, since the address does not exist if obj, ok := s.getStateObjectFromUnconfirmedDB(addr); ok { stateObject = obj s.parallel.addrStateReadsInSlot[addr] = !stateObject.deleted // true: exist, false: deleted if stateObject.deleted { - log.Error("Suicide addr alreay deleted in confirmed DB", "txIndex", s.txIndex, "addr", addr) + log.Error("Suicide addr already deleted in confirmed DB", "txIndex", s.txIndex, "addr", addr) return false } } @@ -2971,7 +2971,7 @@ func (s *ParallelStateDB) Suicide(addr common.Address) bool { // s.parallel.kvChangesInSlot[addr] = make(StateKeys) // all key changes are discarded return true } - s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist any more, + s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist anymore, s.parallel.balanceChangesInSlot[addr] = struct{}{} s.parallel.codeChangesInSlot[addr] = struct{}{} @@ -2994,7 +2994,7 @@ func (s *ParallelStateDB) CreateAccount(addr common.Address) { // no matter it is got from dirty, unconfirmed or main DB // if addr not exist, preBalance will be common.Big0, it is same as new(big.Int) which // is the value newObject(), - preBalance := s.GetBalance(addr) // parallel balance read will be recorded inside of 
GetBalance + preBalance := s.GetBalance(addr) // parallel balance read will be recorded inside GetBalance newObj := s.createObject(addr) newObj.setBalance(new(big.Int).Set(preBalance)) // new big.Int for newObj } @@ -3027,7 +3027,7 @@ func (s *ParallelStateDB) AddRefund(gas uint64) { // fixme: not needed func (s *ParallelStateDB) SubRefund(gas uint64) { // fixme: not needed s.journal.append(refundChange{prev: s.refund}) if gas > s.refund { - // we don't need to panic here if we read the wrong state in parallelm mode + // we don't need to panic here if we read the wrong state in parallel mode // we just need to redo this transaction log.Info(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, s.refund), "tx", s.thash.String()) s.parallel.needsRedo = true @@ -3043,7 +3043,7 @@ func (s *ParallelStateDB) SubRefund(gas uint64) { // fixme: not needed // Access from the unconfirmed DB with range&priority: txIndex -1(previous tx) -> baseTxIndex + 1 func (s *ParallelStateDB) getBalanceFromUnconfirmedDB(addr common.Address) *big.Int { if addr == s.parallel.systemAddress { - // never get systemaddress from unconfirmed DB + // never get system address from unconfirmed DB return nil } @@ -3075,7 +3075,7 @@ func (s *ParallelStateDB) getBalanceFromUnconfirmedDB(addr common.Address) *big. // Similar to getBalanceFromUnconfirmedDB func (s *ParallelStateDB) getNonceFromUnconfirmedDB(addr common.Address) (uint64, bool) { if addr == s.parallel.systemAddress { - // never get systemaddress from unconfirmed DB + // never get system address from unconfirmed DB return 0, false } @@ -3115,7 +3115,7 @@ func (s *ParallelStateDB) getNonceFromUnconfirmedDB(addr common.Address) (uint64 // It is not only for code, but also codeHash and codeSize, we return the *StateObject for convienence. 
func (s *ParallelStateDB) getCodeFromUnconfirmedDB(addr common.Address) ([]byte, bool) { if addr == s.parallel.systemAddress { - // never get systemaddress from unconfirmed DB + // never get system address from unconfirmed DB return nil, false } @@ -3154,7 +3154,7 @@ func (s *ParallelStateDB) getCodeFromUnconfirmedDB(addr common.Address) ([]byte, // but differ when address is deleted or not exist func (s *ParallelStateDB) getCodeHashFromUnconfirmedDB(addr common.Address) (common.Hash, bool) { if addr == s.parallel.systemAddress { - // never get systemaddress from unconfirmed DB + // never get system address from unconfirmed DB return common.Hash{}, false } @@ -3196,7 +3196,7 @@ func (s *ParallelStateDB) getCodeHashFromUnconfirmedDB(addr common.Address) (com // If the dirty address is empty or suicided, it will be marked as deleted, so we only need to return `deleted` or not. func (s *ParallelStateDB) getAddrStateFromUnconfirmedDB(addr common.Address) (bool, bool) { if addr == s.parallel.systemAddress { - // never get systemaddress from unconfirmed DB + // never get system address from unconfirmed DB return false, false }