diff --git a/evmd/app.go b/evmd/app.go index 41d1911a9..be32fb902 100644 --- a/evmd/app.go +++ b/evmd/app.go @@ -185,7 +185,7 @@ type EVMD struct { FeeMarketKeeper feemarketkeeper.Keeper EVMKeeper *evmkeeper.Keeper Erc20Keeper erc20keeper.Keeper - EVMMempool *evmmempool.ExperimentalEVMMempool + EVMMempool sdkmempool.ExtMempool // the module manager ModuleManager *module.Manager diff --git a/evmd/mempool.go b/evmd/mempool.go index 1046f0b3b..dee38daca 100644 --- a/evmd/mempool.go +++ b/evmd/mempool.go @@ -17,10 +17,6 @@ import ( sdkmempool "github.com/cosmos/cosmos-sdk/types/mempool" ) -// enables abci.InsertTx & abci.ReapTxs to be used exclusively by the mempool. -// @see evmmempool.ExperimentalEVMMempool.OperateExclusively -const mempoolOperateExclusively = true - // configureEVMMempool sets up the EVM mempool and related handlers using viper configuration. func (app *EVMD) configureEVMMempool(appOpts servertypes.AppOptions, logger log.Logger) error { if evmtypes.GetChainConfig() == nil { @@ -34,65 +30,97 @@ func (app *EVMD) configureEVMMempool(appOpts servertypes.AppOptions, logger log. 
return nil } - mempoolConfig, err := app.createMempoolConfig(appOpts, logger) - if err != nil { - return fmt.Errorf("failed to get mempool config: %w", err) + if server.GetShouldOperateExclusively(appOpts, logger) { + logger.Info("app-side mempool is operating exclusively, setting up Krakatoa mempool") + + krakatoaConfig := app.createKrakatoaMempoolConfig(appOpts, logger) + txEncoder := evmmempool.NewTxEncoder(app.txConfig) + evmRechecker := evmmempool.NewTxRechecker(krakatoaConfig.AnteHandler, txEncoder) + cosmosRechecker := evmmempool.NewTxRechecker(krakatoaConfig.AnteHandler, txEncoder) + + krakatoaMempool := evmmempool.NewKrakatoaMempool( + app.CreateQueryContext, + logger, + app.EVMKeeper, + app.FeeMarketKeeper, + app.txConfig, + evmRechecker, + cosmosRechecker, + krakatoaConfig, + cosmosPoolMaxTx, + ) + + app.SetInsertTxHandler(app.NewInsertTxHandler(krakatoaMempool)) + app.SetReapTxsHandler(app.NewReapTxsHandler(krakatoaMempool)) + + txVerifier := NewNoCheckProposalTxVerifier(app.BaseApp) + abciProposalHandler := baseapp.NewDefaultProposalHandler(krakatoaMempool, txVerifier) + abciProposalHandler.SetSignerExtractionAdapter( + evmmempool.NewEthSignerExtractionAdapter( + sdkmempool.NewDefaultSignerExtractionAdapter(), + ), + ) + app.SetPrepareProposal(abciProposalHandler.PrepareProposalHandler()) + + app.EVMMempool = krakatoaMempool + app.SetMempool(krakatoaMempool) + } else { + logger.Info("app-side mempool is not operating exclusively, setting up default EVM mempool") + + evmMempool := evmmempool.NewExperimentalEVMMempool( + app.CreateQueryContext, + logger, + app.EVMKeeper, + app.FeeMarketKeeper, + app.txConfig, + app.createMempoolConfig(appOpts, logger), + cosmosPoolMaxTx, + ) + + app.SetCheckTxHandler(evmmempool.NewCheckTxHandler(evmMempool)) + + abciProposalHandler := baseapp.NewDefaultProposalHandler(evmMempool, app) + abciProposalHandler.SetSignerExtractionAdapter( + evmmempool.NewEthSignerExtractionAdapter( + 
sdkmempool.NewDefaultSignerExtractionAdapter(), + ), + ) + app.SetPrepareProposal(abciProposalHandler.PrepareProposalHandler()) + + app.EVMMempool = evmMempool + app.SetMempool(evmMempool) } - txEncoder := evmmempool.NewTxEncoder(app.txConfig) - evmRechecker := evmmempool.NewTxRechecker(mempoolConfig.AnteHandler, txEncoder) - cosmosRechecker := evmmempool.NewTxRechecker(mempoolConfig.AnteHandler, txEncoder) - - evmMempool := evmmempool.NewExperimentalEVMMempool( - app.CreateQueryContext, - logger, - app.EVMKeeper, - app.FeeMarketKeeper, - app.txConfig, - txEncoder, - evmRechecker, - cosmosRechecker, - mempoolConfig, - cosmosPoolMaxTx, - ) - app.EVMMempool = evmMempool - app.SetMempool(evmMempool) - checkTxHandler := evmmempool.NewCheckTxHandler(evmMempool) - app.SetCheckTxHandler(checkTxHandler) - app.SetInsertTxHandler(app.NewInsertTxHandler(evmMempool)) - app.SetReapTxsHandler(app.NewReapTxsHandler(evmMempool)) - - txVerifier := NewNoCheckProposalTxVerifier(app.BaseApp) - abciProposalHandler := baseapp.NewDefaultProposalHandler(evmMempool, txVerifier) - abciProposalHandler.SetSignerExtractionAdapter( - evmmempool.NewEthSignerExtractionAdapter( - sdkmempool.NewDefaultSignerExtractionAdapter(), - ), - ) - app.SetPrepareProposal(abciProposalHandler.PrepareProposalHandler()) - return nil } // createMempoolConfig creates a new EVMMempoolConfig with the default configuration // and overrides it with values from appOpts if they exist and are non-zero. 
-func (app *EVMD) createMempoolConfig(appOpts servertypes.AppOptions, logger log.Logger) (*evmmempool.EVMMempoolConfig, error) { +func (app *EVMD) createMempoolConfig(appOpts servertypes.AppOptions, logger log.Logger) *evmmempool.EVMMempoolConfig { return &evmmempool.EVMMempoolConfig{ - AnteHandler: app.GetAnteHandler(), - LegacyPoolConfig: server.GetLegacyPoolConfig(appOpts, logger), - BlockGasLimit: server.GetBlockGasLimit(appOpts, logger), - MinTip: server.GetMinTip(appOpts, logger), - OperateExclusively: mempoolOperateExclusively, + AnteHandler: app.GetAnteHandler(), + LegacyPoolConfig: server.GetLegacyPoolConfig(appOpts, logger), + BlockGasLimit: server.GetBlockGasLimit(appOpts, logger), + MinTip: server.GetMinTip(appOpts, logger), + } +} + +// createKrakatoaMempoolConfig creates a new KrakatoaMempoolConfig with the default configuration +// and overrides it with values from appOpts if they exist and are non-zero. +func (app *EVMD) createKrakatoaMempoolConfig(appOpts servertypes.AppOptions, logger log.Logger) *evmmempool.KrakatoaMempoolConfig { + mempoolConfig := app.createMempoolConfig(appOpts, logger) + return &evmmempool.KrakatoaMempoolConfig{ + EVMMempoolConfig: *mempoolConfig, PendingTxProposalTimeout: server.GetPendingTxProposalTimeout(appOpts, logger), InsertQueueSize: server.GetMempoolInsertQueueSize(appOpts, logger), - }, nil + } } const ( CodeTypeNoRetry = 1 ) -func (app *EVMD) NewInsertTxHandler(evmMempool *evmmempool.ExperimentalEVMMempool) sdk.InsertTxHandler { +func (app *EVMD) NewInsertTxHandler(evmMempool *evmmempool.KrakatoaMempool) sdk.InsertTxHandler { return func(req *abci.RequestInsertTx) (*abci.ResponseInsertTx, error) { txBytes := req.GetTx() @@ -121,7 +149,7 @@ func (app *EVMD) NewInsertTxHandler(evmMempool *evmmempool.ExperimentalEVMMempoo } } -func (app *EVMD) NewReapTxsHandler(evmMempool *evmmempool.ExperimentalEVMMempool) sdk.ReapTxsHandler { +func (app *EVMD) NewReapTxsHandler(evmMempool *evmmempool.KrakatoaMempool) 
sdk.ReapTxsHandler { return func(req *abci.RequestReapTxs) (*abci.ResponseReapTxs, error) { maxBytes, maxGas := req.GetMaxBytes(), req.GetMaxGas() txs, err := evmMempool.ReapNewValidTxs(maxBytes, maxGas) diff --git a/evmd/tests/integration/create_app.go b/evmd/tests/integration/create_app.go index a8deb2dba..f320420f4 100644 --- a/evmd/tests/integration/create_app.go +++ b/evmd/tests/integration/create_app.go @@ -7,7 +7,6 @@ import ( dbm "github.com/cosmos/cosmos-db" "github.com/cosmos/evm" "github.com/cosmos/evm/evmd" - evmmempool "github.com/cosmos/evm/mempool" srvflags "github.com/cosmos/evm/server/flags" "github.com/cosmos/evm/testutil/constants" feemarkettypes "github.com/cosmos/evm/x/feemarket/types" @@ -23,9 +22,8 @@ import ( stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" ) -// CreateEvmd creates an evm app for regular integration tests (non-mempool) -// This version uses a noop mempool to avoid state issues during transaction processing -func CreateEvmd(chainID string, evmChainID uint64, customBaseAppOptions ...func(*baseapp.BaseApp)) evm.EvmApp { +// CreateEvmd creates an evm app for integration tests +func CreateEvmd(chainID string, evmChainID uint64, exclusiveMempool bool, customBaseAppOptions ...func(*baseapp.BaseApp)) evm.EvmApp { // A temporary home directory is created and used to prevent race conditions // related to home directory locks in chains that use the WASM module. 
defaultNodeHome, err := os.MkdirTemp("", "evmd-temp-homedir") @@ -36,7 +34,7 @@ func CreateEvmd(chainID string, evmChainID uint64, customBaseAppOptions ...func( db := dbm.NewMemDB() logger := log.NewNopLogger() loadLatest := true - appOptions := NewAppOptionsWithFlagHomeAndChainID(defaultNodeHome, evmChainID) + appOptions := NewAppOptionsWithFlagHomeAndChainID(defaultNodeHome, evmChainID, exclusiveMempool) baseAppOptions := append(customBaseAppOptions, baseapp.SetChainID(chainID)) @@ -55,8 +53,8 @@ func CreateEvmd(chainID string, evmChainID uint64, customBaseAppOptions ...func( WithHeight(1). WithTxConfig(app.GetTxConfig()) - // Get the mempool and set the client context - if m, ok := app.GetMempool().(*evmmempool.ExperimentalEVMMempool); ok && m != nil { + // Get the mempool and set the client context if supported + if m, ok := app.GetMempool().(interface{ SetClientCtx(client.Context) }); ok && m != nil { m.SetClientCtx(clientCtx) } @@ -76,7 +74,7 @@ func SetupEvmd() (ibctesting.TestingApp, map[string]json.RawMessage) { dbm.NewMemDB(), nil, true, - NewAppOptionsWithFlagHomeAndChainID(defaultNodeHome, constants.EighteenDecimalsChainID), + NewAppOptionsWithFlagHomeAndChainID(defaultNodeHome, constants.EighteenDecimalsChainID, false), ) // disable base fee for testing genesisState := app.DefaultGenesis() @@ -93,11 +91,12 @@ func SetupEvmd() (ibctesting.TestingApp, map[string]json.RawMessage) { return app, genesisState } -func NewAppOptionsWithFlagHomeAndChainID(home string, evmChainID uint64) simutils.AppOptionsMap { +func NewAppOptionsWithFlagHomeAndChainID(home string, evmChainID uint64, exclusiveMempool bool) simutils.AppOptionsMap { return simutils.AppOptionsMap{ - flags.FlagHome: home, - srvflags.EVMChainID: evmChainID, - srvflags.EVMMempoolInsertQueueSize: 5000, + flags.FlagHome: home, + srvflags.EVMChainID: evmChainID, + srvflags.EVMMempoolOperateExclusively: exclusiveMempool, + srvflags.EVMMempoolInsertQueueSize: 5000, 
srvflags.EVMMempoolPendingTxProposalTimeout: "250ms", } } diff --git a/evmd/tests/integration/mempool/mempool_test.go b/evmd/tests/integration/mempool/mempool_test.go index 50090ea8f..1dad99e9b 100644 --- a/evmd/tests/integration/mempool/mempool_test.go +++ b/evmd/tests/integration/mempool/mempool_test.go @@ -9,9 +9,15 @@ import ( "github.com/cosmos/evm/evmd/tests/integration" "github.com/cosmos/evm/tests/integration/mempool" testapp "github.com/cosmos/evm/testutil/app" + "github.com/cosmos/evm/testutil/integration/evm/network" ) func TestMempoolIntegrationTestSuite(t *testing.T) { create := testapp.ToEvmAppCreator[evm.IntegrationNetworkApp](integration.CreateEvmd, "evm.IntegrationNetworkApp") suite.Run(t, mempool.NewMempoolIntegrationTestSuite(create)) } + +func TestKrakatoaMempoolIntegrationTestSuite(t *testing.T) { + create := testapp.ToEvmAppCreator[evm.IntegrationNetworkApp](integration.CreateEvmd, "evm.IntegrationNetworkApp") + suite.Run(t, mempool.NewMempoolIntegrationTestSuite(create, network.WithExclusiveMempool())) +} diff --git a/go.mod b/go.mod index ff06bbb64..cd0ff704c 100644 --- a/go.mod +++ b/go.mod @@ -115,6 +115,7 @@ require ( github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/chigopher/pathlib v0.19.1 // indirect github.com/chzyer/readline v1.5.1 // indirect github.com/cloudwego/base64x v0.1.6 // indirect github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 // indirect @@ -195,12 +196,14 @@ require ( github.com/hdevalence/ed25519consensus v0.2.0 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/huandu/skiplist v1.2.1 // indirect + github.com/huandu/xstrings v1.4.0 // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/iancoleman/strcase v0.3.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ipfs/go-cid v0.5.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // 
indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect + github.com/jinzhu/copier v0.4.0 // indirect github.com/jmhodges/levigo v1.0.0 // indirect github.com/klauspost/compress v1.18.4 // indirect github.com/klauspost/cpuid/v2 v2.3.0 // indirect @@ -306,6 +309,7 @@ require ( github.com/tklauser/numcpus v0.11.0 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ulikunitz/xz v0.5.15 // indirect + github.com/vektra/mockery/v2 v2.53.6 // indirect github.com/wlynxg/anet v0.0.5 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/zondax/golem v0.27.0 // indirect diff --git a/go.sum b/go.sum index de9c9794e..9db6116c6 100644 --- a/go.sum +++ b/go.sum @@ -201,6 +201,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/chigopher/pathlib v0.19.1 h1:RoLlUJc0CqBGwq239cilyhxPNLXTK+HXoASGyGznx5A= +github.com/chigopher/pathlib v0.19.1/go.mod h1:tzC1dZLW8o33UQpWkNkhvPwL5n4yyFRFm/jL1YGWFvY= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= @@ -609,6 +611,8 @@ github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3 github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U= github.com/huandu/skiplist v1.2.1 h1:dTi93MgjwErA/8idWTzIw4Y1kZsMWx35fmI2c8Rij7w= github.com/huandu/skiplist v1.2.1/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= +github.com/huandu/xstrings v1.4.0 
h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= +github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= @@ -638,6 +642,8 @@ github.com/jhump/protoreflect v1.18.0 h1:TOz0MSR/0JOZ5kECB/0ufGnC2jdsgZ123Rd/k4Z github.com/jhump/protoreflect v1.18.0/go.mod h1:ezWcltJIVF4zYdIFM+D/sHV4Oh5LNU08ORzCGfwvTz8= github.com/jhump/protoreflect/v2 v2.0.0-beta.1 h1:Dw1rslK/VotaUGYsv53XVWITr+5RCPXfvvlGrM/+B6w= github.com/jhump/protoreflect/v2 v2.0.0-beta.1/go.mod h1:D9LBEowZyv8/iSu97FU2zmXG3JxVTmNw21mu63niFzU= +github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8= +github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= @@ -1117,6 +1123,8 @@ github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w= github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ= +github.com/vektra/mockery/v2 v2.53.6 h1:qfUB6saauu652ZlMF/mEdlj7B/A0fw2XR0XBACBrf7Y= +github.com/vektra/mockery/v2 v2.53.6/go.mod h1:fjxC+mskIZqf67+z34pHxRRyyZnPnWNA36Cirf01Pkg= github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= github.com/wlynxg/anet v0.0.5/go.mod 
h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= diff --git a/mempool/blockchain_test.go b/mempool/blockchain_test.go index 97ae83d63..581fb6582 100644 --- a/mempool/blockchain_test.go +++ b/mempool/blockchain_test.go @@ -45,7 +45,7 @@ func TestBlockchainRaceCondition(t *testing.T) { logger := log.NewNopLogger() // Create mock keepers using generated mocks - mockVMKeeper := mocks.NewVMKeeper(t) + mockVMKeeper := mocks.NewVMKeeperI(t) mockFeeMarketKeeper := mocks.NewFeeMarketKeeper(t) ethCfg := vmtypes.DefaultChainConfig(constants.EighteenDecimalsChainID) diff --git a/mempool/interface.go b/mempool/interface.go index b0c56673b..ee5b0b488 100644 --- a/mempool/interface.go +++ b/mempool/interface.go @@ -13,6 +13,17 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" ) +// NotifiedMempool is the set of methods that a mempool must implement in order +// to be notified of new blocks by this Keeper via the EndBlocker. +type NotifiedMempool interface { + // HasEventBus returns true if the mempool has an event bus configured to + // get cometbft events + HasEventBus() bool + + // GetBlockchain returns the mempools blockchain representation. 
+ GetBlockchain() *Blockchain +} + type VMKeeperI interface { GetBaseFee(ctx sdk.Context) *big.Int GetParams(ctx sdk.Context) (params vmtypes.Params) @@ -29,7 +40,7 @@ type VMKeeperI interface { SetCode(ctx sdk.Context, codeHash []byte, code []byte) DeleteAccount(ctx sdk.Context, addr common.Address) error KVStoreKeys() map[string]storetypes.StoreKey - SetEvmMempool(evmMempool *ExperimentalEVMMempool) + SetEvmMempool(evmMempool NotifiedMempool) } type FeeMarketKeeperI interface { diff --git a/mempool/internal/queue/queue.go b/mempool/internal/queue/queue.go index 68eb23c56..b74e99dbf 100644 --- a/mempool/internal/queue/queue.go +++ b/mempool/internal/queue/queue.go @@ -8,8 +8,6 @@ import ( "github.com/gammazero/deque" - "cosmossdk.io/log/v2" - "github.com/cosmos/cosmos-sdk/telemetry" ) @@ -40,18 +38,16 @@ type Queue[Tx any] struct { // rejecting new additions maxSize int - logger log.Logger - done chan struct{} + done chan struct{} } var ErrQueueFull = errors.New("queue full") // New creates a new queue. 
-func New[Tx any](insert func(txs []*Tx) []error, maxSize int, logger log.Logger) *Queue[Tx] { +func New[Tx any](insert func(txs []*Tx) []error, maxSize int) *Queue[Tx] { iq := &Queue[Tx]{ insert: insert, maxSize: maxSize, - logger: logger, signal: make(chan struct{}, 1), done: make(chan struct{}), } diff --git a/mempool/internal/queue/queue_test.go b/mempool/internal/queue/queue_test.go index 98eafe106..8321c2ac1 100644 --- a/mempool/internal/queue/queue_test.go +++ b/mempool/internal/queue/queue_test.go @@ -7,8 +7,6 @@ import ( ethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/stretchr/testify/require" - - "cosmossdk.io/log/v2" ) // mockPool is a mock that records inserted transactions and optionally @@ -52,8 +50,7 @@ func (m *mockPool) setInsertFn(fn func([]*ethtypes.Transaction) []error) { func TestInsertQueue_PushAndProcess(t *testing.T) { pool := newMockPool() - logger := log.NewNopLogger() - iq := New[ethtypes.Transaction](pool.insert, 1000, logger) + iq := New[ethtypes.Transaction](pool.insert, 1000) defer iq.Close() // Create a test transaction @@ -75,8 +72,7 @@ func TestInsertQueue_PushAndProcess(t *testing.T) { func TestInsertQueue_ProcessesMultipleTransactions(t *testing.T) { pool := newMockPool() - logger := log.NewNopLogger() - iq := New[ethtypes.Transaction](pool.insert, 1000, logger) + iq := New[ethtypes.Transaction](pool.insert, 1000) defer iq.Close() // Create multiple test transactions @@ -104,8 +100,7 @@ func TestInsertQueue_ProcessesMultipleTransactions(t *testing.T) { func TestInsertQueue_IgnoresNilTransaction(t *testing.T) { pool := newMockPool() - logger := log.NewNopLogger() - iq := New[ethtypes.Transaction](pool.insert, 1000, logger) + iq := New[ethtypes.Transaction](pool.insert, 1000) defer iq.Close() // Push nil transaction @@ -128,8 +123,7 @@ func TestInsertQueue_SlowAddition(t *testing.T) { return make([]error, len(txs)) }) - logger := log.NewNopLogger() - iq := New[ethtypes.Transaction](pool.insert, 1000, logger) + iq 
:= New[ethtypes.Transaction](pool.insert, 1000) defer iq.Close() // Push first transaction to start processing @@ -162,8 +156,7 @@ func TestInsertQueue_RejectsWhenFull(t *testing.T) { select {} // block forever }) - logger := log.NewNopLogger() - iq := New[ethtypes.Transaction](pool.insert, 5, logger) + iq := New[ethtypes.Transaction](pool.insert, 5) defer iq.Close() // This first tx will be immediately popped and start processing (where it diff --git a/mempool/iterator_bench_test.go b/mempool/iterator_bench_test.go index 6e3b7854c..acc5ad232 100644 --- a/mempool/iterator_bench_test.go +++ b/mempool/iterator_bench_test.go @@ -191,16 +191,14 @@ func setupBenchMempool(b *testing.B, evmAccounts, cosmosAccounts []benchAccount) WithEVMCoinInfo(constants.ChainsCoinInfo[constants.EighteenDecimalsChainID]). Configure() // ignore if already configured - mockVMKeeper := mocks.NewVMKeeper(b) - mockFeeMarketKeeper := mocks.NewFeeMarketKeeper(b) - mockEVMRechecker := &MockRechecker{} - mockCosmosRechecker := &MockRechecker{} - + mockVMKeeper := mocks.NewVMKeeperI(b) mockVMKeeper.On("GetBaseFee", mock.Anything).Return(big.NewInt(1e9)).Maybe() mockVMKeeper.On("GetParams", mock.Anything).Return(vmtypes.DefaultParams()).Maybe() - mockFeeMarketKeeper.On("GetBlockGasWanted", mock.Anything).Return(uint64(10_000_000)).Maybe() mockVMKeeper.On("GetEvmCoinInfo", mock.Anything).Return(constants.ChainsCoinInfo[constants.EighteenDecimalsChainID]).Maybe() + mockFeeMarketKeeper := mocks.NewFeeMarketKeeper(b) + mockFeeMarketKeeper.On("GetBlockGasWanted", mock.Anything).Return(uint64(10_000_000)).Maybe() + // Register each account with proper balance for _, acc := range evmAccounts { mockVMKeeper.On("GetAccount", mock.Anything, acc.address).Return(&statedb.Account{ @@ -276,9 +274,6 @@ func setupBenchMempool(b *testing.B, evmAccounts, cosmosAccounts []benchAccount) mockVMKeeper, mockFeeMarketKeeper, txConfig, - evmmempool.NewTxEncoder(txConfig), - mockEVMRechecker, - mockCosmosRechecker, config, 
0, // cosmosPoolMaxTx (0 = unlimited) ) diff --git a/mempool/krakatoa_mempool.go b/mempool/krakatoa_mempool.go new file mode 100644 index 000000000..c128d8576 --- /dev/null +++ b/mempool/krakatoa_mempool.go @@ -0,0 +1,628 @@ +package mempool + +import ( + "context" + "errors" + "fmt" + "math/big" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" + ethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/holiman/uint256" + + cmttypes "github.com/cometbft/cometbft/types" + + "github.com/cosmos/evm/mempool/internal/heightsync" + "github.com/cosmos/evm/mempool/internal/queue" + "github.com/cosmos/evm/mempool/miner" + "github.com/cosmos/evm/mempool/reserver" + "github.com/cosmos/evm/mempool/txpool" + "github.com/cosmos/evm/mempool/txpool/legacypool" + "github.com/cosmos/evm/rpc/stream" + + "cosmossdk.io/log/v2" + "cosmossdk.io/math" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/telemetry" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + sdkmempool "github.com/cosmos/cosmos-sdk/types/mempool" +) + +var _ sdkmempool.ExtMempool = (*KrakatoaMempool)(nil) + +// KrakatoaMempoolConfig extends the EVMMempoolConfig to have Krakatoa specific +// configuration options. +type KrakatoaMempoolConfig struct { + EVMMempoolConfig + // PendingTxProposalTimeout is the max amount of time to allocate to + // fetching (or waiting to fetch) pending txs from the evm mempool. + PendingTxProposalTimeout time.Duration + // InsertQueueSize is how many txs can be stored in the insert queue + // pending insertion into the mempool. Note the insert queue is only used + // for EVM txs. + InsertQueueSize int +} + +// KrakatoaMempool is an application side mempool implementation that operates +// in conjunction with the CometBFT 'app' configuration. The KrakatoaMempool +// handles application side rechecking of txs and supports ABCI methods +// InsertTx and ReapTxs. 
+type KrakatoaMempool struct { + /** Keepers **/ + vmKeeper VMKeeperI + + /** Mempools **/ + txPool *txpool.TxPool + legacyTxPool *legacypool.LegacyPool + recheckCosmosPool *RecheckMempool + pendingTxProposalTimeout time.Duration + + /** Utils **/ + logger log.Logger + txConfig client.TxConfig + clientCtx client.Context + blockchain *Blockchain + blockGasLimit uint64 // Block gas limit from consensus parameters + minTip *uint256.Int + + eventBus *cmttypes.EventBus + + /** Transaction Reaping **/ + reapList *ReapList + + /** Transaction Tracking **/ + txTracker *txTracker + + /** Transaction Inserting **/ + cosmosInsertQueue *queue.Queue[sdk.Tx] + evmInsertQueue *queue.Queue[ethtypes.Transaction] +} + +func NewKrakatoaMempool( + getCtxCallback func(height int64, prove bool) (sdk.Context, error), + logger log.Logger, + vmKeeper VMKeeperI, + feeMarketKeeper FeeMarketKeeperI, + txConfig client.TxConfig, + evmRechecker legacypool.Rechecker, + cosmosRechecker Rechecker, + config *KrakatoaMempoolConfig, + cosmosPoolMaxTx int, +) *KrakatoaMempool { + logger = logger.With(log.ModuleKey, "KrakatoaMempool") + logger.Debug("creating new Krakatoa mempool") + + if config == nil { + panic("config must not be nil") + } + if config.BlockGasLimit == 0 { + logger.Warn("block gas limit is 0, setting to fallback", "fallback_limit", fallbackBlockGasLimit) + config.BlockGasLimit = fallbackBlockGasLimit + } + blockchain := NewBlockchain(getCtxCallback, logger, vmKeeper, feeMarketKeeper, config.BlockGasLimit) + + legacyConfig := legacypool.DefaultConfig + if config.LegacyPoolConfig != nil { + legacyConfig = *config.LegacyPoolConfig + } + legacyPool := legacypool.New(legacyConfig, logger, blockchain, legacypool.WithRecheck(evmRechecker)) + + tracker := reserver.NewReservationTracker() + txPool, err := txpool.New(uint64(0), blockchain, tracker, []txpool.SubPool{legacyPool}) + if err != nil { + panic(err) + } + if len(txPool.Subpools) != 1 { + panic("tx pool should contain one subpool") + } + 
if _, ok := txPool.Subpools[0].(*legacypool.LegacyPool); !ok { + panic("tx pool should contain only legacypool") + } + + cosmosPoolConfig := config.CosmosPoolConfig + if cosmosPoolConfig == nil { + // Default configuration + defaultConfig := sdkmempool.PriorityNonceMempoolConfig[math.Int]{} + defaultConfig.TxPriority = sdkmempool.TxPriority[math.Int]{ + GetTxPriority: func(goCtx context.Context, tx sdk.Tx) math.Int { + ctx := sdk.UnwrapSDKContext(goCtx) + cosmosTxFee, ok := tx.(sdk.FeeTx) + if !ok { + return math.ZeroInt() + } + found, coin := cosmosTxFee.GetFee().Find(vmKeeper.GetEvmCoinInfo(ctx).Denom) + if !found { + return math.ZeroInt() + } + + gasPrice := coin.Amount.Quo(math.NewIntFromUint64(cosmosTxFee.GetGas())) + + return gasPrice + }, + Compare: func(a, b math.Int) int { + return a.BigInt().Cmp(b.BigInt()) + }, + MinValue: math.ZeroInt(), + } + cosmosPoolConfig = &defaultConfig + } + cosmosPoolConfig.MaxTx = cosmosPoolMaxTx + cosmosPool := sdkmempool.NewPriorityMempool(*cosmosPoolConfig) + recheckPool := NewRecheckMempool( + logger, + cosmosPool, + tracker.NewHandle(-1), + cosmosRechecker, + heightsync.New(blockchain.CurrentBlock().Number, NewCosmosTxStore, logger), + blockchain, + ) + + krakatoaMempool := &KrakatoaMempool{ + vmKeeper: vmKeeper, + txPool: txPool, + legacyTxPool: txPool.Subpools[0].(*legacypool.LegacyPool), + recheckCosmosPool: recheckPool, + logger: logger, + txConfig: txConfig, + blockchain: blockchain, + blockGasLimit: config.BlockGasLimit, + minTip: config.MinTip, + pendingTxProposalTimeout: config.PendingTxProposalTimeout, + reapList: NewReapList(NewTxEncoder(txConfig)), + txTracker: newTxTracker(), + } + + // Setup queues + krakatoaMempool.evmInsertQueue = queue.New( + func(txs []*ethtypes.Transaction) []error { + return txPool.Add(txs, AllowUnsafeSyncInsert) + }, + config.InsertQueueSize, + ) + + krakatoaMempool.cosmosInsertQueue = queue.New( + func(txs []*sdk.Tx) []error { + errs := make([]error, len(txs)) + for i, tx := range txs 
{ + // NOTE: cosmos txs must be added to the reap list directly + // after insert, since recheck runs on insert, if insert + // completes successfully, then we know they are valid and + // should be added to the reap list, we do not need to wait + // until the next blocks recheck. + errs[i] = krakatoaMempool.insertAndReapCosmosTx(*tx) + } + return errs + }, + config.InsertQueueSize, + ) + + legacyPool.OnTxEnqueued = krakatoaMempool.onEVMTxEnqueued() + legacyPool.OnTxPromoted = krakatoaMempool.onEVMTxPromoted() + legacyPool.OnTxRemoved = krakatoaMempool.onEVMTxRemoved() + + vmKeeper.SetEvmMempool(krakatoaMempool) + + // Start the cosmos pool recheck loop + krakatoaMempool.recheckCosmosPool.Start(blockchain.CurrentBlock()) + + return krakatoaMempool +} + +// onEVMTxEnqueued defines a hook to run whenever an evm tx enters the queued pool. +func (m *KrakatoaMempool) onEVMTxEnqueued() func(tx *ethtypes.Transaction) { + return func(tx *ethtypes.Transaction) { + _ = m.txTracker.EnteredQueued(tx.Hash()) + } +} + +// onEVMTxPromoted defines a hook to run whenever an evm tx is promoted from +// the queued pool to the pending pool. +func (m *KrakatoaMempool) onEVMTxPromoted() func(tx *ethtypes.Transaction) { + return func(tx *ethtypes.Transaction) { + // once we have validated that the tx is valid (and can be promoted, set it + // to be reaped) + if err := m.reapList.PushEVMTx(tx); err != nil { + m.logger.Error("could not push promoted evm tx to ReapList", "err", err) + } + + hash := tx.Hash() + _ = m.txTracker.ExitedQueued(hash) + _ = m.txTracker.EnteredPending(hash) + } +} + +// onEVMTxRemoved defines a hook to run whenever an evm tx is removed from a +// pool (queued or pending). 
+func (m *KrakatoaMempool) onEVMTxRemoved() func(tx *ethtypes.Transaction, pool legacypool.PoolType) { + return func(tx *ethtypes.Transaction, pool legacypool.PoolType) { + // tx was invalidated for some reason or was included in a block + // (either way it is no longer in the mempool), if this tx is in the + // reap list we need remove it from there (no longer need to gossip to + // others about the tx) + the reap guard (since we may see this tx at a + // later time, in which case we should gossip it again) by readding to + // the reap guard. + m.reapList.DropEVMTx(tx) + + _ = m.txTracker.RemoveTxFromPool(tx.Hash(), pool) + } +} + +// IsExclusive returns true if this mempool is the ONLY mempool in the chain. +func (m *KrakatoaMempool) IsExclusive() bool { + return true +} + +// GetBlockchain returns the blockchain interface used for chain head event notifications. +// This is primarily used to notify the mempool when new blocks are finalized. +func (m *KrakatoaMempool) GetBlockchain() *Blockchain { + return m.blockchain +} + +// GetTxPool returns the underlying EVM txpool. +// This provides direct access to the EVM-specific transaction management functionality. +func (m *KrakatoaMempool) GetTxPool() *txpool.TxPool { + return m.txPool +} + +// SetClientCtx sets the client context provider for broadcasting transactions +func (m *KrakatoaMempool) SetClientCtx(clientCtx client.Context) { + m.clientCtx = clientCtx +} + +// Insert adds a transaction to the appropriate mempool (EVM or Cosmos). +// EVM transactions are routed to the EVM transaction pool, while all other +// transactions are inserted into the Cosmos sdkmempool. 
+func (m *KrakatoaMempool) Insert(ctx context.Context, tx sdk.Tx) error { + errC, err := m.insert(ctx, tx) + if err != nil { + return fmt.Errorf("inserting tx: %w", err) + } + + if errC != nil { + // if we got back a non nil async error channel, wait for that to + // resolve + select { + case err := <-errC: + return err + case <-ctx.Done(): + return ctx.Err() + } + } + return nil +} + +// InsertAsync adds a transaction to the appropriate mempool (EVM or Cosmos). EVM +// transactions are routed to the EVM transaction pool, while all other +// transactions are inserted into the Cosmos sdkmempool. EVM transactions are +// inserted async, i.e. they are scheduled for promotion only, we do not wait +// for it to complete. +func (m *KrakatoaMempool) InsertAsync(ctx context.Context, tx sdk.Tx) error { + errC, err := m.insert(ctx, tx) + if err != nil { + return fmt.Errorf("inserting tx: %w", err) + } + + select { + case err := <-errC: + // if we have a result immediately, ready on the channel returned from + // insert, return that (cosmos tx or unable to try and insert the tx + // due to parsing error). + return err + case <-ctx.Done(): + return ctx.Err() + default: + // result was not ready immediately, return nil while async things happen + return nil + } +} + +// insert inserts a tx into its respective mempool, returning a channel for any +// async errors that may happen later upon actual mempool insertion, and an +// error for any errors that occurred synchronously. +func (m *KrakatoaMempool) insert(_ context.Context, tx sdk.Tx) (<-chan error, error) { + ethMsg, err := evmTxFromCosmosTx(tx) + switch { + case err == nil: + ethTx := ethMsg.AsTransaction() + + // we push the tx onto the evm insert queue so the tx will be inserted + // at a later point. We get back a subscription that the insert queue + // will use to notify the caller of any errors that occurred when + // inserting into the mempool. 
+ return m.evmInsertQueue.Push(ethTx), nil + case errors.Is(err, ErrMultiMsgEthereumTransaction): + // there are multiple messages in this tx and one or more of them is an + // evm tx, this is invalid + return nil, err + default: + // we push the tx onto the cosmos insert queue so the tx will be + // inserted at a later point. We get back a subscription that the + // insert queue will use to notify the caller of any errors that + // occurred when inserting into the mempool. + return m.cosmosInsertQueue.Push(&tx), nil + } +} + +// insertAndReapCosmosTx inserts a cosmos tx into the cosmos mempool and sets +// it to be reaped. +func (m *KrakatoaMempool) insertAndReapCosmosTx(tx sdk.Tx) error { + m.logger.Debug("inserting Cosmos transaction") + + // Insert into cosmos pool (handles locking, ante handler, and address reservation internally) + if err := m.recheckCosmosPool.Insert(context.Background(), tx); err != nil { + m.logger.Error("failed to insert Cosmos transaction", "error", err) + return err + } + + m.logger.Debug("Cosmos transaction inserted successfully") + if err := m.reapList.PushCosmosTx(tx); err != nil { + panic(fmt.Errorf("successfully inserted cosmos tx, but failed to insert into reap list: %w", err)) + } + return nil +} + +// ReapNewValidTxs removes and returns the oldest transactions from the reap +// list until maxBytes or maxGas limits are reached. +func (m *KrakatoaMempool) ReapNewValidTxs(maxBytes uint64, maxGas uint64) ([][]byte, error) { + m.logger.Debug("reaping transactions", "maxBytes", maxBytes, "maxGas", maxGas) + txs := m.reapList.Reap(maxBytes, maxGas) + m.logger.Debug("reap complete", "txs_reaped", len(txs)) + + return txs, nil +} + +// Select returns a unified iterator over both EVM and Cosmos transactions. +// The iterator prioritizes transactions based on their fees and manages proper +// sequencing. The i parameter contains transaction hashes to exclude from selection. 
+func (m *KrakatoaMempool) Select(goCtx context.Context, i [][]byte) sdkmempool.Iterator { + return m.buildIterator(goCtx, i) +} + +// SelectBy iterates through transactions until the provided filter function returns false. +// It uses the same unified iterator as Select but allows early termination based on +// custom criteria defined by the filter function. +func (m *KrakatoaMempool) SelectBy(goCtx context.Context, txs [][]byte, filter func(sdk.Tx) bool) { + defer func(t0 time.Time) { telemetry.MeasureSince(t0, "expmempool_selectby_duration") }(time.Now()) //nolint:staticcheck + + iter := m.buildIterator(goCtx, txs) + + for iter != nil && filter(iter.Tx()) { + iter = iter.Next() + } +} + +// buildIterator ensures that EVM mempool has checked txs for reorgs up to COMMITTED +// block height and then returns a combined iterator over EVM & Cosmos txs. +func (m *KrakatoaMempool) buildIterator(ctx context.Context, txs [][]byte) sdkmempool.Iterator { + defer func(t0 time.Time) { telemetry.MeasureSince(t0, "expmempool_builditerator_duration") }(time.Now()) //nolint:staticcheck + + evmIterator, cosmosIterator := m.getIterators(ctx, txs) + + return NewEVMMempoolIterator( + evmIterator, + cosmosIterator, + m.logger, + m.txConfig, + m.vmKeeper.GetEvmCoinInfo(sdk.UnwrapSDKContext(ctx)).Denom, + m.blockchain, + ) +} + +// CountTx returns the total number of transactions in both EVM and Cosmos pools. +// This provides a combined count across all mempool types. +func (m *KrakatoaMempool) CountTx() int { + pending, _ := m.txPool.Stats() + return m.recheckCosmosPool.CountTx() + pending +} + +// Remove fallbacks for RemoveWithReason +func (m *KrakatoaMempool) Remove(tx sdk.Tx) error { + return m.RemoveWithReason(context.Background(), tx, sdkmempool.RemoveReason{ + Caller: "remove", + Error: nil, + }) +} + +// RemoveWithReason removes a transaction from the appropriate sdkmempool. 
+// For EVM transactions, removal is typically handled automatically by the pool +// based on nonce progression. Cosmos transactions are removed from the Cosmos pool. +func (m *KrakatoaMempool) RemoveWithReason(ctx context.Context, tx sdk.Tx, reason sdkmempool.RemoveReason) error { + chainCtx, err := m.blockchain.GetLatestContext() + if err != nil || chainCtx.BlockHeight() == 0 { + m.logger.Warn("Failed to get latest context, skipping removal") + return nil + } + + msgEthereumTx, err := evmTxFromCosmosTx(tx) + switch { + case errors.Is(err, ErrNoMessages): + return err + case err != nil: + // unable to parse evm tx -> process as cosmos tx + return m.removeCosmosTx(ctx, tx, reason) + } + + hash := msgEthereumTx.Hash() + + if m.shouldRemoveFromEVMPool(hash, reason) { + m.logger.Debug("Manually removing EVM transaction", "tx_hash", hash) + m.legacyTxPool.RemoveTx(hash, false, true, convertRemovalReason(reason.Caller)) + } + + if reason.Caller == sdkmempool.CallerRunTxFinalize { + _ = m.txTracker.IncludedInBlock(hash) + } + + return nil +} + +// removeCosmosTx removes a cosmos tx from the mempool. +// The RecheckMempool handles locking internally. +func (m *KrakatoaMempool) removeCosmosTx(ctx context.Context, tx sdk.Tx, reason sdkmempool.RemoveReason) error { + m.logger.Debug("Removing Cosmos transaction") + + // Remove from cosmos pool (handles address reservation release internally) + err := sdkmempool.RemoveWithReason(ctx, m.recheckCosmosPool, tx, reason) + if err != nil { + m.logger.Error("Failed to remove Cosmos transaction", "error", err) + return err + } + + m.reapList.DropCosmosTx(tx) + m.logger.Debug("Cosmos transaction removed successfully") + + return nil +} + +// shouldRemoveFromEVMPool determines whether an EVM transaction should be manually removed. 
+func (m *KrakatoaMempool) shouldRemoveFromEVMPool(hash common.Hash, reason sdkmempool.RemoveReason) bool { + if reason.Error == nil { + return false + } + // Comet will attempt to remove transactions from the mempool after completing successfully. + // We should not do this with EVM transactions because removing them causes the subsequent ones to + // be dequeued as temporarily invalid, only to be requeued a block later. + // The EVM mempool handles removal based on account nonce automatically. + isKnown := errors.Is(reason.Error, ErrNonceGap) || + errors.Is(reason.Error, sdkerrors.ErrInvalidSequence) || + errors.Is(reason.Error, sdkerrors.ErrOutOfGas) + + if isKnown { + m.logger.Debug("Transaction validation succeeded, should be kept", "tx_hash", hash, "caller", reason.Caller) + return false + } + + m.logger.Debug("Transaction validation failed, should be removed", "tx_hash", hash, "caller", reason.Caller) + return true +} + +// SetEventBus sets CometBFT event bus to listen for new block header event. +func (m *KrakatoaMempool) SetEventBus(eventBus *cmttypes.EventBus) { + if m.HasEventBus() { + m.eventBus.Unsubscribe(context.Background(), SubscriberName, stream.NewBlockHeaderEvents) //nolint: errcheck + } + m.eventBus = eventBus + sub, err := eventBus.Subscribe(context.Background(), SubscriberName, stream.NewBlockHeaderEvents) + if err != nil { + panic(err) + } + go func() { + bc := m.GetBlockchain() + for range sub.Out() { + bc.NotifyNewBlock() + // Trigger cosmos pool recheck on new block (non-blocking) + m.recheckCosmosPool.TriggerRecheck(bc.CurrentBlock()) + } + }() +} + +// HasEventBus returns true if the blockchain is configured to use an event bus for block notifications. 
+func (m *KrakatoaMempool) HasEventBus() bool { + return m.eventBus != nil +} + +func (m *KrakatoaMempool) Close() error { + var errs []error + if m.eventBus != nil { + if err := m.eventBus.Unsubscribe(context.Background(), SubscriberName, stream.NewBlockHeaderEvents); err != nil { + errs = append(errs, fmt.Errorf("failed to unsubscribe from event bus: %w", err)) + } + } + + if err := m.recheckCosmosPool.Close(); err != nil { + errs = append(errs, fmt.Errorf("failed to close cosmos pool: %w", err)) + } + + if err := m.txPool.Close(); err != nil { + errs = append(errs, fmt.Errorf("failed to close txpool: %w", err)) + } + + return errors.Join(errs...) +} + +// getIterators prepares iterators over pending EVM and Cosmos transactions. +// It configures EVM transactions with proper base fee filtering and priority ordering, +// while setting up the Cosmos iterator with the provided exclusion list. +func (m *KrakatoaMempool) getIterators(ctx context.Context, _ [][]byte) (evm *miner.TransactionsByPriceAndNonce, cosmos sdkmempool.Iterator) { + var ( + evmIterator *miner.TransactionsByPriceAndNonce + cosmosIterator sdkmempool.Iterator + wg sync.WaitGroup + ) + + // using ctx.BlockHeight() - 1 since we want to get txs that have been + // validated at latest committed height, and ctx.BlockHeight() returns the + // latest uncommitted height + selectHeight := new(big.Int).SetInt64(sdk.UnwrapSDKContext(ctx).BlockHeight() - 1) + + wg.Go(func() { + evmIterator = m.evmIterator(ctx, selectHeight) + }) + + wg.Go(func() { + cosmosIterator = m.cosmosIterator(ctx, selectHeight) + }) + + wg.Wait() + + return evmIterator, cosmosIterator +} + +// evmIterator returns an iterator over the current valid txs in the evm +// mempool at height. 
+func (m *KrakatoaMempool) evmIterator(ctx context.Context, height *big.Int) *miner.TransactionsByPriceAndNonce { + sdkctx := sdk.UnwrapSDKContext(ctx) + baseFee := m.vmKeeper.GetBaseFee(sdkctx) + var baseFeeUint *uint256.Int + if baseFee != nil { + baseFeeUint = uint256.MustFromBig(baseFee) + } + + filter := txpool.PendingFilter{ + MinTip: m.minTip, + BaseFee: baseFeeUint, + BlobFee: nil, + OnlyPlainTxs: true, + OnlyBlobTxs: false, + } + + if m.pendingTxProposalTimeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, m.pendingTxProposalTimeout) + defer cancel() + } + evmPendingTxs := m.txPool.Rechecked(ctx, height, filter) + return miner.NewTransactionsByPriceAndNonce(nil, evmPendingTxs, baseFee) +} + +// cosmosIterator returns an iterator over the current valid txs in the cosmos +// mempool at height. +func (m *KrakatoaMempool) cosmosIterator(ctx context.Context, height *big.Int) sdkmempool.Iterator { + if m.pendingTxProposalTimeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, m.pendingTxProposalTimeout) + defer cancel() + } + return m.recheckCosmosPool.RecheckedTxs(ctx, height) +} + +// TrackTx submits a tx to be tracked for its tx inclusion metrics. +func (m *KrakatoaMempool) TrackTx(hash common.Hash) error { + return m.txTracker.Track(hash) +} + +// RecheckEVMTxs triggers a synchronous recheck of evm transactions. +// This should only be used for testing. +func (m *KrakatoaMempool) RecheckEVMTxs(newHead *ethtypes.Header) { + m.txPool.Reset(nil, newHead) +} + +// RecheckCosmosTxs triggers a synchronous recheck of cosmos transactions. +// This should only be used for testing. 
+func (m *KrakatoaMempool) RecheckCosmosTxs(newHead *ethtypes.Header) { + m.recheckCosmosPool.TriggerRecheckSync(newHead) +} diff --git a/mempool/mempool_test.go b/mempool/krakatoa_mempool_test.go similarity index 85% rename from mempool/mempool_test.go rename to mempool/krakatoa_mempool_test.go index f3af034cf..72222c7b4 100644 --- a/mempool/mempool_test.go +++ b/mempool/krakatoa_mempool_test.go @@ -4,7 +4,6 @@ import ( "context" "crypto/ecdsa" "errors" - "fmt" "math/big" "strconv" "sync" @@ -40,135 +39,81 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" mempooltypes "github.com/cosmos/cosmos-sdk/types/mempool" signingtypes "github.com/cosmos/cosmos-sdk/types/tx/signing" - authsigning "github.com/cosmos/cosmos-sdk/x/auth/signing" banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" ) -const ( - txValue = 100 - txGasLimit = 50000 -) - -func TestMempool_Iterate(t *testing.T) { - numAccs := 20 - storeKey := storetypes.NewKVStoreKey("test") - transientKey := storetypes.NewTransientStoreKey("transient_test") - ctx := testutil.DefaultContext(storeKey, transientKey) //nolint:staticcheck // false positive. - s := setupMempoolWithAccounts(t, numAccs) - mp, txConfig, accounts := s.mp, s.txConfig, s.accounts - - numTxsEach := 5 - for i := range numAccs { - for range numTxsEach { - cosmosTx := createTestCosmosTx(t, txConfig, accounts[i].key, accounts[i].nonce) - accounts[i].nonce++ - err := mp.Insert(ctx, cosmosTx) - require.NoError(t, err) - } - } - - // have to do the below to make select work.. 
- ctx = ctx.WithBlockHeight(2) - myCtx, cancel := context.WithTimeout(ctx, time.Nanosecond) - t.Cleanup(cancel) - - // ------- - iter := mp.Select(myCtx, nil) - - for iter != nil { - sdkTx := iter.Tx() - if sdkTx == nil { - break - } - if sigTx, ok := sdkTx.(authsigning.SigVerifiableTx); ok { - signers, _ := sigTx.GetSigners() - sigs, _ := sigTx.GetSignaturesV2() - for i, signer := range signers { - fmt.Printf("sender: %x, nonce: %d\n", signer, sigs[i].Sequence) - } - } - iter = iter.Next() - } -} - -func TestMempool_Reserver(t *testing.T) { - storeKey := storetypes.NewKVStoreKey("test") - transientKey := storetypes.NewTransientStoreKey("transient_test") - ctx := testutil.DefaultContext(storeKey, transientKey) //nolint:staticcheck // false positive. - s := setupMempoolWithAccounts(t, 3) - mp, txConfig, accounts := s.mp, s.txConfig, s.accounts +func TestKrakatoaMempool_Reserver(t *testing.T) { + mp, s := setupKrakatoaMempoolWithAccounts(t, 3) + txConfig, accounts := s.txConfig, s.accounts accountKey := accounts[0].key // insert eth tx from account0 ethTx := createMsgEthereumTx(t, txConfig, accountKey, 0, big.NewInt(1e8)) - err := mp.Insert(ctx, ethTx) + err := mp.Insert(context.Background(), ethTx) require.NoError(t, err) // insert cosmos tx from acount0, should error cosmosTx := createTestCosmosTx(t, txConfig, accountKey, 0) - err = mp.Insert(ctx, cosmosTx) + err = mp.Insert(context.Background(), cosmosTx) require.ErrorIs(t, err, reserver.ErrAlreadyReserved) // remove the eth tx - err = mp.RemoveWithReason(ctx, ethTx, mempooltypes.RemoveReason{Error: errors.New("some error")}) + err = mp.RemoveWithReason(context.Background(), ethTx, mempooltypes.RemoveReason{Error: errors.New("some error")}) require.NoError(t, err) // pool should be clear require.Equal(t, 0, mp.CountTx()) // should be able to insert the cosmos tx now - err = mp.Insert(ctx, cosmosTx) + err = mp.Insert(context.Background(), cosmosTx) require.NoError(t, err) // should be able to send another tx from 
the same account to the same pool. cosmosTx2 := createTestCosmosTx(t, txConfig, accountKey, 1) - err = mp.Insert(ctx, cosmosTx2) + err = mp.Insert(context.Background(), cosmosTx2) require.NoError(t, err) // there should be 2 txs at this point require.Equal(t, 2, mp.CountTx()) // eth tx should now fail. - err = mp.Insert(ctx, ethTx) + err = mp.Insert(context.Background(), ethTx) require.ErrorIs(t, err, reserver.ErrAlreadyReserved) } -func TestMempool_ReserverMultiSigner(t *testing.T) { - storeKey := storetypes.NewKVStoreKey("test") - transientKey := storetypes.NewTransientStoreKey("transient_test") - ctx := testutil.DefaultContext(storeKey, transientKey) //nolint:staticcheck // false positive. - s := setupMempoolWithAccounts(t, 4) - mp, txConfig, accounts := s.mp, s.txConfig, s.accounts +func TestKrakatoaMempool_ReserverMultiSigner(t *testing.T) { + mp, s := setupKrakatoaMempoolWithAccounts(t, 4) + txConfig, accounts := s.txConfig, s.accounts accountKey := accounts[0].key // insert eth tx from account0 ethTx := createMsgEthereumTx(t, txConfig, accountKey, 0, big.NewInt(1e8)) - err := mp.Insert(ctx, ethTx) + err := mp.Insert(context.Background(), ethTx) require.NoError(t, err) // inserting accounts 1 & 2 should be fine. cosmosTx := createTestMultiSignerCosmosTx(t, txConfig, accounts[1].key, accounts[2].key) - err = mp.Insert(ctx, cosmosTx) + err = mp.Insert(context.Background(), cosmosTx) require.NoError(t, err) // submitting account1 key should fail, since it was part of the signer group in the cosmos tx. ethTx2 := createMsgEthereumTx(t, txConfig, accounts[1].key, 1, big.NewInt(1e8)) - err = mp.Insert(ctx, ethTx2) + err = mp.Insert(context.Background(), ethTx2) require.ErrorIs(t, err, reserver.ErrAlreadyReserved) // account 0 already has ethTx in pool, should fail. 
comsosTx := createTestMultiSignerCosmosTx(t, txConfig, accounts[3].key, accounts[0].key) - err = mp.Insert(ctx, comsosTx) + err = mp.Insert(context.Background(), comsosTx) require.ErrorIs(t, err, reserver.ErrAlreadyReserved) } // Ensures txs are not reaped multiple times when promoting and demoting the // same tx -func TestMempool_ReapPromoteDemotePromote(t *testing.T) { - s := setupMempoolWithAccounts(t, 3) - mp, txConfig, rechecker, bus, accounts := s.mp, s.txConfig, s.evmRechecker, s.eventBus, s.accounts +func TestKrakatoaMempool_ReapPromoteDemotePromote(t *testing.T) { + mp, s := setupKrakatoaMempoolWithAccounts(t, 3) + txConfig, rechecker, bus, accounts := s.txConfig, s.evmRechecker, s.eventBus, s.accounts err := bus.PublishEventNewBlockHeader(cmttypes.EventDataNewBlockHeader{ Header: cmttypes.Header{ @@ -259,9 +204,9 @@ func TestMempool_ReapPromoteDemotePromote(t *testing.T) { require.Equal(t, uint64(1), getTxNonce(t, txConfig, txs[0])) } -func TestMempool_QueueInvalidWhenUsingPendingState(t *testing.T) { - s := setupMempoolWithAccounts(t, 3) - mp, txConfig, rechecker, bus, accounts := s.mp, s.txConfig, s.evmRechecker, s.eventBus, s.accounts +func TestKrakatoaMempool_QueueInvalidWhenUsingPendingState(t *testing.T) { + mp, s := setupKrakatoaMempoolWithAccounts(t, 3) + txConfig, rechecker, bus, accounts := s.txConfig, s.evmRechecker, s.eventBus, s.accounts err := bus.PublishEventNewBlockHeader(cmttypes.EventDataNewBlockHeader{ Header: cmttypes.Header{ Height: 1, @@ -314,9 +259,9 @@ func TestMempool_QueueInvalidWhenUsingPendingState(t *testing.T) { require.Len(t, queued, 0) } -func TestMempool_ReapPromoteDemoteReap(t *testing.T) { - s := setupMempoolWithAccounts(t, 3) - mp, txConfig, rechecker, bus, accounts := s.mp, s.txConfig, s.evmRechecker, s.eventBus, s.accounts +func TestKrakatoaMempool_ReapPromoteDemoteReap(t *testing.T) { + mp, s := setupKrakatoaMempoolWithAccounts(t, 3) + txConfig, rechecker, bus, accounts := s.txConfig, s.evmRechecker, s.eventBus, 
s.accounts err := bus.PublishEventNewBlockHeader(cmttypes.EventDataNewBlockHeader{ Header: cmttypes.Header{ Height: 1, @@ -383,9 +328,9 @@ func TestMempool_ReapPromoteDemoteReap(t *testing.T) { require.Equal(t, uint64(0), getTxNonce(t, txConfig, txs[0])) } -func TestMempool_ReapNewBlock(t *testing.T) { - s := setupMempoolWithAccounts(t, 3) - mp, vmKeeper, txConfig, bus, accounts := s.mp, s.vmKeeper, s.txConfig, s.eventBus, s.accounts +func TestKrakatoaMempool_ReapNewBlock(t *testing.T) { + mp, s := setupKrakatoaMempoolWithAccounts(t, 3) + vmKeeper, txConfig, bus, accounts := s.vmKeeper, s.txConfig, s.eventBus, s.accounts err := bus.PublishEventNewBlockHeader(cmttypes.EventDataNewBlockHeader{ Header: cmttypes.Header{ Height: 1, @@ -444,9 +389,9 @@ func TestMempool_ReapNewBlock(t *testing.T) { require.GreaterOrEqual(t, getTxNonce(t, txConfig, txs[1]), uint64(1)) // 1 or 2 } -func TestMempool_InsertMultiMsgCosmosTx(t *testing.T) { - s := setupMempoolWithAccounts(t, 3) - mp, txConfig, bus := s.mp, s.txConfig, s.eventBus +func TestKrakatoaMempool_InsertMultiMsgCosmosTx(t *testing.T) { + mp, s := setupKrakatoaMempoolWithAccounts(t, 3) + txConfig, bus := s.txConfig, s.eventBus err := bus.PublishEventNewBlockHeader(cmttypes.EventDataNewBlockHeader{ Header: cmttypes.Header{ @@ -506,9 +451,49 @@ func TestMempool_InsertMultiMsgCosmosTx(t *testing.T) { require.Len(t, txs, 1, "expected a single tx to be reaped") } -func TestMempool_InsertMultiMsgEthereumTx(t *testing.T) { - s := setupMempoolWithAccounts(t, 3) - mp, txConfig, bus := s.mp, s.txConfig, s.eventBus +func TestKrakatoaMempool_InsertSynchronous(t *testing.T) { + mp, s := setupKrakatoaMempoolWithAccounts(t, 3) + txConfig, bus, accounts := s.txConfig, s.eventBus, s.accounts + err := bus.PublishEventNewBlockHeader(cmttypes.EventDataNewBlockHeader{ + Header: cmttypes.Header{ + Height: 1, + Time: time.Now(), + ChainID: strconv.Itoa(constants.EighteenDecimalsChainID), + }, + }) + require.NoError(t, err) + + // Wait for reset 
to happen for block 1 + require.NoError(t, mp.GetTxPool().Sync()) + + legacyPool := mp.GetTxPool().Subpools[0].(*legacypool.LegacyPool) + + // Insert a transaction using the synchronous Insert method + // This should wait for the transaction to be added to the pool before returning + tx := createMsgEthereumTx(t, txConfig, accounts[0].key, 0, big.NewInt(1e8)) + err = mp.Insert(sdk.Context{}.WithContext(context.Background()), tx) + require.NoError(t, err) + + // After Insert returns, the transaction should already be in the pool + // (either pending or queued). We don't need to call Sync() to wait. + pending, queued := legacyPool.ContentFrom(accounts[0].address) + totalTxs := len(pending) + len(queued) + require.Equal(t, 1, totalTxs, "transaction should be in pool immediately after Insert returns") + + // Create a transaction with a gas price that would exceed the account balance + // Account balance is 100000000000100, so set gas price extremely high + excessiveGasPrice := new(big.Int).SetUint64(accounts[0].initialBalance * 100) + tx = createMsgEthereumTx(t, txConfig, accounts[0].key, 0, excessiveGasPrice) + err = mp.Insert(sdk.Context{}.WithContext(context.Background()), tx) + + // The synchronous Insert should return the error from the tx pool + require.Error(t, err, "Insert should return error when tx pool rejects transaction") + require.Contains(t, err.Error(), "insufficient funds", "error should indicate insufficient funds") +} + +func TestKrakatoaMempool_InsertMultiMsgEthereumTx(t *testing.T) { + mp, s := setupKrakatoaMempoolWithAccounts(t, 3) + txConfig, bus := s.txConfig, s.eventBus err := bus.PublishEventNewBlockHeader(cmttypes.EventDataNewBlockHeader{ Header: cmttypes.Header{ @@ -556,64 +541,13 @@ func TestMempool_InsertMultiMsgEthereumTx(t *testing.T) { require.Len(t, txs, 0, "expected no txs to be reaped") } -func TestMempool_InsertSynchronous(t *testing.T) { - s := setupMempoolWithAccounts(t, 3) - mp, txConfig, bus, accounts := s.mp, s.txConfig, 
s.eventBus, s.accounts - err := bus.PublishEventNewBlockHeader(cmttypes.EventDataNewBlockHeader{ - Header: cmttypes.Header{ - Height: 1, - Time: time.Now(), - ChainID: strconv.Itoa(constants.EighteenDecimalsChainID), - }, - }) - require.NoError(t, err) - - // Wait for reset to happen for block 1 - require.NoError(t, mp.GetTxPool().Sync()) - - legacyPool := mp.GetTxPool().Subpools[0].(*legacypool.LegacyPool) - - // Insert a transaction using the synchronous Insert method - // This should wait for the transaction to be added to the pool before returning - tx := createMsgEthereumTx(t, txConfig, accounts[0].key, 0, big.NewInt(1e8)) - err = mp.Insert(sdk.Context{}.WithContext(context.Background()), tx) - require.NoError(t, err) - - // After Insert returns, the transaction should already be in the pool - // (either pending or queued). We don't need to call Sync() to wait. - pending, queued := legacyPool.ContentFrom(accounts[0].address) - totalTxs := len(pending) + len(queued) - require.Equal(t, 1, totalTxs, "transaction should be in pool immediately after Insert returns") -} - -func TestMempool_InsertSynchronousReturnsError(t *testing.T) { - s := setupMempoolWithAccounts(t, 3) - mp, txConfig, bus, accounts := s.mp, s.txConfig, s.eventBus, s.accounts - err := bus.PublishEventNewBlockHeader(cmttypes.EventDataNewBlockHeader{ - Header: cmttypes.Header{ - Height: 1, - Time: time.Now(), - ChainID: strconv.Itoa(constants.EighteenDecimalsChainID), - }, - }) - require.NoError(t, err) - - // Wait for reset to happen for block 1 - require.NoError(t, mp.GetTxPool().Sync()) - - // Create a transaction with a gas price that would exceed the account balance - // Account balance is 100000000000100, so set gas price extremely high - excessiveGasPrice := new(big.Int).SetUint64(accounts[0].initialBalance * 100) - tx := createMsgEthereumTx(t, txConfig, accounts[0].key, 0, excessiveGasPrice) - err = mp.Insert(sdk.Context{}.WithContext(context.Background()), tx) - - // The synchronous Insert 
should return the error from the tx pool - require.Error(t, err, "Insert should return error when tx pool rejects transaction") - require.Contains(t, err.Error(), "insufficient funds", "error should indicate insufficient funds") -} - // Helper types and functions +const ( + txValue = 100 + txGasLimit = 50000 +) + type testAccount struct { key *ecdsa.PrivateKey address common.Address @@ -621,9 +555,8 @@ type testAccount struct { initialBalance uint64 } -type testMempool struct { - mp *mempool.ExperimentalEVMMempool - vmKeeper *mocks.VMKeeper +type testMempoolDependencies struct { + vmKeeper *mocks.VMKeeperI txConfig client.TxConfig evmRechecker *MockRechecker cosmosRechecker *MockRechecker @@ -631,7 +564,7 @@ type testMempool struct { accounts []testAccount } -func setupMempoolWithAccounts(t *testing.T, numAccounts int) testMempool { +func setupKrakatoaMempoolWithAccounts(t *testing.T, numAccounts int) (*mempool.KrakatoaMempool, testMempoolDependencies) { t.Helper() // Create accounts @@ -658,7 +591,7 @@ func setupMempoolWithAccounts(t *testing.T, numAccounts int) testMempool { require.NoError(t, err) // Create mocks - mockVMKeeper := mocks.NewVMKeeper(t) + mockVMKeeper := mocks.NewVMKeeperI(t) mockFeeMarketKeeper := mocks.NewFeeMarketKeeper(t) // Setup mock expectations @@ -722,26 +655,27 @@ func setupMempoolWithAccounts(t *testing.T, numAccounts int) testMempool { legacyConfig.PriceLimit = 1 legacyConfig.PriceBump = 10 // 10% price bump for replacement - config := &mempool.EVMMempoolConfig{ - LegacyPoolConfig: &legacyConfig, - BlockGasLimit: 30000000, - MinTip: uint256.NewInt(0), - InsertQueueSize: 1000, + krakatoaConfig := &mempool.KrakatoaMempoolConfig{ + EVMMempoolConfig: mempool.EVMMempoolConfig{ + LegacyPoolConfig: &legacyConfig, + BlockGasLimit: 30000000, + MinTip: uint256.NewInt(0), + }, + InsertQueueSize: 1000, } // Create mempool evmRechecker := &MockRechecker{} cosmosRechecker := &MockRechecker{} - mp := mempool.NewExperimentalEVMMempool( + mp := 
mempool.NewKrakatoaMempool( getCtxCallback, log.NewNopLogger(), mockVMKeeper, mockFeeMarketKeeper, txConfig, - mempool.NewTxEncoder(txConfig), evmRechecker, cosmosRechecker, - config, + krakatoaConfig, 1000, // cosmos pool max tx ) require.NotNil(t, mp) @@ -752,8 +686,7 @@ func setupMempoolWithAccounts(t *testing.T, numAccounts int) testMempool { require.NoError(t, eventBus.Start()) mp.SetEventBus(eventBus) - return testMempool{ - mp: mp, + return mp, testMempoolDependencies{ vmKeeper: mockVMKeeper, txConfig: txConfig, evmRechecker: evmRechecker, diff --git a/mempool/mempool.go b/mempool/mempool.go index 19da35c95..d02345cd9 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "math/big" "sync" "time" @@ -15,8 +14,6 @@ import ( cmttypes "github.com/cometbft/cometbft/types" - "github.com/cosmos/evm/mempool/internal/heightsync" - "github.com/cosmos/evm/mempool/internal/queue" "github.com/cosmos/evm/mempool/miner" "github.com/cosmos/evm/mempool/reserver" "github.com/cosmos/evm/mempool/txpool" @@ -62,11 +59,9 @@ type ( vmKeeper VMKeeperI /** Mempools **/ - txPool *txpool.TxPool - legacyTxPool *legacypool.LegacyPool - cosmosPool *RecheckMempool - operateExclusively bool - pendingTxProposalTimeout time.Duration + txPool *txpool.TxPool + legacyTxPool *legacypool.LegacyPool + cosmosPool sdkmempool.ExtMempool /** Utils **/ logger log.Logger @@ -77,16 +72,6 @@ type ( minTip *uint256.Int eventBus *cmttypes.EventBus - - /** Transaction Reaping **/ - reapList *ReapList - - /** Transaction Tracking **/ - txTracker *txTracker - - /** Transaction Inserting **/ - cosmosQueue *queue.Queue[sdk.Tx] - evmQueue *queue.Queue[ethtypes.Transaction] } ) @@ -97,20 +82,10 @@ type EVMMempoolConfig struct { LegacyPoolConfig *legacypool.Config CosmosPoolConfig *sdkmempool.PriorityNonceMempoolConfig[math.Int] AnteHandler sdk.AnteHandler + BroadCastTxFn func(txs []*ethtypes.Transaction) error // Block gas limit from consensus parameters 
BlockGasLimit uint64 MinTip *uint256.Int - // OperateExclusively indicates whether this mempool is the ONLY mempool in the chain. - // If false, comet-bft also operates its own clist-mempool. If true, then the mempool expects exclusive - // handling of transactions via ABCI.InsertTx & ABCI.ReapTxs. - OperateExclusively bool - // PendingTxProposalTimeout is the max amount of time to allocate to - // fetching (or watiing to fetch) pending txs from the evm mempool. - PendingTxProposalTimeout time.Duration - // InsertQueueSize is how many txs can be stored in the insert queue - // pending insertion into the mempool. Note the insert queue is only used - // for EVM txs. - InsertQueueSize int } // NewExperimentalEVMMempool creates a new unified mempool for EVM and Cosmos transactions. @@ -123,9 +98,6 @@ func NewExperimentalEVMMempool( vmKeeper VMKeeperI, feeMarketKeeper FeeMarketKeeperI, txConfig client.TxConfig, - txEncoder *TxEncoder, - evmRechecker legacypool.Rechecker, - cosmosRechecker Rechecker, config *EVMMempoolConfig, cosmosPoolMaxTx int, ) *ExperimentalEVMMempool { @@ -155,12 +127,8 @@ func NewExperimentalEVMMempool( if config.LegacyPoolConfig != nil { legacyConfig = *config.LegacyPoolConfig } - legacyPool := legacypool.New( - legacyConfig, - logger, - blockchain, - legacypool.WithRecheck(evmRechecker), - ) + + legacyPool := legacypool.New(legacyConfig, logger, blockchain) tracker := reserver.NewReservationTracker() txPool, err := txpool.New(uint64(0), blockchain, tracker, []txpool.SubPool{legacyPool}) @@ -208,97 +176,28 @@ func NewExperimentalEVMMempool( cosmosPoolConfig.MaxTx = cosmosPoolMaxTx cosmosPool = sdkmempool.NewPriorityMempool(*cosmosPoolConfig) - // Wrap cosmos pool with recheck functionality - recheckPool := NewRecheckMempool( - logger, - cosmosPool, - tracker.NewHandle(-1), - cosmosRechecker, - heightsync.New(blockchain.CurrentBlock().Number, NewCosmosTxStore, logger.With("pool", "recheckpool")), - blockchain, - ) - evmMempool := 
&ExperimentalEVMMempool{ - vmKeeper: vmKeeper, - txPool: txPool, - legacyTxPool: txPool.Subpools[0].(*legacypool.LegacyPool), - cosmosPool: recheckPool, - logger: logger, - txConfig: txConfig, - blockchain: blockchain, - blockGasLimit: config.BlockGasLimit, - minTip: config.MinTip, - operateExclusively: config.OperateExclusively, - pendingTxProposalTimeout: config.PendingTxProposalTimeout, - reapList: NewReapList(txEncoder), - txTracker: newTxTracker(), - } - - // Create insert queues for evm and cosmos txs - - evmQueue := queue.New( - func(txs []*ethtypes.Transaction) []error { - return txPool.Add(txs, AllowUnsafeSyncInsert) - }, - config.InsertQueueSize, - logger, - ) - evmMempool.evmQueue = evmQueue - - cosmosQueue := queue.New( - func(txs []*sdk.Tx) []error { - errs := make([]error, len(txs)) - for i, tx := range txs { - errs[i] = evmMempool.insertCosmosTx(*tx) - } - return errs - }, - config.InsertQueueSize, - logger, - ) - evmMempool.cosmosQueue = cosmosQueue - - // Once we have validated that the tx is valid (and can be promoted, set it - // to be reaped) - legacyPool.OnTxPromoted = func(tx *ethtypes.Transaction) { - if err := evmMempool.reapList.PushEVMTx(tx); err != nil { - logger.Error("could not push evm tx to ReapList", "err", err) - } - - hash := tx.Hash() - _ = evmMempool.txTracker.ExitedQueued(hash) - _ = evmMempool.txTracker.EnteredPending(hash) - } - - legacyPool.OnTxEnqueued = func(tx *ethtypes.Transaction) { - _ = evmMempool.txTracker.EnteredQueued(tx.Hash()) + vmKeeper: vmKeeper, + txPool: txPool, + legacyTxPool: txPool.Subpools[0].(*legacypool.LegacyPool), + cosmosPool: cosmosPool, + logger: logger, + txConfig: txConfig, + blockchain: blockchain, + blockGasLimit: config.BlockGasLimit, + minTip: config.MinTip, } - // Once we are removing the tx, we no longer need to block it from being - // sent to the reaplist again and can remove from the guard - legacyPool.OnTxRemoved = func(tx *ethtypes.Transaction, pool legacypool.PoolType) { - // tx was 
invalidated for some reason or was included in a block - // (either way it is no longer in the mempool), if this tx is in the - // reap list we need remove it from there (no longer need to gossip to - // others about the tx) + the reap guard (since we may see this tx at a - // later time, in which case we should gossip it again) by readding to - // the reap guard. - evmMempool.reapList.DropEVMTx(tx) - - _ = evmMempool.txTracker.RemoveTxFromPool(tx.Hash(), pool) - } + legacyPool.OnTxPromoted = evmMempool.onEVMTxPromoted(config.BroadCastTxFn) vmKeeper.SetEvmMempool(evmMempool) - // Start the cosmos pool recheck loop - evmMempool.cosmosPool.Start(blockchain.CurrentBlock()) - return evmMempool } // IsExclusive returns true if this mempool is the ONLY mempool in the chain. func (m *ExperimentalEVMMempool) IsExclusive() bool { - return m.operateExclusively + return false } // GetBlockchain returns the blockchain interface used for chain head event notifications. @@ -321,93 +220,40 @@ func (m *ExperimentalEVMMempool) SetClientCtx(clientCtx client.Context) { // Insert adds a transaction to the appropriate mempool (EVM or Cosmos). // EVM transactions are routed to the EVM transaction pool, while all other // transactions are inserted into the Cosmos sdkmempool. -func (m *ExperimentalEVMMempool) Insert(ctx context.Context, tx sdk.Tx) error { - errC, err := m.insert(ctx, tx) - if err != nil { - return fmt.Errorf("inserting tx: %w", err) - } - - if errC != nil { - // if we got back a non nil async error channel, wait for that to - // resolve - select { - case err := <-errC: - return err - case <-ctx.Done(): - return ctx.Err() - } - } - return nil -} - -// InsertAsync adds a transaction to the appropriate mempool (EVM or Cosmos). EVM -// transactions are routed to the EVM transaction pool, while all other -// transactions are inserted into the Cosmos sdkmempool. EVM transactions are -// inserted async, i.e. 
they are scheduled for promotion only, we do not wait -// for it to complete. -func (m *ExperimentalEVMMempool) InsertAsync(ctx context.Context, tx sdk.Tx) error { - errC, err := m.insert(ctx, tx) - if err != nil { - return fmt.Errorf("inserting tx: %w", err) - } +func (m *ExperimentalEVMMempool) Insert(goCtx context.Context, tx sdk.Tx) error { + ctx := sdk.UnwrapSDKContext(goCtx) + blockHeight := ctx.BlockHeight() - select { - case err := <-errC: - // if we have a result immediately, ready on the channel returned from - // insert, return that (cosmos tx or unable to try and insert the tx - // due to parsing error). - return err - case <-ctx.Done(): - return ctx.Err() - default: - // result was not ready immediately, return nil while async things happen - return nil - } -} - -// insert inserts a tx into its respective mempool, returning a channel for any -// async errors that may happen later upon actual mempool insertion, and an -// error for any errors that occurred synchronously. -func (m *ExperimentalEVMMempool) insert(_ context.Context, tx sdk.Tx) (<-chan error, error) { + m.logger.Debug("inserting transaction into mempool", "block_height", blockHeight) ethMsg, err := evmTxFromCosmosTx(tx) switch { case err == nil: - ethTx := ethMsg.AsTransaction() - - // we push the tx onto the evm insert queue so the tx will be inserted - // at a later point. We get back a subscription that the insert queue - // will use to notify the caller of any errors that occurred when - // inserting into the mempool. 
- return m.evmQueue.Push(ethTx), nil + // Insert into EVM pool + hash := ethMsg.Hash() + m.logger.Debug("inserting EVM transaction", "tx_hash", hash) + ethTxs := []*ethtypes.Transaction{ethMsg.AsTransaction()} + errs := m.txPool.Add(ethTxs, AllowUnsafeSyncInsert) + if len(errs) > 0 && errs[0] != nil { + m.logger.Error("failed to insert EVM transaction", "error", errs[0], "tx_hash", hash) + return errs[0] + } + m.logger.Debug("EVM transaction inserted successfully", "tx_hash", hash) + return nil case errors.Is(err, ErrMultiMsgEthereumTransaction): // there are multiple messages in this tx and one or more of them is an // evm tx, this is invalid - return nil, err - default: - // we push the tx onto the cosmos insert queue so the tx will be - // inserted at a later point. We get back a subscription that the - // insert queue will use to notify the caller of any errors that - // occurred when inserting into the mempool. - return m.cosmosQueue.Push(&tx), nil - } -} - -// insertCosmosTx inserts a cosmos tx into the cosmos mempool. -// The RecheckMempool handles ante handler validation, address reservation, and locking internally. 
-func (m *ExperimentalEVMMempool) insertCosmosTx(tx sdk.Tx) error { - m.logger.Debug("inserting Cosmos transaction") - - // Insert into cosmos pool (handles locking, ante handler, and address reservation internally) - if err := m.cosmosPool.Insert(context.Background(), tx); err != nil { - m.logger.Error("failed to insert Cosmos transaction", "error", err) return err - } + default: + // Insert into cosmos pool for non-EVM transactions + m.logger.Debug("inserting Cosmos transaction") + if err = m.cosmosPool.Insert(goCtx, tx); err != nil { + m.logger.Error("failed to insert Cosmos transaction", "error", err) + return err + } - m.logger.Debug("Cosmos transaction inserted successfully") - if err := m.reapList.PushCosmosTx(tx); err != nil { - panic(fmt.Errorf("successfully inserted cosmos tx, but failed to insert into reap list: %w", err)) + m.logger.Debug("Cosmos transaction inserted successfully") + return nil } - return nil } // InsertInvalidNonce handles transactions that failed with nonce gap errors. @@ -442,16 +288,6 @@ func (m *ExperimentalEVMMempool) InsertInvalidNonce(txBytes []byte) error { return nil } -// ReapNewValidTxs removes and returns the oldest transactions from the reap -// list until maxBytes or maxGas limits are reached. -func (m *ExperimentalEVMMempool) ReapNewValidTxs(maxBytes uint64, maxGas uint64) ([][]byte, error) { - m.logger.Debug("reaping transactions", "maxBytes", maxBytes, "maxGas", maxGas, "available_txs") - txs := m.reapList.Reap(maxBytes, maxGas) - m.logger.Debug("reap complete", "txs_reaped", len(txs)) - - return txs, nil -} - // Select returns a unified iterator over both EVM and Cosmos transactions. // The iterator prioritizes transactions based on their fees and manages proper // sequencing. The i parameter contains transaction hashes to exclude from selection. 
@@ -519,8 +355,15 @@ func (m *ExperimentalEVMMempool) RemoveWithReason(ctx context.Context, tx sdk.Tx case errors.Is(err, ErrNoMessages): return err case err != nil: - // unable to parse evm tx -> process as cosmos tx - return m.removeCosmosTx(ctx, tx, reason) + m.logger.Debug("Removing Cosmos transaction") + + if err := sdkmempool.RemoveWithReason(ctx, m.cosmosPool, tx, reason); err != nil { + m.logger.Error("Failed to remove Cosmos transaction", "error", err) + return err + } + + m.logger.Debug("Cosmos transaction removed successfully") + return nil } hash := msgEthereumTx.Hash() @@ -530,10 +373,6 @@ func (m *ExperimentalEVMMempool) RemoveWithReason(ctx context.Context, tx sdk.Tx m.legacyTxPool.RemoveTx(hash, false, true, convertRemovalReason(reason.Caller)) } - if reason.Caller == sdkmempool.CallerRunTxFinalize { - _ = m.txTracker.IncludedInBlock(hash) - } - return nil } @@ -551,24 +390,6 @@ func convertRemovalReason(caller sdkmempool.RemovalCaller) txpool.RemovalReason } } -// removeCosmosTx removes a cosmos tx from the mempool. -// The RecheckMempool handles locking internally. -func (m *ExperimentalEVMMempool) removeCosmosTx(ctx context.Context, tx sdk.Tx, reason sdkmempool.RemoveReason) error { - m.logger.Debug("Removing Cosmos transaction") - - // Remove from cosmos pool (handles address reservation release internally) - err := sdkmempool.RemoveWithReason(ctx, m.cosmosPool, tx, reason) - if err != nil { - m.logger.Error("Failed to remove Cosmos transaction", "error", err) - return err - } - - m.reapList.DropCosmosTx(tx) - m.logger.Debug("Cosmos transaction removed successfully") - - return nil -} - // shouldRemoveFromEVMPool determines whether an EVM transaction should be manually removed. 
func (m *ExperimentalEVMMempool) shouldRemoveFromEVMPool(hash common.Hash, reason sdkmempool.RemoveReason) bool { if reason.Error == nil { @@ -605,8 +426,6 @@ func (m *ExperimentalEVMMempool) SetEventBus(eventBus *cmttypes.EventBus) { bc := m.GetBlockchain() for range sub.Out() { bc.NotifyNewBlock() - // Trigger cosmos pool recheck on new block (non-blocking) - m.cosmosPool.TriggerRecheck(bc.CurrentBlock()) } }() } @@ -624,13 +443,6 @@ func (m *ExperimentalEVMMempool) Close() error { } } - m.evmQueue.Close() - m.cosmosQueue.Close() - - if err := m.cosmosPool.Close(); err != nil { - errs = append(errs, fmt.Errorf("failed to close cosmos pool: %w", err)) - } - if err := m.txPool.Close(); err != nil { errs = append(errs, fmt.Errorf("failed to close txpool: %w", err)) } @@ -673,24 +485,19 @@ func evmTxFromCosmosTx(tx sdk.Tx) (*evmtypes.MsgEthereumTx, error) { // getIterators prepares iterators over pending EVM and Cosmos transactions. // It configures EVM transactions with proper base fee filtering and priority ordering, // while setting up the Cosmos iterator with the provided exclusion list. 
-func (m *ExperimentalEVMMempool) getIterators(ctx context.Context, _ [][]byte) (evm *miner.TransactionsByPriceAndNonce, cosmos sdkmempool.Iterator) { +func (m *ExperimentalEVMMempool) getIterators(ctx context.Context, txs [][]byte) (evm *miner.TransactionsByPriceAndNonce, cosmos sdkmempool.Iterator) { var ( evmIterator *miner.TransactionsByPriceAndNonce cosmosIterator sdkmempool.Iterator wg sync.WaitGroup ) - // using ctx.BlockHeight() - 1 since we want to get txs that have been - // validated at latest committed height, and ctx.BlockHeight() returns the - // latest uncommitted height - selectHeight := new(big.Int).SetInt64(sdk.UnwrapSDKContext(ctx).BlockHeight() - 1) - wg.Go(func() { - evmIterator = m.evmIterator(ctx, selectHeight) + evmIterator = m.evmIterator(ctx) }) wg.Go(func() { - cosmosIterator = m.cosmosIterator(ctx, selectHeight) + cosmosIterator = m.cosmosPool.Select(ctx, txs) }) wg.Wait() @@ -700,7 +507,7 @@ func (m *ExperimentalEVMMempool) getIterators(ctx context.Context, _ [][]byte) ( // evmIterator returns an iterator over the current valid txs in the evm // mempool at height. 
-func (m *ExperimentalEVMMempool) evmIterator(ctx context.Context, height *big.Int) *miner.TransactionsByPriceAndNonce { +func (m *ExperimentalEVMMempool) evmIterator(ctx context.Context) *miner.TransactionsByPriceAndNonce { sdkctx := sdk.UnwrapSDKContext(ctx) baseFee := m.vmKeeper.GetBaseFee(sdkctx) var baseFeeUint *uint256.Int @@ -716,40 +523,52 @@ func (m *ExperimentalEVMMempool) evmIterator(ctx context.Context, height *big.In OnlyBlobTxs: false, } - if m.pendingTxProposalTimeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, m.pendingTxProposalTimeout) - defer cancel() - } - evmPendingTxs := m.txPool.Pending(ctx, height, filter) + evmPendingTxs := m.txPool.Pending(ctx, filter) return miner.NewTransactionsByPriceAndNonce(nil, evmPendingTxs, baseFee) } -// cosmosIterator returns an iterator over the current valid txs in the cosmos -// mempool at height. -func (m *ExperimentalEVMMempool) cosmosIterator(ctx context.Context, height *big.Int) sdkmempool.Iterator { - if m.pendingTxProposalTimeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, m.pendingTxProposalTimeout) - defer cancel() +func (m *ExperimentalEVMMempool) onEVMTxPromoted(broadcastTxFn func(txs []*ethtypes.Transaction) error) func(tx *ethtypes.Transaction) { + if broadcastTxFn != nil { + return func(tx *ethtypes.Transaction) { + if err := broadcastTxFn(ethtypes.Transactions{tx}); err != nil { + m.logger.Error("Failed to broadcast transaction", "err", err, "tx_hash", tx.Hash()) + } + } } - return m.cosmosPool.RecheckedTxs(ctx, height) -} -// TrackTx submits a tx to be tracked for its tx inclusion metrics. 
-func (m *ExperimentalEVMMempool) TrackTx(hash common.Hash) error { - return m.txTracker.Track(hash) + return func(tx *ethtypes.Transaction) { + if err := m.broadcastEVMTransaction(m.clientCtx, tx); err != nil { + m.logger.Error("Failed to broadcast transaction", "err", err, "tx_hash", tx.Hash()) + } + } } -// RecheckCosmosTxs triggers a synchronous recheck of cosmos transactions. -// This is primarily used for testing. -func (m *ExperimentalEVMMempool) RecheckCosmosTxs(newHead *ethtypes.Header) { - m.cosmosPool.TriggerRecheckSync(newHead) -} +// broadcastEVMTransaction converts an Ethereum transaction to Cosmos SDK format and broadcasts them. +// This function wraps EVM transactions in MsgEthereumTx messages and submits them to the network +// using the provided client context. It handles encoding and error reporting for each transaction. +func (m *ExperimentalEVMMempool) broadcastEVMTransaction(clientCtx client.Context, ethTx *ethtypes.Transaction) error { + msg := &evmtypes.MsgEthereumTx{} + ethSigner := ethtypes.LatestSigner(evmtypes.GetEthChainConfig()) + if err := msg.FromSignedEthereumTx(ethTx, ethSigner); err != nil { + return fmt.Errorf("failed to convert ethereum transaction: %w", err) + } + + cosmosTx, err := msg.BuildTx(clientCtx.TxConfig.NewTxBuilder(), evmtypes.GetEVMCoinDenom()) + if err != nil { + return fmt.Errorf("failed to build cosmos tx: %w", err) + } -// StopTrackingTx stops a tx from being tracked for its tx inclusion metrics. -// This should only be used if a tx has not yet been included in the mempool, -// i.e. received an error from Insert. 
-func (m *ExperimentalEVMMempool) StopTrackingTx(hash common.Hash) { - m.txTracker.RemoveTx(hash) + txBytes, err := clientCtx.TxConfig.TxEncoder()(cosmosTx) + if err != nil { + return fmt.Errorf("failed to encode transaction: %w", err) + } + + res, err := clientCtx.BroadcastTxSync(txBytes) + if err != nil { + return fmt.Errorf("failed to broadcast transaction %s: %w", ethTx.Hash().Hex(), err) + } + if res.Code != 0 && res.Code != 19 && res.RawLog != "already known" { + return fmt.Errorf("transaction %s rejected by mempool: code=%d, log=%s", ethTx.Hash().Hex(), res.Code, res.RawLog) + } + return nil } diff --git a/mempool/mocks/VMKeeper.go b/mempool/mocks/VMKeeperI.go similarity index 74% rename from mempool/mocks/VMKeeper.go rename to mempool/mocks/VMKeeperI.go index c341f8946..389e662ad 100644 --- a/mempool/mocks/VMKeeper.go +++ b/mempool/mocks/VMKeeperI.go @@ -19,13 +19,13 @@ import ( vmtypes "github.com/cosmos/evm/x/vm/types" ) -// VMKeeper is an autogenerated mock type for the VMKeeperI type -type VMKeeper struct { +// VMKeeperI is an autogenerated mock type for the VMKeeperI type +type VMKeeperI struct { mock.Mock } // DeleteAccount provides a mock function with given fields: ctx, addr -func (_m *VMKeeper) DeleteAccount(ctx types.Context, addr common.Address) error { +func (_m *VMKeeperI) DeleteAccount(ctx types.Context, addr common.Address) error { ret := _m.Called(ctx, addr) if len(ret) == 0 { @@ -43,22 +43,22 @@ func (_m *VMKeeper) DeleteAccount(ctx types.Context, addr common.Address) error } // DeleteCode provides a mock function with given fields: ctx, codeHash -func (_m *VMKeeper) DeleteCode(ctx types.Context, codeHash []byte) { +func (_m *VMKeeperI) DeleteCode(ctx types.Context, codeHash []byte) { _m.Called(ctx, codeHash) } // DeleteState provides a mock function with given fields: ctx, addr, key -func (_m *VMKeeper) DeleteState(ctx types.Context, addr common.Address, key common.Hash) { +func (_m *VMKeeperI) DeleteState(ctx types.Context, addr 
common.Address, key common.Hash) { _m.Called(ctx, addr, key) } // ForEachStorage provides a mock function with given fields: ctx, addr, cb -func (_m *VMKeeper) ForEachStorage(ctx types.Context, addr common.Address, cb func(common.Hash, common.Hash) bool) { +func (_m *VMKeeperI) ForEachStorage(ctx types.Context, addr common.Address, cb func(common.Hash, common.Hash) bool) { _m.Called(ctx, addr, cb) } // GetAccount provides a mock function with given fields: ctx, addr -func (_m *VMKeeper) GetAccount(ctx types.Context, addr common.Address) *statedb.Account { +func (_m *VMKeeperI) GetAccount(ctx types.Context, addr common.Address) *statedb.Account { ret := _m.Called(ctx, addr) if len(ret) == 0 { @@ -78,7 +78,7 @@ func (_m *VMKeeper) GetAccount(ctx types.Context, addr common.Address) *statedb. } // GetBaseFee provides a mock function with given fields: ctx -func (_m *VMKeeper) GetBaseFee(ctx types.Context) *big.Int { +func (_m *VMKeeperI) GetBaseFee(ctx types.Context) *big.Int { ret := _m.Called(ctx) if len(ret) == 0 { @@ -98,7 +98,7 @@ func (_m *VMKeeper) GetBaseFee(ctx types.Context) *big.Int { } // GetCode provides a mock function with given fields: ctx, codeHash -func (_m *VMKeeper) GetCode(ctx types.Context, codeHash common.Hash) []byte { +func (_m *VMKeeperI) GetCode(ctx types.Context, codeHash common.Hash) []byte { ret := _m.Called(ctx, codeHash) if len(ret) == 0 { @@ -118,7 +118,7 @@ func (_m *VMKeeper) GetCode(ctx types.Context, codeHash common.Hash) []byte { } // GetCodeHash provides a mock function with given fields: ctx, addr -func (_m *VMKeeper) GetCodeHash(ctx types.Context, addr common.Address) common.Hash { +func (_m *VMKeeperI) GetCodeHash(ctx types.Context, addr common.Address) common.Hash { ret := _m.Called(ctx, addr) if len(ret) == 0 { @@ -138,7 +138,7 @@ func (_m *VMKeeper) GetCodeHash(ctx types.Context, addr common.Address) common.H } // GetEvmCoinInfo provides a mock function with given fields: ctx -func (_m *VMKeeper) GetEvmCoinInfo(ctx 
types.Context) vmtypes.EvmCoinInfo { +func (_m *VMKeeperI) GetEvmCoinInfo(ctx types.Context) vmtypes.EvmCoinInfo { ret := _m.Called(ctx) if len(ret) == 0 { @@ -156,7 +156,7 @@ func (_m *VMKeeper) GetEvmCoinInfo(ctx types.Context) vmtypes.EvmCoinInfo { } // GetParams provides a mock function with given fields: ctx -func (_m *VMKeeper) GetParams(ctx types.Context) vmtypes.Params { +func (_m *VMKeeperI) GetParams(ctx types.Context) vmtypes.Params { ret := _m.Called(ctx) if len(ret) == 0 { @@ -174,7 +174,7 @@ func (_m *VMKeeper) GetParams(ctx types.Context) vmtypes.Params { } // GetState provides a mock function with given fields: ctx, addr, key -func (_m *VMKeeper) GetState(ctx types.Context, addr common.Address, key common.Hash) common.Hash { +func (_m *VMKeeperI) GetState(ctx types.Context, addr common.Address, key common.Hash) common.Hash { ret := _m.Called(ctx, addr, key) if len(ret) == 0 { @@ -194,7 +194,7 @@ func (_m *VMKeeper) GetState(ctx types.Context, addr common.Address, key common. 
} // KVStoreKeys provides a mock function with no fields -func (_m *VMKeeper) KVStoreKeys() map[string]storetypes.StoreKey { +func (_m *VMKeeperI) KVStoreKeys() map[string]storetypes.StoreKey { ret := _m.Called() if len(ret) == 0 { @@ -214,7 +214,7 @@ func (_m *VMKeeper) KVStoreKeys() map[string]storetypes.StoreKey { } // SetAccount provides a mock function with given fields: ctx, addr, account -func (_m *VMKeeper) SetAccount(ctx types.Context, addr common.Address, account statedb.Account) error { +func (_m *VMKeeperI) SetAccount(ctx types.Context, addr common.Address, account statedb.Account) error { ret := _m.Called(ctx, addr, account) if len(ret) == 0 { @@ -232,28 +232,27 @@ func (_m *VMKeeper) SetAccount(ctx types.Context, addr common.Address, account s } // SetCode provides a mock function with given fields: ctx, codeHash, code -func (_m *VMKeeper) SetCode(ctx types.Context, codeHash []byte, code []byte) { +func (_m *VMKeeperI) SetCode(ctx types.Context, codeHash []byte, code []byte) { _m.Called(ctx, codeHash, code) } // SetEvmMempool provides a mock function with given fields: evmMempool -func (_m *VMKeeper) SetEvmMempool(evmMempool *mempool.ExperimentalEVMMempool) { +func (_m *VMKeeperI) SetEvmMempool(evmMempool mempool.NotifiedMempool) { _m.Called(evmMempool) } // SetState provides a mock function with given fields: ctx, addr, key, value -func (_m *VMKeeper) SetState(ctx types.Context, addr common.Address, key common.Hash, value []byte) { +func (_m *VMKeeperI) SetState(ctx types.Context, addr common.Address, key common.Hash, value []byte) { _m.Called(ctx, addr, key, value) } -// NewVMKeeper creates a new instance of VMKeeper. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// NewVMKeeperI creates a new instance of VMKeeperI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
-func NewVMKeeper(t interface { +func NewVMKeeperI(t interface { mock.TestingT Cleanup(func()) -}, -) *VMKeeper { - mock := &VMKeeper{} +}) *VMKeeperI { + mock := &VMKeeperI{} mock.Mock.Test(t) t.Cleanup(func() { mock.AssertExpectations(t) }) diff --git a/mempool/recheck_pool.go b/mempool/recheck_pool.go index 6616006d8..a40302ade 100644 --- a/mempool/recheck_pool.go +++ b/mempool/recheck_pool.go @@ -447,7 +447,7 @@ func (m *RecheckMempool) runRecheck(done chan struct{}, newHead *ethtypes.Header txsRemoved = len(removeTxs) } -// markTxRechecked adds a tx into the height synced cosmos tx store +// markTxRechecked adds a tx into the height synced cosmos tx store. func (m *RecheckMempool) markTxRechecked(txn sdk.Tx) { m.recheckedTxs.Do(func(store *CosmosTxStore) { store.AddTx(txn) }) } diff --git a/mempool/recheck_pool_test.go b/mempool/recheck_pool_test.go index 566a8b001..520a20c48 100644 --- a/mempool/recheck_pool_test.go +++ b/mempool/recheck_pool_test.go @@ -509,7 +509,7 @@ func TestRecheckMempool_ConcurrentTriggers(t *testing.T) { // Integration // ---------------------------------------------------------------------------- -func TestMempool_Recheck(t *testing.T) { +func TestKrakatoaMempool_Recheck(t *testing.T) { type accountTx struct { account int nonce uint64 @@ -630,12 +630,8 @@ func TestMempool_Recheck(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - storeKey := storetypes.NewKVStoreKey("test") - transientKey := storetypes.NewTransientStoreKey("transient_test") - ctx := testutil.DefaultContext(storeKey, transientKey) //nolint:staticcheck // false positive. 
- - s := setupMempoolWithAccounts(t, 3) - mp, txConfig, cosmosRechecker, accounts := s.mp, s.txConfig, s.cosmosRechecker, s.accounts + mp, s := setupKrakatoaMempoolWithAccounts(t, 3) + txConfig, cosmosRechecker, accounts := s.txConfig, s.cosmosRechecker, s.accounts getSignerAddr := func(accountIdx int) []byte { pubKeyBytes := crypto.CompressPubkey(&accounts[accountIdx].key.PublicKey) @@ -645,7 +641,7 @@ func TestMempool_Recheck(t *testing.T) { for _, tx := range tc.insertTxs { cosmosTx := createTestCosmosTx(t, txConfig, accounts[tx.account].key, tx.nonce) - require.NoError(t, mp.Insert(ctx, cosmosTx)) + require.NoError(t, mp.Insert(context.Background(), cosmosTx)) } require.Equal(t, len(tc.insertTxs), mp.CountTx(), "should have all txs inserted") diff --git a/mempool/tx_tracker.go b/mempool/tx_tracker.go index 7547675fc..18cd07302 100644 --- a/mempool/tx_tracker.go +++ b/mempool/tx_tracker.go @@ -148,12 +148,6 @@ func (txt *txTracker) RemoveTxFromPool(hash common.Hash, pool legacypool.PoolTyp return nil } -// RemoveTx removes a tx from the tx tracker and does not record any metrics as -// it exits the tracker. -func (txt *txTracker) RemoveTx(hash common.Hash) { - txt.removeTx(hash) -} - // removeTx removes a tx by hash. func (txt *txTracker) removeTx(hash common.Hash) { txt.lock.Lock() diff --git a/mempool/txpool/legacypool/legacypool.go b/mempool/txpool/legacypool/legacypool.go index d1dfcce42..996fbf923 100644 --- a/mempool/txpool/legacypool/legacypool.go +++ b/mempool/txpool/legacypool/legacypool.go @@ -643,12 +643,12 @@ func (pool *LegacyPool) ContentFrom(addr common.Address) ([]*types.Transaction, return pending, queued } -// Pending retrieves all currently processable transactions, grouped by origin +// Rechecked retrieves all currently rechecked transactions, grouped by origin // account and sorted by nonce. // // The transactions can also be pre-filtered by the dynamic fee components to // reduce allocations and load on downstream subsystems. 
-func (pool *LegacyPool) Pending(ctx context.Context, height *big.Int, filter txpool.PendingFilter) map[common.Address][]*txpool.LazyTransaction { +func (pool *LegacyPool) Rechecked(ctx context.Context, height *big.Int, filter txpool.PendingFilter) map[common.Address][]*txpool.LazyTransaction { txStore := pool.validPendingTxs.GetStore(ctx, height) if txStore == nil { return nil @@ -656,6 +656,71 @@ func (pool *LegacyPool) Pending(ctx context.Context, height *big.Int, filter txp return txStore.Txs(filter) } +// Pending retrieves all currently processable transactions, grouped by origin +// account and sorted by nonce. +// +// The transactions can also be pre-filtered by the dynamic fee components to +// reduce allocations and load on downstream subsystems. +func (pool *LegacyPool) Pending(ctx context.Context, filter txpool.PendingFilter) map[common.Address][]*txpool.LazyTransaction { + // If only blob transactions are requested, this pool is unsuitable as it + // contains none, don't even bother. + if filter.OnlyBlobTxs { + return nil + } + pool.mu.Lock() + defer pool.mu.Unlock() + + // Convert the new uint256.Int types to the old big.Int ones used by the + // legacy pool + var ( + minTip *big.Int + baseFee *big.Int + ) + if filter.MinTip != nil { + minTip = filter.MinTip.ToBig() + } + if filter.BaseFee != nil { + baseFee = filter.BaseFee.ToBig() + } + + pending := make(map[common.Address][]*txpool.LazyTransaction, len(pool.pending)) + for addr, list := range pool.pending { + if lazies := filterAndWrapTxs(list.Flatten(), minTip, baseFee); len(lazies) > 0 { + pending[addr] = lazies + } + } + return pending +} + +// filterAndWrapTxs applies tip filtering to txs and wraps the survivors into +// LazyTransactions. 
+func filterAndWrapTxs(txs []*types.Transaction, minTip, baseFee *big.Int) []*txpool.LazyTransaction { + if minTip != nil { + for i, tx := range txs { + if tx.EffectiveGasTipIntCmp(minTip, baseFee) < 0 { + txs = txs[:i] + break + } + } + } + if len(txs) == 0 { + return nil + } + lazies := make([]*txpool.LazyTransaction, len(txs)) + for i, tx := range txs { + lazies[i] = &txpool.LazyTransaction{ + Hash: tx.Hash(), + Tx: tx, + Time: tx.Time(), + GasFeeCap: uint256.MustFromBig(tx.GasFeeCap()), + GasTipCap: uint256.MustFromBig(tx.GasTipCap()), + Gas: tx.Gas(), + BlobGas: tx.BlobGas(), + } + } + return lazies +} + // ValidateTxBasics checks whether a transaction is valid according to the consensus // rules, but does not check state-dependent validation such as sufficient balance. // This check is meant as an early check which only needs to be performed once, diff --git a/mempool/txpool/legacypool/legacypool_test.go b/mempool/txpool/legacypool/legacypool_test.go index 08ed57e14..904ac1e6c 100644 --- a/mempool/txpool/legacypool/legacypool_test.go +++ b/mempool/txpool/legacypool/legacypool_test.go @@ -2934,7 +2934,7 @@ func TestResetCancellation(t *testing.T) { } } -func TestPendingFutureHeight(t *testing.T) { +func TestRecheckedFutureHeight(t *testing.T) { t.Parallel() pool, _, _ := setupPool() @@ -2943,7 +2943,7 @@ func TestPendingFutureHeight(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) defer cancel() - result := pool.Pending(ctx, big.NewInt(999), txpool.PendingFilter{}) + result := pool.Rechecked(ctx, big.NewInt(999), txpool.PendingFilter{}) if result != nil { t.Fatalf("expected nil from Pending when HeightSync times out, got %v", result) } diff --git a/mempool/txpool/legacypool/tx_store.go b/mempool/txpool/legacypool/tx_store.go index 0b58d01f0..0ef3e2e67 100644 --- a/mempool/txpool/legacypool/tx_store.go +++ b/mempool/txpool/legacypool/tx_store.go @@ -10,7 +10,6 @@ import ( "github.com/ethereum/go-ethereum/common" 
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/metrics" - "github.com/holiman/uint256" ) // txsCollected is the total amount of txs returned by Collect. @@ -68,30 +67,7 @@ func (t *TxStore) Txs(filter txpool.PendingFilter) map[common.Address][]*txpool. for addr, txs := range t.txs { sort.Sort(types.TxByNonce(txs)) - // Filter by minimum tip if configured - if minTipBig != nil { - for i, tx := range txs { - if tx.EffectiveGasTipIntCmp(minTipBig, baseFeeBig) < 0 { - txs = txs[:i] - break - } - } - } - - // Convert to lazy transactions - if len(txs) > 0 { - lazies := make([]*txpool.LazyTransaction, len(txs)) - for i, tx := range txs { - lazies[i] = &txpool.LazyTransaction{ - Hash: tx.Hash(), - Tx: tx, - Time: tx.Time(), - GasFeeCap: uint256.MustFromBig(tx.GasFeeCap()), - GasTipCap: uint256.MustFromBig(tx.GasTipCap()), - Gas: tx.Gas(), - BlobGas: tx.BlobGas(), - } - } + if lazies := filterAndWrapTxs(txs, minTipBig, baseFeeBig); len(lazies) > 0 { numSelected += len(lazies) pending[addr] = lazies } diff --git a/mempool/txpool/subpool.go b/mempool/txpool/subpool.go index f78bfcfbf..226221ab1 100644 --- a/mempool/txpool/subpool.go +++ b/mempool/txpool/subpool.go @@ -34,7 +34,6 @@ import ( // enough for the miner and other APIs to handle large batches of transactions; // and supports pulling up the entire transaction when really needed. type LazyTransaction struct { - Pool LazyResolver // Transaction resolver to pull the real transaction up Hash common.Hash // Transaction hash to pull up if needed Tx *types.Transaction // Transaction if already resolved @@ -46,21 +45,6 @@ type LazyTransaction struct { BlobGas uint64 // Amount of blob gas required by the transaction } -// Resolve retrieves the full transaction belonging to a lazy handle if it is still -// maintained by the transaction pool. -// -// Note, the method will *not* cache the retrieved transaction if the original -// pool has not cached it. 
The idea being, that if the tx was too big to insert -// originally, silently saving it will cause more trouble down the line (and -// indeed seems to have caused a memory bloat in the original implementation -// which did just that). -func (ltx *LazyTransaction) Resolve() *types.Transaction { - if ltx.Tx != nil { - return ltx.Tx - } - return ltx.Pool.Get(ltx.Hash) -} - // LazyResolver is a minimal interface needed for a transaction pool to satisfy // resolving lazy transactions. It's mostly a helper to avoid the entire sub- // pool being injected into the lazy transaction. @@ -159,12 +143,19 @@ type SubPool interface { // to a later point to batch multiple ones together. Add(txs []*types.Transaction, sync bool) []error - // Pending retrieves all currently processable transactions, grouped by origin + // Pending retrieves all currently pending transactions, grouped by origin + // account and sorted by nonce. + // + // The transactions can also be pre-filtered by the dynamic fee components to + // reduce allocations and load on downstream subsystems. + Pending(ctx context.Context, filter PendingFilter) map[common.Address][]*LazyTransaction + + // Rechecked retrieves all currently rechecked transactions, grouped by origin // account and sorted by nonce. // // The transactions can also be pre-filtered by the dynamic fee components to // reduce allocations and load on downstream subsystems. - Pending(ctx context.Context, height *big.Int, filter PendingFilter) map[common.Address][]*LazyTransaction + Rechecked(ctx context.Context, height *big.Int, filter PendingFilter) map[common.Address][]*LazyTransaction // SubscribeTransactions subscribes to new transaction events. 
The subscriber // can decide whether to receive notifications only for newly seen transactions diff --git a/mempool/txpool/txpool.go b/mempool/txpool/txpool.go index 641528d91..0e194bcd5 100644 --- a/mempool/txpool/txpool.go +++ b/mempool/txpool/txpool.go @@ -383,15 +383,30 @@ func (p *TxPool) Add(txs []*types.Transaction, sync bool) []error { return errs } -// Pending retrieves all currently processable transactions, grouped by origin +// Pending retrieves all currently pending transactions, grouped by origin // account and sorted by nonce. // // The transactions can also be pre-filtered by the dynamic fee components to // reduce allocations and load on downstream subsystems. -func (p *TxPool) Pending(ctx context.Context, height *big.Int, filter PendingFilter) map[common.Address][]*LazyTransaction { +func (p *TxPool) Pending(ctx context.Context, filter PendingFilter) map[common.Address][]*LazyTransaction { txs := make(map[common.Address][]*LazyTransaction) for _, subpool := range p.Subpools { - for addr, set := range subpool.Pending(ctx, height, filter) { + for addr, set := range subpool.Pending(ctx, filter) { + txs[addr] = set + } + } + return txs +} + +// Rechecked retrieves all currently rechecked transactions, grouped by origin +// account and sorted by nonce. +// +// The transactions can also be pre-filtered by the dynamic fee components to +// reduce allocations and load on downstream subsystems. +func (p *TxPool) Rechecked(ctx context.Context, height *big.Int, filter PendingFilter) map[common.Address][]*LazyTransaction { + txs := make(map[common.Address][]*LazyTransaction) + for _, subpool := range p.Subpools { + for addr, set := range subpool.Rechecked(ctx, height, filter) { txs[addr] = set } } @@ -505,6 +520,13 @@ func (p *TxPool) Sync() error { } } +// Reset synchronously resets each subpool at a given state. 
+func (p *TxPool) Reset(oldHead, newHead *types.Header) { + for _, subPool := range p.Subpools { + subPool.Reset(oldHead, newHead) + } +} + // Clear removes all tracked txs from the Subpools. // // Note, this method invokes Sync() and is only used for testing, because it is diff --git a/rpc/backend/backend.go b/rpc/backend/backend.go index ec47566fe..3d01b2f13 100644 --- a/rpc/backend/backend.go +++ b/rpc/backend/backend.go @@ -19,7 +19,7 @@ import ( tmrpcclient "github.com/cometbft/cometbft/rpc/client" tmrpctypes "github.com/cometbft/cometbft/rpc/core/types" - evmmempool "github.com/cosmos/evm/mempool" + "github.com/cosmos/evm/mempool/txpool" "github.com/cosmos/evm/rpc/types" "github.com/cosmos/evm/server/config" servertypes "github.com/cosmos/evm/server/types" @@ -31,6 +31,7 @@ import ( "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/cosmos/cosmos-sdk/server" sdk "github.com/cosmos/cosmos-sdk/types" + sdkmempool "github.com/cosmos/cosmos-sdk/types/mempool" ) // BackendI implements the Cosmos and EVM backend. @@ -138,6 +139,16 @@ type EVMBackend interface { TraceCall(ctx context.Context, args evmtypes.TransactionArgs, blockNrOrHash types.BlockNumberOrHash, config *types.TraceConfig) (interface{}, error) } +// TrackingMempool is a set of methods that a mempool may implement in order to +// track evm transaction lifecycle events. +type TrackingMempool interface { + // TrackTx is called when a tx should start to be tracked by the + // TrackingMempool. This is called on tx ingestion from the rpc backend. + // This is NOT called when txs are ingested over p2p, i.e local + // transactions only. + TrackTx(hash common.Hash) error +} + var ( _ BackendI = (*Backend)(nil) @@ -167,6 +178,14 @@ type ProcessBlocker func( targetOneFeeHistory *types.OneFeeHistory, ) error +// Mempool is a mempool that can be used for the rpc backend. +type Mempool interface { + sdkmempool.Mempool + + // GetTxPool returns the mempools underlying evm txpool. 
+ GetTxPool() *txpool.TxPool +} + // Backend implements the BackendI interface type Backend struct { ClientCtx client.Context @@ -179,19 +198,12 @@ type Backend struct { UseAppMempool bool Indexer servertypes.EVMTxIndexer ProcessBlocker ProcessBlocker - Application Application - Mempool *evmmempool.ExperimentalEVMMempool + Mempool Mempool } // Opt is a function type that configures the backend. type Opt func(*Backend) -// Application represents ABCI application itself (us). -// This is used for opaque ABCI calls instead of injecting BaseApp into Backend. -type Application interface { - GetContextForCheckTx(txBytes []byte) sdk.Context -} - // WithUnprotectedTxs sets whether to allow unprotected transactions. func WithUnprotectedTxs(value bool) Opt { return func(b *Backend) { b.AllowUnprotectedTxs = value } @@ -208,17 +220,12 @@ func WithLogger(logger log.Logger) Opt { return func(b *Backend) { b.Logger = logger.With("module", "backend") } } -// WithApplication sets the ABCI application for the backend. -func WithApplication(abciApp Application) Opt { - return func(b *Backend) { b.Application = abciApp } -} - // NewBackend creates a new Backend instance for cosmos and ethereum namespaces func NewBackend( ctx *server.Context, clientCtx client.Context, indexer servertypes.EVMTxIndexer, - mempool *evmmempool.ExperimentalEVMMempool, + mempool Mempool, opts ...Opt, ) *Backend { appConf, err := config.GetConfig(ctx.Viper) @@ -256,3 +263,17 @@ func NewBackend( func (b *Backend) GetConfig() config.Config { return b.Cfg } + +// TrackTxIfSupported calls TrackTx on the backends mempool if it is a +// supported method. 
+func (b *Backend) TrackTxIfSupported(txHash common.Hash) { + tm, ok := b.Mempool.(TrackingMempool) + if !ok { + return + } + + // track the tx for tx inclusion timing metrics of local txs + if err := tm.TrackTx(txHash); err != nil { + b.Logger.Error("error tracking tx inserted into mempool", "hash", txHash, "err", err) + } +} diff --git a/rpc/backend/call_tx.go b/rpc/backend/call_tx.go index 48f57e6c3..4385f3903 100644 --- a/rpc/backend/call_tx.go +++ b/rpc/backend/call_tx.go @@ -162,23 +162,19 @@ func (b *Backend) SendRawTransaction(ctx context.Context, data hexutil.Bytes) (r // publish tx directly to app-side mempool, avoiding broadcasting to // consensus layer. if b.UseAppMempool { - // track the tx for tx inclusion timing metrics of local txs - if err := b.Mempool.TrackTx(txHash); err != nil { - b.Logger.Error("error tracking inserted inserted into mempool", "hash", txHash, "err", err) - } // we are directly calling into the mempool rather than the ABCI // handler for InsertTx, since the ABCI handler obfuscates the error's // returned via codes, and we would like to have the full error to // return to clients. - err := b.Mempool.Insert(b.Application.GetContextForCheckTx(txBytes), cosmosTx) + err := b.Mempool.Insert(ctx, cosmosTx) if err != nil { // no need for special error handling like in the broadcast tx case // since this is coming directly from the evm mempool insert. - b.Mempool.StopTrackingTx(txHash) return common.Hash{}, err } + b.TrackTxIfSupported(txHash) return txHash, nil } diff --git a/rpc/backend/sign_tx.go b/rpc/backend/sign_tx.go index 1f0d26e2c..d6c7f25b0 100644 --- a/rpc/backend/sign_tx.go +++ b/rpc/backend/sign_tx.go @@ -119,16 +119,14 @@ func (b *Backend) SendTransaction(ctx context.Context, args evmtypes.Transaction // handler for InsertTx, since the ABCI handler obfuscates the error's // returned via codes, and we would like to have the full error to // return to clients.
- err := b.Mempool.Insert(b.Application.GetContextForCheckTx(txBytes), tx) + err := b.Mempool.Insert(ctx, tx) if err != nil { // no need for special error handling like in the broadcast tx case // since this is coming directly from the evm mempool insert. return common.Hash{}, err } - if err := b.Mempool.TrackTx(txHash); err != nil { - b.Logger.Error("error tracking inserted inserted into mempool", "hash", txHash, "err", err) - } + b.TrackTxIfSupported(txHash) return txHash, nil } diff --git a/server/config/config.go b/server/config/config.go index 1ce86abeb..cffb9f6b4 100644 --- a/server/config/config.go +++ b/server/config/config.go @@ -171,6 +171,12 @@ type MempoolConfig struct { GlobalQueue uint64 `mapstructure:"global-queue"` // Lifetime is the maximum amount of time non-executable transaction are queued Lifetime time.Duration `mapstructure:"lifetime"` + // OperateExclusively determines if the mempool will assume that it is + // running as the only mempool in the application (no CometBFT mempool). + // This enables the use of new Krakatoa CometBFT ABCI methods such as + // InsertTx and ReapTxs. This also enables use of the insert queues and + // partial tx collection.
+ OperateExclusively bool `mapstructure:"operate-exclusively"` // PendingTxProposalTimeout is the amount of time to spend waiting for // rechecking of the mempool to complete when creating a proposal PendingTxProposalTimeout time.Duration `mapstructure:"pending-tx-proposal-timeout"` @@ -189,6 +195,7 @@ func DefaultMempoolConfig() MempoolConfig { AccountQueue: 64, // 64 non-executable transaction slots per account GlobalQueue: 1024, // 1024 global non-executable slots Lifetime: 3 * time.Hour, // 3 hour lifetime for queued transactions + OperateExclusively: false, // Assume CometBFT also has a mempool by default PendingTxProposalTimeout: 250 * time.Millisecond, // 250 milliseconds to wait for rechecks InsertQueueSize: 5_000, // 5000 txs maximum in the insert queue } diff --git a/server/flags/flags.go b/server/flags/flags.go index dd5d6347a..2eead2c22 100644 --- a/server/flags/flags.go +++ b/server/flags/flags.go @@ -79,6 +79,7 @@ const ( EVMMempoolAccountQueue = "evm.mempool.account-queue" EVMMempoolGlobalQueue = "evm.mempool.global-queue" EVMMempoolLifetime = "evm.mempool.lifetime" + EVMMempoolOperateExclusively = "evm.mempool.operate-exclusively" EVMMempoolPendingTxProposalTimeout = "evm.mempool.pending-tx-proposal-timeout" EVMMempoolInsertQueueSize = "evm.mempool.insert-queue-size" ) diff --git a/server/json_rpc.go b/server/json_rpc.go index e960f8300..d2a43c4df 100644 --- a/server/json_rpc.go +++ b/server/json_rpc.go @@ -14,7 +14,6 @@ import ( rpcclient "github.com/cometbft/cometbft/rpc/client" - evmmempool "github.com/cosmos/evm/mempool" "github.com/cosmos/evm/rpc" "github.com/cosmos/evm/rpc/backend" "github.com/cosmos/evm/rpc/stream" @@ -23,14 +22,22 @@ import ( "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/server" - sdk "github.com/cosmos/cosmos-sdk/types" ) const shutdownTimeout = 200 * time.Millisecond type AppWithPendingTxStream interface { RegisterPendingTxListener(listener func(common.Hash)) - GetContextForCheckTx(txBytes []byte) 
sdk.Context +} + +// PossiblyExclusiveMempool is a Mempool that can also determine +// if it is operating exclusively or not. +type PossiblyExclusiveMempool interface { + backend.Mempool + + // IsExclusive returns true if the Mempool is the only mempool in + // the application. + IsExclusive() bool } // StartJSONRPC starts the JSON-RPC server @@ -42,7 +49,7 @@ func StartJSONRPC( config *serverconfig.Config, indexer types.EVMTxIndexer, app AppWithPendingTxStream, - mempool *evmmempool.ExperimentalEVMMempool, + mempool PossiblyExclusiveMempool, ) (*http.Server, error) { logger := srvCtx.Logger.With("module", "geth") @@ -61,7 +68,6 @@ func StartJSONRPC( mempool, backend.WithUnprotectedTxs(config.JSONRPC.AllowUnprotectedTxs), backend.WithAppMempool(mempool.IsExclusive()), - backend.WithApplication(app), backend.WithLogger(srvCtx.Logger), ) diff --git a/server/server_app_options.go b/server/server_app_options.go index 38cd2787b..a41a4d708 100644 --- a/server/server_app_options.go +++ b/server/server_app_options.go @@ -145,6 +145,15 @@ func GetLegacyPoolConfig(appOpts servertypes.AppOptions, logger log.Logger) *leg return &legacyConfig } +func GetShouldOperateExclusively(appOpts servertypes.AppOptions, logger log.Logger) bool { + if appOpts == nil { + logger.Error("app options is nil, assuming mempool is not operating exclusively") + return false + } + + return cast.ToBool(appOpts.Get(srvflags.EVMMempoolOperateExclusively)) +} + func GetPendingTxProposalTimeout(appOpts servertypes.AppOptions, logger log.Logger) time.Duration { if appOpts == nil { logger.Error("app options is nil, using pending tx proposal timeout of 0 (unlimited)") diff --git a/server/start.go b/server/start.go index a731e6417..cfbd5354b 100644 --- a/server/start.go +++ b/server/start.go @@ -231,6 +231,7 @@ which accepts a path for the resulting pprof file. 
cmd.Flags().Uint64(srvflags.EVMMempoolAccountQueue, cosmosevmserverconfig.DefaultMempoolConfig().AccountQueue, "the maximum number of non-executable transaction slots permitted per account") cmd.Flags().Uint64(srvflags.EVMMempoolGlobalQueue, cosmosevmserverconfig.DefaultMempoolConfig().GlobalQueue, "the maximum number of non-executable transaction slots for all accounts") cmd.Flags().Duration(srvflags.EVMMempoolLifetime, cosmosevmserverconfig.DefaultMempoolConfig().Lifetime, "the maximum amount of time non-executable transaction are queued") + cmd.Flags().Bool(srvflags.EVMMempoolOperateExclusively, cosmosevmserverconfig.DefaultMempoolConfig().OperateExclusively, "if this mempool is the only mempool in the application (CometBFT must be using the 'app' mempool if this mempool is operating exclusively)") cmd.Flags().Duration(srvflags.EVMMempoolPendingTxProposalTimeout, cosmosevmserverconfig.DefaultMempoolConfig().PendingTxProposalTimeout, "the maximum amount of time to spend waiting for rechecking of the mempool to complete when creating a proposal") cmd.Flags().Int(srvflags.EVMMempoolInsertQueueSize, cosmosevmserverconfig.DefaultMempoolConfig().InsertQueueSize, "the maximum number of transactions that can be in the insert queue at once") @@ -454,7 +455,10 @@ func startInProcess(svrCtx *server.Context, clientCtx client.Context, opts Start return err } - if m, ok := evmApp.GetMempool().(*evmmempool.ExperimentalEVMMempool); ok && m != nil { + type EventBusser interface { + SetEventBus(eventBus *cmttypes.EventBus) + } + if m, ok := evmApp.GetMempool().(EventBusser); ok && m != nil { m.SetEventBus(bftNode.EventBus()) } defer func() { @@ -553,9 +557,9 @@ func startInProcess(svrCtx *server.Context, clientCtx client.Context, opts Start if !ok { return fmt.Errorf("json-rpc server requires AppWithPendingTxStream") } - mp, ok := evmApp.GetMempool().(*evmmempool.ExperimentalEVMMempool) + mp, ok := evmApp.GetMempool().(PossiblyExclusiveMempool) if !ok { - return 
fmt.Errorf("json-rpc server requires ExperimentalEVMMempool") + return fmt.Errorf("json-rpc server requires PossiblyExclusiveMempool") } _, err = StartJSONRPC(ctx, svrCtx, clientCtx, g, &config, idxer, txApp, mp) diff --git a/tests/integration/mempool/test_helpers.go b/tests/integration/mempool/test_helpers.go index b39112dec..64840c260 100644 --- a/tests/integration/mempool/test_helpers.go +++ b/tests/integration/mempool/test_helpers.go @@ -109,6 +109,19 @@ func (s *IntegrationTestSuite) createEVMContractDeployTx(key keyring.Key, gasPri return tx } +// insertOrCheckTxs calls mempool Insert or abci CheckTx depending on the +// application's mempool type +func (s *IntegrationTestSuite) insertOrCheckTxs(txs []sdk.Tx) error { + switch mp := s.network.App.GetMempool().(type) { + case *evmmempool.KrakatoaMempool: + return s.insertTxs(txs) + case *evmmempool.ExperimentalEVMMempool: + return s.checkTxs(txs) + default: + return fmt.Errorf("unknown mempool type: %T", mp) + } +} + // checkTxs call abci CheckTx for multipile transactions func (s *IntegrationTestSuite) checkTxs(txs []sdk.Tx) error { for _, tx := range txs { @@ -143,6 +156,21 @@ func (s *IntegrationTestSuite) checkTx(tx sdk.Tx) (*abci.ResponseCheckTx, error) return res, nil } +// insertTxs call mempool Insert for multiple transactions +func (s *IntegrationTestSuite) insertTxs(txs []sdk.Tx) error { + for _, tx := range txs { + if err := s.insertTx(tx); err != nil { + return fmt.Errorf("failed to Insert for tx: %s", s.getTxHash(tx)) + } + } + return nil +} + +// insertTx call mempool Insert for a transaction +func (s *IntegrationTestSuite) insertTx(tx sdk.Tx) error { + return s.network.App.GetMempool().Insert(s.network.GetContext(), tx) +} + func (s *IntegrationTestSuite) getTxBytes(txs []sdk.Tx) ([][]byte, error) { txEncoder := s.network.App.GetTxConfig().TxEncoder() txBytes := make([][]byte, 0) diff --git a/tests/integration/mempool/test_mempool_integration.go
b/tests/integration/mempool/test_mempool_integration.go index f38a2ec9d..2531a450e 100644 --- a/tests/integration/mempool/test_mempool_integration.go +++ b/tests/integration/mempool/test_mempool_integration.go @@ -69,8 +69,7 @@ func (s *IntegrationTestSuite) TestMempoolInsert() { txBuilder := s.network.App.GetTxConfig().NewTxBuilder() return txBuilder.GetTx() }, - wantError: true, - errorContains: "tx contains no signers", + wantError: true, verifyFunc: func() { }, }, @@ -326,9 +325,10 @@ func (s *IntegrationTestSuite) TestMempoolSelect() { // where recheck happens after a new block notification). mpool := s.network.App.GetMempool() ctx := s.network.GetContext() - if evmMp, ok := mpool.(*evmmempool.ExperimentalEVMMempool); ok { - evmMp.GetBlockchain().NotifyNewBlock() - evmMp.RecheckCosmosTxs(ðtypes.Header{Number: big.NewInt(ctx.BlockHeight())}) + if kMp, ok := mpool.(*evmmempool.KrakatoaMempool); ok { + head := kMp.GetBlockchain().CurrentBlock() + kMp.RecheckEVMTxs(head) + kMp.RecheckCosmosTxs(head) } iterator := mpool.Select(ctx.WithBlockHeight(ctx.BlockHeight()+1), nil) tc.verifyFunc(iterator) @@ -451,9 +451,10 @@ func (s *IntegrationTestSuite) TestMempoolIterator() { // where recheck happens after a new block notification). mpool := s.network.App.GetMempool() ctx := s.network.GetContext() - if evmMp, ok := mpool.(*evmmempool.ExperimentalEVMMempool); ok { - evmMp.GetBlockchain().NotifyNewBlock() - evmMp.RecheckCosmosTxs(ðtypes.Header{Number: big.NewInt(ctx.BlockHeight())}) + if kMp, ok := mpool.(*evmmempool.KrakatoaMempool); ok { + head := kMp.GetBlockchain().CurrentBlock() + kMp.RecheckEVMTxs(head) + kMp.RecheckCosmosTxs(head) } iterator := mpool.Select(ctx.WithBlockHeight(ctx.BlockHeight()+1), nil) tc.verifyFunc(iterator) @@ -857,9 +858,10 @@ func (s *IntegrationTestSuite) TestTransactionOrdering() { // where recheck happens after a new block notification). 
mpool := s.network.App.GetMempool() ctx := s.network.GetContext() - if evmMp, ok := mpool.(*evmmempool.ExperimentalEVMMempool); ok { - evmMp.GetBlockchain().NotifyNewBlock() - evmMp.RecheckCosmosTxs(ðtypes.Header{Number: big.NewInt(ctx.BlockHeight())}) + if kMp, ok := mpool.(*evmmempool.KrakatoaMempool); ok { + head := kMp.GetBlockchain().CurrentBlock() + kMp.RecheckEVMTxs(head) + kMp.RecheckCosmosTxs(head) } iterator := mpool.Select(ctx.WithBlockHeight(ctx.BlockHeight()+1), nil) tc.verifyFunc(iterator) @@ -977,9 +979,10 @@ func (s *IntegrationTestSuite) TestSelectBy() { // cosmos txs are available via SelectBy (mirrors production flow // where recheck happens after a new block notification). ctx := s.network.GetContext() - if evmMp, ok := mpool.(*evmmempool.ExperimentalEVMMempool); ok { - evmMp.GetBlockchain().NotifyNewBlock() - evmMp.RecheckCosmosTxs(ðtypes.Header{Number: big.NewInt(ctx.BlockHeight())}) + if kMp, ok := mpool.(*evmmempool.KrakatoaMempool); ok { + head := kMp.GetBlockchain().CurrentBlock() + kMp.RecheckEVMTxs(head) + kMp.RecheckCosmosTxs(head) } // Track filter function calls to ensure we don't have infinite loops @@ -1063,7 +1066,12 @@ func (s *IntegrationTestSuite) TestEVMTransactionComprehensive() { wantError: false, verifyFunc: func() { mpool := s.network.App.GetMempool() - s.Require().Equal(0, mpool.CountTx()) + if s.IsExclusiveMempool() { + // exclusive mempool should validate that the gas is too low and drop it + s.Require().Equal(0, mpool.CountTx()) + } else { + s.Require().Equal(1, mpool.CountTx()) + } }, }, { diff --git a/tests/integration/mempool/test_mempool_integration_abci.go b/tests/integration/mempool/test_mempool_integration_abci.go index eeb32db78..ec2f1738b 100644 --- a/tests/integration/mempool/test_mempool_integration_abci.go +++ b/tests/integration/mempool/test_mempool_integration_abci.go @@ -5,7 +5,6 @@ import ( "math/big" "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/types" abci 
"github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/crypto/tmhash" @@ -176,16 +175,17 @@ func (s *IntegrationTestSuite) TestTransactionOrderingWithABCIMethodCalls() { txs, expTxHashes := tc.setupTxs() - // Call CheckTx for transactions - err := s.checkTxs(txs) + // Call CheckTx or InsertTx for transactions + err := s.insertOrCheckTxs(txs) s.Require().NoError(err) // Refresh the cached latestCtx and trigger cosmos recheck so // cosmos txs are available via Select/PrepareProposal. mpool := s.network.App.GetMempool() - if evmMp, ok := mpool.(*evmmempool.ExperimentalEVMMempool); ok { - evmMp.GetBlockchain().NotifyNewBlock() - evmMp.RecheckCosmosTxs(&types.Header{Number: big.NewInt(s.network.GetContext().BlockHeight())}) + if kMp, ok := mpool.(*evmmempool.KrakatoaMempool); ok { + head := kMp.GetBlockchain().CurrentBlock() + kMp.RecheckEVMTxs(head) + kMp.RecheckCosmosTxs(head) } // Call FinalizeBlock to make finalizeState before calling PrepareProposal @@ -202,9 +202,10 @@ func (s *IntegrationTestSuite) TestTransactionOrderingWithABCIMethodCalls() { // Check whether expected transactions are included and returned as pending state in mempool ctx := s.network.GetContext() - if evmMp, ok := mpool.(*evmmempool.ExperimentalEVMMempool); ok { - evmMp.GetBlockchain().NotifyNewBlock() - evmMp.RecheckCosmosTxs(&types.Header{Number: big.NewInt(ctx.BlockHeight())}) + if kMp, ok := mpool.(*evmmempool.KrakatoaMempool); ok { + head := kMp.GetBlockchain().CurrentBlock() + kMp.RecheckEVMTxs(head) + kMp.RecheckCosmosTxs(head) } iterator := mpool.Select(ctx.WithBlockHeight(ctx.BlockHeight()+1), nil) for _, txHash := range expTxHashes { @@ -396,16 +397,17 @@ func (s *IntegrationTestSuite) TestNonceGappedEVMTransactionsWithABCIMethodCalls txs, expTxHashes := tc.setupTxs() - // Call CheckTx for transactions - err := s.checkTxs(txs) + // Call CheckTx or InsertTx for transactions + err := s.insertOrCheckTxs(txs) s.Require().NoError(err) // Refresh the cached latestCtx and 
trigger cosmos recheck so // HeightSync is at the correct height for Select/PrepareProposal. mpool := s.network.App.GetMempool() - if evmMp, ok := mpool.(*evmmempool.ExperimentalEVMMempool); ok { - evmMp.GetBlockchain().NotifyNewBlock() - evmMp.RecheckCosmosTxs(&types.Header{Number: big.NewInt(s.network.GetContext().BlockHeight())}) + if kMp, ok := mpool.(*evmmempool.KrakatoaMempool); ok { + head := kMp.GetBlockchain().CurrentBlock() + kMp.RecheckEVMTxs(head) + kMp.RecheckCosmosTxs(head) } // Call FinalizeBlock to make finalizeState before calling PrepareProposal @@ -420,9 +422,10 @@ func (s *IntegrationTestSuite) TestNonceGappedEVMTransactionsWithABCIMethodCalls s.Require().NoError(err) ctx := s.network.GetContext() - if evmMp, ok := mpool.(*evmmempool.ExperimentalEVMMempool); ok { - evmMp.GetBlockchain().NotifyNewBlock() - evmMp.RecheckCosmosTxs(&types.Header{Number: big.NewInt(ctx.BlockHeight())}) + if kMp, ok := mpool.(*evmmempool.KrakatoaMempool); ok { + head := kMp.GetBlockchain().CurrentBlock() + kMp.RecheckEVMTxs(head) + kMp.RecheckCosmosTxs(head) } iterator := mpool.Select(ctx.WithBlockHeight(ctx.BlockHeight()+1), nil) @@ -450,6 +453,11 @@ func (s *IntegrationTestSuite) TestNonceGappedEVMTransactionsWithABCIMethodCalls // 1. Committed transactions are not in the mempool after block finalization // 2. 
New transactions with nonces lower than current nonce fail at mempool level func (s *IntegrationTestSuite) TestCheckTxHandlerForCommittedAndLowerNonceTxs() { + if s.IsExclusiveMempool() { + s.T().Log("mempool is exclusive and does not configure checktx, skipping 'TestCheckTxHandlerForCommittedAndLowerNonceTxs' test") + return + } + testCases := []struct { name string setupTxs func() []sdk.Tx @@ -518,8 +526,8 @@ func (s *IntegrationTestSuite) TestCheckTxHandlerForCommittedAndLowerNonceTxs() txs := tc.setupTxs() - // Call CheckTx for transactions - err := s.checkTxs(txs) + // Call CheckTx or InsertTx for transactions + err := s.insertOrCheckTxs(txs) s.Require().NoError(err) // Finalize block with txs and Commit state diff --git a/tests/integration/mempool/test_setup.go b/tests/integration/mempool/test_setup.go index d66bd1fac..22c2f38e3 100644 --- a/tests/integration/mempool/test_setup.go +++ b/tests/integration/mempool/test_setup.go @@ -49,8 +49,8 @@ func (s *IntegrationTestSuite) TearDownTest() { // Close the mempool to stop background goroutines before the next test // This prevents race conditions when global test state is reset in SetupTest if mp := s.network.App.GetMempool(); mp != nil { - if evmmp, ok := mp.(*evmmempool.ExperimentalEVMMempool); ok { - if err := evmmp.Close(); err != nil { + if closer, ok := mp.(interface{ Close() error }); ok { + if err := closer.Close(); err != nil { s.T().Logf("Warning: failed to close mempool: %v", err) } @@ -81,8 +81,11 @@ func (s *IntegrationTestSuite) SetupTestWithChainID(chainID testconstants.ChainI options = append(options, s.options...) nw := network.NewUnitTestNetwork(s.create, options...) 
+ s.network = nw + gh := grpc.NewIntegrationHandler(nw) tf := factory.New(nw, gh) + s.factory = tf // Advance to block 2+ where mempool is designed to operate // This ensures proper headers, StateDB, and fee market initialization @@ -95,19 +98,7 @@ func (s *IntegrationTestSuite) SetupTestWithChainID(chainID testconstants.ChainI // Directly call Reset on subpools to ensure synchronous completion // This prevents race conditions by waiting for the reset to complete // before continuing with test setup - mpool := nw.App.GetMempool() - if evmMempoolCast, ok := mpool.(*evmmempool.ExperimentalEVMMempool); ok { - blockchain := evmMempoolCast.GetBlockchain() - txPool := evmMempoolCast.GetTxPool() - - oldHead := blockchain.CurrentBlock() - blockchain.NotifyNewBlock() - newHead := blockchain.CurrentBlock() - - for _, subpool := range txPool.Subpools { - subpool.Reset(oldHead, newHead) - } - } + s.TrySetupMempool() // Ensure mempool is in ready state by verifying block height s.Require().Equal(int64(3), nw.GetContext().BlockHeight()) @@ -122,9 +113,60 @@ func (s *IntegrationTestSuite) SetupTestWithChainID(chainID testconstants.ChainI // Enforces deterministic mempool state for tests evmmempool.AllowUnsafeSyncInsert = true +} - s.network = nw - s.factory = tf +// TrySetupMempool sets up the ExperimentalEVMMempool or ExclusiveMempool, if +// one of those is the configured mempool on the suite. +func (s *IntegrationTestSuite) TrySetupMempool() { + s.TrySetupExclusiveMempool() + s.TrySetupExperimentalMempool() +} + +// TrySetupExclusiveMempool sets up the ExclusiveMempool, if it is the configured +// mempool on the suite.
+func (s *IntegrationTestSuite) TrySetupExclusiveMempool() { + mp, ok := s.network.App.GetMempool().(*evmmempool.KrakatoaMempool) + if !ok { + return + } + + blockchain := mp.GetBlockchain() + txPool := mp.GetTxPool() + + oldHead := blockchain.CurrentBlock() + blockchain.NotifyNewBlock() + newHead := blockchain.CurrentBlock() + + mp.RecheckCosmosTxs(newHead) + for _, subpool := range txPool.Subpools { + subpool.Reset(oldHead, newHead) + } +} + +// TrySetupExperimentalMempool sets up the ExperimentalEVMMempool, if it is +// the configured mempool on the suite. +func (s *IntegrationTestSuite) TrySetupExperimentalMempool() { + mp, ok := s.network.App.GetMempool().(*evmmempool.ExperimentalEVMMempool) + if !ok { + return + } + + blockchain := mp.GetBlockchain() + txPool := mp.GetTxPool() + + oldHead := blockchain.CurrentBlock() + blockchain.NotifyNewBlock() + newHead := blockchain.CurrentBlock() + + for _, subpool := range txPool.Subpools { + subpool.Reset(oldHead, newHead) + } +} + +// IsExclusiveMempool returns true if the app mempool is the exclusive mempool +func (s *IntegrationTestSuite) IsExclusiveMempool() bool { + _, ok := s.network.App.GetMempool().(*evmmempool.KrakatoaMempool) + return ok } // FundAccount funds an account with a specific amount of a given denomination. diff --git a/tests/systemtests/main_test.go b/tests/systemtests/main_test.go index 94310f10c..047c497e1 100644 --- a/tests/systemtests/main_test.go +++ b/tests/systemtests/main_test.go @@ -43,13 +43,44 @@ func TestMempoolTxBroadcasting(t *testing.T) { } func TestMinimumGasPricesZero(t *testing.T) { - suite.RunWithSharedSuite(t, mempool.RunMinimumGasPricesZero) + suite.RunWithSharedSuite(t, mempool.RunMinimumGasPricesZero, suite.MinimumGasPriceZeroArgs()...)
} func TestMempoolCosmosTxsCompatibility(t *testing.T) { suite.RunWithSharedSuite(t, mempool.RunCosmosTxsCompatibility) } +/* +* Exclusive Mempool Tests + */ +func TestExclusiveMempoolTxsOrdering(t *testing.T) { + suite.RunWithSharedSuite(t, mempool.RunTxsOrdering, suite.ExlcusiveMempoolArgs()...) +} + +func TestExclusiveMempoolTxsReplacement(t *testing.T) { + suite.RunWithSharedSuite(t, mempool.RunTxsReplacement, suite.ExlcusiveMempoolArgs()...) +} + +func TestExclusiveMempoolTxsReplacementWithCosmosTx(t *testing.T) { + suite.RunWithSharedSuite(t, mempool.RunTxsReplacementWithCosmosTx, suite.ExlcusiveMempoolArgs()...) +} + +func TestExclusiveMempoolMixedTxsReplacementLegacyAndDynamicFee(t *testing.T) { + suite.RunWithSharedSuite(t, mempool.RunMixedTxsReplacementLegacyAndDynamicFee, suite.ExlcusiveMempoolMinGasPriceZeroArgs()...) +} + +func TestExclusiveMempoolTxBroadcasting(t *testing.T) { + suite.RunWithSharedSuite(t, mempool.RunTxBroadcasting, suite.ExlcusiveMempoolArgs()...) +} + +func TestExclusiveMempoolMinimumGasPricesZero(t *testing.T) { + suite.RunWithSharedSuite(t, mempool.RunMinimumGasPricesZero, suite.ExlcusiveMempoolArgs()...) +} + +func TestExclusiveMempoolCosmosTxsCompatibility(t *testing.T) { + suite.RunWithSharedSuite(t, mempool.RunCosmosTxsCompatibility, suite.ExlcusiveMempoolArgs()...) 
+} + // /* // * EIP-712 Tests // */ diff --git a/tests/systemtests/mempool/test_broadcast.go b/tests/systemtests/mempool/test_broadcast.go index dae330ff2..fa0c37012 100644 --- a/tests/systemtests/mempool/test_broadcast.go +++ b/tests/systemtests/mempool/test_broadcast.go @@ -272,7 +272,7 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { // Users must receive error feedback for duplicate submissions _, err = s.SendTx(t, s.Node(0), signer.ID, 0, s.GasPriceMultiplier(10), nil) require.Error(t, err, "duplicate tx via JSON-RPC must return error") - require.Contains(t, err.Error(), "already known", "error should indicate transaction is already known") + require.Contains(t, err.Error(), "already", "error should indicate transaction is already known") t.Logf("Duplicate transaction correctly rejected with 'already known' error") @@ -342,7 +342,7 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { // the RPC layer should still detect and reject the duplicate _, err = s.SendTx(t, s.Node(1), signer.ID, 0, s.GasPriceMultiplier(10), nil) require.Error(t, err, "duplicate tx via JSON-RPC should return error even after gossip") - require.Contains(t, err.Error(), "already known", "error should indicate transaction is already known") + require.Contains(t, err.Error(), "already", "error should indicate transaction is already known") t.Logf("JSON-RPC correctly rejects duplicate that node already has from gossip") @@ -368,15 +368,10 @@ func RunTxBroadcasting(t *testing.T, base *suite.BaseTestSuite) { s := NewTestSuite(base) // First, setup the chain with default configuration - s.SetupTest(t) - - // Configure the chain for broadcast testing: - // 1. Set mempool type to "app" so CometBFT uses AppMempool/AppReactor for gossip. - // Without this, the default "flood" reactor doesn't gossip txs that bypass - // CometBFT's CListMempool (which happens when UseAppMempool=true). - // 2. 
Slow down block production to give time to verify gossip before blocks commit. - s.ModifyCometMempool(t, "app") - s.ModifyConsensusTimeout(t, "5s") + // + // Slow down block production to give time to verify gossip before blocks + // commit. + s.SetupTestWithTimeoutCommit(t, 5*time.Second) for _, to := range testOptions { s.SetOptions(to) diff --git a/tests/systemtests/mempool/test_exceptions.go b/tests/systemtests/mempool/test_exceptions.go index 08bc5ff56..1198cff48 100644 --- a/tests/systemtests/mempool/test_exceptions.go +++ b/tests/systemtests/mempool/test_exceptions.go @@ -50,7 +50,7 @@ func RunMinimumGasPricesZero(t *testing.T, base *suite.BaseTestSuite) { } s := NewTestSuite(base) - s.SetupTest(t, suite.MinimumGasPriceZeroArgs()...) + s.SetupTest(t) for _, to := range testOptions { s.SetOptions(to) diff --git a/tests/systemtests/mempool/test_suite.go b/tests/systemtests/mempool/test_suite.go index ef8a4be69..fd58840a3 100644 --- a/tests/systemtests/mempool/test_suite.go +++ b/tests/systemtests/mempool/test_suite.go @@ -3,14 +3,9 @@ package mempool import ( - "fmt" - "os" - "path/filepath" "testing" "time" - "github.com/creachadair/tomledit" - "github.com/creachadair/tomledit/parser" "github.com/stretchr/testify/require" "github.com/cosmos/evm/tests/systemtests/suite" @@ -27,8 +22,12 @@ func NewTestSuite(base *suite.BaseTestSuite) *TestSuite { return &TestSuite{BaseTestSuite: base} } -func (s *TestSuite) SetupTest(t *testing.T, nodeStartArgs ...string) { - s.BaseTestSuite.SetupTest(t, nodeStartArgs...) +func (s *TestSuite) SetupTest(t *testing.T) { + s.BaseTestSuite.SetupTest(t) +} + +func (s *TestSuite) SetupTestWithTimeoutCommit(t *testing.T, tc time.Duration) { + s.BaseTestSuite.SetupTest(t, suite.WithTimeoutCommit(tc)) } // GetCurrentBlockHeight returns the current block height from the specified node @@ -106,92 +105,3 @@ func (c *TestContext) PromoteExpTxs(count int) { c.ExpPending = append(c.ExpPending, promoted...) 
c.ExpQueued = c.ExpQueued[count:] } - -// ModifyCometMempool modifies the mempool type in the config.toml -// for all nodes and restarts the chain with the new configuration. -func (s *TestSuite) ModifyCometMempool(t *testing.T, mempoolType string) { - t.Helper() - - // Stop the chain if running - if s.ChainStarted { - s.ResetChain(t) - } - - // Modify config.toml for each node - for i := 0; i < s.NodesCount(); i++ { - nodeDir := s.NodeDir(i) - configPath := filepath.Join(nodeDir, "config", "config.toml") - - err := editToml(configPath, func(doc *tomledit.Document) { - setValue(doc, mempoolType, "mempool", "type") - }) - require.NoError(t, err, "failed to modify config.toml for node %d", i) - } - - // Restart the chain with modified config - s.StartChain(t, suite.DefaultNodeArgs()...) - s.AwaitNBlocks(t, 2) -} - -// ModifyConsensusTimeout modifies the consensus timeout_commit in the config.toml -// for all nodes and restarts the chain with the new configuration. -func (s *TestSuite) ModifyConsensusTimeout(t *testing.T, timeout string) { - t.Helper() - - // Stop the chain if running - if s.ChainStarted { - s.ResetChain(t) - } - - // Modify config.toml for each node - for i := 0; i < s.NodesCount(); i++ { - nodeDir := s.NodeDir(i) - configPath := filepath.Join(nodeDir, "config", "config.toml") - - err := editToml(configPath, func(doc *tomledit.Document) { - setValue(doc, timeout, "consensus", "timeout_commit") - }) - require.NoError(t, err, "failed to modify config.toml for node %d", i) - } - - // Restart the chain with modified config - s.StartChain(t, suite.DefaultNodeArgs()...) 
- s.AwaitNBlocks(t, 2) -} - -// editToml is a helper to edit TOML files -func editToml(filename string, f func(doc *tomledit.Document)) error { - tomlFile, err := os.OpenFile(filename, os.O_RDWR, 0o600) - if err != nil { - return fmt.Errorf("failed to open file: %w", err) - } - defer tomlFile.Close() - - doc, err := tomledit.Parse(tomlFile) - if err != nil { - return fmt.Errorf("failed to parse toml: %w", err) - } - - f(doc) - - if _, err := tomlFile.Seek(0, 0); err != nil { - return fmt.Errorf("failed to seek: %w", err) - } - if err := tomlFile.Truncate(0); err != nil { - return fmt.Errorf("failed to truncate: %w", err) - } - if err := tomledit.Format(tomlFile, doc); err != nil { - return fmt.Errorf("failed to format: %w", err) - } - - return nil -} - -// setValue sets a value in a TOML document -func setValue(doc *tomledit.Document, newVal string, xpath ...string) { - e := doc.First(xpath...) - if e == nil { - panic(fmt.Sprintf("not found: %v", xpath)) - } - e.Value = parser.MustValue(fmt.Sprintf("%q", newVal)) -} diff --git a/tests/systemtests/suite/test_suite.go b/tests/systemtests/suite/test_suite.go index 1ded0af8c..59bb2c7bc 100644 --- a/tests/systemtests/suite/test_suite.go +++ b/tests/systemtests/suite/test_suite.go @@ -4,10 +4,16 @@ import ( "crypto/ecdsa" "fmt" "math/big" + "os" + "path/filepath" "slices" + "strings" "sync" "testing" + "time" + "github.com/creachadair/tomledit" + "github.com/creachadair/tomledit/parser" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" @@ -35,11 +41,15 @@ type BaseTestSuite struct { accountsByID map[string]*TestAccount // Chain management - chainMu sync.Mutex - currentNodeArgs []string + chainMu sync.Mutex + currentNodeArgs []string + currentNodeConfig TestSetupConfig // Most recently retrieved base fee baseFee *big.Int + + // Extra node start args on top of default + nodeStartArgs []string } func NewBaseTestSuite(t *testing.T) *BaseTestSuite { @@ -99,9 +109,15 @@ func GetSharedSuite(t 
*testing.T) *BaseTestSuite { } // RunWithSharedSuite retrieves the shared suite instance and executes the provided test function. -func RunWithSharedSuite(t *testing.T, fn func(*testing.T, *BaseTestSuite)) { +func RunWithSharedSuite(t *testing.T, fn func(*testing.T, *BaseTestSuite), nodeStartArgs ...string) { t.Helper() - fn(t, GetSharedSuite(t)) + suite := GetSharedSuite(t) + suite.SetNodeStartArgs(nodeStartArgs...) + fn(t, suite) +} + +func (suite *BaseTestSuite) SetNodeStartArgs(nodeStartArgs ...string) { + suite.nodeStartArgs = nodeStartArgs } // TestAccount aggregates account metadata usable across both Ethereum and Cosmos flows. @@ -118,12 +134,33 @@ type TestAccount struct { Cosmos *clients.CosmosAccount } +type TestSetupConfig struct { + timeoutCommit time.Duration +} + +func (tc TestSetupConfig) Equals(other TestSetupConfig) bool { + return tc.timeoutCommit == other.timeoutCommit +} + +type TestSetupConfigOption func(*TestSetupConfig) + +func WithTimeoutCommit(tc time.Duration) TestSetupConfigOption { + return func(tsc *TestSetupConfig) { + tsc.timeoutCommit = tc + } +} + // SetupTest initializes the test suite by resetting and starting the chain, then awaiting 2 blocks -func (s *BaseTestSuite) SetupTest(t *testing.T, nodeStartArgs ...string) { +func (s *BaseTestSuite) SetupTest(t *testing.T, opts ...TestSetupConfigOption) { t.Helper() - if len(nodeStartArgs) == 0 { - nodeStartArgs = DefaultNodeArgs() + var cfg TestSetupConfig + for _, opt := range opts { + opt(&cfg) + } + + if len(s.nodeStartArgs) == 0 { + s.nodeStartArgs = DefaultNodeArgs() } s.LockChain() @@ -131,9 +168,10 @@ func (s *BaseTestSuite) SetupTest(t *testing.T, nodeStartArgs ...string) { if !s.ChainStarted { s.currentNodeArgs = nil + s.currentNodeConfig = TestSetupConfig{} } - if s.ChainStarted && slices.Equal(nodeStartArgs, s.currentNodeArgs) { + if s.ChainStarted && slices.Equal(s.nodeStartArgs, s.currentNodeArgs) && s.currentNodeConfig == cfg { // Chain already running with desired 
configuration; nothing to do. return } @@ -142,11 +180,30 @@ func (s *BaseTestSuite) SetupTest(t *testing.T, nodeStartArgs ...string) { s.ResetChain(t) } - s.StartChain(t, nodeStartArgs...) - s.currentNodeArgs = append([]string(nil), nodeStartArgs...) + if s.IsExclusiveMempool() { + s.ModifyCometMempool(t, "app") + } else { + // if not set, default to flood mempool + s.ModifyCometMempool(t, "flood") + } + + if cfg.timeoutCommit > time.Duration(0) { + s.ModifyConsensusTimeout(t, cfg.timeoutCommit.String()) + } else { + // if not set, default to 2s + s.ModifyConsensusTimeout(t, time.Duration(2*time.Second).String()) + } + + s.StartChain(t, s.nodeStartArgs...) + s.currentNodeConfig = cfg s.AwaitNBlocks(t, 2) } +// IsExclusiveMempool returns true if the node was started with the operate-exclusively flag +func (s *BaseTestSuite) IsExclusiveMempool() bool { + return strings.Contains(strings.Join(s.nodeStartArgs, " "), "operate-exclusively") +} + // LockChain acquires exclusive control over the underlying chain lifecycle. func (s *BaseTestSuite) LockChain() { s.chainMu.Lock() @@ -156,3 +213,73 @@ func (s *BaseTestSuite) LockChain() { func (s *BaseTestSuite) UnlockChain() { s.chainMu.Unlock() } + +// ModifyCometMempool modifies the mempool type in the config.toml +func (s *BaseTestSuite) ModifyCometMempool(t *testing.T, mempoolType string) { + t.Helper() + + // Modify config.toml for each node + for i := 0; i < s.NodesCount(); i++ { + nodeDir := s.NodeDir(i) + configPath := filepath.Join(nodeDir, "config", "config.toml") + + err := editToml(configPath, func(doc *tomledit.Document) { + setValue(doc, mempoolType, "mempool", "type") + }) + require.NoError(t, err, "failed to modify config.toml for node %d", i) + } +} + +// ModifyConsensusTimeout modifies the consensus timeout_commit in the config.toml +// for all nodes and restarts the chain with the new configuration. 
+func (s *BaseTestSuite) ModifyConsensusTimeout(t *testing.T, timeout string) { + t.Helper() + + // Modify config.toml for each node + for i := 0; i < s.NodesCount(); i++ { + nodeDir := s.NodeDir(i) + configPath := filepath.Join(nodeDir, "config", "config.toml") + + err := editToml(configPath, func(doc *tomledit.Document) { + setValue(doc, timeout, "consensus", "timeout_commit") + }) + require.NoError(t, err, "failed to modify config.toml for node %d", i) + } +} + +// editToml is a helper to edit TOML files +func editToml(filename string, f func(doc *tomledit.Document)) error { + tomlFile, err := os.OpenFile(filename, os.O_RDWR, 0o600) + if err != nil { + return fmt.Errorf("failed to open file: %w", err) + } + defer tomlFile.Close() + + doc, err := tomledit.Parse(tomlFile) + if err != nil { + return fmt.Errorf("failed to parse toml: %w", err) + } + + f(doc) + + if _, err := tomlFile.Seek(0, 0); err != nil { + return fmt.Errorf("failed to seek: %w", err) + } + if err := tomlFile.Truncate(0); err != nil { + return fmt.Errorf("failed to truncate: %w", err) + } + if err := tomledit.Format(tomlFile, doc); err != nil { + return fmt.Errorf("failed to format: %w", err) + } + + return nil +} + +// setValue sets a value in a TOML document +func setValue(doc *tomledit.Document, newVal string, xpath ...string) { + e := doc.First(xpath...) 
+ if e == nil { + panic(fmt.Sprintf("not found: %v", xpath)) + } + e.Value = parser.MustValue(fmt.Sprintf("%q", newVal)) +} diff --git a/tests/systemtests/suite/types.go b/tests/systemtests/suite/types.go index 6ddcbabf1..66f5a8be8 100644 --- a/tests/systemtests/suite/types.go +++ b/tests/systemtests/suite/types.go @@ -11,6 +11,10 @@ const ( NodeArgsJsonrpcAllowUnprotectedTxs = "--json-rpc.allow-unprotected-txs=true" NodeArgsMinimumGasPrice = "--minimum-gas-prices=0.000001atest" NodeArgsMaxTxs = "--mempool.max-txs=0" + + NodeArgOperateExclusively = "--evm.mempool.operate-exclusively=true" + NodeArgPendingTxProposalTimeout = "--evm.mempool.pending-tx-proposal-timeout=200ms" + NodeArgInsertQueueSize = "--evm.mempool.insert-queue-size=1000" ) // TestOptions defines the options for a test case. @@ -62,3 +66,19 @@ func MinimumGasPriceZeroArgs() []string { // Add the zero minimum gas price argument return append(DefaultNodeArgs(), "--minimum-gas-prices=0atest") } + +// ExlcusiveMempoolArgs returns the node arguments to run with an exclusive app +// mempool. +func ExlcusiveMempoolArgs() []string { + return append(DefaultNodeArgs(), + NodeArgOperateExclusively, + NodeArgInsertQueueSize, + NodeArgPendingTxProposalTimeout, + ) +} + +// ExlcusiveMempoolMinGasPriceZeroArgs returns the node arguments to run with +// an exclusive app mempool and no min gas price. +func ExlcusiveMempoolMinGasPriceZeroArgs() []string { + return append(ExlcusiveMempoolArgs(), "--minimum-gas-prices=0atest") +} diff --git a/testutil/app/adapter.go b/testutil/app/adapter.go index 8ee85bc69..a0fa90a83 100644 --- a/testutil/app/adapter.go +++ b/testutil/app/adapter.go @@ -38,9 +38,9 @@ func NewEvmAppAdapter(app evm.TestApp) *EvmAppAdapter { // ToEvmAppCreator validates that the provided factory returns an app // implementing the desired interface T and then wraps it behind the keeper // adapter so downstream helpers can keep using evm.EvmApp.
-func ToEvmAppCreator[T any](create func(string, uint64, ...func(*baseapp.BaseApp)) evm.EvmApp, ifaceName string) func(string, uint64, ...func(*baseapp.BaseApp)) evm.EvmApp { - return func(chainID string, evmChainID uint64, customBaseAppOptions ...func(*baseapp.BaseApp)) evm.EvmApp { - app := create(chainID, evmChainID, customBaseAppOptions...) +func ToEvmAppCreator[T any](create func(string, uint64, bool, ...func(*baseapp.BaseApp)) evm.EvmApp, ifaceName string) func(string, uint64, bool, ...func(*baseapp.BaseApp)) evm.EvmApp { + return func(chainID string, evmChainID uint64, exclusiveMempool bool, customBaseAppOptions ...func(*baseapp.BaseApp)) evm.EvmApp { + app := create(chainID, evmChainID, exclusiveMempool, customBaseAppOptions...) if _, ok := app.(T); !ok { panic(fmt.Sprintf("CreateEvmApp must implement %s", ifaceName)) } diff --git a/testutil/integration/evm/network/config.go b/testutil/integration/evm/network/config.go index c49cee125..6655e8cdd 100644 --- a/testutil/integration/evm/network/config.go +++ b/testutil/integration/evm/network/config.go @@ -45,6 +45,8 @@ type Config struct { otherCoinDenoms []string preFundedAccounts []sdktypes.AccAddress balances []banktypes.Balance + + exclusiveMempool bool } type CustomGenesisState map[string]interface{} @@ -213,3 +215,11 @@ func WithConsensusParams(params *cmtproto.ConsensusParams) ConfigOption { } } } + +// WithExclusiveMempool informs the app that it is running as the only mempool +// in the application and it must manage rechecks.
+func WithExclusiveMempool() ConfigOption { + return func(cfg *Config) { + cfg.exclusiveMempool = true + } +} diff --git a/testutil/integration/evm/network/network.go b/testutil/integration/evm/network/network.go index 907af9894..b68d78b2f 100644 --- a/testutil/integration/evm/network/network.go +++ b/testutil/integration/evm/network/network.go @@ -53,7 +53,7 @@ type Network interface { GetMintClient() minttypes.QueryClient } -type CreateEvmApp func(chainID string, evmChainID uint64, customBaseAppOptions ...func(*baseapp.BaseApp)) evm.EvmApp +type CreateEvmApp func(chainID string, evmChainID uint64, exclusiveMempool bool, customBaseAppOptions ...func(*baseapp.BaseApp)) evm.EvmApp var _ Network = (*IntegrationNetwork)(nil) @@ -92,7 +92,7 @@ func New(createEvmApp CreateEvmApp, opts ...ConfigOption) *IntegrationNetwork { } // create a new testing app with the following params - evmApp := createEvmApp(cfg.chainID, cfg.eip155ChainID.Uint64(), cfg.customBaseAppOpts...) + evmApp := createEvmApp(cfg.chainID, cfg.eip155ChainID.Uint64(), cfg.exclusiveMempool, cfg.customBaseAppOpts...) err := network.configureAndInitChain(evmApp) if err != nil { panic(err) diff --git a/x/vm/keeper/keeper.go b/x/vm/keeper/keeper.go index 69e137cf8..efefe61e4 100644 --- a/x/vm/keeper/keeper.go +++ b/x/vm/keeper/keeper.go @@ -86,7 +86,7 @@ type Keeper struct { // evmMempool is the custom EVM appside mempool // if it is nil, the default comet mempool will be used - evmMempool *evmmempool.ExperimentalEVMMempool + evmMempool evmmempool.NotifiedMempool // virtualFeeCollection enabling will use "Virtual" methods from the bank module to accumulate // fees to the fee collector module in the endBlocker instead of using regular sends during tx execution. 
@@ -440,16 +440,12 @@ func (k Keeper) KVStoreKeys() map[string]storetypes.StoreKey { return k.storeKeys } -// SetEvmMempool sets the evm mempool -func (k *Keeper) SetEvmMempool(evmMempool *evmmempool.ExperimentalEVMMempool) { +// SetEvmMempool sets the mempool that is notified of new blocks via the +// EndBlocker. +func (k *Keeper) SetEvmMempool(evmMempool evmmempool.NotifiedMempool) { k.evmMempool = evmMempool } -// GetEvmMempool returns the evm mempool -func (k Keeper) GetEvmMempool() *evmmempool.ExperimentalEVMMempool { - return k.evmMempool -} - // SetHeaderHash sets current block hash into EIP-2935 compatible storage contract. func (k Keeper) SetHeaderHash(ctx sdk.Context) { ctx, span := ctx.StartSpan(tracer, "SetHeaderHash", trace.WithAttributes(