diff --git a/.gitignore b/.gitignore
index 7f200c8be..9bef8967a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -68,6 +68,8 @@ dist/
build/
# --- allow SDK docs build/ dirs (override rule above) ---
+!sdk/next/build/
+!sdk/next/build/**
!sdk/v0.53/build/
!sdk/v0.53/build/**
!sdk/v0.50/build/
diff --git a/CLAUDE.md b/CLAUDE.md
index ed1aa3c92..beda6dd76 100644
--- a/CLAUDE.md
+++ b/CLAUDE.md
@@ -112,4 +112,72 @@ When updating documentation:
- Navigation structure must be updated in `docs.json` when adding new pages
- Interactive RPC documentation is generated from the source `methods.mdx` file
- Test findings in `tests/README.md` track documentation accuracy against implementation
-- Use relative imports for snippets and components (e.g., `/snippets/icons.mdx`)
\ No newline at end of file
+- Use relative imports for snippets and components (e.g., `/snippets/icons.mdx`)
+
+## Documentation Style Guide
+
+### Writing Standards
+
+When writing or updating documentation, follow these style guidelines:
+
+- No bold text
+- No italics
+- No first person pronouns (I, we, us, our)
+- Be concise and direct
+- Write naturally, as a human would
+- No horizontal line breaks (---)
+- No emojis
+- Use paragraphs where appropriate for readability
+- Use unordered lists only where they improve clarity
+- Use ordered lists only for sequential steps or ranked items
+- Be clear, concise, and coherent
+
+### Mintlify Syntax
+
+The documentation uses Mintlify-specific MDX components. Use these components appropriately:
+
+**Callouts**: Use sparingly and only where they add value. Available types:
+```mdx
+<Note>General information or tips</Note>
+<Info>Informational content</Info>
+<Warning>Important warnings or cautions</Warning>
+<Tip>Helpful tips or best practices</Tip>
+<Check>Success messages or confirmations</Check>
+```
+
+**Code Blocks**: Always specify the language for syntax highlighting:
+````mdx
+```javascript
+const example = "code";
+```
+````
+
+**Tabs**: For showing multiple options or examples:
+```mdx
+<Tabs>
+  <Tab title="JavaScript">
+    Content for JavaScript
+  </Tab>
+  <Tab title="Python">
+    Content for Python
+  </Tab>
+</Tabs>
+```
+
+**Accordions**: For collapsible content sections:
+```mdx
+<Accordion title="Section title">
+  Hidden content here
+</Accordion>
+```
+
+**Cards**: For grouped navigation or features:
+```mdx
+<CardGroup cols={2}>
+  <Card title="Card Title" href="/path/to/page">
+    Description
+  </Card>
+</CardGroup>
+```
+
+Use these components only where appropriate. Do not overuse callouts or formatting that may distract from the technical content.
\ No newline at end of file
diff --git a/sdk/next/build.mdx b/sdk/next/build.mdx
new file mode 100644
index 000000000..866bb68d1
--- /dev/null
+++ b/sdk/next/build.mdx
@@ -0,0 +1,27 @@
+---
+title: "Overview"
+description: "Version: next"
+---
+
+Learn how to build a complete blockchain application using the Cosmos SDK. This section covers everything from creating your chain's core application logic to implementing advanced features like vote extensions and ABCI customization.
+
+<CardGroup cols={2}>
+  <Card title="Building Apps">
+    Build your blockchain application with guides on app.go setup, dependency injection, mempool configuration, and vote extensions.
+  </Card>
+  <Card title="Packages">
+    Leverage powerful SDK packages including depinject for dependency injection and collections for state management.
+  </Card>
+  <Card title="Transactions">
+    Learn how to programmatically create, sign, and broadcast transactions in your application.
+  </Card>
+  <Card title="Testing">
+    Test your chain with fuzz testing and simulation frameworks to ensure robustness and catch edge cases.
+  </Card>
+  <Card title="ABCI">
+    Customize blockchain behavior with ABCI methods including PrepareProposal, ProcessProposal, and vote extensions.
+  </Card>
+  <Card title="Building Modules">
+    Develop custom modules with comprehensive guides on module architecture, message handling, and state management.
+  </Card>
+</CardGroup>
diff --git a/sdk/next/build/abci/checktx.mdx b/sdk/next/build/abci/checktx.mdx
new file mode 100644
index 000000000..e6fcdb8f0
--- /dev/null
+++ b/sdk/next/build/abci/checktx.mdx
@@ -0,0 +1,1577 @@
+---
+title: CheckTx
+description: >-
+ CheckTx is called by the BaseApp when CometBFT receives a transaction from a
+ client, over the p2p network or RPC. The CheckTx method is responsible for
+ validating the transaction and returning an error if the transaction is
+ invalid.
+---
+
+CheckTx is called by the `BaseApp` when CometBFT receives a transaction from a client, over the p2p network or RPC. The CheckTx method is responsible for validating the transaction and returning an error if the transaction is invalid.
+
+```mermaid
+graph TD
+ subgraph SDK[Cosmos SDK]
+ B[BaseApp]
+ A[AnteHandlers]
+ B <-->|Validate TX| A
+ end
+ C[CometBFT] <-->|CheckTx|SDK
+ U((User)) -->|Submit TX| C
+ N[P2P] -->|Receive TX| C
+```
+
+```go expandable
+package baseapp
+
+import (
+
+ "context"
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+ "time"
+
+ abcitypes "github.com/cometbft/cometbft/abci/types"
+ abci "github.com/cometbft/cometbft/api/cometbft/abci/v1"
+ cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1"
+ "github.com/cosmos/gogoproto/proto"
+ "google.golang.org/grpc/codes"
+ grpcstatus "google.golang.org/grpc/status"
+
+ corecomet "cosmossdk.io/core/comet"
+ coreheader "cosmossdk.io/core/header"
+ errorsmod "cosmossdk.io/errors"
+ "cosmossdk.io/store/rootmulti"
+ snapshottypes "cosmossdk.io/store/snapshots/types"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// Supported ABCI Query prefixes and paths
+const (
+ QueryPathApp = "app"
+ QueryPathCustom = "custom"
+ QueryPathP2P = "p2p"
+ QueryPathStore = "store"
+
+ QueryPathBroadcastTx = "/cosmos.tx.v1beta1.Service/BroadcastTx"
+)
+
+// InitChain implements the ABCI interface. It initializes the application's state
+// and sets up the initial validator set.
+func (app *BaseApp)
+
+InitChain(req *abci.InitChainRequest) (*abci.InitChainResponse, error) {
+ if req.ChainId != app.chainID {
+ return nil, fmt.Errorf("invalid chain-id on InitChain; expected: %s, got: %s", app.chainID, req.ChainId)
+}
+
+ // On a new chain, we consider the init chain block height as 0, even though
+ // req.InitialHeight is 1 by default.
+ initHeader := cmtproto.Header{
+ ChainID: req.ChainId,
+ Time: req.Time
+}
+
+app.logger.Info("InitChain", "initialHeight", req.InitialHeight, "chainID", req.ChainId)
+
+ // Set the initial height, which will be used to determine if we are proposing
+ // or processing the first block or not.
+ app.initialHeight = req.InitialHeight
+ if app.initialHeight == 0 { // If initial height is 0, set it to 1
+ app.initialHeight = 1
+}
+
+ // if req.InitialHeight is > 1, then we set the initial version on all stores
+ if req.InitialHeight > 1 {
+ initHeader.Height = req.InitialHeight
+ if err := app.cms.SetInitialVersion(req.InitialHeight); err != nil {
+ return nil, err
+}
+
+}
+
+ // initialize states with a correct header
+ app.setState(execModeFinalize, initHeader)
+
+app.setState(execModeCheck, initHeader)
+
+ // Store the consensus params in the BaseApp's param store. Note, this must be
+ // done after the finalizeBlockState and context have been set as it's persisted
+ // to state.
+ if req.ConsensusParams != nil {
+ err := app.StoreConsensusParams(app.finalizeBlockState.Context(), *req.ConsensusParams)
+ if err != nil {
+ return nil, err
+}
+
+}
+
+defer func() {
+ // InitChain represents the state of the application BEFORE the first block,
+ // i.e. the genesis block. This means that when processing the app's InitChain
+ // handler, the block height is zero by default. However, after Commit is called
+ // the height needs to reflect the true block height.
+ initHeader.Height = req.InitialHeight
+ app.checkState.SetContext(app.checkState.Context().WithBlockHeader(initHeader).
+ WithHeaderInfo(coreheader.Info{
+ ChainID: req.ChainId,
+ Height: req.InitialHeight,
+ Time: req.Time,
+}))
+
+app.finalizeBlockState.SetContext(app.finalizeBlockState.Context().WithBlockHeader(initHeader).
+ WithHeaderInfo(coreheader.Info{
+ ChainID: req.ChainId,
+ Height: req.InitialHeight,
+ Time: req.Time,
+}))
+}()
+ if app.initChainer == nil {
+ return &abci.InitChainResponse{
+}, nil
+}
+
+ // add block gas meter for any genesis transactions (allow infinite gas)
+
+app.finalizeBlockState.SetContext(app.finalizeBlockState.Context().WithBlockGasMeter(storetypes.NewInfiniteGasMeter()))
+
+res, err := app.initChainer(app.finalizeBlockState.Context(), req)
+ if err != nil {
+ return nil, err
+}
+ if len(req.Validators) > 0 {
+ if len(req.Validators) != len(res.Validators) {
+ return nil, fmt.Errorf(
+ "len(RequestInitChain.Validators) != len(GenesisValidators) (%d != %d)",
+ len(req.Validators), len(res.Validators),
+ )
+}
+
+sort.Sort(abcitypes.ValidatorUpdates(req.Validators))
+ for i := range res.Validators {
+ if !proto.Equal(&res.Validators[i], &req.Validators[i]) {
+ return nil, fmt.Errorf("genesisValidators[%d] != req.Validators[%d] ", i, i)
+}
+
+}
+
+}
+
+ // NOTE: We don't commit, but FinalizeBlock for block InitialHeight starts from
+ // this FinalizeBlockState.
+ return &abci.InitChainResponse{
+ ConsensusParams: res.ConsensusParams,
+ Validators: res.Validators,
+ AppHash: app.LastCommitID().Hash,
+}, nil
+}
+
+// Info implements the ABCI interface. It returns information about the application.
+func (app *BaseApp)
+
+Info(_ *abci.InfoRequest) (*abci.InfoResponse, error) {
+ lastCommitID := app.cms.LastCommitID()
+ appVersion := InitialAppVersion
+ if lastCommitID.Version > 0 {
+ ctx, err := app.CreateQueryContext(lastCommitID.Version, false)
+ if err != nil {
+ return nil, fmt.Errorf("failed creating query context: %w", err)
+}
+
+appVersion, err = app.AppVersion(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed getting app version: %w", err)
+}
+
+}
+
+return &abci.InfoResponse{
+ Data: app.name,
+ Version: app.version,
+ AppVersion: appVersion,
+ LastBlockHeight: lastCommitID.Version,
+ LastBlockAppHash: lastCommitID.Hash,
+}, nil
+}
+
+// Query implements the ABCI interface. It delegates to CommitMultiStore if it
+// implements Queryable.
+func (app *BaseApp)
+
+Query(_ context.Context, req *abci.QueryRequest) (resp *abci.QueryResponse, err error) {
+ // add panic recovery for all queries
+ //
+ // Ref: https://github.com/cosmos/cosmos-sdk/pull/8039
+ defer func() {
+ if r := recover(); r != nil {
+ resp = queryResult(errorsmod.Wrapf(sdkerrors.ErrPanic, "%v", r), app.trace)
+}
+
+}()
+
+ // when a client did not provide a query height, manually inject the latest
+ if req.Height == 0 {
+ req.Height = app.LastBlockHeight()
+}
+
+telemetry.IncrCounter(1, "query", "count")
+
+telemetry.IncrCounter(1, "query", req.Path)
+ start := telemetry.Now()
+
+defer telemetry.MeasureSince(start, req.Path)
+ if req.Path == QueryPathBroadcastTx {
+ return queryResult(errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "can't route a broadcast tx message"), app.trace), nil
+}
+
+ // handle gRPC routes first rather than calling splitPath because '/' characters
+ // are used as part of gRPC paths
+ if grpcHandler := app.grpcQueryRouter.Route(req.Path); grpcHandler != nil {
+ return app.handleQueryGRPC(grpcHandler, req), nil
+}
+ path := SplitABCIQueryPath(req.Path)
+ if len(path) == 0 {
+ return queryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "no query path provided"), app.trace), nil
+}
+ switch path[0] {
+ case QueryPathApp:
+ // "/app" prefix for special application queries
+ resp = handleQueryApp(app, path, req)
+ case QueryPathStore:
+ resp = handleQueryStore(app, path, *req)
+ case QueryPathP2P:
+ resp = handleQueryP2P(app, path)
+
+default:
+ resp = queryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "unknown query path"), app.trace)
+}
+
+return resp, nil
+}
+
+// ListSnapshots implements the ABCI interface. It delegates to app.snapshotManager if set.
+func (app *BaseApp)
+
+ListSnapshots(req *abci.ListSnapshotsRequest) (*abci.ListSnapshotsResponse, error) {
+ resp := &abci.ListSnapshotsResponse{
+ Snapshots: []*abci.Snapshot{
+}}
+ if app.snapshotManager == nil {
+ return resp, nil
+}
+
+snapshots, err := app.snapshotManager.List()
+ if err != nil {
+ app.logger.Error("failed to list snapshots", "err", err)
+
+return nil, err
+}
+ for _, snapshot := range snapshots {
+ abciSnapshot, err := snapshot.ToABCI()
+ if err != nil {
+ app.logger.Error("failed to convert ABCI snapshots", "err", err)
+
+return nil, err
+}
+
+resp.Snapshots = append(resp.Snapshots, &abciSnapshot)
+}
+
+return resp, nil
+}
+
+// LoadSnapshotChunk implements the ABCI interface. It delegates to app.snapshotManager if set.
+func (app *BaseApp)
+
+LoadSnapshotChunk(req *abci.LoadSnapshotChunkRequest) (*abci.LoadSnapshotChunkResponse, error) {
+ if app.snapshotManager == nil {
+ return &abci.LoadSnapshotChunkResponse{
+}, nil
+}
+
+chunk, err := app.snapshotManager.LoadChunk(req.Height, req.Format, req.Chunk)
+ if err != nil {
+ app.logger.Error(
+ "failed to load snapshot chunk",
+ "height", req.Height,
+ "format", req.Format,
+ "chunk", req.Chunk,
+ "err", err,
+ )
+
+return nil, err
+}
+
+return &abci.LoadSnapshotChunkResponse{
+ Chunk: chunk
+}, nil
+}
+
+// OfferSnapshot implements the ABCI interface. It delegates to app.snapshotManager if set.
+func (app *BaseApp)
+
+OfferSnapshot(req *abci.OfferSnapshotRequest) (*abci.OfferSnapshotResponse, error) {
+ if app.snapshotManager == nil {
+ app.logger.Error("snapshot manager not configured")
+
+return &abci.OfferSnapshotResponse{
+ Result: abci.OFFER_SNAPSHOT_RESULT_ABORT
+}, nil
+}
+ if req.Snapshot == nil {
+ app.logger.Error("received nil snapshot")
+
+return &abci.OfferSnapshotResponse{
+ Result: abci.OFFER_SNAPSHOT_RESULT_REJECT
+}, nil
+}
+
+snapshot, err := snapshottypes.SnapshotFromABCI(req.Snapshot)
+ if err != nil {
+ app.logger.Error("failed to decode snapshot metadata", "err", err)
+
+return &abci.OfferSnapshotResponse{
+ Result: abci.OFFER_SNAPSHOT_RESULT_REJECT
+}, nil
+}
+
+err = app.snapshotManager.Restore(snapshot)
+ switch {
+ case err == nil:
+ return &abci.OfferSnapshotResponse{
+ Result: abci.OFFER_SNAPSHOT_RESULT_ACCEPT
+}, nil
+ case errors.Is(err, snapshottypes.ErrUnknownFormat):
+ return &abci.OfferSnapshotResponse{
+ Result: abci.OFFER_SNAPSHOT_RESULT_REJECT_FORMAT
+}, nil
+ case errors.Is(err, snapshottypes.ErrInvalidMetadata):
+ app.logger.Error(
+ "rejecting invalid snapshot",
+ "height", req.Snapshot.Height,
+ "format", req.Snapshot.Format,
+ "err", err,
+ )
+
+return &abci.OfferSnapshotResponse{
+ Result: abci.OFFER_SNAPSHOT_RESULT_REJECT
+}, nil
+
+ default:
+ // CometBFT errors are defined here: https://github.com/cometbft/cometbft/blob/main/statesync/syncer.go
+ // It may happen that in case of a CometBFT error, such as a timeout (which occurs after two minutes),
+ // the process is aborted. This is done intentionally because deleting the database programmatically
+ // can lead to more complicated situations.
+ app.logger.Error(
+ "failed to restore snapshot",
+ "height", req.Snapshot.Height,
+ "format", req.Snapshot.Format,
+ "err", err,
+ )
+
+ // We currently don't support resetting the IAVL stores and retrying a
+ // different snapshot, so we ask CometBFT to abort all snapshot restoration.
+ return &abci.OfferSnapshotResponse{
+ Result: abci.OFFER_SNAPSHOT_RESULT_ABORT
+}, nil
+}
+}
+
+// ApplySnapshotChunk implements the ABCI interface. It delegates to app.snapshotManager if set.
+func (app *BaseApp)
+
+ApplySnapshotChunk(req *abci.ApplySnapshotChunkRequest) (*abci.ApplySnapshotChunkResponse, error) {
+ if app.snapshotManager == nil {
+ app.logger.Error("snapshot manager not configured")
+
+return &abci.ApplySnapshotChunkResponse{
+ Result: abci.APPLY_SNAPSHOT_CHUNK_RESULT_ABORT
+}, nil
+}
+
+ _, err := app.snapshotManager.RestoreChunk(req.Chunk)
+ switch {
+ case err == nil:
+ return &abci.ApplySnapshotChunkResponse{
+ Result: abci.APPLY_SNAPSHOT_CHUNK_RESULT_ACCEPT
+}, nil
+ case errors.Is(err, snapshottypes.ErrChunkHashMismatch):
+ app.logger.Error(
+ "chunk checksum mismatch; rejecting sender and requesting refetch",
+ "chunk", req.Index,
+ "sender", req.Sender,
+ "err", err,
+ )
+
+return &abci.ApplySnapshotChunkResponse{
+ Result: abci.APPLY_SNAPSHOT_CHUNK_RESULT_RETRY,
+ RefetchChunks: []uint32{
+ req.Index
+},
+ RejectSenders: []string{
+ req.Sender
+},
+}, nil
+
+ default:
+ app.logger.Error("failed to restore snapshot", "err", err)
+
+return &abci.ApplySnapshotChunkResponse{
+ Result: abci.APPLY_SNAPSHOT_CHUNK_RESULT_ABORT
+}, nil
+}
+}
+
+// CheckTx implements the ABCI interface and executes a tx in CheckTx mode. In
+// CheckTx mode, messages are not executed. This means messages are only validated
+// and only the AnteHandler is executed. State is persisted to the BaseApp's
+// internal CheckTx state if the AnteHandler passes. Otherwise, the ResponseCheckTx
+// will contain relevant error information. Regardless of tx execution outcome,
+// the ResponseCheckTx will contain relevant gas execution context.
+func (app *BaseApp)
+
+CheckTx(req *abci.CheckTxRequest) (*abci.CheckTxResponse, error) {
+ var mode execMode
+ switch {
+ case req.Type == abci.CHECK_TX_TYPE_CHECK:
+ mode = execModeCheck
+ case req.Type == abci.CHECK_TX_TYPE_RECHECK:
+ mode = execModeReCheck
+
+ default:
+ return nil, fmt.Errorf("unknown RequestCheckTx type: %s", req.Type)
+}
+ if app.checkTxHandler == nil {
+ gInfo, result, anteEvents, err := app.runTx(mode, req.Tx, nil)
+ if err != nil {
+ return responseCheckTxWithEvents(err, gInfo.GasWanted, gInfo.GasUsed, anteEvents, app.trace), nil
+}
+
+return &abci.CheckTxResponse{
+ GasWanted: int64(gInfo.GasWanted), // TODO: Should type accept unsigned ints?
+ GasUsed: int64(gInfo.GasUsed), // TODO: Should type accept unsigned ints?
+ Log: result.Log,
+ Data: result.Data,
+ Events: sdk.MarkEventsToIndex(result.Events, app.indexEvents),
+}, nil
+}
+
+return app.checkTxHandler(app.runTx, req)
+}
+
+// PrepareProposal implements the PrepareProposal ABCI method and returns a
+// ResponsePrepareProposal object to the client. The PrepareProposal method is
+// responsible for allowing the block proposer to perform application-dependent
+// work in a block before proposing it.
+//
+// Transactions can be modified, removed, or added by the application. Since the
+// application maintains its own local mempool, it will ignore the transactions
+// provided to it in RequestPrepareProposal. Instead, it will determine which
+// transactions to return based on the mempool's semantics and the MaxTxBytes
+// provided by the client's request.
+//
+// Ref: https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-060-abci-1.0.md
+// Ref: https://github.com/cometbft/cometbft/blob/main/spec/abci/abci%2B%2B_basic_concepts.md
+func (app *BaseApp)
+
+PrepareProposal(req *abci.PrepareProposalRequest) (resp *abci.PrepareProposalResponse, err error) {
+ if app.prepareProposal == nil {
+ return nil, errors.New("PrepareProposal handler not set")
+}
+
+ // Always reset state given that PrepareProposal can timeout and be called
+ // again in a subsequent round.
+ header := cmtproto.Header{
+ ChainID: app.chainID,
+ Height: req.Height,
+ Time: req.Time,
+ ProposerAddress: req.ProposerAddress,
+ NextValidatorsHash: req.NextValidatorsHash,
+ AppHash: app.LastCommitID().Hash,
+}
+
+app.setState(execModePrepareProposal, header)
+
+ // CometBFT must never call PrepareProposal with a height of 0.
+ //
+ // Ref: https://github.com/cometbft/cometbft/blob/059798a4f5b0c9f52aa8655fa619054a0154088c/spec/core/state.md?plain=1#L37-L38
+ if req.Height < 1 {
+ return nil, errors.New("PrepareProposal called with invalid height")
+}
+
+app.prepareProposalState.SetContext(app.getContextForProposal(app.prepareProposalState.Context(), req.Height).
+ WithVoteInfos(toVoteInfo(req.LocalLastCommit.Votes)). // this is a set of votes that are not finalized yet, wait for commit
+ WithBlockHeight(req.Height).
+ WithProposer(req.ProposerAddress).
+ WithExecMode(sdk.ExecModePrepareProposal).
+ WithCometInfo(corecomet.Info{
+ Evidence: sdk.ToSDKEvidence(req.Misbehavior),
+ ValidatorsHash: req.NextValidatorsHash,
+ ProposerAddress: req.ProposerAddress,
+ LastCommit: sdk.ToSDKExtendedCommitInfo(req.LocalLastCommit),
+}).
+ WithHeaderInfo(coreheader.Info{
+ ChainID: app.chainID,
+ Height: req.Height,
+ Time: req.Time,
+}))
+
+app.prepareProposalState.SetContext(app.prepareProposalState.Context().
+ WithConsensusParams(app.GetConsensusParams(app.prepareProposalState.Context())).
+ WithBlockGasMeter(app.getBlockGasMeter(app.prepareProposalState.Context())))
+
+defer func() {
+ if err := recover(); err != nil {
+ app.logger.Error(
+ "panic recovered in PrepareProposal",
+ "height", req.Height,
+ "time", req.Time,
+ "panic", err,
+ )
+
+resp = &abci.PrepareProposalResponse{
+ Txs: req.Txs
+}
+
+}
+
+}()
+
+resp, err = app.prepareProposal(app.prepareProposalState.Context(), req)
+ if err != nil {
+ app.logger.Error("failed to prepare proposal", "height", req.Height, "time", req.Time, "err", err)
+
+return &abci.PrepareProposalResponse{
+ Txs: req.Txs
+}, nil
+}
+
+return resp, nil
+}
+
+// ProcessProposal implements the ProcessProposal ABCI method and returns a
+// ResponseProcessProposal object to the client. The ProcessProposal method is
+// responsible for allowing execution of application-dependent work in a proposed
+// block. Note, the application defines the exact implementation details of
+// ProcessProposal. In general, the application must at the very least ensure
+// that all transactions are valid. If all transactions are valid, then we inform
+// CometBFT that the Status is ACCEPT. However, the application is also able
+// to implement optimizations such as executing the entire proposed block
+// immediately.
+//
+// If a panic is detected during execution of an application's ProcessProposal
+// handler, it will be recovered and we will reject the proposal.
+//
+// Ref: https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-060-abci-1.0.md
+// Ref: https://github.com/cometbft/cometbft/blob/main/spec/abci/abci%2B%2B_basic_concepts.md
+func (app *BaseApp)
+
+ProcessProposal(req *abci.ProcessProposalRequest) (resp *abci.ProcessProposalResponse, err error) {
+ if app.processProposal == nil {
+ return nil, errors.New("ProcessProposal handler not set")
+}
+
+ // CometBFT must never call ProcessProposal with a height of 0.
+ // Ref: https://github.com/cometbft/cometbft/blob/059798a4f5b0c9f52aa8655fa619054a0154088c/spec/core/state.md?plain=1#L37-L38
+ if req.Height < 1 {
+ return nil, errors.New("ProcessProposal called with invalid height")
+}
+
+ // Always reset state given that ProcessProposal can timeout and be called
+ // again in a subsequent round.
+ header := cmtproto.Header{
+ ChainID: app.chainID,
+ Height: req.Height,
+ Time: req.Time,
+ ProposerAddress: req.ProposerAddress,
+ NextValidatorsHash: req.NextValidatorsHash,
+ AppHash: app.LastCommitID().Hash,
+}
+
+app.setState(execModeProcessProposal, header)
+
+ // Since the application can get access to FinalizeBlock state and write to it,
+ // we must be sure to reset it in case ProcessProposal timeouts and is called
+ // again in a subsequent round. However, we only want to do this after we've
+ // processed the first block, as we want to avoid overwriting the finalizeState
+ // after state changes during InitChain.
+ if req.Height > app.initialHeight {
+ // abort any running OE
+ app.optimisticExec.Abort()
+
+app.setState(execModeFinalize, header)
+}
+
+app.processProposalState.SetContext(app.getContextForProposal(app.processProposalState.Context(), req.Height).
+ WithVoteInfos(req.ProposedLastCommit.Votes). // this is a set of votes that are not finalized yet, wait for commit
+ WithBlockHeight(req.Height).
+ WithHeaderHash(req.Hash).
+ WithProposer(req.ProposerAddress).
+ WithCometInfo(corecomet.Info{
+ ProposerAddress: req.ProposerAddress,
+ ValidatorsHash: req.NextValidatorsHash,
+ Evidence: sdk.ToSDKEvidence(req.Misbehavior),
+ LastCommit: sdk.ToSDKCommitInfo(req.ProposedLastCommit),
+},
+ ).
+ WithExecMode(sdk.ExecModeProcessProposal).
+ WithHeaderInfo(coreheader.Info{
+ ChainID: app.chainID,
+ Height: req.Height,
+ Time: req.Time,
+}))
+
+app.processProposalState.SetContext(app.processProposalState.Context().
+ WithConsensusParams(app.GetConsensusParams(app.processProposalState.Context())).
+ WithBlockGasMeter(app.getBlockGasMeter(app.processProposalState.Context())))
+
+defer func() {
+ if err := recover(); err != nil {
+ app.logger.Error(
+ "panic recovered in ProcessProposal",
+ "height", req.Height,
+ "time", req.Time,
+ "hash", fmt.Sprintf("%X", req.Hash),
+ "panic", err,
+ )
+
+resp = &abci.ProcessProposalResponse{
+ Status: abci.PROCESS_PROPOSAL_STATUS_REJECT
+}
+
+}
+
+}()
+
+resp, err = app.processProposal(app.processProposalState.Context(), req)
+ if err != nil {
+ app.logger.Error("failed to process proposal", "height", req.Height, "time", req.Time, "hash", fmt.Sprintf("%X", req.Hash), "err", err)
+
+return &abci.ProcessProposalResponse{
+ Status: abci.PROCESS_PROPOSAL_STATUS_REJECT
+}, nil
+}
+
+ // Only execute optimistic execution if the proposal is accepted, OE is
+ // enabled and the block height is greater than the initial height. During
+ // the first block we'll be carrying state from InitChain, so it would be
+ // impossible for us to easily revert.
+ // After the first block has been processed, the next blocks will get executed
+ // optimistically, so that when the ABCI client calls `FinalizeBlock` the app
+ // can have a response ready.
+ if resp.Status == abci.PROCESS_PROPOSAL_STATUS_ACCEPT &&
+ app.optimisticExec.Enabled() &&
+ req.Height > app.initialHeight {
+ app.optimisticExec.Execute(req)
+}
+
+return resp, nil
+}
+
+// ExtendVote implements the ExtendVote ABCI method and returns a ResponseExtendVote.
+// It calls the application's ExtendVote handler which is responsible for performing
+// application-specific business logic when sending a pre-commit for the NEXT
+// block height. The extensions response may be non-deterministic but must always
+// be returned, even if empty.
+//
+// Agreed upon vote extensions are made available to the proposer of the next
+// height and are committed in the subsequent height, i.e. H+2. An error is
+// returned if vote extensions are not enabled or if extendVote fails or panics.
+func (app *BaseApp)
+
+ExtendVote(_ context.Context, req *abci.ExtendVoteRequest) (resp *abci.ExtendVoteResponse, err error) {
+ // Always reset state given that ExtendVote and VerifyVoteExtension can timeout
+ // and be called again in a subsequent round.
+ var ctx sdk.Context
+
+ // If we're extending the vote for the initial height, we need to use the
+ // finalizeBlockState context, otherwise we don't get the uncommitted data
+ // from InitChain.
+ if req.Height == app.initialHeight {
+ ctx, _ = app.finalizeBlockState.Context().CacheContext()
+}
+
+else {
+ ms := app.cms.CacheMultiStore()
+
+ctx = sdk.NewContext(ms, false, app.logger).WithStreamingManager(app.streamingManager).WithChainID(app.chainID).WithBlockHeight(req.Height)
+}
+ if app.extendVote == nil {
+ return nil, errors.New("application ExtendVote handler not set")
+}
+
+ // If vote extensions are not enabled, as a safety precaution, we return an
+ // error.
+ cp := app.GetConsensusParams(ctx)
+
+ // Note: In this case, we do want to extend vote if the height is equal or
+ // greater than VoteExtensionsEnableHeight. This differs from the check done
+ // in ValidateVoteExtensions and PrepareProposal in which we'll check for
+ // vote extensions on VoteExtensionsEnableHeight+1.
+ extsEnabled := cp.Feature != nil && req.Height >= cp.Feature.VoteExtensionsEnableHeight.Value && cp.Feature.VoteExtensionsEnableHeight.Value != 0
+ if !extsEnabled {
+ // check abci params
+ extsEnabled = cp.Abci != nil && req.Height >= cp.Abci.VoteExtensionsEnableHeight && cp.Abci.VoteExtensionsEnableHeight != 0
+ if !extsEnabled {
+ return nil, fmt.Errorf("vote extensions are not enabled; unexpected call to ExtendVote at height %d", req.Height)
+}
+
+}
+
+ctx = ctx.
+ WithConsensusParams(cp).
+ WithBlockGasMeter(storetypes.NewInfiniteGasMeter()).
+ WithBlockHeight(req.Height).
+ WithHeaderHash(req.Hash).
+ WithExecMode(sdk.ExecModeVoteExtension).
+ WithHeaderInfo(coreheader.Info{
+ ChainID: app.chainID,
+ Height: req.Height,
+ Hash: req.Hash,
+})
+
+ // add a deferred recover handler in case extendVote panics
+ defer func() {
+ if r := recover(); r != nil {
+ app.logger.Error(
+ "panic recovered in ExtendVote",
+ "height", req.Height,
+ "hash", fmt.Sprintf("%X", req.Hash),
+ "panic", err,
+ )
+
+err = fmt.Errorf("recovered application panic in ExtendVote: %v", r)
+}
+
+}()
+
+resp, err = app.extendVote(ctx, req)
+ if err != nil {
+ app.logger.Error("failed to extend vote", "height", req.Height, "hash", fmt.Sprintf("%X", req.Hash), "err", err)
+
+return &abci.ExtendVoteResponse{
+ VoteExtension: []byte{
+}}, nil
+}
+
+return resp, err
+}
+
+// VerifyVoteExtension implements the VerifyVoteExtension ABCI method and returns
+// a ResponseVerifyVoteExtension. It calls the applications' VerifyVoteExtension
+// handler which is responsible for performing application-specific business
+// logic in verifying a vote extension from another validator during the pre-commit
+// phase. The response MUST be deterministic. An error is returned if vote
+// extensions are not enabled or if verifyVoteExt fails or panics.
+// We highly recommend a size validation due to performance degradation,
+// see more here /cometbft/v0.38/docs/qa/CometBFT-QA-38#vote-extensions-testbed
+func (app *BaseApp)
+
+VerifyVoteExtension(req *abci.VerifyVoteExtensionRequest) (resp *abci.VerifyVoteExtensionResponse, err error) {
+ if app.verifyVoteExt == nil {
+ return nil, errors.New("application VerifyVoteExtension handler not set")
+}
+
+var ctx sdk.Context
+
+ // If we're verifying the vote for the initial height, we need to use the
+ // finalizeBlockState context, otherwise we don't get the uncommitted data
+ // from InitChain.
+ if req.Height == app.initialHeight {
+ ctx, _ = app.finalizeBlockState.Context().CacheContext()
+}
+
+else {
+ ms := app.cms.CacheMultiStore()
+
+ctx = sdk.NewContext(ms, false, app.logger).WithStreamingManager(app.streamingManager).WithChainID(app.chainID).WithBlockHeight(req.Height)
+}
+
+ // If vote extensions are not enabled, as a safety precaution, we return an
+ // error.
+ cp := app.GetConsensusParams(ctx)
+
+ // Note: we verify vote extensions on VoteExtensionsEnableHeight+1. Check
+ // comment in ExtendVote and ValidateVoteExtensions for more details.
+ extsEnabled := cp.Feature.VoteExtensionsEnableHeight != nil && req.Height >= cp.Feature.VoteExtensionsEnableHeight.Value && cp.Feature.VoteExtensionsEnableHeight.Value != 0
+ if !extsEnabled {
+ // check abci params
+ extsEnabled = cp.Abci != nil && req.Height >= cp.Abci.VoteExtensionsEnableHeight && cp.Abci.VoteExtensionsEnableHeight != 0
+ if !extsEnabled {
+ return nil, fmt.Errorf("vote extensions are not enabled; unexpected call to VerifyVoteExtension at height %d", req.Height)
+}
+
+}
+
+ // add a deferred recover handler in case verifyVoteExt panics
+ defer func() {
+ if r := recover(); r != nil {
+ app.logger.Error(
+ "panic recovered in VerifyVoteExtension",
+ "height", req.Height,
+ "hash", fmt.Sprintf("%X", req.Hash),
+ "validator", fmt.Sprintf("%X", req.ValidatorAddress),
+ "panic", r,
+ )
+
+err = fmt.Errorf("recovered application panic in VerifyVoteExtension: %v", r)
+}
+
+}()
+
+ctx = ctx.
+ WithConsensusParams(cp).
+ WithBlockGasMeter(storetypes.NewInfiniteGasMeter()).
+ WithBlockHeight(req.Height).
+ WithHeaderHash(req.Hash).
+ WithExecMode(sdk.ExecModeVerifyVoteExtension).
+ WithHeaderInfo(coreheader.Info{
+ ChainID: app.chainID,
+ Height: req.Height,
+ Hash: req.Hash,
+})
+
+resp, err = app.verifyVoteExt(ctx, req)
+ if err != nil {
+ app.logger.Error("failed to verify vote extension", "height", req.Height, "err", err)
+
+return &abci.VerifyVoteExtensionResponse{
+ Status: abci.VERIFY_VOTE_EXTENSION_STATUS_REJECT
+}, nil
+}
+
+return resp, err
+}
+
+// internalFinalizeBlock executes the block, called by the Optimistic
+// Execution flow or by the FinalizeBlock ABCI method. The context received is
+// only used to handle early cancellation; for anything related to state,
+// app.finalizeBlockState.Context() must be used.
+func (app *BaseApp) internalFinalizeBlock(ctx context.Context, req *abci.FinalizeBlockRequest) (*abci.FinalizeBlockResponse, error) {
+	var events []abci.Event
+
+	if err := app.checkHalt(req.Height, req.Time); err != nil {
+		return nil, err
+	}
+
+	if err := app.validateFinalizeBlockHeight(req); err != nil {
+		return nil, err
+	}
+
+	if app.cms.TracingEnabled() {
+		app.cms.SetTracingContext(storetypes.TraceContext(
+			map[string]any{"blockHeight": req.Height},
+		))
+	}
+
+	header := cmtproto.Header{
+		ChainID:            app.chainID,
+		Height:             req.Height,
+		Time:               req.Time,
+		ProposerAddress:    req.ProposerAddress,
+		NextValidatorsHash: req.NextValidatorsHash,
+		AppHash:            app.LastCommitID().Hash,
+	}
+
+	// finalizeBlockState should be set on InitChain or ProcessProposal. If it is
+	// nil, it means we are replaying this block and we need to set the state here
+	// given that during block replay ProcessProposal is not executed by CometBFT.
+	if app.finalizeBlockState == nil {
+		app.setState(execModeFinalize, header)
+	}
+
+	// Context is now updated with Header information.
+	app.finalizeBlockState.SetContext(app.finalizeBlockState.Context().
+		WithBlockHeader(header).
+		WithHeaderHash(req.Hash).
+		WithHeaderInfo(coreheader.Info{
+			ChainID: app.chainID,
+			Height:  req.Height,
+			Time:    req.Time,
+			Hash:    req.Hash,
+			AppHash: app.LastCommitID().Hash,
+		}).
+		WithConsensusParams(app.GetConsensusParams(app.finalizeBlockState.Context())).
+		WithVoteInfos(req.DecidedLastCommit.Votes).
+		WithExecMode(sdk.ExecModeFinalize).
+		WithCometInfo(corecomet.Info{
+			Evidence:        sdk.ToSDKEvidence(req.Misbehavior),
+			ValidatorsHash:  req.NextValidatorsHash,
+			ProposerAddress: req.ProposerAddress,
+			LastCommit:      sdk.ToSDKCommitInfo(req.DecidedLastCommit),
+		}))
+
+	// GasMeter must be set after we get a context with updated consensus params.
+	gasMeter := app.getBlockGasMeter(app.finalizeBlockState.Context())
+	app.finalizeBlockState.SetContext(app.finalizeBlockState.Context().WithBlockGasMeter(gasMeter))
+
+	if app.checkState != nil {
+		app.checkState.SetContext(app.checkState.Context().
+			WithBlockGasMeter(gasMeter).
+			WithHeaderHash(req.Hash))
+	}
+
+	preblockEvents, err := app.preBlock(req)
+	if err != nil {
+		return nil, err
+	}
+	events = append(events, preblockEvents...)
+
+	beginBlock, err := app.beginBlock(req)
+	if err != nil {
+		return nil, err
+	}
+
+	// First check for an abort signal after beginBlock, as it's the first place
+	// we spend any significant amount of time.
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	default:
+		// continue
+	}
+
+	events = append(events, beginBlock.Events...)
+
+	// Reset the gas meter so that the AnteHandlers aren't required to
+	gasMeter = app.getBlockGasMeter(app.finalizeBlockState.Context())
+	app.finalizeBlockState.SetContext(app.finalizeBlockState.Context().WithBlockGasMeter(gasMeter))
+
+	// Iterate over all raw transactions in the proposal and attempt to execute
+	// them, gathering the execution results.
+	//
+	// NOTE: Not all raw transactions may adhere to the sdk.Tx interface, e.g.
+	// vote extensions, so skip those.
+	txResults := make([]*abci.ExecTxResult, 0, len(req.Txs))
+	for _, rawTx := range req.Txs {
+		response := app.deliverTx(rawTx)
+
+		// check after every tx if we should abort
+		select {
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		default:
+			// continue
+		}
+
+		txResults = append(txResults, response)
+	}
+
+	if app.finalizeBlockState.ms.TracingEnabled() {
+		app.finalizeBlockState.ms = app.finalizeBlockState.ms.SetTracingContext(nil).(storetypes.CacheMultiStore)
+	}
+
+	endBlock, err := app.endBlock(app.finalizeBlockState.Context())
+	if err != nil {
+		return nil, err
+	}
+
+	// check after endBlock if we should abort, to avoid propagating the result
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	default:
+		// continue
+	}
+
+	events = append(events, endBlock.Events...)
+	cp := app.GetConsensusParams(app.finalizeBlockState.Context())
+
+	return &abci.FinalizeBlockResponse{
+		Events:                events,
+		TxResults:             txResults,
+		ValidatorUpdates:      endBlock.ValidatorUpdates,
+		ConsensusParamUpdates: &cp,
+	}, nil
+}
+
+// FinalizeBlock will execute the block proposal provided by RequestFinalizeBlock.
+// Specifically, it will execute an application's BeginBlock (if defined), followed
+// by the transactions in the proposal, finally followed by the application's
+// EndBlock (if defined).
+//
+// For each raw transaction, i.e. a byte slice, BaseApp will only execute it if
+// it adheres to the sdk.Tx interface. Otherwise, the raw transaction will be
+// skipped. This is to support compatibility with proposers injecting vote
+// extensions into the proposal, which should not themselves be executed in cases
+// where they adhere to the sdk.Tx interface.
+func (app *BaseApp) FinalizeBlock(req *abci.FinalizeBlockRequest) (res *abci.FinalizeBlockResponse, err error) {
+	defer func() {
+		// call the streaming service hooks with the FinalizeBlock messages
+		for _, streamingListener := range app.streamingManager.ABCIListeners {
+			if err := streamingListener.ListenFinalizeBlock(app.finalizeBlockState.Context(), *req, *res); err != nil {
+				app.logger.Error("ListenFinalizeBlock listening hook failed", "height", req.Height, "err", err)
+			}
+		}
+	}()
+
+	if app.optimisticExec.Initialized() {
+		// check if the hash we got is the same as the one we are executing
+		aborted := app.optimisticExec.AbortIfNeeded(req.Hash)
+
+		// Wait for the OE to finish, regardless of whether it was aborted or not
+		res, err = app.optimisticExec.WaitResult()
+
+		// only return if we are not aborting
+		if !aborted {
+			if res != nil {
+				res.AppHash = app.workingHash()
+			}
+
+			return res, err
+		}
+
+		// if it was aborted, we need to reset the state
+		app.finalizeBlockState = nil
+		app.optimisticExec.Reset()
+	}
+
+	// if no OE is running, just run the block (this is either a block replay or a OE that got aborted)
+	res, err = app.internalFinalizeBlock(context.Background(), req)
+	if res != nil {
+		res.AppHash = app.workingHash()
+	}
+
+	return res, err
+}
+
+// checkHalt checks if height or time exceeds halt-height or halt-time respectively.
+func (app *BaseApp) checkHalt(height int64, time time.Time) error {
+	var halt bool
+	switch {
+	case app.haltHeight > 0 && uint64(height) >= app.haltHeight:
+		halt = true
+
+	case app.haltTime > 0 && time.Unix() >= int64(app.haltTime):
+		halt = true
+	}
+
+	if halt {
+		return fmt.Errorf("halt per configuration height %d time %d", app.haltHeight, app.haltTime)
+	}
+
+	return nil
+}
+
+// Commit implements the ABCI interface. It will commit all state that exists in
+// the deliver state's multi-store and includes the resulting commit ID in the
+// returned abci.ResponseCommit. Commit will set the check state based on the
+// latest header and reset the deliver state. Also, if a non-zero halt height is
+// defined in config, Commit will execute a deferred function call to check
+// against that height and gracefully halt if it matches the latest committed
+// height.
+func (app *BaseApp) Commit() (*abci.CommitResponse, error) {
+	header := app.finalizeBlockState.Context().BlockHeader()
+	retainHeight := app.GetBlockRetentionHeight(header.Height)
+
+	if app.precommiter != nil {
+		app.precommiter(app.finalizeBlockState.Context())
+	}
+
+	rms, ok := app.cms.(*rootmulti.Store)
+	if ok {
+		rms.SetCommitHeader(header)
+	}
+
+	app.cms.Commit()
+
+	resp := &abci.CommitResponse{
+		RetainHeight: retainHeight,
+	}
+
+	abciListeners := app.streamingManager.ABCIListeners
+	if len(abciListeners) > 0 {
+		ctx := app.finalizeBlockState.Context()
+		blockHeight := ctx.BlockHeight()
+		changeSet := app.cms.PopStateCache()
+
+		for _, abciListener := range abciListeners {
+			if err := abciListener.ListenCommit(ctx, *resp, changeSet); err != nil {
+				app.logger.Error("Commit listening hook failed", "height", blockHeight, "err", err)
+			}
+		}
+	}
+
+	// Reset the CheckTx state to the latest committed.
+	//
+	// NOTE: This is safe because CometBFT holds a lock on the mempool for
+	// Commit. Use the header from this latest block.
+	app.setState(execModeCheck, header)
+	app.finalizeBlockState = nil
+
+	if app.prepareCheckStater != nil {
+		app.prepareCheckStater(app.checkState.Context())
+	}
+
+	// The SnapshotIfApplicable method will create the snapshot by starting the goroutine
+	app.snapshotManager.SnapshotIfApplicable(header.Height)
+
+	return resp, nil
+}
+
+// workingHash gets the apphash that will be finalized in commit.
+// These writes will be persisted to the root multi-store (app.cms) and flushed to
+// disk in the Commit phase. This means when the ABCI client requests Commit(), the
+// application state transitions will be flushed to disk, and as a result, an
+// application Merkle root is already available beforehand.
+func (app *BaseApp) workingHash() []byte {
+	// Write the FinalizeBlock state into branched storage and commit the MultiStore.
+	// The write to the FinalizeBlock state writes all state transitions to the root
+	// MultiStore (app.cms) so when Commit() is called it persists those values.
+	app.finalizeBlockState.ms.Write()
+
+	// Get the hash of all writes in order to return the apphash to CometBFT in finalizeBlock.
+	commitHash := app.cms.WorkingHash()
+	app.logger.Debug("hash of all writes", "workingHash", fmt.Sprintf("%X", commitHash))
+
+	return commitHash
+}
+
+// handleQueryApp handles "/app" prefixed queries: transaction simulation and
+// the application version.
+func handleQueryApp(app *BaseApp, path []string, req *abci.QueryRequest) *abci.QueryResponse {
+	if len(path) >= 2 {
+		switch path[1] {
+		case "simulate":
+			txBytes := req.Data
+
+			gInfo, res, err := app.Simulate(txBytes)
+			if err != nil {
+				return queryResult(errorsmod.Wrap(err, "failed to simulate tx"), app.trace)
+			}
+
+			simRes := &sdk.SimulationResponse{
+				GasInfo: gInfo,
+				Result:  res,
+			}
+
+			bz, err := codec.ProtoMarshalJSON(simRes, app.interfaceRegistry)
+			if err != nil {
+				return queryResult(errorsmod.Wrap(err, "failed to JSON encode simulation response"), app.trace)
+			}
+
+			return &abci.QueryResponse{
+				Codespace: sdkerrors.RootCodespace,
+				Height:    req.Height,
+				Value:     bz,
+			}
+
+		case "version":
+			return &abci.QueryResponse{
+				Codespace: sdkerrors.RootCodespace,
+				Height:    req.Height,
+				Value:     []byte(app.version),
+			}
+
+		default:
+			return queryResult(errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "unknown query: %s", path), app.trace)
+		}
+	}
+
+	return queryResult(
+		errorsmod.Wrap(
+			sdkerrors.ErrUnknownRequest,
+			"expected second parameter to be either 'simulate' or 'version', neither was present",
+		), app.trace)
+}
+
+// handleQueryStore handles "/store" prefixed queries against the commit
+// multi-store, optionally with Merkle proofs.
+func handleQueryStore(app *BaseApp, path []string, req abci.QueryRequest) *abci.QueryResponse {
+	// "/store" prefix for store queries
+	queryable, ok := app.cms.(storetypes.Queryable)
+	if !ok {
+		return queryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "multi-store does not support queries"), app.trace)
+	}
+
+	req.Path = "/" + strings.Join(path[1:], "/")
+
+	if req.Height <= 1 && req.Prove {
+		return queryResult(
+			errorsmod.Wrap(
+				sdkerrors.ErrInvalidRequest,
+				"cannot query with proof when height <= 1; please provide a valid height",
+			), app.trace)
+	}
+
+	sdkReq := storetypes.RequestQuery(req)
+	resp, err := queryable.Query(&sdkReq)
+	if err != nil {
+		return queryResult(err, app.trace)
+	}
+
+	resp.Height = req.Height
+	abciResp := abci.QueryResponse(*resp)
+
+	return &abciResp
+}
+
+// handleQueryP2P handles "/p2p" prefixed queries used by CometBFT to filter
+// peers by address/port or node ID.
+func handleQueryP2P(app *BaseApp, path []string) *abci.QueryResponse {
+	// "/p2p" prefix for p2p queries
+	if len(path) < 4 {
+		return queryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "path should be p2p filter "), app.trace)
+	}
+
+	var resp *abci.QueryResponse
+
+	cmd, typ, arg := path[1], path[2], path[3]
+	switch cmd {
+	case "filter":
+		switch typ {
+		case "addr":
+			resp = app.FilterPeerByAddrPort(arg)
+
+		case "id":
+			resp = app.FilterPeerByID(arg)
+		}
+
+	default:
+		resp = queryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "expected second parameter to be 'filter'"), app.trace)
+	}
+
+	return resp
+}
+
+// SplitABCIQueryPath splits a string path using the delimiter '/'.
+//
+// e.g. "this/is/funny" becomes []string{"this", "is", "funny"}
+func SplitABCIQueryPath(requestPath string) (path []string) {
+	path = strings.Split(requestPath, "/")
+
+	// first element is empty string
+	if len(path) > 0 && path[0] == "" {
+		path = path[1:]
+	}
+
+	return path
+}
+
+// FilterPeerByAddrPort filters peers by address/port.
+func (app *BaseApp) FilterPeerByAddrPort(info string) *abci.QueryResponse {
+	if app.addrPeerFilter != nil {
+		return app.addrPeerFilter(info)
+	}
+
+	// no filter configured: accept the peer
+	return &abci.QueryResponse{}
+}
+
+// FilterPeerByID filters peers by node ID.
+func (app *BaseApp) FilterPeerByID(info string) *abci.QueryResponse {
+	if app.idPeerFilter != nil {
+		return app.idPeerFilter(info)
+	}
+
+	// no filter configured: accept the peer
+	return &abci.QueryResponse{}
+}
+
+// getContextForProposal returns the correct Context for PrepareProposal and
+// ProcessProposal. We use finalizeBlockState on the first block to be able to
+// access any state changes made in InitChain.
+func (app *BaseApp) getContextForProposal(ctx sdk.Context, height int64) sdk.Context {
+	if height == app.initialHeight {
+		ctx, _ = app.finalizeBlockState.Context().CacheContext()
+
+		// clear all context data set during InitChain to avoid inconsistent behavior
+		ctx = ctx.WithHeaderInfo(coreheader.Info{}).WithBlockHeader(cmtproto.Header{})
+		return ctx
+	}
+
+	return ctx
+}
+
+// handleQueryGRPC routes an ABCI query to the given gRPC query handler,
+// creating a query context at the requested height first.
+func (app *BaseApp) handleQueryGRPC(handler GRPCQueryHandler, req *abci.QueryRequest) *abci.QueryResponse {
+	ctx, err := app.CreateQueryContext(req.Height, req.Prove)
+	if err != nil {
+		return queryResult(err, app.trace)
+	}
+
+	resp, err := handler(ctx, req)
+	if err != nil {
+		resp = queryResult(gRPCErrorToSDKError(err), app.trace)
+		resp.Height = req.Height
+		return resp
+	}
+
+	return resp
+}
+
+// gRPCErrorToSDKError maps a gRPC status code onto the closest Cosmos SDK
+// error so callers receive consistent error codespaces.
+func gRPCErrorToSDKError(err error) error {
+	status, ok := grpcstatus.FromError(err)
+	if !ok {
+		return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, err.Error())
+	}
+
+	switch status.Code() {
+	case codes.NotFound:
+		return errorsmod.Wrap(sdkerrors.ErrKeyNotFound, err.Error())
+	case codes.InvalidArgument, codes.FailedPrecondition:
+		// both indicate a request the application cannot satisfy as given
+		return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, err.Error())
+	case codes.Unauthenticated:
+		return errorsmod.Wrap(sdkerrors.ErrUnauthorized, err.Error())
+	default:
+		return errorsmod.Wrap(sdkerrors.ErrUnknownRequest, err.Error())
+	}
+}
+
+// checkNegativeHeight returns an error if the given query height is negative.
+func checkNegativeHeight(height int64) error {
+	if height < 0 {
+		return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "cannot query with height < 0; please provide a valid height")
+	}
+
+	return nil
+}
+
+// CreateQueryContext creates a new sdk.Context for a query, taking as args
+// the block height and whether the query needs a proof or not.
+func (app *BaseApp) CreateQueryContext(height int64, prove bool) (sdk.Context, error) {
+	if err := checkNegativeHeight(height); err != nil {
+		return sdk.Context{}, err
+	}
+
+	// use custom query multi-store if provided
+	qms := app.qms
+	if qms == nil {
+		qms = app.cms.(storetypes.MultiStore)
+	}
+
+	lastBlockHeight := qms.LatestVersion()
+	if lastBlockHeight == 0 {
+		return sdk.Context{}, errorsmod.Wrapf(sdkerrors.ErrInvalidHeight, "%s is not ready; please wait for first block", app.Name())
+	}
+
+	if height > lastBlockHeight {
+		return sdk.Context{},
+			errorsmod.Wrap(
+				sdkerrors.ErrInvalidHeight,
+				"cannot query with height in the future; please provide a valid height",
+			)
+	}
+
+	// when a client did not provide a query height, manually inject the latest
+	if height == 0 {
+		height = lastBlockHeight
+	}
+
+	if height <= 1 && prove {
+		return sdk.Context{},
+			errorsmod.Wrap(
+				sdkerrors.ErrInvalidRequest,
+				"cannot query with proof when height <= 1; please provide a valid height",
+			)
+	}
+
+	cacheMS, err := qms.CacheMultiStoreWithVersion(height)
+	if err != nil {
+		return sdk.Context{},
+			errorsmod.Wrapf(
+				sdkerrors.ErrNotFound,
+				"failed to load state at height %d; %s (latest height: %d)", height, err, lastBlockHeight,
+			)
+	}
+
+	// branch the commit multi-store for safety
+	ctx := sdk.NewContext(cacheMS, true, app.logger).
+		WithMinGasPrices(app.minGasPrices).
+		WithGasMeter(storetypes.NewGasMeter(app.queryGasLimit)).
+		WithHeaderInfo(coreheader.Info{
+			ChainID: app.chainID,
+			Height:  height,
+		}).
+		WithBlockHeader(app.checkState.Context().BlockHeader()).
+		WithBlockHeight(height)
+
+	if height != lastBlockHeight {
+		rms, ok := app.cms.(*rootmulti.Store)
+		if ok {
+			cInfo, err := rms.GetCommitInfo(height)
+			if cInfo != nil && err == nil {
+				// use the header timestamp of the queried height, not the latest
+				ctx = ctx.WithHeaderInfo(coreheader.Info{
+					Height: height,
+					Time:   cInfo.Timestamp,
+				})
+			}
+		}
+	}
+
+	return ctx, nil
+}
+
+// GetBlockRetentionHeight returns the height for which all blocks below this height
+// are pruned from CometBFT. Given a commitment height and a non-zero local
+// minRetainBlocks configuration, the retentionHeight is the smallest height that
+// satisfies:
+//
+// - Unbonding (safety threshold) time: The block interval in which validators
+// can be economically punished for misbehavior. Blocks in this interval must be
+// auditable e.g. by the light client.
+//
+// - Logical store snapshot interval: The block interval at which the underlying
+// logical store database is persisted to disk, e.g. every 10000 heights. Blocks
+// since the last IAVL snapshot must be available for replay on application restart.
+//
+// - State sync snapshots: Blocks since the oldest available snapshot must be
+// available for state sync nodes to catch up (oldest because a node may be
+// restoring an old snapshot while a new snapshot was taken).
+//
+// - Local (minRetainBlocks) config: Archive nodes may want to retain more or
+// all blocks, e.g. via a local config option min-retain-blocks. There may also
+// be a need to vary retention for other nodes, e.g. sentry nodes which do not
+// need historical blocks.
+func (app *BaseApp) GetBlockRetentionHeight(commitHeight int64) int64 {
+	// pruning is disabled if minRetainBlocks is zero
+	if app.minRetainBlocks == 0 {
+		return 0
+	}
+
+	minNonZero := func(x, y int64) int64 {
+		switch {
+		case x == 0:
+			return y
+		case y == 0:
+			return x
+		case x < y:
+			return x
+		default:
+			return y
+		}
+	}
+
+	// Define retentionHeight as the minimum value that satisfies all non-zero
+	// constraints. All blocks below (commitHeight-retentionHeight) are pruned
+	// from CometBFT.
+	var retentionHeight int64
+
+	// Define the number of blocks needed to protect against misbehaving validators
+	// which allows light clients to operate safely. Note, we piggyback on the
+	// evidence parameters instead of computing an estimated number of blocks based
+	// on the unbonding period and block commitment time as the two should be
+	// equivalent.
+	cp := app.GetConsensusParams(app.finalizeBlockState.Context())
+	if cp.Evidence != nil && cp.Evidence.MaxAgeNumBlocks > 0 {
+		retentionHeight = commitHeight - cp.Evidence.MaxAgeNumBlocks
+	}
+
+	if app.snapshotManager != nil {
+		snapshotRetentionHeights := app.snapshotManager.GetSnapshotBlockRetentionHeights()
+		if snapshotRetentionHeights > 0 {
+			retentionHeight = minNonZero(retentionHeight, commitHeight-snapshotRetentionHeights)
+		}
+	}
+
+	v := commitHeight - int64(app.minRetainBlocks)
+	retentionHeight = minNonZero(retentionHeight, v)
+
+	if retentionHeight <= 0 {
+		// prune nothing in the case of a non-positive height
+		return 0
+	}
+
+	return retentionHeight
+}
+
+// toVoteInfo converts the new ExtendedVoteInfo to VoteInfo.
+func toVoteInfo(votes []abci.ExtendedVoteInfo) []abci.VoteInfo {
+	legacyVotes := make([]abci.VoteInfo, len(votes))
+	for i, vote := range votes {
+		legacyVotes[i] = abci.VoteInfo{
+			Validator: abci.Validator{
+				Address: vote.Validator.Address,
+				Power:   vote.Validator.Power,
+			},
+			BlockIdFlag: vote.BlockIdFlag,
+		}
+	}
+
+	return legacyVotes
+}
+```
+
+## CheckTx Handler
+
+`CheckTxHandler` allows users to extend the logic of `CheckTx`. `CheckTxHandler` is called by passing context and the transaction bytes received through ABCI. It is required that the handler returns deterministic results given the same transaction bytes.
+
+
+The raw decoded transaction is returned here to avoid decoding it twice.
+
+
+```go
+type CheckTxHandler func(ctx sdk.Context, tx []byte) (Tx, error)
+```
+
+Setting a custom `CheckTxHandler` is optional. It can be done from your app.go file:
+
+```go expandable
+func NewSimApp(
+ logger log.Logger,
+ db corestore.KVStoreWithBatch,
+ traceStore io.Writer,
+ loadLatest bool,
+ appOpts servertypes.AppOptions,
+ baseAppOptions ...func(*baseapp.BaseApp),
+) *SimApp {
+ ...
+ // Create CheckTxHandler
+ checktxHandler := abci.NewCustomCheckTxHandler(...)
+
+app.SetCheckTxHandler(checktxHandler)
+ ...
+}
+```
diff --git a/sdk/next/build/abci/introduction.mdx b/sdk/next/build/abci/introduction.mdx
new file mode 100644
index 000000000..5ab70267f
--- /dev/null
+++ b/sdk/next/build/abci/introduction.mdx
@@ -0,0 +1,56 @@
+---
+title: Introduction
+description: >-
+ ABCI, Application Blockchain Interface is the interface between CometBFT and
+ the application. More information about ABCI can be found here. CometBFT
+ version 0.38 included a new version of ABCI (called ABCI 2.0) which added
+ several new methods.
+---
+
+## What is ABCI?
+
+ABCI, Application Blockchain Interface is the interface between CometBFT and the application. More information about ABCI can be found [here](/cometbft/v0.38/spec/abci/Overview). CometBFT version 0.38 included a new version of ABCI (called ABCI 2.0) which added several new methods.
+
+The 5 methods introduced in ABCI 2.0 are:
+
+* `PrepareProposal`
+* `ProcessProposal`
+* `ExtendVote`
+* `VerifyVoteExtension`
+* `FinalizeBlock`
+
+## The Flow
+
+## PrepareProposal
+
+Based on validator voting power, CometBFT chooses a block proposer and calls `PrepareProposal` on the block proposer's application (Cosmos SDK). The selected block proposer is responsible for collecting outstanding transactions from the mempool, adhering to the application's specifications. The application can enforce custom transaction ordering and incorporate additional transactions, potentially generated from vote extensions in the previous block.
+
+To perform this manipulation on the application side, a custom handler must be implemented. By default, the Cosmos SDK provides `PrepareProposalHandler`, used in conjunction with an application-specific mempool. A custom handler can be written by an application developer. If a noop handler is provided, all transactions are considered valid.
+
+Please note that vote extensions will only be available on the following height in which vote extensions are enabled. More information about vote extensions can be found [here](/sdk/v0.53/build/abci/vote-extensions).
+
+After creating the proposal, the proposer returns it to CometBFT.
+
+PrepareProposal CAN be non-deterministic.
+
+## ProcessProposal
+
+This method allows validators to perform application-specific checks on the block proposal and is called on all validators. This is an important step in the consensus process, as it ensures that the block is valid and meets the requirements of the application. For example, validators could check that the block contains all the required transactions or that the block does not create any invalid state transitions.
+
+The implementation of `ProcessProposal` MUST be deterministic.
+
+## ExtendVote and VerifyVoteExtensions
+
+These methods allow applications to extend the voting process by requiring validators to perform additional actions beyond simply validating blocks.
+
+If vote extensions are enabled, `ExtendVote` will be called on every validator and each one will return its vote extension which is in practice a bunch of bytes. As mentioned above, this data (vote extension) can only be retrieved in the next block height during `PrepareProposal`. Additionally, this data can be arbitrary, but in the provided tutorials, it serves as an oracle or proof of transactions in the mempool. Essentially, vote extensions are processed and injected as transactions. Examples of use-cases for vote extensions include prices for a price oracle or encryption shares for an encrypted transaction mempool. `ExtendVote` CAN be non-deterministic.
+
+`VerifyVoteExtensions` is performed on every validator multiple times in order to verify other validators' vote extensions. This check validates the integrity and authenticity of the vote extensions, preventing malicious or invalid vote extensions from being accepted.
+
+Additionally, applications must keep the vote extension data concise as it can degrade the performance of their chain, see testing results [here](/cometbft/v0.38/docs/qa/CometBFT-QA-38#vote-extensions-testbed).
+
+`VerifyVoteExtensions` MUST be deterministic.
+
+## FinalizeBlock
+
+`FinalizeBlock` is then called and is responsible for updating the state of the blockchain and making the block available to users.
diff --git a/sdk/next/build/abci/prepare-proposal.mdx b/sdk/next/build/abci/prepare-proposal.mdx
new file mode 100644
index 000000000..5d76f39eb
--- /dev/null
+++ b/sdk/next/build/abci/prepare-proposal.mdx
@@ -0,0 +1,667 @@
+---
+title: Prepare Proposal
+---
+
+`PrepareProposal` handles construction of the block, meaning that when a proposer
+is preparing to propose a block, it requests the application to evaluate a
+`RequestPrepareProposal`, which contains a series of transactions from CometBFT's
+mempool. At this point, the application has complete control over the proposal.
+It can modify, delete, and inject transactions from its own app-side mempool into
+the proposal or even ignore all the transactions altogether. What the application
+does with the transactions provided to it by `RequestPrepareProposal` has no
+effect on CometBFT's mempool.
+
+Note that the application defines the semantics of `PrepareProposal`. It
+MAY be non-deterministic and is only executed by the current block proposer.
+
+Reading mempool twice in the previous paragraph may be confusing, so here is a
+breakdown. CometBFT has a mempool that handles gossiping transactions to other
+nodes in the network. The order of these transactions is determined by
+CometBFT's mempool, using FIFO as the sole ordering mechanism. Note that
+CometBFT's priority mempool has been deprecated.
+However, since the application is able to fully inspect
+all transactions, it can provide greater control over transaction ordering.
+Allowing the application to handle ordering enables the application to define how
+it would like the block constructed.
+
+The Cosmos SDK defines the `DefaultProposalHandler` type, which provides applications with
+`PrepareProposal` and `ProcessProposal` handlers. If you decide to implement your
+own `PrepareProposal` handler, you must ensure that the transactions
+selected DO NOT exceed the maximum block gas (if set) and the maximum bytes provided
+by `req.MaxBytes`.
+
+```go expandable
+package baseapp
+
+import (
+
+ "bytes"
+ "context"
+ "fmt"
+ "slices"
+ "github.com/cockroachdb/errors"
+ abci "github.com/cometbft/cometbft/abci/types"
+ cryptoenc "github.com/cometbft/cometbft/crypto/encoding"
+ cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto"
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ cmttypes "github.com/cometbft/cometbft/types"
+ protoio "github.com/cosmos/gogoproto/io"
+ "github.com/cosmos/gogoproto/proto"
+ "cosmossdk.io/core/comet"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/mempool"
+)
+
+type (
+ // ValidatorStore defines the interface contract required for verifying vote
+ // extension signatures. Typically, this will be implemented by the x/staking
+ // module, which has knowledge of the CometBFT public key.
+ ValidatorStore interface {
+ GetPubKeyByConsAddr(context.Context, sdk.ConsAddress) (cmtprotocrypto.PublicKey, error)
+}
+
+ // GasTx defines the contract that a transaction with a gas limit must implement.
+ GasTx interface {
+ GetGas()
+
+uint64
+}
+)
+
+// ValidateVoteExtensions defines a helper function for verifying vote extension
+// signatures that may be passed or manually injected into a block proposal from
+// a proposer in PrepareProposal. It returns an error if any signature is invalid
+// or if unexpected vote extensions and/or signatures are found or less than 2/3
+// power is received.
+// NOTE: From v0.50.5 `currentHeight` and `chainID` arguments are ignored for fixing an issue.
+// They will be removed from the function in v0.51+.
+func ValidateVoteExtensions(
+ ctx sdk.Context,
+ valStore ValidatorStore,
+ _ int64,
+ _ string,
+ extCommit abci.ExtendedCommitInfo,
+)
+
+error {
+ // Get values from context
+ cp := ctx.ConsensusParams()
+ currentHeight := ctx.HeaderInfo().Height
+ chainID := ctx.HeaderInfo().ChainID
+ commitInfo := ctx.CometInfo().GetLastCommit()
+
+ // Check that both extCommit + commit are ordered in accordance with vp/address.
+ if err := validateExtendedCommitAgainstLastCommit(extCommit, commitInfo); err != nil {
+ return err
+}
+
+ // Start checking vote extensions only **after** the vote extensions enable
+ // height, because when `currentHeight == VoteExtensionsEnableHeight`
+ // PrepareProposal doesn't get any vote extensions in its request.
+ extsEnabled := cp.Abci != nil && currentHeight > cp.Abci.VoteExtensionsEnableHeight && cp.Abci.VoteExtensionsEnableHeight != 0
+ marshalDelimitedFn := func(msg proto.Message) ([]byte, error) {
+ var buf bytes.Buffer
+ if err := protoio.NewDelimitedWriter(&buf).WriteMsg(msg); err != nil {
+ return nil, err
+}
+
+return buf.Bytes(), nil
+}
+
+var (
+ // Total voting power of all vote extensions.
+ totalVP int64
+ // Total voting power of all validators that submitted valid vote extensions.
+ sumVP int64
+ )
+ for _, vote := range extCommit.Votes {
+ totalVP += vote.Validator.Power
+
+ // Only check + include power if the vote is a commit vote. There must be super-majority, otherwise the
+ // previous block (the block the vote is for)
+
+could not have been committed.
+ if vote.BlockIdFlag != cmtproto.BlockIDFlagCommit {
+ continue
+}
+ if !extsEnabled {
+ if len(vote.VoteExtension) > 0 {
+ return fmt.Errorf("vote extensions disabled; received non-empty vote extension at height %d", currentHeight)
+}
+ if len(vote.ExtensionSignature) > 0 {
+ return fmt.Errorf("vote extensions disabled; received non-empty vote extension signature at height %d", currentHeight)
+}
+
+continue
+}
+ if len(vote.ExtensionSignature) == 0 {
+ return fmt.Errorf("vote extensions enabled; received empty vote extension signature at height %d", currentHeight)
+}
+ valConsAddr := sdk.ConsAddress(vote.Validator.Address)
+
+pubKeyProto, err := valStore.GetPubKeyByConsAddr(ctx, valConsAddr)
+ if err != nil {
+ return fmt.Errorf("failed to get validator %X public key: %w", valConsAddr, err)
+}
+
+cmtPubKey, err := cryptoenc.PubKeyFromProto(pubKeyProto)
+ if err != nil {
+ return fmt.Errorf("failed to convert validator %X public key: %w", valConsAddr, err)
+}
+ cve := cmtproto.CanonicalVoteExtension{
+ Extension: vote.VoteExtension,
+ Height: currentHeight - 1, // the vote extension was signed in the previous height
+ Round: int64(extCommit.Round),
+ ChainId: chainID,
+}
+
+extSignBytes, err := marshalDelimitedFn(&cve)
+ if err != nil {
+ return fmt.Errorf("failed to encode CanonicalVoteExtension: %w", err)
+}
+ if !cmtPubKey.VerifySignature(extSignBytes, vote.ExtensionSignature) {
+ return fmt.Errorf("failed to verify validator %X vote extension signature", valConsAddr)
+}
+
+sumVP += vote.Validator.Power
+}
+
+ // This check is probably unnecessary, but better safe than sorry.
+ if totalVP <= 0 {
+ return fmt.Errorf("total voting power must be positive, got: %d", totalVP)
+}
+
+ // If the sum of the voting power has not reached (2/3 + 1)
+
+we need to error.
+ if requiredVP := ((totalVP * 2) / 3) + 1; sumVP < requiredVP {
+ return fmt.Errorf(
+ "insufficient cumulative voting power received to verify vote extensions; got: %d, expected: >=%d",
+ sumVP, requiredVP,
+ )
+}
+
+return nil
+}
+
+// validateExtendedCommitAgainstLastCommit validates an ExtendedCommitInfo against a LastCommit. Specifically,
+// it checks that the ExtendedCommit + LastCommit (for the same height), are consistent with each other + that
+// they are ordered correctly (by voting power)
+
+in accordance with
+// [comet](https://github.com/cometbft/cometbft/blob/4ce0277b35f31985bbf2c25d3806a184a4510010/types/validator_set.go#L784).
+func validateExtendedCommitAgainstLastCommit(ec abci.ExtendedCommitInfo, lc comet.CommitInfo)
+
+error {
+ // check that the rounds are the same
+ if ec.Round != lc.Round() {
+ return fmt.Errorf("extended commit round %d does not match last commit round %d", ec.Round, lc.Round())
+}
+
+ // check that the # of votes are the same
+ if len(ec.Votes) != lc.Votes().Len() {
+ return fmt.Errorf("extended commit votes length %d does not match last commit votes length %d", len(ec.Votes), lc.Votes().Len())
+}
+
+ // check sort order of extended commit votes
+ if !slices.IsSortedFunc(ec.Votes, func(vote1, vote2 abci.ExtendedVoteInfo)
+
+int {
+ if vote1.Validator.Power == vote2.Validator.Power {
+ return bytes.Compare(vote1.Validator.Address, vote2.Validator.Address) // addresses sorted in ascending order (used to break vp conflicts)
+}
+
+return -int(vote1.Validator.Power - vote2.Validator.Power) // vp sorted in descending order
+}) {
+ return fmt.Errorf("extended commit votes are not sorted by voting power")
+}
+ addressCache := make(map[string]struct{
+}, len(ec.Votes))
+ // check the consistency between LastCommit and ExtendedCommit
+ for i, vote := range ec.Votes {
+ // cache addresses to check for duplicates
+ if _, ok := addressCache[string(vote.Validator.Address)]; ok {
+ return fmt.Errorf("extended commit vote address %X is duplicated", vote.Validator.Address)
+}
+
+addressCache[string(vote.Validator.Address)] = struct{
+}{
+}
+ if !bytes.Equal(vote.Validator.Address, lc.Votes().Get(i).Validator().Address()) {
+ return fmt.Errorf("extended commit vote address %X does not match last commit vote address %X", vote.Validator.Address, lc.Votes().Get(i).Validator().Address())
+}
+ if vote.Validator.Power != lc.Votes().Get(i).Validator().Power() {
+ return fmt.Errorf("extended commit vote power %d does not match last commit vote power %d", vote.Validator.Power, lc.Votes().Get(i).Validator().Power())
+}
+
+}
+
+return nil
+}
+
+type (
+ // ProposalTxVerifier defines the interface that is implemented by BaseApp,
+ // that any custom ABCI PrepareProposal and ProcessProposal handler can use
+ // to verify a transaction.
+ ProposalTxVerifier interface {
+ PrepareProposalVerifyTx(tx sdk.Tx) ([]byte, error)
+
+ProcessProposalVerifyTx(txBz []byte) (sdk.Tx, error)
+
+TxDecode(txBz []byte) (sdk.Tx, error)
+
+TxEncode(tx sdk.Tx) ([]byte, error)
+}
+
+ // DefaultProposalHandler defines the default ABCI PrepareProposal and
+ // ProcessProposal handlers.
+ DefaultProposalHandler struct {
+ mempool mempool.Mempool
+ txVerifier ProposalTxVerifier
+ txSelector TxSelector
+ signerExtAdapter mempool.SignerExtractionAdapter
+}
+)
+
+func NewDefaultProposalHandler(mp mempool.Mempool, txVerifier ProposalTxVerifier) *DefaultProposalHandler {
+ return &DefaultProposalHandler{
+ mempool: mp,
+ txVerifier: txVerifier,
+ txSelector: NewDefaultTxSelector(),
+ signerExtAdapter: mempool.NewDefaultSignerExtractionAdapter(),
+}
+}
+
+// SetTxSelector sets the TxSelector function on the DefaultProposalHandler.
+func (h *DefaultProposalHandler)
+
+SetTxSelector(ts TxSelector) {
+ h.txSelector = ts
+}
+
+// PrepareProposalHandler returns the default implementation for processing an
+// ABCI proposal. The application's mempool is enumerated and all valid
+// transactions are added to the proposal. Transactions are valid if they:
+//
+// 1)
+
+Successfully encode to bytes.
+// 2)
+
+Are valid (i.e. pass runTx, AnteHandler only).
+//
+// Enumeration is halted once RequestPrepareProposal.MaxBytes of transactions is
+// reached or the mempool is exhausted.
+//
+// Note:
+//
+// - Step (2)
+
+is identical to the validation step performed in
+// DefaultProcessProposal. It is very important that the same validation logic
+// is used in both steps, and applications must ensure that this is the case in
+// non-default handlers.
+//
+// - If no mempool is set or if the mempool is a no-op mempool, the transactions
+// requested from CometBFT will simply be returned, which, by default, are in
+// FIFO order.
+func (h *DefaultProposalHandler)
+
+PrepareProposalHandler()
+
+sdk.PrepareProposalHandler {
+ return func(ctx sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) {
+ var maxBlockGas uint64
+ if b := ctx.ConsensusParams().Block; b != nil {
+ maxBlockGas = uint64(b.MaxGas)
+}
+
+defer h.txSelector.Clear()
+
+ // If the mempool is nil or NoOp we simply return the transactions
+ // requested from CometBFT, which, by default, should be in FIFO order.
+ //
+ // Note, we still need to ensure the transactions returned respect req.MaxTxBytes.
+ _, isNoOp := h.mempool.(mempool.NoOpMempool)
+ if h.mempool == nil || isNoOp {
+ for _, txBz := range req.Txs {
+ tx, err := h.txVerifier.TxDecode(txBz)
+ if err != nil {
+ return nil, err
+}
+ stop := h.txSelector.SelectTxForProposal(ctx, uint64(req.MaxTxBytes), maxBlockGas, tx, txBz)
+ if stop {
+ break
+}
+
+}
+
+return &abci.ResponsePrepareProposal{
+ Txs: h.txSelector.SelectedTxs(ctx)
+}, nil
+}
+ selectedTxsSignersSeqs := make(map[string]uint64)
+
+var (
+ resError error
+ selectedTxsNums int
+ invalidTxs []sdk.Tx // invalid txs to be removed out of the loop to avoid deadlock
+ )
+
+mempool.SelectBy(ctx, h.mempool, req.Txs, func(memTx sdk.Tx)
+
+bool {
+ unorderedTx, ok := memTx.(sdk.TxWithUnordered)
+ isUnordered := ok && unorderedTx.GetUnordered()
+ txSignersSeqs := make(map[string]uint64)
+
+ // if the tx is unordered, we don't need to check the sequence, we just add it
+ if !isUnordered {
+ signerData, err := h.signerExtAdapter.GetSigners(memTx)
+ if err != nil {
+ // propagate the error to the caller
+ resError = err
+ return false
+}
+
+ // If the signers aren't in selectedTxsSignersSeqs then we haven't seen them before
+ // so we add them and continue given that we don't need to check the sequence.
+ shouldAdd := true
+ for _, signer := range signerData {
+ seq, ok := selectedTxsSignersSeqs[signer.Signer.String()]
+ if !ok {
+ txSignersSeqs[signer.Signer.String()] = signer.Sequence
+ continue
+}
+
+ // If we have seen this signer before in this block, we must make
+ // sure that the current sequence is seq+1; otherwise it is invalid
+ // and we skip it.
+ if seq+1 != signer.Sequence {
+ shouldAdd = false
+ break
+}
+
+txSignersSeqs[signer.Signer.String()] = signer.Sequence
+}
+ if !shouldAdd {
+ return true
+}
+
+}
+
+ // NOTE: Since transaction verification was already executed in CheckTx,
+ // which calls mempool.Insert, in theory everything in the pool should be
+ // valid. But some mempool implementations may insert invalid txs, so we
+ // check again.
+ txBz, err := h.txVerifier.PrepareProposalVerifyTx(memTx)
+ if err != nil {
+ invalidTxs = append(invalidTxs, memTx)
+}
+
+else {
+ stop := h.txSelector.SelectTxForProposal(ctx, uint64(req.MaxTxBytes), maxBlockGas, memTx, txBz)
+ if stop {
+ return false
+}
+ txsLen := len(h.txSelector.SelectedTxs(ctx))
+ // If the tx is unordered, we don't need to update the sender sequence.
+ if !isUnordered {
+ for sender, seq := range txSignersSeqs {
+ // If txsLen != selectedTxsNums is true, it means that we've
+ // added a new tx to the selected txs, so we need to update
+ // the sequence of the sender.
+ if txsLen != selectedTxsNums {
+ selectedTxsSignersSeqs[sender] = seq
+}
+
+else if _, ok := selectedTxsSignersSeqs[sender]; !ok {
+ // The transaction hasn't been added but it passed the
+ // verification, so we know that the sequence is correct.
+ // So we set this sender's sequence to seq-1, in order
+ // to avoid unnecessary calls to PrepareProposalVerifyTx.
+ selectedTxsSignersSeqs[sender] = seq - 1
+}
+
+}
+
+}
+
+selectedTxsNums = txsLen
+}
+
+return true
+})
+ if resError != nil {
+ return nil, resError
+}
+ for _, tx := range invalidTxs {
+ err := h.mempool.Remove(tx)
+ if err != nil && !errors.Is(err, mempool.ErrTxNotFound) {
+ return nil, err
+}
+
+}
+
+return &abci.ResponsePrepareProposal{
+ Txs: h.txSelector.SelectedTxs(ctx)
+}, nil
+}
+}
+
+// ProcessProposalHandler returns the default implementation for processing an
+// ABCI proposal. Every transaction in the proposal must pass 2 conditions:
+//
+// 1. The transaction bytes must decode to a valid transaction.
+// 2. The transaction must be valid (i.e. pass runTx, AnteHandler only)
+//
+// If any transaction fails to pass either condition, the proposal is rejected.
+// Note that step (2)
+
+is identical to the validation step performed in
+// DefaultPrepareProposal. It is very important that the same validation logic
+// is used in both steps, and applications must ensure that this is the case in
+// non-default handlers.
+func (h *DefaultProposalHandler)
+
+ProcessProposalHandler()
+
+sdk.ProcessProposalHandler {
+ // If the mempool is nil or NoOp we simply return ACCEPT,
+ // because PrepareProposal may have included txs that could fail verification.
+ _, isNoOp := h.mempool.(mempool.NoOpMempool)
+ if h.mempool == nil || isNoOp {
+ return NoOpProcessProposal()
+}
+
+return func(ctx sdk.Context, req *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) {
+ var totalTxGas uint64
+
+ var maxBlockGas int64
+ if b := ctx.ConsensusParams().Block; b != nil {
+ maxBlockGas = b.MaxGas
+}
+ for _, txBytes := range req.Txs {
+ tx, err := h.txVerifier.ProcessProposalVerifyTx(txBytes)
+ if err != nil {
+ return &abci.ResponseProcessProposal{
+ Status: abci.ResponseProcessProposal_REJECT
+}, nil
+}
+ if maxBlockGas > 0 {
+ gasTx, ok := tx.(GasTx)
+ if ok {
+ totalTxGas += gasTx.GetGas()
+}
+ if totalTxGas > uint64(maxBlockGas) {
+ return &abci.ResponseProcessProposal{
+ Status: abci.ResponseProcessProposal_REJECT
+}, nil
+}
+
+}
+
+}
+
+return &abci.ResponseProcessProposal{
+ Status: abci.ResponseProcessProposal_ACCEPT
+}, nil
+}
+}
+
+// NoOpPrepareProposal defines a no-op PrepareProposal handler. It will always
+// return the transactions sent by the client's request.
+func NoOpPrepareProposal()
+
+sdk.PrepareProposalHandler {
+ return func(_ sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) {
+ return &abci.ResponsePrepareProposal{
+ Txs: req.Txs
+}, nil
+}
+}
+
+// NoOpProcessProposal defines a no-op ProcessProposal Handler. It will always
+// return ACCEPT.
+func NoOpProcessProposal()
+
+sdk.ProcessProposalHandler {
+ return func(_ sdk.Context, _ *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) {
+ return &abci.ResponseProcessProposal{
+ Status: abci.ResponseProcessProposal_ACCEPT
+}, nil
+}
+}
+
+// NoOpExtendVote defines a no-op ExtendVote handler. It will always return an
+// empty byte slice as the vote extension.
+func NoOpExtendVote()
+
+sdk.ExtendVoteHandler {
+ return func(_ sdk.Context, _ *abci.RequestExtendVote) (*abci.ResponseExtendVote, error) {
+ return &abci.ResponseExtendVote{
+ VoteExtension: []byte{
+}}, nil
+}
+}
+
+// NoOpVerifyVoteExtensionHandler defines a no-op VerifyVoteExtension handler. It
+// will always return an ACCEPT status with no error.
+func NoOpVerifyVoteExtensionHandler()
+
+sdk.VerifyVoteExtensionHandler {
+ return func(_ sdk.Context, _ *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error) {
+ return &abci.ResponseVerifyVoteExtension{
+ Status: abci.ResponseVerifyVoteExtension_ACCEPT
+}, nil
+}
+}
+
+// TxSelector defines a helper type that assists in selecting transactions during
+// mempool transaction selection in PrepareProposal. It keeps track of the total
+// number of bytes and total gas of the selected transactions. It also keeps
+// track of the selected transactions themselves.
+type TxSelector interface {
+ // SelectedTxs should return a copy of the selected transactions.
+ SelectedTxs(ctx context.Context) [][]byte
+
+ // Clear should clear the TxSelector, nulling out all relevant fields.
+ Clear()
+
+ // SelectTxForProposal should attempt to select a transaction for inclusion in
+ // a proposal based on inclusion criteria defined by the TxSelector. It must
+ // return if the caller should halt the transaction selection loop
+ // (typically over a mempool)
+
+or otherwise.
+ SelectTxForProposal(ctx context.Context, maxTxBytes, maxBlockGas uint64, memTx sdk.Tx, txBz []byte)
+
+bool
+}
+
+type defaultTxSelector struct {
+ totalTxBytes uint64
+ totalTxGas uint64
+ selectedTxs [][]byte
+}
+
+func NewDefaultTxSelector()
+
+TxSelector {
+ return &defaultTxSelector{
+}
+}
+
+func (ts *defaultTxSelector)
+
+SelectedTxs(_ context.Context) [][]byte {
+ txs := make([][]byte, len(ts.selectedTxs))
+
+copy(txs, ts.selectedTxs)
+
+return txs
+}
+
+func (ts *defaultTxSelector)
+
+Clear() {
+ ts.totalTxBytes = 0
+ ts.totalTxGas = 0
+ ts.selectedTxs = nil
+}
+
+func (ts *defaultTxSelector)
+
+SelectTxForProposal(_ context.Context, maxTxBytes, maxBlockGas uint64, memTx sdk.Tx, txBz []byte)
+
+bool {
+ txSize := uint64(cmttypes.ComputeProtoSizeForTxs([]cmttypes.Tx{
+ txBz
+}))
+
+var txGasLimit uint64
+ if memTx != nil {
+ if gasTx, ok := memTx.(GasTx); ok {
+ txGasLimit = gasTx.GetGas()
+}
+
+}
+
+ // only add the transaction to the proposal if we have enough capacity
+ if (txSize + ts.totalTxBytes) <= maxTxBytes {
+ // If there is a max block gas limit, add the tx only if the limit has
+ // not been met.
+ if maxBlockGas > 0 {
+ if (txGasLimit + ts.totalTxGas) <= maxBlockGas {
+ ts.totalTxGas += txGasLimit
+ ts.totalTxBytes += txSize
+ ts.selectedTxs = append(ts.selectedTxs, txBz)
+}
+
+}
+
+else {
+ ts.totalTxBytes += txSize
+ ts.selectedTxs = append(ts.selectedTxs, txBz)
+}
+
+}
+
+ // check if we've reached capacity; if so, we cannot select any more transactions
+ return ts.totalTxBytes >= maxTxBytes || (maxBlockGas > 0 && (ts.totalTxGas >= maxBlockGas))
+}
+```
+
+This default implementation can be overridden by the application developer in
+favor of a custom implementation in [`app_di.go`](/sdk/next/build/building-apps/app-go-di):
+
+```go
+prepareOpt := func(app *baseapp.BaseApp) {
+ abciPropHandler := baseapp.NewDefaultProposalHandler(mempool, app)
+
+app.SetPrepareProposal(abciPropHandler.PrepareProposalHandler())
+}
+
+baseAppOptions = append(baseAppOptions, prepareOpt)
+```
diff --git a/sdk/next/build/abci/process-proposal.mdx b/sdk/next/build/abci/process-proposal.mdx
new file mode 100644
index 000000000..80131fc14
--- /dev/null
+++ b/sdk/next/build/abci/process-proposal.mdx
@@ -0,0 +1,654 @@
+---
+title: Process Proposal
+---
+
+`ProcessProposal` handles the validation of a proposal from `PrepareProposal`,
+which also includes a block header. After a block has been proposed,
+the other validators have the right to accept or reject that block. In the
+default implementation of `ProcessProposal`, each validator runs basic validity
+checks on every transaction.
+
+Note that `ProcessProposal` MUST be deterministic. Non-deterministic behaviors will cause apphash mismatches.
+This means that if `ProcessProposal` panics or fails, all honest validator
+processes should reject the proposal (i.e., prevote nil). If so, CometBFT will start a new round with a new block proposal, and the same cycle will happen with `PrepareProposal`
+and `ProcessProposal` for the new proposal.
+
+Here is the default implementation:
+
+```go expandable
+package baseapp
+
+import (
+
+ "bytes"
+ "context"
+ "fmt"
+ "slices"
+ "github.com/cockroachdb/errors"
+ abci "github.com/cometbft/cometbft/abci/types"
+ cryptoenc "github.com/cometbft/cometbft/crypto/encoding"
+ cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto"
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ cmttypes "github.com/cometbft/cometbft/types"
+ protoio "github.com/cosmos/gogoproto/io"
+ "github.com/cosmos/gogoproto/proto"
+ "cosmossdk.io/core/comet"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/mempool"
+)
+
+type (
+ // ValidatorStore defines the interface contract required for verifying vote
+ // extension signatures. Typically, this will be implemented by the x/staking
+ // module, which has knowledge of the CometBFT public key.
+ ValidatorStore interface {
+ GetPubKeyByConsAddr(context.Context, sdk.ConsAddress) (cmtprotocrypto.PublicKey, error)
+}
+
+ // GasTx defines the contract that a transaction with a gas limit must implement.
+ GasTx interface {
+ GetGas()
+
+uint64
+}
+)
+
+// ValidateVoteExtensions defines a helper function for verifying vote extension
+// signatures that may be passed or manually injected into a block proposal from
+// a proposer in PrepareProposal. It returns an error if any signature is invalid
+// or if unexpected vote extensions and/or signatures are found or less than 2/3
+// power is received.
+// NOTE: From v0.50.5 `currentHeight` and `chainID` arguments are ignored for fixing an issue.
+// They will be removed from the function in v0.51+.
+func ValidateVoteExtensions(
+ ctx sdk.Context,
+ valStore ValidatorStore,
+ _ int64,
+ _ string,
+ extCommit abci.ExtendedCommitInfo,
+)
+
+error {
+ // Get values from context
+ cp := ctx.ConsensusParams()
+ currentHeight := ctx.HeaderInfo().Height
+ chainID := ctx.HeaderInfo().ChainID
+ commitInfo := ctx.CometInfo().GetLastCommit()
+
+ // Check that both extCommit + commit are ordered in accordance with vp/address.
+ if err := validateExtendedCommitAgainstLastCommit(extCommit, commitInfo); err != nil {
+ return err
+}
+
+ // Start checking vote extensions only **after** the vote extensions enable
+ // height, because when `currentHeight == VoteExtensionsEnableHeight`
+ // PrepareProposal doesn't get any vote extensions in its request.
+ extsEnabled := cp.Abci != nil && currentHeight > cp.Abci.VoteExtensionsEnableHeight && cp.Abci.VoteExtensionsEnableHeight != 0
+ marshalDelimitedFn := func(msg proto.Message) ([]byte, error) {
+ var buf bytes.Buffer
+ if err := protoio.NewDelimitedWriter(&buf).WriteMsg(msg); err != nil {
+ return nil, err
+}
+
+return buf.Bytes(), nil
+}
+
+var (
+ // Total voting power of all vote extensions.
+ totalVP int64
+ // Total voting power of all validators that submitted valid vote extensions.
+ sumVP int64
+ )
+ for _, vote := range extCommit.Votes {
+ totalVP += vote.Validator.Power
+
+ // Only check + include power if the vote is a commit vote. There must be super-majority, otherwise the
+ // previous block (the block the vote is for)
+
+could not have been committed.
+ if vote.BlockIdFlag != cmtproto.BlockIDFlagCommit {
+ continue
+}
+ if !extsEnabled {
+ if len(vote.VoteExtension) > 0 {
+ return fmt.Errorf("vote extensions disabled; received non-empty vote extension at height %d", currentHeight)
+}
+ if len(vote.ExtensionSignature) > 0 {
+ return fmt.Errorf("vote extensions disabled; received non-empty vote extension signature at height %d", currentHeight)
+}
+
+continue
+}
+ if len(vote.ExtensionSignature) == 0 {
+ return fmt.Errorf("vote extensions enabled; received empty vote extension signature at height %d", currentHeight)
+}
+ valConsAddr := sdk.ConsAddress(vote.Validator.Address)
+
+pubKeyProto, err := valStore.GetPubKeyByConsAddr(ctx, valConsAddr)
+ if err != nil {
+ return fmt.Errorf("failed to get validator %X public key: %w", valConsAddr, err)
+}
+
+cmtPubKey, err := cryptoenc.PubKeyFromProto(pubKeyProto)
+ if err != nil {
+ return fmt.Errorf("failed to convert validator %X public key: %w", valConsAddr, err)
+}
+ cve := cmtproto.CanonicalVoteExtension{
+ Extension: vote.VoteExtension,
+ Height: currentHeight - 1, // the vote extension was signed in the previous height
+ Round: int64(extCommit.Round),
+ ChainId: chainID,
+}
+
+extSignBytes, err := marshalDelimitedFn(&cve)
+ if err != nil {
+ return fmt.Errorf("failed to encode CanonicalVoteExtension: %w", err)
+}
+ if !cmtPubKey.VerifySignature(extSignBytes, vote.ExtensionSignature) {
+ return fmt.Errorf("failed to verify validator %X vote extension signature", valConsAddr)
+}
+
+sumVP += vote.Validator.Power
+}
+
+ // This check is probably unnecessary, but better safe than sorry.
+ if totalVP <= 0 {
+ return fmt.Errorf("total voting power must be positive, got: %d", totalVP)
+}
+
+ // If the sum of the voting power has not reached (2/3 + 1)
+
+we need to error.
+ if requiredVP := ((totalVP * 2) / 3) + 1; sumVP < requiredVP {
+ return fmt.Errorf(
+ "insufficient cumulative voting power received to verify vote extensions; got: %d, expected: >=%d",
+ sumVP, requiredVP,
+ )
+}
+
+return nil
+}
+
+// validateExtendedCommitAgainstLastCommit validates an ExtendedCommitInfo against a LastCommit. Specifically,
+// it checks that the ExtendedCommit + LastCommit (for the same height), are consistent with each other + that
+// they are ordered correctly (by voting power)
+
+in accordance with
+// [comet](https://github.com/cometbft/cometbft/blob/4ce0277b35f31985bbf2c25d3806a184a4510010/types/validator_set.go#L784).
+func validateExtendedCommitAgainstLastCommit(ec abci.ExtendedCommitInfo, lc comet.CommitInfo)
+
+error {
+ // check that the rounds are the same
+ if ec.Round != lc.Round() {
+ return fmt.Errorf("extended commit round %d does not match last commit round %d", ec.Round, lc.Round())
+}
+
+ // check that the # of votes are the same
+ if len(ec.Votes) != lc.Votes().Len() {
+ return fmt.Errorf("extended commit votes length %d does not match last commit votes length %d", len(ec.Votes), lc.Votes().Len())
+}
+
+ // check sort order of extended commit votes
+ if !slices.IsSortedFunc(ec.Votes, func(vote1, vote2 abci.ExtendedVoteInfo)
+
+int {
+ if vote1.Validator.Power == vote2.Validator.Power {
+ return bytes.Compare(vote1.Validator.Address, vote2.Validator.Address) // addresses sorted in ascending order (used to break vp conflicts)
+}
+
+return -int(vote1.Validator.Power - vote2.Validator.Power) // vp sorted in descending order
+}) {
+ return fmt.Errorf("extended commit votes are not sorted by voting power")
+}
+ addressCache := make(map[string]struct{
+}, len(ec.Votes))
+ // check the consistency between LastCommit and ExtendedCommit
+ for i, vote := range ec.Votes {
+ // cache addresses to check for duplicates
+ if _, ok := addressCache[string(vote.Validator.Address)]; ok {
+ return fmt.Errorf("extended commit vote address %X is duplicated", vote.Validator.Address)
+}
+
+addressCache[string(vote.Validator.Address)] = struct{
+}{
+}
+ if !bytes.Equal(vote.Validator.Address, lc.Votes().Get(i).Validator().Address()) {
+ return fmt.Errorf("extended commit vote address %X does not match last commit vote address %X", vote.Validator.Address, lc.Votes().Get(i).Validator().Address())
+}
+ if vote.Validator.Power != lc.Votes().Get(i).Validator().Power() {
+ return fmt.Errorf("extended commit vote power %d does not match last commit vote power %d", vote.Validator.Power, lc.Votes().Get(i).Validator().Power())
+}
+
+}
+
+return nil
+}
+
+type (
+ // ProposalTxVerifier defines the interface that is implemented by BaseApp,
+ // that any custom ABCI PrepareProposal and ProcessProposal handler can use
+ // to verify a transaction.
+ ProposalTxVerifier interface {
+ PrepareProposalVerifyTx(tx sdk.Tx) ([]byte, error)
+
+ProcessProposalVerifyTx(txBz []byte) (sdk.Tx, error)
+
+TxDecode(txBz []byte) (sdk.Tx, error)
+
+TxEncode(tx sdk.Tx) ([]byte, error)
+}
+
+ // DefaultProposalHandler defines the default ABCI PrepareProposal and
+ // ProcessProposal handlers.
+ DefaultProposalHandler struct {
+ mempool mempool.Mempool
+ txVerifier ProposalTxVerifier
+ txSelector TxSelector
+ signerExtAdapter mempool.SignerExtractionAdapter
+}
+)
+
+func NewDefaultProposalHandler(mp mempool.Mempool, txVerifier ProposalTxVerifier) *DefaultProposalHandler {
+ return &DefaultProposalHandler{
+ mempool: mp,
+ txVerifier: txVerifier,
+ txSelector: NewDefaultTxSelector(),
+ signerExtAdapter: mempool.NewDefaultSignerExtractionAdapter(),
+}
+}
+
+// SetTxSelector sets the TxSelector function on the DefaultProposalHandler.
+func (h *DefaultProposalHandler)
+
+SetTxSelector(ts TxSelector) {
+ h.txSelector = ts
+}
+
+// PrepareProposalHandler returns the default implementation for processing an
+// ABCI proposal. The application's mempool is enumerated and all valid
+// transactions are added to the proposal. Transactions are valid if they:
+//
+// 1)
+
+Successfully encode to bytes.
+// 2)
+
+Are valid (i.e. pass runTx, AnteHandler only).
+//
+// Enumeration is halted once RequestPrepareProposal.MaxBytes of transactions is
+// reached or the mempool is exhausted.
+//
+// Note:
+//
+// - Step (2)
+
+is identical to the validation step performed in
+// DefaultProcessProposal. It is very important that the same validation logic
+// is used in both steps, and applications must ensure that this is the case in
+// non-default handlers.
+//
+// - If no mempool is set or if the mempool is a no-op mempool, the transactions
+// requested from CometBFT will simply be returned, which, by default, are in
+// FIFO order.
+func (h *DefaultProposalHandler) PrepareProposalHandler() sdk.PrepareProposalHandler {
+ return func(ctx sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) {
+ var maxBlockGas uint64
+ if b := ctx.ConsensusParams().Block; b != nil {
+ maxBlockGas = uint64(b.MaxGas)
+}
+
+defer h.txSelector.Clear()
+
+ // If the mempool is nil or NoOp we simply return the transactions
+ // requested from CometBFT, which, by default, should be in FIFO order.
+ //
+ // Note, we still need to ensure the transactions returned respect req.MaxTxBytes.
+ _, isNoOp := h.mempool.(mempool.NoOpMempool)
+ if h.mempool == nil || isNoOp {
+ for _, txBz := range req.Txs {
+ tx, err := h.txVerifier.TxDecode(txBz)
+ if err != nil {
+ return nil, err
+}
+ stop := h.txSelector.SelectTxForProposal(ctx, uint64(req.MaxTxBytes), maxBlockGas, tx, txBz)
+ if stop {
+ break
+}
+
+}
+
+return &abci.ResponsePrepareProposal{
+ Txs: h.txSelector.SelectedTxs(ctx)
+}, nil
+}
+ selectedTxsSignersSeqs := make(map[string]uint64)
+
+var (
+ resError error
+ selectedTxsNums int
+ invalidTxs []sdk.Tx // invalid txs to be removed out of the loop to avoid deadlock
+ )
+
+mempool.SelectBy(ctx, h.mempool, req.Txs, func(memTx sdk.Tx) bool {
+ unorderedTx, ok := memTx.(sdk.TxWithUnordered)
+ isUnordered := ok && unorderedTx.GetUnordered()
+ txSignersSeqs := make(map[string]uint64)
+
+ // if the tx is unordered, we don't need to check the sequence, we just add it
+ if !isUnordered {
+ signerData, err := h.signerExtAdapter.GetSigners(memTx)
+ if err != nil {
+ // propagate the error to the caller
+ resError = err
+ return false
+}
+
+ // If the signers aren't in selectedTxsSignersSeqs then we haven't seen them before
+ // so we add them and continue given that we don't need to check the sequence.
+ shouldAdd := true
+ for _, signer := range signerData {
+ seq, ok := selectedTxsSignersSeqs[signer.Signer.String()]
+ if !ok {
+ txSignersSeqs[signer.Signer.String()] = signer.Sequence
+ continue
+}
+
+ // If we have seen this signer before in this block, we must make
+ // sure that the current sequence is seq+1; otherwise it is invalid
+ // and we skip it.
+ if seq+1 != signer.Sequence {
+ shouldAdd = false
+ break
+}
+
+txSignersSeqs[signer.Signer.String()] = signer.Sequence
+}
+ if !shouldAdd {
+ return true
+}
+
+}
+
+ // NOTE: Since transaction verification was already executed in CheckTx,
+ // which calls mempool.Insert, in theory everything in the pool should be
+ // valid. But some mempool implementations may insert invalid txs, so we
+ // check again.
+ txBz, err := h.txVerifier.PrepareProposalVerifyTx(memTx)
+ if err != nil {
+ invalidTxs = append(invalidTxs, memTx)
+} else {
+ stop := h.txSelector.SelectTxForProposal(ctx, uint64(req.MaxTxBytes), maxBlockGas, memTx, txBz)
+ if stop {
+ return false
+}
+ txsLen := len(h.txSelector.SelectedTxs(ctx))
+ // If the tx is unordered, we don't need to update the sender sequence.
+ if !isUnordered {
+ for sender, seq := range txSignersSeqs {
+ // If txsLen != selectedTxsNums is true, it means that we've
+ // added a new tx to the selected txs, so we need to update
+ // the sequence of the sender.
+ if txsLen != selectedTxsNums {
+ selectedTxsSignersSeqs[sender] = seq
+} else if _, ok := selectedTxsSignersSeqs[sender]; !ok {
+ // The transaction hasn't been added but it passed the
+ // verification, so we know that the sequence is correct.
+ // So we set this sender's sequence to seq-1, in order
+ // to avoid unnecessary calls to PrepareProposalVerifyTx.
+ selectedTxsSignersSeqs[sender] = seq - 1
+}
+
+}
+
+}
+
+selectedTxsNums = txsLen
+}
+
+return true
+})
+ if resError != nil {
+ return nil, resError
+}
+ for _, tx := range invalidTxs {
+ err := h.mempool.Remove(tx)
+ if err != nil && !errors.Is(err, mempool.ErrTxNotFound) {
+ return nil, err
+}
+
+}
+
+return &abci.ResponsePrepareProposal{
+ Txs: h.txSelector.SelectedTxs(ctx)
+}, nil
+}
+}
+
+// ProcessProposalHandler returns the default implementation for processing an
+// ABCI proposal. Every transaction in the proposal must pass 2 conditions:
+//
+// 1. The transaction bytes must decode to a valid transaction.
+// 2. The transaction must be valid (i.e. pass runTx, AnteHandler only)
+//
+// If any transaction fails to pass either condition, the proposal is rejected.
+// Note that step (2) is identical to the validation step performed in
+// DefaultPrepareProposal. It is very important that the same validation logic
+// is used in both steps, and applications must ensure that this is the case in
+// non-default handlers.
+func (h *DefaultProposalHandler) ProcessProposalHandler() sdk.ProcessProposalHandler {
+ // If the mempool is nil or NoOp we simply return ACCEPT,
+ // because PrepareProposal may have included txs that could fail verification.
+ _, isNoOp := h.mempool.(mempool.NoOpMempool)
+ if h.mempool == nil || isNoOp {
+ return NoOpProcessProposal()
+}
+
+return func(ctx sdk.Context, req *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) {
+ var totalTxGas uint64
+
+ var maxBlockGas int64
+ if b := ctx.ConsensusParams().Block; b != nil {
+ maxBlockGas = b.MaxGas
+}
+ for _, txBytes := range req.Txs {
+ tx, err := h.txVerifier.ProcessProposalVerifyTx(txBytes)
+ if err != nil {
+ return &abci.ResponseProcessProposal{
+ Status: abci.ResponseProcessProposal_REJECT
+}, nil
+}
+ if maxBlockGas > 0 {
+ gasTx, ok := tx.(GasTx)
+ if ok {
+ totalTxGas += gasTx.GetGas()
+}
+ if totalTxGas > uint64(maxBlockGas) {
+ return &abci.ResponseProcessProposal{
+ Status: abci.ResponseProcessProposal_REJECT
+}, nil
+}
+
+}
+
+}
+
+return &abci.ResponseProcessProposal{
+ Status: abci.ResponseProcessProposal_ACCEPT
+}, nil
+}
+}
+
+// NoOpPrepareProposal defines a no-op PrepareProposal handler. It will always
+// return the transactions sent by the client's request.
+func NoOpPrepareProposal() sdk.PrepareProposalHandler {
+ return func(_ sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) {
+ return &abci.ResponsePrepareProposal{
+ Txs: req.Txs
+}, nil
+}
+}
+
+// NoOpProcessProposal defines a no-op ProcessProposal Handler. It will always
+// return ACCEPT.
+func NoOpProcessProposal() sdk.ProcessProposalHandler {
+ return func(_ sdk.Context, _ *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) {
+ return &abci.ResponseProcessProposal{
+ Status: abci.ResponseProcessProposal_ACCEPT
+}, nil
+}
+}
+
+// NoOpExtendVote defines a no-op ExtendVote handler. It will always return an
+// empty byte slice as the vote extension.
+func NoOpExtendVote() sdk.ExtendVoteHandler {
+ return func(_ sdk.Context, _ *abci.RequestExtendVote) (*abci.ResponseExtendVote, error) {
+ return &abci.ResponseExtendVote{
+ VoteExtension: []byte{
+}}, nil
+}
+}
+
+// NoOpVerifyVoteExtensionHandler defines a no-op VerifyVoteExtension handler. It
+// will always return an ACCEPT status with no error.
+func NoOpVerifyVoteExtensionHandler() sdk.VerifyVoteExtensionHandler {
+ return func(_ sdk.Context, _ *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error) {
+ return &abci.ResponseVerifyVoteExtension{
+ Status: abci.ResponseVerifyVoteExtension_ACCEPT
+}, nil
+}
+}
+
+// TxSelector defines a helper type that assists in selecting transactions during
+// mempool transaction selection in PrepareProposal. It keeps track of the total
+// number of bytes and total gas of the selected transactions. It also keeps
+// track of the selected transactions themselves.
+type TxSelector interface {
+ // SelectedTxs should return a copy of the selected transactions.
+ SelectedTxs(ctx context.Context) [][]byte
+
+ // Clear should clear the TxSelector, nulling out all relevant fields.
+ Clear()
+
+ // SelectTxForProposal should attempt to select a transaction for inclusion in
+ // a proposal based on inclusion criteria defined by the TxSelector. It must
+ // return if the caller should halt the transaction selection loop
+ // (typically over a mempool) or otherwise.
+ SelectTxForProposal(ctx context.Context, maxTxBytes, maxBlockGas uint64, memTx sdk.Tx, txBz []byte) bool
+}
+
+type defaultTxSelector struct {
+ totalTxBytes uint64
+ totalTxGas uint64
+ selectedTxs [][]byte
+}
+
+func NewDefaultTxSelector() TxSelector {
+ return &defaultTxSelector{
+}
+}
+
+func (ts *defaultTxSelector) SelectedTxs(_ context.Context) [][]byte {
+ txs := make([][]byte, len(ts.selectedTxs))
+
+copy(txs, ts.selectedTxs)
+
+return txs
+}
+
+func (ts *defaultTxSelector) Clear() {
+ ts.totalTxBytes = 0
+ ts.totalTxGas = 0
+ ts.selectedTxs = nil
+}
+
+func (ts *defaultTxSelector) SelectTxForProposal(_ context.Context, maxTxBytes, maxBlockGas uint64, memTx sdk.Tx, txBz []byte) bool {
+ txSize := uint64(cmttypes.ComputeProtoSizeForTxs([]cmttypes.Tx{
+ txBz
+}))
+
+var txGasLimit uint64
+ if memTx != nil {
+ if gasTx, ok := memTx.(GasTx); ok {
+ txGasLimit = gasTx.GetGas()
+}
+
+}
+
+ // only add the transaction to the proposal if we have enough capacity
+ if (txSize + ts.totalTxBytes) <= maxTxBytes {
+ // If there is a max block gas limit, add the tx only if the limit has
+ // not been met.
+ if maxBlockGas > 0 {
+ if (txGasLimit + ts.totalTxGas) <= maxBlockGas {
+ ts.totalTxGas += txGasLimit
+ ts.totalTxBytes += txSize
+ ts.selectedTxs = append(ts.selectedTxs, txBz)
+}
+
+} else {
+ ts.totalTxBytes += txSize
+ ts.selectedTxs = append(ts.selectedTxs, txBz)
+}
+
+}
+
+ // check if we've reached capacity; if so, we cannot select any more transactions
+ return ts.totalTxBytes >= maxTxBytes || (maxBlockGas > 0 && (ts.totalTxGas >= maxBlockGas))
+}
+```
+
+Like `PrepareProposal`, this implementation is the default and can be modified by
+the application developer in [`app_di.go`](/sdk/v0.53/build/building-apps/app-go-di). If you decide to implement
+your own `ProcessProposal` handler, you must ensure that the transactions
+provided in the proposal DO NOT exceed the maximum block gas and `MaxTxBytes` (if set).
+
+```go
+processOpt := func(app *baseapp.BaseApp) {
+ abciPropHandler := baseapp.NewDefaultProposalHandler(mempool, app)
+
+app.SetProcessProposal(abciPropHandler.ProcessProposalHandler())
+}
+
+baseAppOptions = append(baseAppOptions, processOpt)
+```
diff --git a/sdk/next/build/abci/vote-extensions.mdx b/sdk/next/build/abci/vote-extensions.mdx
new file mode 100644
index 000000000..3725ab5f3
--- /dev/null
+++ b/sdk/next/build/abci/vote-extensions.mdx
@@ -0,0 +1,129 @@
+---
+title: Vote Extensions
+---
+
+
+**Synopsis**
+This section describes how the application can define and use vote extensions
+defined in ABCI++.
+
+
+## Extend Vote
+
+ABCI 2.0 (colloquially called ABCI++) allows an application to extend a pre-commit vote with arbitrary data. This process does NOT have to be deterministic, and the data returned can be unique to the
+validator process. The Cosmos SDK defines [`baseapp.ExtendVoteHandler`](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0/types/abci.go#L32):
+
+```go
+type ExtendVoteHandler func(Context, *abci.RequestExtendVote) (*abci.ResponseExtendVote, error)
+```
+
+An application can set this handler in `app.go` via the `baseapp.SetExtendVoteHandler`
+`BaseApp` option function. The `sdk.ExtendVoteHandler`, if defined, is called during
+the `ExtendVote` ABCI method. Note that if an application decides to implement
+`baseapp.ExtendVoteHandler`, it MUST return a non-nil `VoteExtension`. However, the vote
+extension can be empty. See [here](/cometbft/v0.38/spec/abci/Methods#extendvote)
+for more details.
+
+There are many decentralized censorship-resistant use cases for vote extensions.
+For example, a validator may want to submit prices for a price oracle or encryption
+shares for an encrypted transaction mempool. Note, an application should be careful
+to consider the size of the vote extensions as they could increase latency in block
+production. See [here](/cometbft/v0.38/docs/qa/CometBFT-QA-38#vote-extensions-testbed)
+for more details.
+
+Click [here](/sdk/v0.53/build/abci/vote-extensions) if you would like a walkthrough of how to implement vote extensions.
+
+## Verify Vote Extension
+
+Similar to extending a vote, an application can also verify vote extensions from
+other validators when validating their pre-commits. For a given vote extension,
+this process MUST be deterministic. The Cosmos SDK defines [`sdk.VerifyVoteExtensionHandler`](https://github.com/cosmos/cosmos-sdk/blob/v0.50.1/types/abci.go#L29-L31):
+
+```go
+type VerifyVoteExtensionHandler func(Context, *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error)
+```
+
+An application can set this handler in `app.go` via the `baseapp.SetVerifyVoteExtensionHandler`
+`BaseApp` option function. The `sdk.VerifyVoteExtensionHandler`, if defined, is called
+during the `VerifyVoteExtension` ABCI method. If an application defines a vote
+extension handler, it should also define a verification handler. Note, not all
+validators will share the same view of what vote extensions they verify depending
+on how votes are propagated. See [here](/cometbft/v0.38/spec/abci/Methods#verifyvoteextension)
+for more details.
+
+Additionally, please keep in mind that performance can be degraded if vote extensions are too big ([Link](/cometbft/v0.38/docs/qa/CometBFT-QA-38#vote-extensions-testbed)), so we highly recommend a size validation in `VerifyVoteExtensions`.
+
+## Vote Extension Propagation
+
+The agreed upon vote extensions at height `H` are provided to the proposing validator
+at height `H+1` during `PrepareProposal`. As a result, the vote extensions are
+not natively provided or exposed to the remaining validators during `ProcessProposal`.
+As a result, if an application requires that the agreed upon vote extensions from
+height `H` are available to all validators at `H+1`, the application must propagate
+these vote extensions manually in the block proposal itself. This can be done by
+"injecting" them into the block proposal, since the `Txs` field in `PrepareProposal`
+is just a slice of byte slices.
+
+`FinalizeBlock` will ignore any byte slice that doesn't implement an `sdk.Tx`, so
+any injected vote extensions will safely be ignored in `FinalizeBlock`. For more
+details on propagation, see the [ABCI++ 2.0 ADR](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-064-abci-2.0.md#vote-extension-propagation--verification).
+
+### Recovery of injected Vote Extensions
+
+As stated before, vote extensions can be injected into a block proposal (along with
+other transactions in the `Txs` field). The Cosmos SDK provides a pre-FinalizeBlock
+hook to allow applications to recover vote extensions, perform any necessary
+computation on them, and then store the results in the cached store. These results
+will be available to the application during the subsequent `FinalizeBlock` call.
+
+An example of what a pre-FinalizeBlock hook could look like is shown below:
+
+```go expandable
+app.SetPreBlocker(func(ctx sdk.Context, req *abci.RequestFinalizeBlock) error {
+ allVEs := []VE{
+} // store all parsed vote extensions here
+ for _, tx := range req.Txs {
+ // define a custom function that tries to parse the tx as a vote extension
+ ve, ok := parseVoteExtension(tx)
+ if !ok {
+ continue
+}
+
+allVEs = append(allVEs, ve)
+}
+
+ // perform any necessary computation on the vote extensions and store the result
+ // in the cached store
+ result := compute(allVEs)
+ err := storeVEResult(ctx, result)
+ if err != nil {
+ return err
+}
+
+return nil
+})
+```
+
+Then, in an app's module, the application can retrieve the result of the computation
+of vote extensions from the cached store:
+
+```go expandable
+func (k Keeper) BeginBlocker(ctx context.Context) error {
+ // retrieve the result of the computation of vote extensions from the cached store
+ result, err := k.GetVEResult(ctx)
+ if err != nil {
+ return err
+}
+
+ // use the result of the computation of vote extensions
+ k.setSomething(result)
+
+return nil
+}
+```
diff --git a/sdk/next/build/architecture.mdx b/sdk/next/build/architecture.mdx
new file mode 100644
index 000000000..4d0a30633
--- /dev/null
+++ b/sdk/next/build/architecture.mdx
@@ -0,0 +1,89 @@
+---
+title: "Architecture Decision Records (ADR)"
+description: "Version: v0.53"
+---
+
+This is a location to record all high-level architecture decisions in the Cosmos-SDK.
+
+An Architectural Decision (**AD**) is a software design choice that addresses a functional or non-functional requirement that is architecturally significant. An Architecturally Significant Requirement (**ASR**) is a requirement that has a measurable effect on a software system’s architecture and quality. An Architectural Decision Record (**ADR**) captures a single AD, such as often done when writing personal notes or meeting minutes; the collection of ADRs created and maintained in a project constitute its decision log. All these are within the topic of Architectural Knowledge Management (AKM).
+
+You can read more about the ADR concept in this [blog post](https://product.reverb.com/documenting-architecture-decisions-the-reverb-way-a3563bb24bd0#.78xhdix6t).
+
+## Rationale[](#rationale "Direct link to Rationale")
+
+ADRs are intended to be the primary mechanism for proposing new feature designs and new processes, for collecting community input on an issue, and for documenting the design decisions. An ADR should provide:
+
+* Context on the relevant goals and the current state
+* Proposed changes to achieve the goals
+* Summary of pros and cons
+* References
+* Changelog
+
+Note the distinction between an ADR and a spec. The ADR provides the context, intuition, reasoning, and justification for a change in architecture, or for the architecture of something new. The spec is a much more compressed and streamlined summary of everything as it stands today.
+
+If recorded decisions turned out to be lacking, convene a discussion, record the new decisions here, and then modify the code to match.
+
+## Creating new ADR[](#creating-new-adr "Direct link to Creating new ADR")
+
+Read about the [PROCESS](/sdk/v0.53/build/architecture/PROCESS).
+
+### Use RFC 2119 Keywords[](#use-rfc-2119-keywords "Direct link to Use RFC 2119 Keywords")
+
+When writing ADRs, follow the same best practices for writing RFCs. When writing RFCs, key words are used to signify the requirements in the specification. These words are often capitalized: "MUST," "MUST NOT," "REQUIRED," "SHALL," "SHALL NOT," "SHOULD," "SHOULD NOT," "RECOMMENDED," "MAY," and "OPTIONAL." They are to be interpreted as described in [RFC 2119](https://datatracker.ietf.org/doc/html/rfc2119).
+
+## ADR Table of Contents[](#adr-table-of-contents "Direct link to ADR Table of Contents")
+
+### Accepted[](#accepted "Direct link to Accepted")
+
+* [ADR 002: SDK Documentation Structure](/sdk/v0.53/build/architecture/adr-002-docs-structure)
+* [ADR 004: Split Denomination Keys](/sdk/v0.53/build/architecture/adr-004-split-denomination-keys)
+* [ADR 006: Secret Store Replacement](/sdk/v0.53/build/architecture/adr-006-secret-store-replacement)
+* [ADR 009: Evidence Module](/sdk/v0.53/build/architecture/adr-009-evidence-module)
+* [ADR 010: Modular AnteHandler](/sdk/v0.53/build/architecture/adr-010-modular-antehandler)
+* [ADR 019: Protocol Buffer State Encoding](/sdk/v0.53/build/architecture/adr-019-protobuf-state-encoding)
+* [ADR 020: Protocol Buffer Transaction Encoding](/sdk/v0.53/build/architecture/adr-020-protobuf-transaction-encoding)
+* [ADR 021: Protocol Buffer Query Encoding](/sdk/v0.53/build/architecture/adr-021-protobuf-query-encoding)
+* [ADR 023: Protocol Buffer Naming and Versioning](/sdk/v0.53/build/architecture/adr-023-protobuf-naming)
+* [ADR 029: Fee Grant Module](/sdk/v0.53/build/architecture/adr-029-fee-grant-module)
+* [ADR 030: Message Authorization Module](/sdk/v0.53/build/architecture/adr-030-authz-module)
+* [ADR 031: Protobuf Msg Services](/sdk/v0.53/build/architecture/adr-031-msg-service)
+* [ADR 055: ORM](/sdk/v0.53/build/architecture/adr-055-orm)
+* [ADR 058: Auto-Generated CLI](/sdk/v0.53/build/architecture/adr-058-auto-generated-cli)
+* [ADR 060: ABCI 1.0 (Phase I)](/sdk/v0.53/build/architecture/adr-060-abci-1.0)
+* [ADR 061: Liquid Staking](/sdk/v0.53/build/architecture/adr-061-liquid-staking)
+
+### Proposed[](#proposed "Direct link to Proposed")
+
+* [ADR 003: Dynamic Capability Store](/sdk/v0.53/build/architecture/adr-003-dynamic-capability-store)
+* [ADR 011: Generalize Genesis Accounts](/sdk/v0.53/build/architecture/adr-011-generalize-genesis-accounts)
+* [ADR 012: State Accessors](/sdk/v0.53/build/architecture/adr-012-state-accessors)
+* [ADR 013: Metrics](/sdk/v0.53/build/architecture/adr-013-metrics)
+* [ADR 016: Validator Consensus Key Rotation](/sdk/v0.53/build/architecture/adr-016-validator-consensus-key-rotation)
+* [ADR 017: Historical Header Module](/sdk/v0.53/build/architecture/adr-017-historical-header-module)
+* [ADR 018: Extendable Voting Periods](/sdk/v0.53/build/architecture/adr-018-extendable-voting-period)
+* [ADR 022: Custom baseapp panic handling](/sdk/v0.53/build/architecture/adr-022-custom-panic-handling)
+* [ADR 024: Coin Metadata](/sdk/v0.53/build/architecture/adr-024-coin-metadata)
+* [ADR 027: Deterministic Protobuf Serialization](/sdk/v0.53/build/architecture/adr-027-deterministic-protobuf-serialization)
+* [ADR 028: Public Key Addresses](/sdk/v0.53/build/architecture/adr-028-public-key-addresses)
+* [ADR 032: Typed Events](/sdk/v0.53/build/architecture/adr-032-typed-events)
+* [ADR 033: Inter-module RPC](/sdk/v0.53/build/architecture/adr-033-protobuf-inter-module-comm)
+* [ADR 035: Rosetta API Support](/sdk/v0.53/build/architecture/adr-035-rosetta-api-support)
+* [ADR 037: Governance Split Votes](/sdk/v0.53/build/architecture/adr-037-gov-split-vote)
+* [ADR 038: State Listening](/sdk/v0.53/build/architecture/adr-038-state-listening)
+* [ADR 039: Epoched Staking](/sdk/v0.53/build/architecture/adr-039-epoched-staking)
+* [ADR 040: Storage and SMT State Commitments](/sdk/v0.53/build/architecture/adr-040-storage-and-smt-state-commitments)
+* [ADR 046: Module Params](/sdk/v0.53/build/architecture/adr-046-module-params)
+* [ADR 054: Semver Compatible SDK Modules](/sdk/v0.53/build/architecture/adr-054-semver-compatible-modules)
+* [ADR 057: App Wiring](/sdk/v0.53/build/architecture/adr-057-app-wiring)
+* [ADR 059: Test Scopes](/sdk/v0.53/build/architecture/adr-059-test-scopes)
+* [ADR 062: Collections State Layer](/sdk/v0.53/build/architecture/adr-062-collections-state-layer)
+* [ADR 063: Core Module API](/sdk/v0.53/build/architecture/adr-063-core-module-api)
+* [ADR 065: Store V2](/sdk/v0.53/build/architecture/adr-065-store-v2)
+* [ADR 076: Transaction Malleability Risk Review and Recommendations](/sdk/v0.53/build/architecture/adr-076-tx-malleability)
+
+### Draft[](#draft "Direct link to Draft")
+
+* [ADR 044: Guidelines for Updating Protobuf Definitions](/sdk/v0.53/build/architecture/adr-044-protobuf-updates-guidelines)
+* [ADR 047: Extend Upgrade Plan](/sdk/v0.53/build/architecture/adr-047-extend-upgrade-plan)
+* [ADR 053: Go Module Refactoring](/sdk/v0.53/build/architecture/adr-053-go-module-refactoring)
+* [ADR 068: Preblock](/sdk/v0.53/build/architecture/adr-068-preblock)
diff --git a/sdk/next/build/architecture/PROCESS.mdx b/sdk/next/build/architecture/PROCESS.mdx
new file mode 100644
index 000000000..129e3f7f9
--- /dev/null
+++ b/sdk/next/build/architecture/PROCESS.mdx
@@ -0,0 +1,60 @@
+---
+title: ADR Creation Process
+---
+
+1. Copy the `adr-template.md` file. Use the following filename pattern: `adr-next_number-title.md`
+2. Create a draft Pull Request if you want to get early feedback.
+3. Make sure the context and solution are clear and well documented.
+4. Add an entry to the list in the [README](/sdk/v0.53/build/architecture/README) file.
+5. Create a Pull Request to propose a new ADR.
+
+## What is an ADR?
+
+An ADR is a document to document an implementation and design that may or may not have been discussed in an RFC. While an RFC is meant to replace synchronous communication in a distributed environment, an ADR is meant to document an already made decision. An ADR won't come with much of a communication overhead because the discussion was recorded in an RFC or a synchronous discussion. If the consensus came from a synchronous discussion, then a short excerpt should be added to the ADR to explain the goals.
+
+## ADR life cycle
+
+ADR creation is an **iterative** process. Instead of having a high amount of communication overhead, an ADR is used when there is already a decision made and implementation details need to be added. The ADR should document what the collective consensus for the specific issue is and how to solve it.
+
+1. Every ADR should start with either an RFC or a discussion where consensus has been met.
+
+2. Once consensus is met, a GitHub Pull Request (PR) is created with a new document based on the `adr-template.md`.
+
+3. If a *proposed* ADR is merged, then it should clearly document outstanding issues either in ADR document notes or in a GitHub Issue.
+
+4. The PR SHOULD always be merged. In the case of a faulty ADR, we still prefer to merge it with a *rejected* status. The only time the ADR SHOULD NOT be merged is if the author abandons it.
+
+5. Merged ADRs SHOULD NOT be pruned.
+
+### ADR status
+
+Status has two components:
+
+```text
+{CONSENSUS STATUS} {IMPLEMENTATION STATUS}
+```
+
+IMPLEMENTATION STATUS is either `Implemented` or `Not Implemented`.
+
+#### Consensus Status
+
+```text
+DRAFT -> PROPOSED -> LAST CALL yyyy-mm-dd -> ACCEPTED | REJECTED -> SUPERSEDED by ADR-xxx
+ \ |
+ \ |
+ v v
+ ABANDONED
+```
+
+* `DRAFT`: \[optional] an ADR which is a work in progress, not being ready for a general review. This is to present an early work and get early feedback in a Draft Pull Request form.
+* `PROPOSED`: an ADR covering a full solution architecture and still in the review - project stakeholders haven't reached an agreement yet.
+* `LAST CALL yyyy-mm-dd`: \[optional] Notifies that we are close to accepting updates. Changing a status to `LAST CALL` means that social consensus (of Cosmos SDK maintainers) has been reached, and we still want to give it time to let the community react or analyze.
+* `ACCEPTED`: ADR which will represent a currently implemented or to be implemented architecture design.
+* `REJECTED`: ADR can go from PROPOSED or ACCEPTED to rejected if the consensus among project stakeholders will decide so.
+* `SUPERSEDED by ADR-xxx`: ADR which has been superseded by a new ADR.
+* `ABANDONED`: the ADR is no longer pursued by the original authors.
+
+## Language used in ADR
+
+* The context/background should be written in the present tense.
+* Avoid using a first, personal form.
diff --git a/sdk/next/build/architecture/README.mdx b/sdk/next/build/architecture/README.mdx
new file mode 100644
index 000000000..d442fb26d
--- /dev/null
+++ b/sdk/next/build/architecture/README.mdx
@@ -0,0 +1,97 @@
+---
+title: Architecture Decision Records (ADR)
+description: >-
+ This is a location to record all high-level architecture decisions in the
+ Cosmos-SDK.
+---
+
+This is a location to record all high-level architecture decisions in the Cosmos-SDK.
+
+An Architectural Decision (**AD**) is a software design choice that addresses a functional or non-functional requirement that is architecturally significant.
+An Architecturally Significant Requirement (**ASR**) is a requirement that has a measurable effect on a software system’s architecture and quality.
+An Architectural Decision Record (**ADR**) captures a single AD, such as often done when writing personal notes or meeting minutes; the collection of ADRs created and maintained in a project constitute its decision log. All these are within the topic of Architectural Knowledge Management (AKM).
+
+You can read more about the ADR concept in this [blog post](https://product.reverb.com/documenting-architecture-decisions-the-reverb-way-a3563bb24bd0#.78xhdix6t).
+
+## Rationale
+
+ADRs are intended to be the primary mechanism for proposing new feature designs and new processes, for collecting community input on an issue, and for documenting the design decisions.
+An ADR should provide:
+
+* Context on the relevant goals and the current state
+* Proposed changes to achieve the goals
+* Summary of pros and cons
+* References
+* Changelog
+
+Note the distinction between an ADR and a spec. The ADR provides the context, intuition, reasoning, and
+justification for a change in architecture, or for the architecture of something
+new. The spec is a much more compressed and streamlined summary of everything as
+it stands today.
+
+If recorded decisions turned out to be lacking, convene a discussion, record the new decisions here, and then modify the code to match.
+
+## Creating new ADR
+
+Read about the [PROCESS](/sdk/v0.53/build/architecture/PROCESS).
+
+### Use RFC 2119 Keywords
+
+When writing ADRs, follow the same best practices for writing RFCs. When writing RFCs, key words are used to signify the requirements in the specification. These words are often capitalized: "MUST," "MUST NOT," "REQUIRED," "SHALL," "SHALL NOT," "SHOULD," "SHOULD NOT," "RECOMMENDED," "MAY," and "OPTIONAL." They are to be interpreted as described in [RFC 2119](https://datatracker.ietf.org/doc/html/rfc2119).
+
+## ADR Table of Contents
+
+### Accepted
+
+* [ADR 002: SDK Documentation Structure](/sdk/v0.53/build/architecture/adr-002-docs-structure)
+* [ADR 004: Split Denomination Keys](/sdk/v0.53/build/architecture/adr-004-split-denomination-keys)
+* [ADR 006: Secret Store Replacement](/sdk/v0.53/build/architecture/adr-006-secret-store-replacement)
+* [ADR 009: Evidence Module](/sdk/v0.53/build/architecture/adr-009-evidence-module)
+* [ADR 010: Modular AnteHandler](/sdk/v0.53/build/architecture/adr-010-modular-antehandler)
+* [ADR 019: Protocol Buffer State Encoding](/sdk/v0.53/build/architecture/adr-019-protobuf-state-encoding)
+* [ADR 020: Protocol Buffer Transaction Encoding](/sdk/v0.53/build/architecture/adr-020-protobuf-transaction-encoding)
+* [ADR 021: Protocol Buffer Query Encoding](/sdk/v0.53/build/architecture/adr-021-protobuf-query-encoding)
+* [ADR 023: Protocol Buffer Naming and Versioning](/sdk/v0.53/build/architecture/adr-023-protobuf-naming)
+* [ADR 029: Fee Grant Module](/sdk/v0.53/build/architecture/adr-029-fee-grant-module)
+* [ADR 030: Message Authorization Module](/sdk/v0.53/build/architecture/adr-030-authz-module)
+* [ADR 031: Protobuf Msg Services](/sdk/v0.53/build/architecture/adr-031-msg-service)
+* [ADR 055: ORM](/sdk/v0.53/build/architecture/adr-055-orm)
+* [ADR 058: Auto-Generated CLI](/sdk/v0.53/build/architecture/adr-058-auto-generated-cli)
+* [ADR 060: ABCI 1.0 (Phase I)](/sdk/v0.53/build/architecture/adr-060-abci-1.0)
+* [ADR 061: Liquid Staking](/sdk/v0.53/build/architecture/adr-061-liquid-staking)
+
+### Proposed
+
+* [ADR 003: Dynamic Capability Store](/sdk/v0.53/build/architecture/adr-003-dynamic-capability-store)
+* [ADR 011: Generalize Genesis Accounts](/sdk/v0.53/build/architecture/adr-011-generalize-genesis-accounts)
+* [ADR 012: State Accessors](/sdk/v0.53/build/architecture/adr-012-state-accessors)
+* [ADR 013: Metrics](/sdk/v0.53/build/architecture/adr-013-metrics)
+* [ADR 016: Validator Consensus Key Rotation](/sdk/v0.53/build/architecture/adr-016-validator-consensus-key-rotation)
+* [ADR 017: Historical Header Module](/sdk/v0.53/build/architecture/adr-017-historical-header-module)
+* [ADR 018: Extendable Voting Periods](/sdk/v0.53/build/architecture/adr-018-extendable-voting-period)
+* [ADR 022: Custom baseapp panic handling](/sdk/v0.53/build/architecture/adr-022-custom-panic-handling)
+* [ADR 024: Coin Metadata](/sdk/v0.53/build/architecture/adr-024-coin-metadata)
+* [ADR 027: Deterministic Protobuf Serialization](/sdk/v0.53/build/architecture/adr-027-deterministic-protobuf-serialization)
+* [ADR 028: Public Key Addresses](/sdk/v0.53/build/architecture/adr-028-public-key-addresses)
+* [ADR 032: Typed Events](/sdk/v0.53/build/architecture/adr-032-typed-events)
+* [ADR 033: Inter-module RPC](/sdk/v0.53/build/architecture/adr-033-protobuf-inter-module-comm)
+* [ADR 035: Rosetta API Support](/sdk/v0.53/build/architecture/adr-035-rosetta-api-support)
+* [ADR 037: Governance Split Votes](/sdk/v0.53/build/architecture/adr-037-gov-split-vote)
+* [ADR 038: State Listening](/sdk/v0.53/build/architecture/adr-038-state-listening)
+* [ADR 039: Epoched Staking](/sdk/v0.53/build/architecture/adr-039-epoched-staking)
+* [ADR 040: Storage and SMT State Commitments](/sdk/v0.53/build/architecture/adr-040-storage-and-smt-state-commitments)
+* [ADR 046: Module Params](/sdk/v0.53/build/architecture/adr-046-module-params)
+* [ADR 054: Semver Compatible SDK Modules](/sdk/v0.53/build/architecture/adr-054-semver-compatible-modules)
+* [ADR 057: App Wiring](/sdk/v0.53/build/architecture/adr-057-app-wiring)
+* [ADR 059: Test Scopes](/sdk/v0.53/build/architecture/adr-059-test-scopes)
+* [ADR 062: Collections State Layer](/sdk/v0.53/build/architecture/adr-062-collections-state-layer)
+* [ADR 063: Core Module API](/sdk/v0.53/build/architecture/adr-063-core-module-api)
+* [ADR 065: Store V2](/sdk/v0.53/build/architecture/adr-065-store-v2)
+* [ADR 076: Transaction Malleability Risk Review and Recommendations](/sdk/v0.53/build/architecture/adr-076-tx-malleability)
+
+### Draft
+
+* [ADR 044: Guidelines for Updating Protobuf Definitions](/sdk/v0.53/build/architecture/adr-044-protobuf-updates-guidelines)
+* [ADR 047: Extend Upgrade Plan](/sdk/v0.53/build/architecture/adr-047-extend-upgrade-plan)
+* [ADR 053: Go Module Refactoring](/sdk/v0.53/build/architecture/adr-053-go-module-refactoring)
+* [ADR 068: Preblock](/sdk/v0.53/build/architecture/adr-068-preblock)
diff --git a/sdk/next/build/architecture/adr-002-docs-structure.mdx b/sdk/next/build/architecture/adr-002-docs-structure.mdx
new file mode 100644
index 000000000..d4944c69f
--- /dev/null
+++ b/sdk/next/build/architecture/adr-002-docs-structure.mdx
@@ -0,0 +1,92 @@
+---
+title: 'ADR 002: SDK Documentation Structure'
+description: >-
+ There is a need for a scalable structure of the Cosmos SDK documentation.
+ Current documentation includes a lot of non-related Cosmos SDK material, is
+ difficult to maintain and hard to follow as a user.
+---
+
+## Context
+
+There is a need for a scalable structure of the Cosmos SDK documentation. Current documentation includes a lot of non-related Cosmos SDK material, is difficult to maintain and hard to follow as a user.
+
+Ideally, we would have:
+
+* All docs related to dev frameworks or tools live in their respective github repos (sdk repo would contain sdk docs, hub repo would contain hub docs, lotion repo would contain lotion docs, etc.)
+* All other docs (faqs, whitepaper, high-level material about Cosmos) would live on the website.
+
+## Decision
+
+Re-structure the `/docs` folder of the Cosmos SDK github repo as follows:
+
+```text expandable
+docs/
+├── README
+├── intro/
+├── concepts/
+│ ├── baseapp
+│ ├── types
+│ ├── store
+│ ├── server
+│ ├── modules/
+│ │ ├── keeper
+│ │ ├── handler
+│ │ ├── cli
+│ ├── gas
+│ └── commands
+├── clients/
+│ ├── lite/
+│ ├── service-providers
+├── modules/
+├── spec/
+├── translations/
+└── architecture/
+```
+
+The files in each sub-folder do not matter and will likely change. What matters is the sectioning:
+
+* `README`: Landing page of the docs.
+* `intro`: Introductory material. Goal is to have a short explainer of the Cosmos SDK and then channel people to the resource they need. The [Cosmos SDK tutorial](https://github.com/cosmos/sdk-application-tutorial/) will be highlighted, as well as the `godocs`.
+* `concepts`: Contains high-level explanations of the abstractions of the Cosmos SDK. It does not contain specific code implementation and does not need to be updated often. **It is not an API specification of the interfaces**. API spec is the `godoc`.
+* `clients`: Contains specs and info about the various Cosmos SDK clients.
+* `spec`: Contains specs of modules, and others.
+* `modules`: Contains links to `godocs` and the spec of the modules.
+* `architecture`: Contains architecture-related docs like the present one.
+* `translations`: Contains different translations of the documentation.
+
+Website docs sidebar will only include the following sections:
+
+* `README`
+* `intro`
+* `concepts`
+* `clients`
+
+`architecture` need not be displayed on the website.
+
+## Status
+
+Accepted
+
+## Consequences
+
+### Positive
+
+* Much clearer organization of the Cosmos SDK docs.
+* The `/docs` folder now only contains Cosmos SDK and gaia related material. Later, it will only contain Cosmos SDK related material.
+* Developers only have to update `/docs` folder when they open a PR (and not `/examples` for example).
+* Easier for developers to find what they need to update in the docs thanks to reworked architecture.
+* Cleaner vuepress build for website docs.
+* Will help build an executable doc (cf [Link](https://github.com/cosmos/cosmos-sdk/issues/2611))
+
+### Neutral
+
+* We need to move a bunch of deprecated stuff to `/_attic` folder.
+* We need to integrate content in `sdk/docs/core` in `concepts`.
+* We need to move all the content that currently lives in `docs` and does not fit in new structure (like `lotion`, intro material, whitepaper) to the website repository.
+* Update `DOCS_README.md`
+
+## References
+
+* [Link](https://github.com/cosmos/cosmos-sdk/issues/1460)
+* [Link](https://github.com/cosmos/cosmos-sdk/pull/2695)
+* [Link](https://github.com/cosmos/cosmos-sdk/issues/2611)
diff --git a/sdk/next/build/architecture/adr-003-dynamic-capability-store.mdx b/sdk/next/build/architecture/adr-003-dynamic-capability-store.mdx
new file mode 100644
index 000000000..a98442e8f
--- /dev/null
+++ b/sdk/next/build/architecture/adr-003-dynamic-capability-store.mdx
@@ -0,0 +1,392 @@
+---
+title: 'ADR 003: Dynamic Capability Store'
+description: '12 December 2019: Initial version 02 April 2020: Memory Store Revisions'
+---
+
+## Changelog
+
+* 12 December 2019: Initial version
+* 02 April 2020: Memory Store Revisions
+
+## Context
+
+Full implementation of the [IBC specification](https://github.com/cosmos/ibc) requires the ability to create and authenticate object-capability keys at runtime (i.e., during transaction execution),
+as described in [ICS 5](https://github.com/cosmos/ibc/tree/master/spec/core/ics-005-port-allocation#technical-specification). In the IBC specification, capability keys are created for each newly initialized
+port & channel, and are used to authenticate future usage of the port or channel. Since channels and potentially ports can be initialized during transaction execution, the state machine must be able to create
+object-capability keys at this time.
+
+At present, the Cosmos SDK does not have the ability to do this. Object-capability keys are currently pointers (memory addresses) of `StoreKey` structs created at application initialisation in `app.go` ([example](https://github.com/cosmos/gaia/blob/dcbddd9f04b3086c0ad07ee65de16e7adedc7da4/app/app.go#L132))
+and passed to Keepers as fixed arguments ([example](https://github.com/cosmos/gaia/blob/dcbddd9f04b3086c0ad07ee65de16e7adedc7da4/app/app.go#L160)). Keepers cannot create or store capability keys during transaction execution — although they could call `NewKVStoreKey` and take the memory address
+of the returned struct, storing this in the Merklised store would result in a consensus fault, since the memory address will be different on each machine (this is intentional — were this not the case, the keys would be predictable and couldn't serve as object capabilities).
+
+Keepers need a way to keep a private map of store keys which can be altered during transaction execution, along with a suitable mechanism for regenerating the unique memory addresses (capability keys) in this map whenever the application is started or restarted, along with a mechanism to revert capability creation on tx failure.
+This ADR proposes such an interface & mechanism.
+
+## Decision
+
+The Cosmos SDK will include a new `CapabilityKeeper` abstraction, which is responsible for provisioning,
+tracking, and authenticating capabilities at runtime. During application initialisation in `app.go`,
+the `CapabilityKeeper` will be hooked up to modules through unique function references
+(by calling `ScopeToModule`, defined below) so that it can identify the calling module when later
+invoked.
+
+When the initial state is loaded from disk, the `CapabilityKeeper`'s `Initialize` function will create
+new capability keys for all previously allocated capability identifiers (allocated during execution of
+past transactions and assigned to particular modules), and keep them in a memory-only store while the
+chain is running.
+
+The `CapabilityKeeper` will include a persistent `KVStore`, a `MemoryStore`, and an in-memory map.
+The persistent `KVStore` tracks which capability is owned by which modules.
+The `MemoryStore` stores a forward mapping that maps from module name and capability tuples to capability names, and
+a reverse mapping that maps from module name and capability name to the capability index.
+Since we cannot marshal the capability into a `KVStore` and unmarshal without changing the memory location of the capability,
+the reverse mapping in the KVStore will simply map to an index. This index can then be used as a key in the ephemeral
+go-map to retrieve the capability at the original memory location.
+
+The `CapabilityKeeper` will define the following types & functions:
+
+The `Capability` is similar to `StoreKey`, but has a globally unique `Index()` instead of
+a name. A `String()` method is provided for debugging.
+
+A `Capability` is simply a struct, the address of which is taken for the actual capability.
+
+```go
+type Capability struct {
+ index uint64
+}
+```
+
+A `CapabilityKeeper` contains a persistent store key, memory store key, and mapping of allocated module names.
+
+```go
+type CapabilityKeeper struct {
+ persistentKey StoreKey
+ memKey StoreKey
+ capMap map[uint64]*Capability
+ moduleNames map[string]interface{}
+ sealed bool
+}
+```
+
+The `CapabilityKeeper` provides the ability to create *scoped* sub-keepers which are tied to a
+particular module name. These `ScopedCapabilityKeeper`s must be created at application initialisation
+and passed to modules, which can then use them to claim capabilities they receive and retrieve
+capabilities which they own by name, in addition to creating new capabilities & authenticating capabilities
+passed by other modules.
+
+```go
+type ScopedCapabilityKeeper struct {
+ persistentKey StoreKey
+ memKey StoreKey
+ capMap map[uint64]*Capability
+ moduleName string
+}
+```
+
+`ScopeToModule` is used to create a scoped sub-keeper with a particular name, which must be unique.
+It MUST be called before `InitializeAndSeal`.
+
+```go expandable
+func (ck CapabilityKeeper) ScopeToModule(moduleName string) ScopedCapabilityKeeper {
+ if k.sealed {
+ panic("cannot scope to module via a sealed capability keeper")
+}
+ if _, ok := k.scopedModules[moduleName]; ok {
+ panic(fmt.Sprintf("cannot create multiple scoped keepers for the same module name: %s", moduleName))
+}
+
+k.scopedModules[moduleName] = struct{
+}{
+}
+
+return ScopedKeeper{
+ cdc: k.cdc,
+ storeKey: k.storeKey,
+ memKey: k.memKey,
+ capMap: k.capMap,
+ module: moduleName,
+}
+}
+```
+
+`InitializeAndSeal` MUST be called exactly once, after loading the initial state and creating all
+necessary `ScopedCapabilityKeeper`s, in order to populate the memory store with newly-created
+capability keys in accordance with the keys previously claimed by particular modules and prevent the
+creation of any new `ScopedCapabilityKeeper`s.
+
+```go expandable
+func (ck CapabilityKeeper) InitializeAndSeal(ctx Context) {
+ if ck.sealed {
+ panic("capability keeper is sealed")
+}
+ persistentStore := ctx.KVStore(ck.persistentKey)
+ memStore := ctx.KVStore(ck.memKey)
+
+ // initialise memory store for all names in persistent store
+ for index, value := range persistentStore.Iter() {
+ capability = &CapabilityKey{
+ index: index
+}
+ for moduleAndCapability := range value {
+ moduleName, capabilityName := moduleAndCapability.Split("/")
+
+memStore.Set(moduleName + "/fwd/" + capability, capabilityName)
+
+memStore.Set(moduleName + "/rev/" + capabilityName, index)
+
+ck.capMap[index] = capability
+}
+
+}
+
+ck.sealed = true
+}
+```
+
+`NewCapability` can be called by any module to create a new unique, unforgeable object-capability
+reference. The newly created capability is automatically persisted; the calling module need not
+call `ClaimCapability`.
+
+```go expandable
+func (sck ScopedCapabilityKeeper) NewCapability(ctx Context, name string) (Capability, error) {
+ // check name not taken in memory store
+ if capStore.Get("rev/" + name) != nil {
+ return nil, errors.New("name already taken")
+}
+
+ // fetch the current index
+ index := persistentStore.Get("index")
+
+ // create a new capability
+ capability := &CapabilityKey{
+ index: index
+}
+
+ // set persistent store
+ persistentStore.Set(index, Set.singleton(sck.moduleName + "/" + name))
+
+ // update the index
+ index++
+ persistentStore.Set("index", index)
+
+ // set forward mapping in memory store from capability to name
+ memStore.Set(sck.moduleName + "/fwd/" + capability, name)
+
+ // set reverse mapping in memory store from name to index
+ memStore.Set(sck.moduleName + "/rev/" + name, index)
+
+ // set the in-memory mapping from index to capability pointer
+ capMap[index] = capability
+
+ // return the newly created capability
+ return capability
+}
+```
+
+`AuthenticateCapability` can be called by any module to check that a capability
+does in fact correspond to a particular name (the name can be untrusted user input)
+with which the calling module previously associated it.
+
+```go
+func (sck ScopedCapabilityKeeper) AuthenticateCapability(name string, capability Capability) bool {
+ // return whether forward mapping in memory store matches name
+ return memStore.Get(sck.moduleName + "/fwd/" + capability) == name
+}
+```
+
+`ClaimCapability` allows a module to claim a capability key which it has received from another module
+so that future `GetCapability` calls will succeed.
+
+`ClaimCapability` MUST be called if a module which receives a capability wishes to access it by name
+in the future. Capabilities are multi-owner, so if multiple modules have a single `Capability` reference,
+they will all own it.
+
+```go expandable
+func (sck ScopedCapabilityKeeper) ClaimCapability(ctx Context, capability Capability, name string) error {
+ persistentStore := ctx.KVStore(sck.persistentKey)
+
+ // set forward mapping in memory store from capability to name
+ memStore.Set(sck.moduleName + "/fwd/" + capability, name)
+
+ // set reverse mapping in memory store from name to capability
+ memStore.Set(sck.moduleName + "/rev/" + name, capability)
+
+ // update owner set in persistent store
+ owners := persistentStore.Get(capability.Index())
+
+owners.add(sck.moduleName + "/" + name)
+
+persistentStore.Set(capability.Index(), owners)
+}
+```
+
+`GetCapability` allows a module to fetch a capability which it has previously claimed by name.
+The module is not allowed to retrieve capabilities which it does not own.
+
+```go
+func (sck ScopedCapabilityKeeper) GetCapability(ctx Context, name string) (Capability, error) {
+ // fetch the index of capability using reverse mapping in memstore
+ index := memStore.Get(sck.moduleName + "/rev/" + name)
+
+ // fetch capability from go-map using index
+ capability := capMap[index]
+
+ // return the capability
+ return capability
+}
+```
+
+`ReleaseCapability` allows a module to release a capability which it had previously claimed. If no
+more owners exist, the capability will be deleted globally.
+
+```go expandable
+func (sck ScopedCapabilityKeeper) ReleaseCapability(ctx Context, capability Capability) error {
+ persistentStore := ctx.KVStore(sck.persistentKey)
+ name := capStore.Get(sck.moduleName + "/fwd/" + capability)
+ if name == nil {
+ return error("capability not owned by module")
+}
+
+ // delete forward mapping in memory store
+ memoryStore.Delete(sck.moduleName + "/fwd/" + capability, name)
+
+ // delete reverse mapping in memory store
+ memoryStore.Delete(sck.moduleName + "/rev/" + name, capability)
+
+ // update owner set in persistent store
+ owners := persistentStore.Get(capability.Index())
+
+owners.remove(sck.moduleName + "/" + name)
+ if owners.size() > 0 {
+ // there are still other owners, keep the capability around
+ persistentStore.Set(capability.Index(), owners)
+}
+
+else {
+ // no more owners, delete the capability
+ persistentStore.Delete(capability.Index())
+
+delete(capMap[capability.Index()])
+}
+}
+```
+
+### Usage patterns
+
+#### Initialisation
+
+Any modules which use dynamic capabilities must be provided a `ScopedCapabilityKeeper` in `app.go`:
+
+```go
+ck := NewCapabilityKeeper(persistentKey, memoryKey)
+
+mod1Keeper := NewMod1Keeper(ck.ScopeToModule("mod1"), ....)
+
+mod2Keeper := NewMod2Keeper(ck.ScopeToModule("mod2"), ....)
+
+// other initialisation logic ...
+
+// load initial state...
+
+ck.InitializeAndSeal(initialContext)
+```
+
+#### Creating, passing, claiming and using capabilities
+
+Consider the case where `mod1` wants to create a capability, associate it with a resource (e.g. an IBC channel) by name, then pass it to `mod2` which will use it later:
+
+Module 1 would have the following code:
+
+```go
+capability := scopedCapabilityKeeper.NewCapability(ctx, "resourceABC")
+
+mod2Keeper.SomeFunction(ctx, capability, args...)
+```
+
+`SomeFunction`, running in module 2, could then claim the capability:
+
+```go
+func (k Mod2Keeper) SomeFunction(ctx Context, capability Capability) {
+ k.sck.ClaimCapability(ctx, capability, "resourceABC")
+ // other logic...
+}
+```
+
+Later on, module 2 can retrieve that capability by name and pass it to module 1, which will authenticate it against the resource:
+
+```go
+func (k Mod2Keeper) SomeOtherFunction(ctx Context, name string) {
+ capability := k.sck.GetCapability(ctx, name)
+
+mod1.UseResource(ctx, capability, "resourceABC")
+}
+```
+
+Module 1 will then check that this capability key is authenticated to use the resource before allowing module 2 to use it:
+
+```go
+func (k Mod1Keeper) UseResource(ctx Context, capability Capability, resource string) {
+ if !k.sck.AuthenticateCapability(name, capability) {
+ return errors.New("unauthenticated")
+}
+ // do something with the resource
+}
+```
+
+If module 2 passed the capability key to module 3, module 3 could then claim it and call module 1 just like module 2 did
+(in which case module 1, module 2, and module 3 would all be able to use this capability).
+
+## Status
+
+Proposed.
+
+## Consequences
+
+### Positive
+
+* Dynamic capability support.
+* Allows CapabilityKeeper to return same capability pointer from go-map while reverting any writes to the persistent `KVStore` and in-memory `MemoryStore` on tx failure.
+
+### Negative
+
+* Requires an additional keeper.
+* Some overlap with existing `StoreKey` system (in the future they could be combined, since this is a superset functionality-wise).
+* Requires an extra level of indirection in the reverse mapping, since MemoryStore must map to index which must then be used as key in a go map to retrieve the actual capability
+
+### Neutral
+
+(none known)
+
+## References
+
+* [Original discussion](https://github.com/cosmos/cosmos-sdk/pull/5230#discussion_r343978513)
diff --git a/sdk/next/build/architecture/adr-004-split-denomination-keys.mdx b/sdk/next/build/architecture/adr-004-split-denomination-keys.mdx
new file mode 100644
index 000000000..f1183dea4
--- /dev/null
+++ b/sdk/next/build/architecture/adr-004-split-denomination-keys.mdx
@@ -0,0 +1,129 @@
+---
+title: 'ADR 004: Split Denomination Keys'
+description: >-
+ 2020-01-08: Initial version 2020-01-09: Alterations to handle vesting accounts
+ 2020-01-14: Updates from review feedback 2020-01-30: Updates from
+ implementation
+---
+
+## Changelog
+
+* 2020-01-08: Initial version
+* 2020-01-09: Alterations to handle vesting accounts
+* 2020-01-14: Updates from review feedback
+* 2020-01-30: Updates from implementation
+
+### Glossary
+
+* denom / denomination key -- unique token identifier.
+
+## Context
+
+With permissionless IBC, anyone will be able to send arbitrary denominations to any other account. Currently, all non-zero balances are stored along with the account in an `sdk.Coins` struct, which creates a potential denial-of-service concern, as too many denominations will become expensive to load & store each time the account is modified. See issues [5467](https://github.com/cosmos/cosmos-sdk/issues/5467) and [4982](https://github.com/cosmos/cosmos-sdk/issues/4982) for additional context.
+
+Simply rejecting incoming deposits after a denomination count limit doesn't work, since it opens up a griefing vector: someone could send a user lots of nonsensical coins over IBC, and then prevent the user from receiving real denominations (such as staking rewards).
+
+## Decision
+
+Balances shall be stored per-account & per-denomination under a denomination- and account-unique key, thus enabling O(1) read & write access to the balance of a particular account in a particular denomination.
+
+### Account interface (x/auth)
+
+`GetCoins()` and `SetCoins()` will be removed from the account interface, since coin balances will
+now be stored in & managed by the bank module.
+
+The vesting account interface will replace `SpendableCoins` in favor of `LockedCoins` which does
+not require the account balance anymore. In addition, `TrackDelegation()` will now accept the
+account balance of all tokens denominated in the vesting balance instead of loading the entire
+account balance.
+
+Vesting accounts will continue to store original vesting, delegated free, and delegated
+vesting coins (which is safe since these cannot contain arbitrary denominations).
+
+### Bank keeper (x/bank)
+
+The following APIs will be added to the `x/bank` keeper:
+
+* `GetAllBalances(ctx Context, addr AccAddress) Coins`
+* `GetBalance(ctx Context, addr AccAddress, denom string) Coin`
+* `SetBalance(ctx Context, addr AccAddress, coin Coin)`
+* `LockedCoins(ctx Context, addr AccAddress) Coins`
+* `SpendableCoins(ctx Context, addr AccAddress) Coins`
+
+Additional APIs may be added to facilitate iteration and auxiliary functionality not essential to
+core functionality or persistence.
+
+Balances will be stored first by the address, then by the denomination (the reverse is also possible,
+but retrieval of all balances for a single account is presumed to be more frequent):
+
+```go expandable
+var BalancesPrefix = []byte("balances")
+
+func (k Keeper) SetBalance(ctx Context, addr AccAddress, balance Coin) error {
+ if !balance.IsValid() {
+ return err
+}
+ store := ctx.KVStore(k.storeKey)
+ balancesStore := prefix.NewStore(store, BalancesPrefix)
+ accountStore := prefix.NewStore(balancesStore, addr.Bytes())
+ bz := Marshal(balance)
+
+accountStore.Set([]byte(balance.Denom), bz)
+
+return nil
+}
+```
+
+This will result in the balances being indexed by the byte representation of
+`balances/{address}/{denom}`.
+
+`DelegateCoins()` and `UndelegateCoins()` will be altered to only load each individual
+account balance by denomination found in the (un)delegation amount. As a result,
+any mutations to the account balance will be made by denomination.
+
+`SubtractCoins()` and `AddCoins()` will be altered to read & write the balances
+directly instead of calling `GetCoins()` / `SetCoins()` (which no longer exist).
+
+`trackDelegation()` and `trackUndelegation()` will be altered to no longer update
+account balances.
+
+External APIs will need to scan all balances under an account to retain backwards-compatibility. It
+is advised that these APIs use `GetBalance` and `SetBalance` instead of `GetAllBalances` when
+possible as to not load the entire account balance.
+
+### Supply module
+
+The supply module, in order to implement the total supply invariant, will now need
+to scan all accounts & call `GetAllBalances` using the `x/bank` Keeper, then sum
+the balances and check that they match the expected total supply.
+
+## Status
+
+Accepted.
+
+## Consequences
+
+### Positive
+
+* O(1) reads & writes of balances (with respect to the number of denominations for
+ which an account has non-zero balances). Note, this does not relate to the actual
+ I/O cost, rather the total number of direct reads needed.
+
+### Negative
+
+* Slightly less efficient reads/writes when reading & writing all balances of a
+ single account in a transaction.
+
+### Neutral
+
+None in particular.
+
+## References
+
+* Ref: [Link](https://github.com/cosmos/cosmos-sdk/issues/4982)
+* Ref: [Link](https://github.com/cosmos/cosmos-sdk/issues/5467)
+* Ref: [Link](https://github.com/cosmos/cosmos-sdk/issues/5492)
diff --git a/sdk/next/build/architecture/adr-006-secret-store-replacement.mdx b/sdk/next/build/architecture/adr-006-secret-store-replacement.mdx
new file mode 100644
index 000000000..1070d7885
--- /dev/null
+++ b/sdk/next/build/architecture/adr-006-secret-store-replacement.mdx
@@ -0,0 +1,59 @@
+---
+title: 'ADR 006: Secret Store Replacement'
+description: >-
+ July 29th, 2019: Initial draft September 11th, 2019: Work has started November
+ 4th: Cosmos SDK changes merged in November 18th: Gaia changes merged in
+---
+
+## Changelog
+
+* July 29th, 2019: Initial draft
+* September 11th, 2019: Work has started
+* November 4th: Cosmos SDK changes merged in
+* November 18th: Gaia changes merged in
+
+## Context
+
+Currently, a Cosmos SDK application's CLI directory stores key material and metadata in a plain text database in the user's home directory. Key material is encrypted by a passphrase, protected by the bcrypt hashing algorithm. Metadata (e.g. addresses, public keys, key storage details) is available in plain text.
+
+This is not desirable for a number of reasons. Perhaps the biggest reason is insufficient security protection of key material and metadata. Leaking the plain text allows an attacker to surveil what keys a given computer controls via a number of techniques, like compromised dependencies without any privileged execution. This could be followed by a more targeted attack on a particular user/computer.
+
+All modern desktop operating systems (Ubuntu, Debian, macOS, Windows) provide a built-in secret store that is designed to allow applications to store information that is isolated from all other applications and requires passphrase entry to access the data.
+
+We are seeking a solution that provides a common abstraction layer to the many different backends and reasonable fallback for minimal platforms that don't provide a native secret store.
+
+## Decision
+
+We recommend replacing the current Keybase backend based on LevelDB with [Keyring](https://github.com/99designs/keyring) by 99designs. This application is designed to provide a common abstraction and uniform interface between many secret stores and is used by the AWS Vault application by 99designs.
+
+This appears to fulfill the requirement of protecting both key material and metadata from rogue software on a user's machine.
+
+## Status
+
+Accepted
+
+## Consequences
+
+### Positive
+
+Increased safety for users.
+
+### Negative
+
+Users must manually migrate.
+
+Testing against all supported backends is difficult.
+
+Running tests locally on a Mac requires numerous repetitive password entries.
+
+### Neutral
+
+No neutral consequences identified.
+
+## References
+
+* \#4754 Switch secret store to the keyring secret store (original PR by @poldsam) \[**CLOSED**]
+* \#5029 Add support for github.com/99designs/keyring-backed keybases \[**MERGED**]
+* \#5097 Add keys migrate command \[**MERGED**]
+* \#5180 Drop on-disk keybase in favor of keyring \[*PENDING\_REVIEW*]
+* cosmos/gaia#164 Drop on-disk keybase in favor of keyring (gaia's changes) \[*PENDING\_REVIEW*]
diff --git a/sdk/next/build/architecture/adr-007-specialization-groups.mdx b/sdk/next/build/architecture/adr-007-specialization-groups.mdx
new file mode 100644
index 000000000..d949cb574
--- /dev/null
+++ b/sdk/next/build/architecture/adr-007-specialization-groups.mdx
@@ -0,0 +1,198 @@
+---
+title: 'ADR 007: Specialization Groups'
+description: '2019 Jul 31: Initial Draft'
+---
+
+## Changelog
+
+* 2019 Jul 31: Initial Draft
+
+## Context
+
+This idea was first conceived of in order to fulfill the use case of the
+creation of a decentralized Computer Emergency Response Team (dCERT), whose
+members would be elected by a governing community and would fulfill the role of
+coordinating the community under emergency situations. This thinking
+can be further abstracted into the conception of "blockchain specialization
+groups".
+
+The creation of these groups are the beginning of specialization capabilities
+within a wider blockchain community which could be used to enable a certain
+level of delegated responsibilities. Examples of specialization which could be
+beneficial to a blockchain community include: code auditing, emergency response,
+code development etc. This type of community organization paves the way for
+individual stakeholders to delegate votes by issue type, if in the future
+governance proposals include a field for issue type.
+
+## Decision
+
+A specialization group can be broadly broken down into the following functions
+(herein containing examples):
+
+* Membership Admittance
+* Membership Acceptance
+* Membership Revocation
+ * (probably) Without Penalty
+ * member steps down (self-Revocation)
+ * replaced by new member from governance
+ * (probably) With Penalty
+ * due to breach of soft-agreement (determined through governance)
+ * due to breach of hard-agreement (determined by code)
+* Execution of Duties
+ * Special transactions which only execute for members of a specialization
+ group (for example, dCERT members voting to turn off transaction routes in
+ an emergency scenario)
+* Compensation
+ * Group compensation (further distribution decided by the specialization group)
+ * Individual compensation for all constituents of a group from the
+ greater community
+
+Membership admittance to a specialization group could take place over a wide
+variety of mechanisms. The most obvious example is through a general vote among
+the entire community, however in certain systems a community may want to allow
+the members already in a specialization group to internally elect new members,
+or maybe the community may assign a permission to a particular specialization
+group to appoint members to other 3rd party groups. The sky is really the limit
+as to how membership admittance can be structured. We attempt to capture
+some of these possibilities in a common interface dubbed the `Electionator`. For
+its initial implementation as a part of this ADR we recommend that the general
+election abstraction (`Electionator`) is provided as well as a basic
+implementation of that abstraction which allows for a continuous election of
+members of a specialization group.
+
+```golang expandable
+// The Electionator abstraction covers the concept space for
+// a wide variety of election kinds.
+type Electionator interface {
+
+ // is the election object accepting votes.
+ Active() bool
+
+ // functionality to execute for when a vote is cast in this election, here
+ // the vote field is anticipated to be marshalled into a vote type used
+ // by an election.
+ //
+ // NOTE There are no explicit ids here. Just votes which pertain specifically
+ // to one electionator. Anyone can create and send a vote to the electionator item
+ // which will presumably attempt to marshal those bytes into a particular struct
+ // and apply the vote information in some arbitrary way. There can be multiple
+ // Electionators within the Cosmos-Hub for multiple specialization groups, votes
+ // would need to be routed to the Electionator upstream of here.
+ Vote(addr sdk.AccAddress, vote []byte)
+
+ // here lies all functionality to authenticate and execute changes for
+ // when a member accepts being elected
+ AcceptElection(sdk.AccAddress)
+
+ // Register a revoker object
+ RegisterRevoker(Revoker)
+
+ // No more revokers may be registered after this function is called
+ SealRevokers()
+
+ // register hooks to call when an election actions occur
+ RegisterHooks(ElectionatorHooks)
+
+ // query for the current winner(s) of this election based on arbitrary
+ // election ruleset
+ QueryElected() []sdk.AccAddress
+
+ // query metadata for an address in the election this
+ // could include for example position that an address
+ // is being elected for within a group
+ //
+ // this metadata may be directly related to
+ // voting information and/or privileges enabled
+ // to members within a group.
+ QueryMetadata(sdk.AccAddress) []byte
+}
+
+// ElectionatorHooks, once registered with an Electionator,
+// trigger execution of relevant interface functions when
+// Electionator events occur.
+type ElectionatorHooks interface {
+ AfterVoteCast(addr sdk.AccAddress, vote []byte)
+ AfterMemberAccepted(addr sdk.AccAddress)
+ AfterMemberRevoked(addr sdk.AccAddress, cause []byte)
+}
+
+// Revoker defines the function required for a membership revocation rule-set
+// used by a specialization group. This could be used to create self revoking,
+// and evidence based revoking, etc. Revokers types may be created and
+// reused for different election types.
+//
+// When revoking the "cause" bytes may be arbitrarily marshalled into evidence,
+// memos, etc.
+type Revoker interface {
+ RevokeName() string // identifier for this revoker type
+ RevokeMember(addr sdk.AccAddress, cause []byte) error
+}
+```
+
+Certain level of commonality likely exists between the existing code within
+`x/governance` and required functionality of elections. This common
+functionality should be abstracted during implementation. Similarly for each
+vote implementation client CLI/REST functionality should be abstracted
+to be reused for multiple elections.
+
+The specialization group abstraction firstly extends the `Electionator`
+but also further defines traits of the group.
+
+```golang expandable
+type SpecializationGroup interface {
+ Electionator
+ GetName() string
+ GetDescription() string
+
+ // general soft contract the group is expected
+ // to fulfill with the greater community
+ GetContract() string
+
+ // messages which can be executed by the members of the group
+ Handler(ctx sdk.Context, msg sdk.Msg) sdk.Result
+
+ // logic to be executed at endblock, this may for instance
+ // include payment of a stipend to the group members
+ // for participation in the security group.
+ EndBlocker(ctx sdk.Context)
+}
+```
+
+## Status
+
+> Proposed
+
+## Consequences
+
+### Positive
+
+* increases specialization capabilities of a blockchain
+* improve abstractions in `x/gov/` such that they can be used with specialization groups
+
+### Negative
+
+* could be used to increase centralization within a community
+
+### Neutral
+
+## References
+
+* [dCERT ADR](/sdk/v0.50/build/architecture/adr-008-dCERT-group)
diff --git a/sdk/next/build/architecture/adr-008-dCERT-group.mdx b/sdk/next/build/architecture/adr-008-dCERT-group.mdx
new file mode 100644
index 000000000..b4b045d57
--- /dev/null
+++ b/sdk/next/build/architecture/adr-008-dCERT-group.mdx
@@ -0,0 +1,174 @@
+---
+title: 'ADR 008: Decentralized Computer Emergency Response Team (dCERT) Group'
+description: '2019 Jul 31: Initial Draft'
+---
+
+## Changelog
+
+* 2019 Jul 31: Initial Draft
+
+## Context
+
+In order to reduce the number of parties involved with handling sensitive
+information in an emergency scenario, we propose the creation of a
+specialization group named The Decentralized Computer Emergency Response Team
+(dCERT). Initially this group's role is intended to serve as coordinators
+between various actors within a blockchain community such as validators,
+bug-hunters, and developers. During a time of crisis, the dCERT group would
+aggregate and relay input from a variety of stakeholders to the developers who
+are actively devising a patch to the software, this way sensitive information
+does not need to be publicly disclosed while some input from the community can
+still be gained.
+
+Additionally, a special privilege is proposed for the dCERT group: the capacity
+to "circuit-break" (aka. temporarily disable) a particular message path. Note
+that this privilege should be enabled/disabled globally with a governance
+parameter such that this privilege could start disabled and later be enabled
+through a parameter change proposal, once a dCERT group has been established.
+
+In the future it is foreseeable that the community may wish to expand the roles
+of dCERT with further responsibilities such as the capacity to "pre-approve" a
+security update on behalf of the community prior to a full community
+wide vote whereby the sensitive information would be revealed prior to a
+vulnerability being patched on the live network.
+
+## Decision
+
+The dCERT group is proposed to include an implementation of a `SpecializationGroup`
+as defined in [ADR 007](/sdk/next/build/architecture/adr-007-specialization-groups). This will include the
+implementation of:
+
+* continuous voting
+* slashing due to breach of soft contract
+* revoking a member due to breach of soft contract
+* emergency disband of the entire dCERT group (ex. for colluding maliciously)
+* compensation stipend from the community pool or other means decided by
+ governance
+
+This system necessitates the following new parameters:
+
+* per-block stipend allowance per dCERT member
+* maximum number of dCERT members
+* required staked slashable tokens for each dCERT member
+* quorum for suspending a particular member
+* proposal wager for disbanding the dCERT group
+* stabilization period for dCERT member transition
+* circuit break dCERT privileges enabled
+
+These parameters are expected to be implemented through the param keeper such
+that governance may change them at any given point.
+
+### Continuous Voting Electionator
+
+An `Electionator` object is to be implemented as continuous voting and with the
+following specifications:
+
+* All delegation addresses may submit votes at any point which updates their
+ preferred representation on the dCERT group.
+* Preferred representation may be arbitrarily split between addresses (ex. 50%
+ to John, 25% to Sally, 25% to Carol)
+* In order for a new member to be added to the dCERT group they must
+ send a transaction accepting their admission at which point the validity of
+ their admission is to be confirmed.
+ * A sequence number is assigned when a member is added to dCERT group.
+ If a member leaves the dCERT group and then enters back, a new sequence number
+ is assigned.
+* Addresses which control the greatest amount of preferred-representation are
+  eligible to join the dCERT group (up to the *maximum number of dCERT members*).
+  If the dCERT group is already full and a new member is admitted, the existing
+  dCERT member with the lowest amount of votes is kicked from the dCERT group.
+ * In the split situation where the dCERT group is full but a vying candidate
+ has the same amount of vote as an existing dCERT member, the existing
+ member should maintain its position.
+ * In the split situation where somebody must be kicked out but the two
+ addresses with the smallest number of votes have the same number of votes,
+ the address with the smallest sequence number maintains its position.
+* A stabilization period can be optionally included to reduce the
+ "flip-flopping" of the dCERT membership tail members. If a stabilization
+ period is provided which is greater than 0, when members are kicked due to
+ insufficient support, a queue entry is created which documents which member is
+ to replace which other member. While this entry is in the queue, no new entries
+ to kick that same dCERT member can be made. When the entry matures at the
+ duration of the stabilization period, the new member is instantiated, and old
+ member kicked.
+
+### Staking/Slashing
+
+All members of the dCERT group must stake tokens *specifically* to maintain
+eligibility as a dCERT member. These tokens can be staked directly by the vying
+dCERT member or out of the good will of a 3rd party (who shall gain no on-chain
+benefits for doing so). This staking mechanism should use the existing global
+unbonding time of tokens staked for network validator security. A dCERT member
+can *only be* a member if it has the required tokens staked under this
+mechanism. If those tokens are unbonded then the dCERT member must be
+automatically kicked from the group.
+
+Slashing of a particular dCERT member due to soft-contract breach should be
+performed by governance on a per member basis based on the magnitude of the
+breach. The process flow is anticipated to be that a dCERT member is suspended
+by the dCERT group prior to being slashed by governance.
+
+Membership suspension by the dCERT group takes place through a voting procedure
+by the dCERT group members. After this suspension has taken place, a governance
+proposal to slash the dCERT member must be submitted, if the proposal is not
+approved by the time the rescinding member has completed unbonding their
+tokens, then the tokens are no longer staked and unable to be slashed.
+
+Additionally in the case of an emergency situation of a colluding and malicious
+dCERT group, the community needs the capability to disband the entire dCERT
+group and likely fully slash them. This could be achieved through a special new
+proposal type (implemented as a general governance proposal) which would halt
+the functionality of the dCERT group until the proposal was concluded. This
+special proposal type would likely need to also have a fairly large wager which
+could be slashed if the proposal creator was malicious. The reason a large
+wager should be required is because as soon as the proposal is made, the
+capability of the dCERT group to halt message routes is temporarily
+suspended, meaning that a malicious actor who created such a proposal could
+then potentially exploit a bug during this period of time, with no dCERT group
+capable of shutting down the exploitable message routes.
+
+### dCERT membership transactions
+
+Active dCERT members may perform the following actions:
+
+* change of the description of the dCERT group
+* circuit break a message route
+* vote to suspend a dCERT member.
+
+Here circuit-breaking refers to the capability to disable a group of messages.
+This could for instance mean: "disable all staking-delegation messages", or
+"disable all distribution messages". This could be accomplished by verifying
+that the message route has not been "circuit-broken" at CheckTx time (in
+`baseapp/baseapp.go`).
+
+"unbreaking" a circuit is anticipated only to occur during a hard fork upgrade
+meaning that no capability to unbreak a message route on a live chain is
+required.
+
+Note also, that if there was a problem with governance voting (for instance a
+capability to vote many times) then governance would be broken and should be
+halted with this mechanism; it would then be up to the validator set to
+coordinate and hard-fork upgrade to a patched version of the software where
+governance is re-enabled (and fixed). If the dCERT group abuses this privilege
+they should all be severely slashed.
+
+## Status
+
+> Proposed
+
+## Consequences
+
+### Positive
+
+* Potential to reduce the number of parties to coordinate with during an emergency
+* Reduction in possibility of disclosing sensitive information to malicious parties
+
+### Negative
+
+* Centralization risks
+
+### Neutral
+
+## References
+
+[Specialization Groups ADR](/sdk/next/build/architecture/adr-007-specialization-groups)
diff --git a/sdk/next/build/architecture/adr-009-evidence-module.mdx b/sdk/next/build/architecture/adr-009-evidence-module.mdx
new file mode 100644
index 000000000..0b22fbf7c
--- /dev/null
+++ b/sdk/next/build/architecture/adr-009-evidence-module.mdx
@@ -0,0 +1,218 @@
+---
+title: 'ADR 009: Evidence Module'
+description: '2019 July 31: Initial draft 2019 October 24: Initial implementation'
+---
+
+## Changelog
+
+* 2019 July 31: Initial draft
+* 2019 October 24: Initial implementation
+
+## Status
+
+Accepted
+
+## Context
+
+In order to support building highly secure, robust and interoperable blockchain
+applications, it is vital for the Cosmos SDK to expose a mechanism in which arbitrary
+evidence can be submitted, evaluated and verified resulting in some agreed upon
+penalty for any misbehavior committed by a validator, such as equivocation (double-voting),
+signing when unbonded, signing an incorrect state transition (in the future), etc.
+Furthermore, such a mechanism is paramount for any
+[IBC](https://github.com/cosmos/ics/blob/master/ibc/2_IBC_ARCHITECTURE.md) or
+cross-chain validation protocol implementation in order to support the ability
+for any misbehavior to be relayed back from a collateralized chain to a primary
+chain so that the equivocating validator(s) can be slashed.
+
+## Decision
+
+We will implement an evidence module in the Cosmos SDK supporting the following
+functionality:
+
+* Provide developers with the abstractions and interfaces necessary to define
+ custom evidence messages, message handlers, and methods to slash and penalize
+ accordingly for misbehavior.
+* Support the ability to route evidence messages to handlers in any module to
+ determine the validity of submitted misbehavior.
+* Support the ability, through governance, to modify slashing penalties of any
+ evidence type.
+* Querier implementation to support querying params, evidence types, and
+  all submitted valid misbehavior.
+
+### Types
+
+First, we define the `Evidence` interface type. The `x/evidence` module may implement
+its own types that can be used by many chains (e.g. `CounterFactualEvidence`).
+In addition, other modules may implement their own `Evidence` types in a similar
+manner in which governance is extensible. It is important to note any concrete
+type implementing the `Evidence` interface may include arbitrary fields such as
+an infraction time. We want the `Evidence` type to remain as flexible as possible.
+
+When submitting evidence to the `x/evidence` module, the concrete type must provide
+the validator's consensus address, which should be known by the `x/slashing`
+module (assuming the infraction is valid), the height at which the infraction
+occurred and the validator's power at same height in which the infraction occurred.
+
+```go expandable
+type Evidence interface {
+ Route()
+
+string
+ Type()
+
+string
+ String()
+
+string
+ Hash()
+
+HexBytes
+ ValidateBasic()
+
+error
+
+ // The consensus address of the malicious validator at time of infraction
+ GetConsensusAddress()
+
+ConsAddress
+
+ // Height at which the infraction occurred
+ GetHeight()
+
+int64
+
+ // The total power of the malicious validator at time of infraction
+ GetValidatorPower()
+
+int64
+
+ // The total validator set power at time of infraction
+ GetTotalPower()
+
+int64
+}
+```
+
+### Routing & Handling
+
+Each `Evidence` type must map to a specific unique route and be registered with
+the `x/evidence` module. It accomplishes this through the `Router` implementation.
+
+```go
+type Router interface {
+ AddRoute(r string, h Handler)
+
+Router
+ HasRoute(r string)
+
+bool
+ GetRoute(path string)
+
+Handler
+ Seal()
+}
+```
+
+Upon successful routing through the `x/evidence` module, the `Evidence` type
+is passed through a `Handler`. This `Handler` is responsible for executing all
+corresponding business logic necessary for verifying the evidence as valid. In
+addition, the `Handler` may execute any necessary slashing and potential jailing.
+Since slashing fractions will typically result from some form of static functions,
+allowing the `Handler` to do this provides the greatest flexibility. An example could
+be `k * evidence.GetValidatorPower()` where `k` is an on-chain parameter controlled
+by governance. The `Evidence` type should provide all the external information
+necessary in order for the `Handler` to make the necessary state transitions.
+If no error is returned, the `Evidence` is considered valid.
+
+```go
+type Handler func(Context, Evidence)
+
+error
+```
+
+### Submission
+
+`Evidence` is submitted through a `MsgSubmitEvidence` message type which is internally
+handled by the `x/evidence` module's `SubmitEvidence`.
+
+```go expandable
+type MsgSubmitEvidence struct {
+ Evidence
+}
+
+func handleMsgSubmitEvidence(ctx Context, keeper Keeper, msg MsgSubmitEvidence)
+
+Result {
+ if err := keeper.SubmitEvidence(ctx, msg.Evidence); err != nil {
+ return err.Result()
+}
+
+ // emit events...
+
+ return Result{
+ // ...
+}
+}
+```
+
+The `x/evidence` module's keeper is responsible for matching the `Evidence` against
+the module's router and invoking the corresponding `Handler` which may include
+slashing and jailing the validator. Upon success, the submitted evidence is persisted.
+
+```go
+func (k Keeper)
+
+SubmitEvidence(ctx Context, evidence Evidence)
+
+error {
+ handler := keeper.router.GetRoute(evidence.Route())
+ if err := handler(ctx, evidence); err != nil {
+ return ErrInvalidEvidence(keeper.codespace, err)
+}
+
+keeper.setEvidence(ctx, evidence)
+
+return nil
+}
+```
+
+### Genesis
+
+Finally, we need to represent the genesis state of the `x/evidence` module. The
+module only needs a list of all submitted valid infractions and any necessary
+params the module needs in order to handle submitted evidence. The `x/evidence`
+module will naturally define and route native evidence types for which it'll most
+likely need slashing penalty constants for.
+
+```go
+type GenesisState struct {
+ Params Params
+ Infractions []Evidence
+}
+```
+
+## Consequences
+
+### Positive
+
+* Allows the state machine to process misbehavior submitted on-chain and penalize
+ validators based on agreed upon slashing parameters.
+* Allows evidence types to be defined and handled by any module. This further allows
+ slashing and jailing to be defined by more complex mechanisms.
+* Does not solely rely on Tendermint to submit evidence.
+
+### Negative
+
+* No easy way to introduce new evidence types through governance on a live chain
+ due to the inability to introduce the new evidence type's corresponding handler
+
+### Neutral
+
+* Should we persist infractions indefinitely? Or should we rather rely on events?
+
+## References
+
+* [ICS](https://github.com/cosmos/ics)
+* [IBC Architecture](https://github.com/cosmos/ics/blob/master/ibc/1_IBC_ARCHITECTURE.md)
+* [Tendermint Fork Accountability](https://github.com/tendermint/spec/blob/7b3138e69490f410768d9b1ffc7a17abc23ea397/spec/consensus/fork-accountability.md)
diff --git a/sdk/next/build/architecture/adr-010-modular-antehandler.mdx b/sdk/next/build/architecture/adr-010-modular-antehandler.mdx
new file mode 100644
index 000000000..fda64342e
--- /dev/null
+++ b/sdk/next/build/architecture/adr-010-modular-antehandler.mdx
@@ -0,0 +1,320 @@
+---
+title: 'ADR 010: Modular AnteHandler'
+description: '2019 Aug 31: Initial draft 2021 Sep 14: Superseded by ADR-045'
+---
+
+## Changelog
+
+* 2019 Aug 31: Initial draft
+* 2021 Sep 14: Superseded by ADR-045
+
+## Status
+
+SUPERSEDED by ADR-045
+
+## Context
+
+The current AnteHandler design allows users to either use the default AnteHandler provided in `x/auth` or to build their own AnteHandler from scratch. Ideally AnteHandler functionality is split into multiple, modular functions that can be chained together along with custom ante-functions so that users do not have to rewrite common antehandler logic when they want to implement custom behavior.
+
+For example, let's say a user wants to implement some custom signature verification logic. In the current codebase, the user would have to write their own Antehandler from scratch largely reimplementing much of the same code and then set their own custom, monolithic antehandler in the baseapp. Instead, we would like to allow users to specify custom behavior when necessary and combine them with default ante-handler functionality in a way that is as modular and flexible as possible.
+
+## Proposals
+
+### Per-Module AnteHandler
+
+One approach is to use the [ModuleManager](https://pkg.go.dev/github.com/cosmos/cosmos-sdk/types/module) and have each module implement its own antehandler if it requires custom antehandler logic. The ModuleManager can then be passed in an AnteHandler order in the same way it has an order for BeginBlockers and EndBlockers. The ModuleManager returns a single AnteHandler function that will take in a tx and run each module's `AnteHandle` in the specified order. The module manager's AnteHandler is set as the baseapp's AnteHandler.
+
+Pros:
+
+1. Simple to implement
+2. Utilizes the existing ModuleManager architecture
+
+Cons:
+
+1. Improves granularity but still cannot get more granular than a per-module basis. e.g. If auth's `AnteHandle` function is in charge of validating memo and signatures, users cannot swap the signature-checking functionality while keeping the rest of auth's `AnteHandle` functionality.
+2. Module AnteHandler are run one after the other. There is no way for one AnteHandler to wrap or "decorate" another.
+
+### Decorator Pattern
+
+The [weave project](https://github.com/iov-one/weave) achieves AnteHandler modularity through the use of a decorator pattern. The interface is designed as follows:
+
+```go
+// Decorator wraps a Handler to provide common functionality
+// like authentication, or fee-handling, to many Handlers
+type Decorator interface {
+ Check(ctx Context, store KVStore, tx Tx, next Checker) (*CheckResult, error)
+
+Deliver(ctx Context, store KVStore, tx Tx, next Deliverer) (*DeliverResult, error)
+}
+```
+
+Each decorator works like a modularized Cosmos SDK antehandler function, but it can take in a `next` argument that may be another decorator or a Handler (which does not take in a next argument). These decorators can be chained together, one decorator being passed in as the `next` argument of the previous decorator in the chain. The chain ends in a Router which can take a tx and route to the appropriate msg handler.
+
+A key benefit of this approach is that one Decorator can wrap its internal logic around the next Checker/Deliverer. A weave Decorator may do the following:
+
+```go
+// Example Decorator's Deliver function
+func (example Decorator)
+
+Deliver(ctx Context, store KVStore, tx Tx, next Deliverer) {
+ // Do some pre-processing logic
+
+ res, err := next.Deliver(ctx, store, tx)
+
+ // Do some post-processing logic given the result and error
+}
+```
+
+Pros:
+
+1. Weave Decorators can wrap over the next decorator/handler in the chain. The ability to both pre-process and post-process may be useful in certain settings.
+2. Provides a nested modular structure that isn't possible in the solution above, while also allowing for a linear one-after-the-other structure like the solution above.
+
+Cons:
+
+1. It is hard to understand at first glance the state updates that would occur after a Decorator runs given the `ctx`, `store`, and `tx`. A Decorator can have an arbitrary number of nested Decorators being called within its function body, each possibly doing some pre- and post-processing before calling the next decorator on the chain. Thus to understand what a Decorator is doing, one must also understand what every other decorator further along the chain is also doing. This can get quite complicated to understand. A linear, one-after-the-other approach while less powerful, may be much easier to reason about.
+
+### Chained Micro-Functions
+
+The benefit of Weave's approach is that the Decorators can be very concise, which when chained together allows for maximum customizability. However, the nested structure can get quite complex and thus hard to reason about.
+
+Another approach is to split the AnteHandler functionality into tightly scoped "micro-functions", while preserving the one-after-the-other ordering that would come from the ModuleManager approach.
+
+We can then have a way to chain these micro-functions so that they run one after the other. Modules may define multiple ante micro-functions and then also provide a default per-module AnteHandler that implements a default, suggested order for these micro-functions.
+
+Users can order the AnteHandlers easily by simply using the ModuleManager. The ModuleManager will take in a list of AnteHandlers and return a single AnteHandler that runs each AnteHandler in the order of the list provided. If the user is comfortable with the default ordering of each module, this is as simple as providing a list with each module's antehandler (exactly the same as BeginBlocker and EndBlocker).
+
+If however, users wish to change the order or add, modify, or delete ante micro-functions in any way, they can always define their own ante micro-functions and add them explicitly to the list that gets passed into module manager.
+
+#### Default Workflow
+
+This is an example of a user's AnteHandler if they choose not to make any custom micro-functions.
+
+##### Cosmos SDK code
+
+```go expandable
+// Chains together a list of AnteHandler micro-functions that get run one after the other.
+// Returned AnteHandler will abort on first error.
+func Chainer(order []AnteHandler)
+
+AnteHandler {
+ return func(ctx Context, tx Tx, simulate bool) (newCtx Context, err error) {
+ for _, ante := range order {
+ ctx, err := ante(ctx, tx, simulate)
+ if err != nil {
+ return ctx, err
+}
+
+}
+
+return ctx, err
+}
+}
+```
+
+```go expandable
+// AnteHandler micro-function to verify signatures
+func VerifySignatures(ctx Context, tx Tx, simulate bool) (newCtx Context, err error) {
+ // verify signatures
+ // Returns InvalidSignature Result and abort=true if sigs invalid
+ // Return OK result and abort=false if sigs are valid
+}
+
+// AnteHandler micro-function to validate memo
+func ValidateMemo(ctx Context, tx Tx, simulate bool) (newCtx Context, err error) {
+ // validate memo
+}
+
+// Auth defines its own default ante-handler by chaining its micro-functions in a recommended order
+AuthModuleAnteHandler := Chainer([]AnteHandler{
+ VerifySignatures, ValidateMemo
+})
+```
+
+```go expandable
+// Distribution micro-function to deduct fees from tx
+func DeductFees(ctx Context, tx Tx, simulate bool) (newCtx Context, err error) {
+ // Deduct fees from tx
+ // Abort if insufficient funds in account to pay for fees
+}
+
+// Distribution micro-function to check if fees > mempool parameter
+func CheckMempoolFees(ctx Context, tx Tx, simulate bool) (newCtx Context, err error) {
+ // If CheckTx: Abort if the fees are less than the mempool's minFee parameter
+}
+
+// Distribution defines its own default ante-handler by chaining its micro-functions in a recommended order
+DistrModuleAnteHandler := Chainer([]AnteHandler{
+ CheckMempoolFees, DeductFees
+})
+```
+
+```go
+type ModuleManager struct {
+ // other fields
+ AnteHandlerOrder []AnteHandler
+}
+
+func (mm ModuleManager)
+
+GetAnteHandler()
+
+AnteHandler {
+    return Chainer(mm.AnteHandlerOrder)
+}
+```
+
+##### User Code
+
+```go
+// Note: Since user is not making any custom modifications, we can just SetAnteHandlerOrder with the default AnteHandlers provided by each module in our preferred order
+moduleManager.SetAnteHandlerOrder([]AnteHandler{AuthModuleAnteHandler, DistrModuleAnteHandler})
+
+app.SetAnteHandler(mm.GetAnteHandler())
+```
+
+#### Custom Workflow
+
+This is an example workflow for a user that wants to implement custom antehandler logic. In this example, the user wants to implement custom signature verification and change the order of antehandler so that validate memo runs before signature verification.
+
+##### User Code
+
+```go
+// User can implement their own custom signature verification antehandler micro-function
+func CustomSigVerify(ctx Context, tx Tx, simulate bool) (newCtx Context, err error) {
+ // do some custom signature verification logic
+}
+```
+
+```go
+// Micro-functions allow users to change order of when they get executed, and swap out default ante-functionality with their own custom logic.
+// Note that users can still chain the default distribution module handler, and auth micro-function along with their custom ante function
+moduleManager.SetAnteHandlerOrder([]AnteHandler{ValidateMemo, CustomSigVerify, DistrModuleAnteHandler})
+```
+
+Pros:
+
+1. Allows for ante functionality to be as modular as possible.
+2. For users that do not need custom ante-functionality, there is little difference between how antehandlers work and how BeginBlock and EndBlock work in ModuleManager.
+3. Still easy to understand
+
+Cons:
+
+1. Cannot wrap antehandlers with decorators like you can with Weave.
+
+### Simple Decorators
+
+This approach takes inspiration from Weave's decorator design while trying to minimize the number of breaking changes to the Cosmos SDK and maximizing simplicity. Like Weave decorators, this approach allows one `AnteDecorator` to wrap the next AnteHandler to do pre- and post-processing on the result. This is useful since decorators can do defer/cleanups after an AnteHandler returns as well as perform some setup beforehand. Unlike Weave decorators, these `AnteDecorator` functions can only wrap over the AnteHandler rather than the entire handler execution path. This is deliberate as we want decorators from different modules to perform authentication/validation on a `tx`. However, we do not want decorators being capable of wrapping and modifying the results of a `MsgHandler`.
+
+In addition, this approach will not break any core Cosmos SDK API's. Since we preserve the notion of an AnteHandler and still set a single AnteHandler in baseapp, the decorator is simply an additional approach available for users that desire more customization. The API of modules (namely `x/auth`) may break with this approach, but the core API remains untouched.
+
+Allow Decorator interface that can be chained together to create a Cosmos SDK AnteHandler.
+
+This allows users to choose between implementing an AnteHandler by themselves and setting it in the baseapp, or use the decorator pattern to chain their custom decorators with the Cosmos SDK provided decorators in the order they wish.
+
+```go
+// An AnteDecorator wraps an AnteHandler, and can do pre- and post-processing on the next AnteHandler
+type AnteDecorator interface {
+ AnteHandle(ctx Context, tx Tx, simulate bool, next AnteHandler) (newCtx Context, err error)
+}
+```
+
+```go expandable
+// ChainAnteDecorators will recursively link all of the AnteDecorators in the chain and return a final AnteHandler function
+// This is done to preserve the ability to set a single AnteHandler function in the baseapp.
+func ChainAnteDecorators(chain ...AnteDecorator)
+
+AnteHandler {
+ if len(chain) == 1 {
+ return func(ctx Context, tx Tx, simulate bool) {
+ chain[0].AnteHandle(ctx, tx, simulate, nil)
+}
+
+}
+
+return func(ctx Context, tx Tx, simulate bool) {
+    chain[0].AnteHandle(ctx, tx, simulate, ChainAnteDecorators(chain[1:]...))
+}
+}
+```
+
+#### Example Code
+
+Define AnteDecorator functions
+
+```go expandable
+// Setup GasMeter, catch OutOfGasPanic and handle appropriately
+type SetUpContextDecorator struct{
+}
+
+func (sud SetUpContextDecorator)
+
+AnteHandle(ctx Context, tx Tx, simulate bool, next AnteHandler) (newCtx Context, err error) {
+ ctx.GasMeter = NewGasMeter(tx.Gas)
+
+defer func() {
+ // recover from OutOfGas panic and handle appropriately
+}
+
+return next(ctx, tx, simulate)
+}
+
+// Signature Verification decorator. Verify Signatures and move on
+type SigVerifyDecorator struct{
+}
+
+func (svd SigVerifyDecorator)
+
+AnteHandle(ctx Context, tx Tx, simulate bool, next AnteHandler) (newCtx Context, err error) {
+ // verify sigs. Return error if invalid
+
+ // call next antehandler if sigs ok
+ return next(ctx, tx, simulate)
+}
+
+// User-defined Decorator. Can choose to pre- and post-process on AnteHandler
+type UserDefinedDecorator struct{
+ // custom fields
+}
+
+func (udd UserDefinedDecorator)
+
+AnteHandle(ctx Context, tx Tx, simulate bool, next AnteHandler) (newCtx Context, err error) {
+ // pre-processing logic
+
+ ctx, err = next(ctx, tx, simulate)
+
+ // post-processing logic
+}
+```
+
+Link AnteDecorators to create a final AnteHandler. Set this AnteHandler in baseapp.
+
+```go
+// Create final antehandler by chaining the decorators together
+ antehandler := ChainAnteDecorators(NewSetUpContextDecorator(), NewSigVerifyDecorator(), NewUserDefinedDecorator())
+
+// Set chained Antehandler in the baseapp
+bapp.SetAnteHandler(antehandler)
+```
+
+Pros:
+
+1. Allows one decorator to pre- and post-process the next AnteHandler, similar to the Weave design.
+2. Do not need to break baseapp API. Users can still set a single AnteHandler if they choose.
+
+Cons:
+
+1. Decorator pattern may have a deeply nested structure that is hard to understand, this is mitigated by having the decorator order explicitly listed in the `ChainAnteDecorators` function.
+2. Does not make use of the ModuleManager design. Since this is already being used for BeginBlocker/EndBlocker, this proposal seems unaligned with that design pattern.
+
+## Consequences
+
+Since pros and cons are written for each approach, they are omitted from this section
+
+## References
+
+* [#4572](https://github.com/cosmos/cosmos-sdk/issues/4572): Modular AnteHandler Issue
+* [#4583](https://github.com/cosmos/cosmos-sdk/pull/4583): Initial Implementation of Per-Module AnteHandler Approach
+* [Weave Decorator Code](https://github.com/iov-one/weave/blob/master/handler.go#L35)
+* [Weave Design Videos](https://vimeo.com/showcase/6189877)
diff --git a/sdk/next/build/architecture/adr-011-generalize-genesis-accounts.mdx b/sdk/next/build/architecture/adr-011-generalize-genesis-accounts.mdx
new file mode 100644
index 000000000..d0190bc67
--- /dev/null
+++ b/sdk/next/build/architecture/adr-011-generalize-genesis-accounts.mdx
@@ -0,0 +1,188 @@
+---
+title: 'ADR 011: Generalize Genesis Accounts'
+description: '2019-08-30: initial draft'
+---
+
+## Changelog
+
+* 2019-08-30: initial draft
+
+## Context
+
+Currently, the Cosmos SDK allows for custom account types; the `auth` keeper stores any type fulfilling its `Account` interface. However `auth` does not handle exporting or loading accounts to/from a genesis file, this is done by `genaccounts`, which only handles one of 4 concrete account types (`BaseAccount`, `ContinuousVestingAccount`, `DelayedVestingAccount` and `ModuleAccount`).
+
+Projects desiring to use custom accounts (say custom vesting accounts) need to fork and modify `genaccounts`.
+
+## Decision
+
+In summary, we will (un)marshal all accounts (interface types) directly using amino, rather than converting to `genaccounts`’s `GenesisAccount` type. Since doing this removes the majority of `genaccounts`'s code, we will merge `genaccounts` into `auth`. Marshalled accounts will be stored in `auth`'s genesis state.
+
+Detailed changes:
+
+### 1) (Un)Marshal accounts directly using amino
+
+The `auth` module's `GenesisState` gains a new field `Accounts`. Note these aren't of type `exported.Account` for reasons outlined in section 3.
+
+```go
+// GenesisState - all auth state that must be provided at genesis
+type GenesisState struct {
+ Params Params `json:"params" yaml:"params"`
+ Accounts []GenesisAccount `json:"accounts" yaml:"accounts"`
+}
+```
+
+Now `auth`'s `InitGenesis` and `ExportGenesis` (un)marshal accounts as well as the defined params.
+
+```go expandable
+// InitGenesis - Init store state from genesis data
+func InitGenesis(ctx sdk.Context, ak AccountKeeper, data GenesisState) {
+ ak.SetParams(ctx, data.Params)
+ // load the accounts
+ for _, a := range data.Accounts {
+ acc := ak.NewAccount(ctx, a) // set account number
+ ak.SetAccount(ctx, acc)
+}
+}
+
+// ExportGenesis returns a GenesisState for a given context and keeper
+func ExportGenesis(
+	ctx sdk.Context, ak AccountKeeper,
+) GenesisState {
+ params := ak.GetParams(ctx)
+
+	var genAccounts []exported.GenesisAccount
+
+	// collect every stored account as a
+	// GenesisAccount for export
+	ak.IterateAccounts(ctx, func(account exported.Account) bool {
+		genAccount := account.(exported.GenesisAccount)
+		genAccounts = append(genAccounts, genAccount)
+		return false
+	})
+
+
+return NewGenesisState(params, genAccounts)
+}
+```
+
+### 2) Register custom account types on the `auth` codec
+
+The `auth` codec must have all custom account types registered to marshal them. We will follow the pattern established in `gov` for proposals.
+
+An example custom account definition:
+
+```go
+import authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+
+// Register the module account type with the auth module codec so it can decode module accounts stored in a genesis file
+func init() {
+	authtypes.RegisterAccountTypeCodec(
+		ModuleAccount{}, "cosmos-sdk/ModuleAccount")
+}
+
+type ModuleAccount struct {
+ ...
+```
+
+The `auth` codec definition:
+
+```go expandable
+var ModuleCdc *codec.LegacyAmino
+
+func init() {
+ ModuleCdc = codec.NewLegacyAmino()
+ // register module msg's and Account interface
+ ...
+ // leave the codec unsealed
+}
+
+// RegisterAccountTypeCodec registers an external account type defined in another module for the internal ModuleCdc.
+func RegisterAccountTypeCodec(
+	o interface{}, name string) {
+ ModuleCdc.RegisterConcrete(o, name, nil)
+}
+```
+
+### 3) Genesis validation for custom account types
+
+Modules implement a `ValidateGenesis` method. As `auth` does not know of account implementations, accounts will need to validate themselves.
+
+We will unmarshal accounts into a `GenesisAccount` interface that includes a `Validate` method.
+
+```go
+type GenesisAccount interface {
+ exported.Account
+
+	Validate() error
+
+}
+```
+
+Then the `auth` `ValidateGenesis` function becomes:
+
+```go expandable
+// ValidateGenesis performs basic validation of auth genesis data returning an
+// error for any failed validation criteria.
+func ValidateGenesis(
+	data GenesisState,
+) error {
+ // Validate params
+ ...
+
+ // Validate accounts
+ addrMap := make(map[string]bool, len(data.Accounts))
+ for _, acc := range data.Accounts {
+
+ // check for duplicated accounts
+ addrStr := acc.GetAddress().String()
+ if _, ok := addrMap[addrStr]; ok {
+ return fmt.Errorf("duplicate account found in genesis state; address: %s", addrStr)
+}
+
+addrMap[addrStr] = true
+
+ // check account specific validation
+ if err := acc.Validate(); err != nil {
+ return fmt.Errorf("invalid account found in genesis state; address: %s, error: %s", addrStr, err.Error())
+}
+
+
+}
+
+return nil
+}
+```
+
+### 4) Move add-genesis-account cli to `auth`
+
+The `genaccounts` module contains a cli command to add base or vesting accounts to a genesis file.
+
+This will be moved to `auth`. We will leave it to projects to write their own commands to add custom accounts. An extensible cli handler, similar to `gov`, could be created but it is not worth the complexity for this minor use case.
+
+### 5) Update module and vesting accounts
+
+Under the new scheme, module and vesting account types need some minor updates:
+
+* Type registration on `auth`'s codec (shown above)
+* A `Validate` method for each `Account` concrete type
+
+## Status
+
+Proposed
+
+## Consequences
+
+### Positive
+
+* custom accounts can be used without needing to fork `genaccounts`
+* reduction in lines of code
+
+### Negative
+
+### Neutral
+
+* `genaccounts` module no longer exists
+* accounts in genesis files are stored under `accounts` in `auth` rather than in the `genaccounts` module.
+* `add-genesis-account` cli command now in `auth`
+
+## References
diff --git a/sdk/next/build/architecture/adr-012-state-accessors.mdx b/sdk/next/build/architecture/adr-012-state-accessors.mdx
new file mode 100644
index 000000000..f167091f4
--- /dev/null
+++ b/sdk/next/build/architecture/adr-012-state-accessors.mdx
@@ -0,0 +1,219 @@
+---
+title: 'ADR 012: State Accessors'
+description: '2019 Sep 04: Initial draft'
+---
+
+## Changelog
+
+* 2019 Sep 04: Initial draft
+
+## Context
+
+Cosmos SDK modules currently use the `KVStore` interface and `Codec` to access their respective state. While
+this provides a large degree of freedom to module developers, it is hard to modularize and the UX is
+mediocre.
+
+First, each time a module tries to access the state, it has to marshal the value and set or get the
+value and finally unmarshal. Usually this is done by declaring `Keeper.GetXXX` and `Keeper.SetXXX` functions,
+which are repetitive and hard to maintain.
+
+Second, this makes it harder to align with the object capability theorem: the right to access the
+state is defined as a `StoreKey`, which gives full access on the entire Merkle tree, so a module cannot
+send the access right to a specific key-value pair (or a set of key-value pairs) to another module safely.
+
+Finally, because the getter/setter functions are defined as methods of a module's `Keeper`, the reviewers
+have to consider the whole Merkle tree space when reviewing a function that accesses any part of the state.
+There is no static way to know which part of the state the function is accessing (and which it is not).
+
+## Decision
+
+We will define a type named `Value`:
+
+```go
+type Value struct {
+ m Mapping
+ key []byte
+}
+```
+
+The `Value` works as a reference for a key-value pair in the state, where `Value.m` defines the key-value
+space it will access and `Value.key` defines the exact key for the reference.
+
+We will define a type named `Mapping`:
+
+```go
+type Mapping struct {
+ storeKey sdk.StoreKey
+ cdc *codec.LegacyAmino
+ prefix []byte
+}
+```
+
+The `Mapping` works as a reference for a key-value space in the state, where `Mapping.storeKey` defines
+the IAVL (sub-)tree and `Mapping.prefix` defines the optional subspace prefix.
+
+We will define the following core methods for the `Value` type:
+
+```go expandable
+// Get and unmarshal stored data, noop if not exists, panic if cannot unmarshal
+func (Value) Get(
+	ctx Context,
+	ptr interface{},
+) {
+}
+
+// Get and unmarshal stored data, return error if not exists or cannot unmarshal
+func (Value) GetSafe(
+	ctx Context,
+	ptr interface{},
+) {
+}
+
+// Get stored data as raw byte slice
+func (Value) GetRaw(
+	ctx Context,
+) []byte {
+}
+
+// Marshal and set a raw value
+func (Value) Set(
+	ctx Context,
+	o interface{},
+) {
+}
+
+// Check if a raw value exists
+func (Value) Exists(
+	ctx Context,
+) bool {
+	// reports whether a raw value
+	// is stored at this key
+}
+
+// Delete a raw value
+func (Value) Delete(
+	ctx Context,
+) {
+}
+```
+
+We will define the following core methods for the `Mapping` type:
+
+```go expandable
+// Constructs key-value pair reference corresponding to the key argument in the Mapping space
+func (Mapping) Value(
+	key []byte,
+) Value {
+	// constructs the Value reference
+	// for the given key
+}
+
+// Get and unmarshal stored data, noop if not exists, panic if cannot unmarshal
+func (Mapping) Get(
+	ctx Context, key []byte,
+	ptr interface{},
+) {
+}
+
+// Get and unmarshal stored data, return error if not exists or cannot unmarshal
+func (Mapping) GetSafe(
+	ctx Context, key []byte,
+	ptr interface{},
+)
+
+// Get stored data as raw byte slice
+func (Mapping) GetRaw(
+	ctx Context, key []byte,
+) []byte {
+}
+
+// Marshal and set a raw value
+func (Mapping) Set(
+	ctx Context, key []byte,
+	o interface{},
+) {
+}
+
+// Check if a raw value exists
+func (Mapping) Has(
+	ctx Context, key []byte,
+) bool {
+	// reports whether a raw value
+	// is stored under the key
+}
+
+// Delete a raw value
+func (Mapping) Delete(
+	ctx Context, key []byte,
+) {
+}
+```
+
+Each method of the `Mapping` type that is passed the arguments `ctx`, `key`, and `args...` will proxy
+the call to `Mapping.Value(key)` with arguments `ctx` and `args...`.
+
+In addition, we will define and provide a common set of types derived from the `Value` type:
+
+```go
+type Boolean struct {
+ Value
+}
+
+type Enum struct {
+ Value
+}
+
+type Integer struct {
+ Value; enc IntEncoding
+}
+
+type String struct {
+ Value
+}
+// ...
+```
+
+Where the encoding schemes can be different, `o` arguments in core methods are typed, and `ptr` arguments
+in core methods are replaced by explicit return types.
+
+Finally, we will define a family of types derived from the `Mapping` type:
+
+```go
+type Indexer struct {
+ m Mapping
+ enc IntEncoding
+}
+```
+
+Where the `key` argument in core method is typed.
+
+Some of the properties of the accessor types are:
+
+* State access happens only when a function which takes a `Context` as an argument is invoked
+* Accessor type structs give rights to access the state only that the struct is referring, no other
+* Marshalling/Unmarshalling happens implicitly within the core methods
+
+## Status
+
+Proposed
+
+## Consequences
+
+### Positive
+
+* Serialization will be done automatically
+* Shorter code size, less boilerplate, better UX
+* References to the state can be transferred safely
+* Explicit scope of accessing
+
+### Negative
+
+* Serialization format will be hidden
+* Different architecture from the current, but the use of accessor types can be opt-in
+* Type-specific types (e.g. `Boolean` and `Integer`) have to be defined manually
+
+### Neutral
+
+## References
+
+* [#4554](https://github.com/cosmos/cosmos-sdk/issues/4554)
diff --git a/sdk/next/build/architecture/adr-013-metrics.mdx b/sdk/next/build/architecture/adr-013-metrics.mdx
new file mode 100644
index 000000000..a2491b43d
--- /dev/null
+++ b/sdk/next/build/architecture/adr-013-metrics.mdx
@@ -0,0 +1,171 @@
+---
+title: 'ADR 013: Observability'
+description: '20-01-2020: Initial Draft'
+---
+
+## Changelog
+
+* 20-01-2020: Initial Draft
+
+## Status
+
+Proposed
+
+## Context
+
+Telemetry is paramount to debugging and understanding what the application is doing and how it is
+performing. We aim to expose metrics from modules and other core parts of the Cosmos SDK.
+
+In addition, we should aim to support multiple configurable sinks that an operator may choose from.
+By default, when telemetry is enabled, the application should track and expose metrics that are
+stored in-memory. The operator may choose to enable additional sinks, where we support only
+[Prometheus](https://prometheus.io/) for now, as it's battle-tested, simple to setup, open source,
+and is rich with ecosystem tooling.
+
+We must also aim to integrate metrics into the Cosmos SDK in the most seamless way possible such that
+metrics may be added or removed at will and without much friction. To do this, we will use the
+[go-metrics](https://github.com/hashicorp/go-metrics) library.
+
+Finally, operators may enable telemetry along with specific configuration options. If enabled, metrics
+will be exposed via `/metrics?format={text|prometheus}` via the API server.
+
+## Decision
+
+We will add an additional configuration block to `app.toml` that defines telemetry settings:
+
+```toml expandable
+###############################################################################
+### Telemetry Configuration ###
+###############################################################################
+
+[telemetry]
+
+# Prefixed with keys to separate services
+service-name = {{ .Telemetry.ServiceName }}
+
+# Enabled enables the application telemetry functionality. When enabled,
+# an in-memory sink is also enabled by default. Operators may also enable
+# other sinks such as Prometheus.
+enabled = {{ .Telemetry.Enabled }}
+
+# Enable prefixing gauge values with hostname
+enable-hostname = {{ .Telemetry.EnableHostname }}
+
+# Enable adding hostname to labels
+enable-hostname-label = {{ .Telemetry.EnableHostnameLabel }}
+
+# Enable adding service to labels
+enable-service-label = {{ .Telemetry.EnableServiceLabel }}
+
+# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink.
+prometheus-retention-time = {{ .Telemetry.PrometheusRetentionTime }}
+```
+
+The given configuration allows for two sinks -- in-memory and Prometheus. We create a `Metrics`
+type that performs all the bootstrapping for the operator, so capturing metrics becomes seamless.
+
+```go expandable
+// Metrics defines a wrapper around application telemetry functionality. It allows
+// metrics to be gathered at any point in time. When creating a Metrics object,
+// internally, a global metrics is registered with a set of sinks as configured
+// by the operator. In addition to the sinks, when a process gets a SIGUSR1, a
+// dump of formatted recent metrics will be sent to STDERR.
+type Metrics struct {
+ memSink *metrics.InmemSink
+ prometheusEnabled bool
+}
+
+// Gather collects all registered metrics and returns a GatherResponse where the
+// metrics are encoded depending on the type. Metrics are either encoded via
+// Prometheus or JSON if in-memory.
+func (m *Metrics) Gather(
+	format string,
+) (GatherResponse, error) {
+ switch format {
+ case FormatPrometheus:
+ return m.gatherPrometheus()
+ case FormatText:
+ return m.gatherGeneric()
+ case FormatDefault:
+ return m.gatherGeneric()
+
+	default:
+		return GatherResponse{},
+			fmt.Errorf("unsupported metrics format: %s", format)
+}
+}
+```
+
+In addition, `Metrics` allows us to gather the current set of metrics at any given point in time. An
+operator may also choose to send a signal, SIGUSR1, to dump and print formatted metrics to STDERR.
+
+During an application's bootstrapping and construction phase, if `Telemetry.Enabled` is `true`, the
+API server will create an instance of a reference to `Metrics` object and will register a metrics
+handler accordingly.
+
+```go expandable
+// Start runs the API server and, when
+// telemetry is enabled, wires up Metrics.
+func (s *Server) Start(
+	cfg config.Config,
+) error {
+ // ...
+ if cfg.Telemetry.Enabled {
+ m, err := telemetry.New(cfg.Telemetry)
+ if err != nil {
+ return err
+}
+
+s.metrics = m
+ s.registerMetrics()
+}
+
+ // ...
+}
+
+// registerMetrics mounts the GET /metrics
+// handler on the API router.
+func (s *Server) registerMetrics() {
+ metricsHandler := func(w http.ResponseWriter, r *http.Request) {
+ format := strings.TrimSpace(r.FormValue("format"))
+
+gr, err := s.metrics.Gather(format)
+ if err != nil {
+ rest.WriteErrorResponse(w, http.StatusBadRequest, fmt.Sprintf("failed to gather metrics: %s", err))
+
+return
+}
+
+w.Header().Set("Content-Type", gr.ContentType)
+ _, _ = w.Write(gr.Metrics)
+}
+
+s.Router.HandleFunc("/metrics", metricsHandler).Methods("GET")
+}
+```
+
+Application developers may track counters, gauges, summaries, and key/value metrics. There is no
+additional lifting required by modules to leverage profiling metrics. To do so, it's as simple as:
+
+```go
+func (k BaseKeeper) MintCoins(
+	ctx sdk.Context,
+	moduleName string,
+	amt sdk.Coins,
+) error {
+ defer metrics.MeasureSince(time.Now(), "MintCoins")
+ // ...
+}
+```
+
+## Consequences
+
+### Positive
+
+* Exposure into the performance and behavior of an application
+
+### Negative
+
+### Neutral
+
+## References
diff --git a/sdk/next/build/architecture/adr-014-proportional-slashing.mdx b/sdk/next/build/architecture/adr-014-proportional-slashing.mdx
new file mode 100644
index 000000000..8a7a59449
--- /dev/null
+++ b/sdk/next/build/architecture/adr-014-proportional-slashing.mdx
@@ -0,0 +1,90 @@
+---
+title: 'ADR 14: Proportional Slashing'
+description: >-
+ 2019-10-15: Initial draft 2020-05-25: Removed correlation root slashing
+ 2020-07-01: Updated to include S-curve function instead of linear
+---
+
+## Changelog
+
+* 2019-10-15: Initial draft
+* 2020-05-25: Removed correlation root slashing
+* 2020-07-01: Updated to include S-curve function instead of linear
+
+## Context
+
+In Proof of Stake-based chains, centralization of consensus power amongst a small set of validators can cause harm to the network due to increased risk of censorship, liveness failure, fork attacks, etc. However, while this centralization causes a negative externality to the network, it is not directly felt by the delegators delegating to already large validators. We would like a way to pass on the negative externality cost of centralization onto those large validators and their delegators.
+
+## Decision
+
+### Design
+
+To solve this problem, we will implement a procedure called Proportional Slashing. The desire is that the larger a validator is, the more they should be slashed. The first naive attempt is to make a validator's slash percent proportional to their share of consensus voting power.
+
+```text
+slash_amount = k * power // power is the faulting validator's voting power and k is some on-chain constant
+```
+
+However, this will incentivize validators with large amounts of stake to split up their voting power amongst accounts (sybil attack), so that if they fault, they all get slashed at a lower percent. The solution to this is to take into account not just a validator's own voting percentage, but also the voting percentage of all the other validators who get slashed in a specified time frame.
+
+```text
+slash_amount = k * (power_1 + power_2 + ... + power_n) // where power_i is the voting power of the ith validator faulting in the specified time frame and k is some on-chain constant
+```
+
+Now, if someone splits a validator of 10% into two validators of 5% each and both fault in the same time frame, they both will get slashed at the combined 10% amount.
+
+However in practice, we likely don't want a linear relation between amount of stake at fault, and the percentage of stake to slash. In particular, solely 5% of stake double signing effectively did nothing to majorly threaten security, whereas 30% of stake being at fault clearly merits a large slashing factor, due to being very close to the point at which Tendermint security is threatened. A linear relation would require a factor of 6 gap between these two, whereas the difference in risk posed to the network is much larger. We propose using S-curves (formally [logistic functions](https://en.wikipedia.org/wiki/Logistic_function)) to solve this. S-Curves capture the desired criterion quite well. They allow the slashing factor to be minimal for small values, and then grow very rapidly near some threshold point where the risk posed becomes notable.
+
+#### Parameterization
+
+This requires parameterizing a logistic function. It is very well understood how to parameterize this. It has four parameters:
+
+1. A minimum slashing factor
+2. A maximum slashing factor
+3. The inflection point of the S-curve (essentially where do you want to center the S)
+4. The rate of growth of the S-curve (How elongated is the S)
+
+#### Correlation across non-sybil validators
+
+One will note, that this model doesn't differentiate between multiple validators run by the same operators vs validators run by different operators. This can be seen as an additional benefit in fact. It incentivizes validators to differentiate their setups from other validators, to avoid having correlated faults with them or else they risk a higher slash. So for example, operators should avoid using the same popular cloud hosting platforms or using the same Staking as a Service providers. This will lead to a more resilient and decentralized network.
+
+#### Griefing
+
+Griefing, the act of intentionally getting oneself slashed in order to make another's slash worse, could be a concern here. However, using the protocol described here, the attacker also gets equally impacted by the grief as the victim, so it would not provide much benefit to the griefer.
+
+### Implementation
+
+In the slashing module, we will add two queues that will track all of the recent slash events. For double sign faults, we will define "recent slashes" as ones that have occurred within the last `unbonding period`. For liveness faults, we will define "recent slashes" as ones that have occurred within the last `jail period`.
+
+```go
+type SlashEvent struct {
+ Address sdk.ValAddress
+ ValidatorVotingPercent sdk.Dec
+ SlashedSoFar sdk.Dec
+}
+```
+
+These slash events will be pruned from the queue once they are older than their respective "recent slash period".
+
+Whenever a new slash occurs, a `SlashEvent` struct is created with the faulting validator's voting percent and a `SlashedSoFar` of 0. Because recent slash events are pruned before the unbonding period and unjail period expires, it should not be possible for the same validator to have multiple SlashEvents in the same Queue at the same time.
+
+We then will iterate over all the SlashEvents in the queue, adding their `ValidatorVotingPercent` to calculate the new percent to slash all the validators in the queue at, using the S-curve function introduced above.
+
+Once we have the `NewSlashPercent`, we then iterate over all the `SlashEvent`s in the queue once again, and if `NewSlashPercent > SlashedSoFar` for that SlashEvent, we call `staking.Slash(slashEvent.Address, slashEvent.Power, Math.Min(Math.Max(minSlashPercent, NewSlashPercent - SlashedSoFar), maxSlashPercent))` (we pass in the power of the validator before any slashes occurred, so that we slash the right amount of tokens). We then set `SlashEvent.SlashedSoFar` amount to `NewSlashPercent`.
+
+## Status
+
+Proposed
+
+## Consequences
+
+### Positive
+
+* Increases decentralization by disincentivizing delegating to large validators
+* Incentivizes Decorrelation of Validators
+* More severely punishes attacks than accidental faults
+* More flexibility in slashing rates parameterization
+
+### Negative
+
+* More computationally expensive than current implementation. Will require more data about "recent slashing events" to be stored on chain.
diff --git a/sdk/next/build/architecture/adr-016-validator-consensus-key-rotation.mdx b/sdk/next/build/architecture/adr-016-validator-consensus-key-rotation.mdx
new file mode 100644
index 000000000..f8e5161f9
--- /dev/null
+++ b/sdk/next/build/architecture/adr-016-validator-consensus-key-rotation.mdx
@@ -0,0 +1,132 @@
+---
+title: 'ADR 016: Validator Consensus Key Rotation'
+description: '2019 Oct 23: Initial draft 2019 Nov 28: Add key rotation fee'
+---
+
+## Changelog
+
+* 2019 Oct 23: Initial draft
+* 2019 Nov 28: Add key rotation fee
+
+## Context
+
+Validator consensus key rotation feature has been discussed and requested for a long time, for the sake of safer validator key management policy (e.g. [Link](https://github.com/tendermint/tendermint/issues/1136)). So, we suggest one of the simplest forms of validator consensus key rotation, implemented mostly in the Cosmos SDK.
+
+We don't need to make any update on consensus logic in Tendermint because Tendermint does not have any mapping information of consensus key and validator operator key, meaning that from Tendermint's point of view, a consensus key rotation of a validator is simply a replacement of a consensus key to another.
+
+Also, it should be noted that this ADR includes only the simplest form of consensus key rotation without considering multiple consensus keys concept. Such multiple consensus keys concept shall remain a long term goal of Tendermint and Cosmos SDK.
+
+## Decision
+
+### Pseudo procedure for consensus key rotation
+
+* create new random consensus key.
+* create and broadcast a transaction with a `MsgRotateConsPubKey` that states the new consensus key is now coupled with the validator operator with signature from the validator's operator key.
+* old consensus key becomes unable to participate on consensus immediately after the update of key mapping state on-chain.
+* start validating with new consensus key.
+* validators using HSM and KMS should update the consensus key in HSM to use the new rotated key after the height `h` when `MsgRotateConsPubKey` committed to the blockchain.
+
+### Considerations
+
+* consensus key mapping information management strategy
+ * store history of each key mapping changes in the kvstore.
+ * the state machine can search corresponding consensus key paired with given validator operator for any arbitrary height in a recent unbonding period.
+ * the state machine does not need any historical mapping information which is past more than unbonding period.
+* key rotation costs related to LCD and IBC
+ * LCD and IBC will have traffic/computation burden when there are frequent power changes
+ * In current Tendermint design, consensus key rotations are seen as power changes from LCD or IBC perspective
+ * Therefore, to minimize unnecessary frequent key rotation behavior, we limited maximum number of rotation in recent unbonding period and also applied exponentially increasing rotation fee
+* limits
+ * a validator cannot rotate its consensus key more than `MaxConsPubKeyRotations` time for any unbonding period, to prevent spam.
+ * parameters can be decided by governance and stored in genesis file.
+* key rotation fee
+ * a validator should pay `KeyRotationFee` to rotate the consensus key which is calculated as below
+ * `KeyRotationFee` = (max(`VotingPowerPercentage` \* 100, 1) \* `InitialKeyRotationFee`) \* 2^(number of rotations in `ConsPubKeyRotationHistory` in recent unbonding period)
+* evidence module
+ * evidence module can search corresponding consensus key for any height from slashing keeper so that it can decide which consensus key is supposed to be used for given height.
+* abci.ValidatorUpdate
+ * tendermint already has ability to change a consensus key by ABCI communication(`ValidatorUpdate`).
+ * validator consensus key update can be done via creating new + delete old by changing the power to zero.
+ * therefore, we expect we even do not need to change tendermint codebase at all to implement this feature.
+* new genesis parameters in `staking` module
+ * `MaxConsPubKeyRotations` : maximum number of rotation can be executed by a validator in recent unbonding period. default value 10 is suggested(11th key rotation will be rejected)
+ * `InitialKeyRotationFee` : the initial key rotation fee when no key rotation has happened in recent unbonding period. default value 1atom is suggested(1atom fee for the first key rotation in recent unbonding period)
+
+### Workflow
+
+1. The validator generates a new consensus keypair.
+
+2. The validator generates and signs a `MsgRotateConsPubKey` tx with their operator key and new ConsPubKey
+
+ ```go
+ type MsgRotateConsPubKey struct {
+ ValidatorAddress sdk.ValAddress
+ NewPubKey crypto.PubKey
+ }
+ ```
+
+3. `handleMsgRotateConsPubKey` gets `MsgRotateConsPubKey`, calls `RotateConsPubKey`, and emits an event
+
+4. `RotateConsPubKey`
+
+ * checks if `NewPubKey` is not duplicated on `ValidatorsByConsAddr`
+ * checks that the validator does not exceed parameter `MaxConsPubKeyRotations` by iterating `ConsPubKeyRotationHistory`
+ * checks if the signing account has enough balance to pay `KeyRotationFee`
+ * pays `KeyRotationFee` to community fund
+ * overwrites `NewPubKey` in `validator.ConsPubKey`
+ * deletes old `ValidatorByConsAddr`
+ * `SetValidatorByConsAddr` for `NewPubKey`
+ * Add `ConsPubKeyRotationHistory` for tracking rotation
+
+ ```go
+ type ConsPubKeyRotationHistory struct {
+ OperatorAddress sdk.ValAddress
+ OldConsPubKey crypto.PubKey
+ NewConsPubKey crypto.PubKey
+ RotatedHeight int64
+ }
+ ```
+
+5. `ApplyAndReturnValidatorSetUpdates` checks if there is `ConsPubKeyRotationHistory` with `ConsPubKeyRotationHistory.RotatedHeight == ctx.BlockHeight()` and if so, generates 2 `ValidatorUpdate`s, one to remove the old validator and one to create the new validator
+
+ ```go
+ abci.ValidatorUpdate{
+ PubKey: cmttypes.TM2PB.PubKey(OldConsPubKey),
+ Power: 0,
+ }
+
+ abci.ValidatorUpdate{
+ PubKey: cmttypes.TM2PB.PubKey(NewConsPubKey),
+ Power: v.ConsensusPower(),
+ }
+ ```
+
+6. at `previousVotes` Iteration logic of `AllocateTokens`, `previousVote` using `OldConsPubKey` match up with `ConsPubKeyRotationHistory`, and replace validator for token allocation
+
+7. Migrate `ValidatorSigningInfo` and `ValidatorMissedBlockBitArray` from `OldConsPubKey` to `NewConsPubKey`
+
+* Note : All above features shall be implemented in `staking` module.
+
+## Status
+
+Proposed
+
+## Consequences
+
+### Positive
+
+* Validators can immediately or periodically rotate their consensus key to have better security policy
+* improved security against Long-Range attacks ([Link](https://nearprotocol.com/blog/long-range-attacks-and-a-new-fork-choice-rule)) given a validator throws away the old consensus key(s)
+
+### Negative
+
+* Slash module needs more computation because it needs to lookup corresponding consensus key of validators for each height
+* frequent key rotations will make light client bisection less efficient
+
+### Neutral
+
+## References
+
+* on tendermint repo : [Link](https://github.com/tendermint/tendermint/issues/1136)
+* on cosmos-sdk repo : [Link](https://github.com/cosmos/cosmos-sdk/issues/5231)
+* about multiple consensus keys : [Link](https://github.com/tendermint/tendermint/issues/1758#issuecomment-545291698)
diff --git a/sdk/next/build/architecture/adr-017-historical-header-module.mdx b/sdk/next/build/architecture/adr-017-historical-header-module.mdx
new file mode 100644
index 000000000..d7ba2ea10
--- /dev/null
+++ b/sdk/next/build/architecture/adr-017-historical-header-module.mdx
@@ -0,0 +1,70 @@
+---
+title: 'ADR 17: Historical Header Module'
+description: >-
+ 26 November 2019: Start of first version 2 December 2019: Final draft of first
+ version
+---
+
+## Changelog
+
+* 26 November 2019: Start of first version
+* 2 December 2019: Final draft of first version
+
+## Context
+
+In order for the Cosmos SDK to implement the [IBC specification](https://github.com/cosmos/ics), modules within the Cosmos SDK must have the ability to introspect recent consensus states (validator sets & commitment roots) as proofs of these values on other chains must be checked during the handshakes.
+
+## Decision
+
+The application MUST store the most recent `n` headers in a persistent store. At first, this store MAY be the current Merklised store. A non-Merklised store MAY be used later as no proofs are necessary.
+
+The application MUST store this information by storing new headers immediately when handling `abci.RequestBeginBlock`:
+
+```go
+func BeginBlock(ctx sdk.Context, keeper HistoricalHeaderKeeper, req abci.RequestBeginBlock)
+
+abci.ResponseBeginBlock {
+ info := HistoricalInfo{
+ Header: ctx.BlockHeader(),
+ ValSet: keeper.StakingKeeper.GetAllValidators(ctx), // note that this must be stored in a canonical order
+}
+
+keeper.SetHistoricalInfo(ctx, ctx.BlockHeight(), info)
+ n := keeper.GetParamRecentHeadersToStore()
+
+keeper.PruneHistoricalInfo(ctx, ctx.BlockHeight() - n)
+ // continue handling request
+}
+```
+
+Alternatively, the application MAY store only the hash of the validator set.
+
+The application MUST make these past `n` committed headers available for querying by Cosmos SDK modules through the `Keeper`'s `GetHistoricalInfo` function. This MAY be implemented in a new module, or it MAY also be integrated into an existing one (likely `x/staking` or `x/ibc`).
+
+`n` MAY be configured as a parameter store parameter, in which case it could be changed by `ParameterChangeProposal`s, although it will take some blocks for the stored information to catch up if `n` is increased.
+
+## Status
+
+Proposed.
+
+## Consequences
+
+Implementation of this ADR will require changes to the Cosmos SDK. It will not require changes to Tendermint.
+
+### Positive
+
+* Easy retrieval of headers & state roots for recent past heights by modules anywhere in the Cosmos SDK.
+* No RPC calls to Tendermint required.
+* No ABCI alterations required.
+
+### Negative
+
+* Duplicates `n` headers data in Tendermint & the application (additional disk usage) - in the long term, an approach such as [this](https://github.com/tendermint/tendermint/issues/4210) might be preferable.
+
+### Neutral
+
+(none known)
+
+## References
+
+* [ICS 2: "Consensus state introspection"](https://github.com/cosmos/ibc/tree/master/spec/core/ics-002-client-semantics#consensus-state-introspection)
diff --git a/sdk/next/build/architecture/adr-018-extendable-voting-period.mdx b/sdk/next/build/architecture/adr-018-extendable-voting-period.mdx
new file mode 100644
index 000000000..077078cf8
--- /dev/null
+++ b/sdk/next/build/architecture/adr-018-extendable-voting-period.mdx
@@ -0,0 +1,69 @@
+---
+title: 'ADR 18: Extendable Voting Periods'
+description: '1 January 2020: Start of first version'
+---
+
+## Changelog
+
+* 1 January 2020: Start of first version
+
+## Context
+
+Currently the voting period for all governance proposals is the same. However, this is suboptimal as all governance proposals do not require the same time period. For more non-contentious proposals, they can be dealt with more efficiently with a faster period, while more contentious or complex proposals may need a longer period for extended discussion/consideration.
+
+## Decision
+
+We would like to design a mechanism for making the voting period of a governance proposal variable based on the demand of voters. We would like it to be based on the view of the governance participants, rather than just the proposer of a governance proposal (thus, allowing the proposer to select the voting period length is not sufficient).
+
+However, we would like to avoid the creation of an entire second voting process to determine the length of the voting period, as it just pushes the problem to determining the length of that first voting period.
+
+Thus, we propose the following mechanism:
+
+### Params
+
+* The current gov param `VotingPeriod` is to be replaced by a `MinVotingPeriod` param. This is the default voting period that all governance proposal voting periods start with.
+* There is a new gov param called `MaxVotingPeriodExtension`.
+
+### Mechanism
+
+There is a new `Msg` type called `MsgExtendVotingPeriod`, which can be sent by any staked account during a proposal's voting period. It allows the sender to unilaterally extend the length of the voting period by `MaxVotingPeriodExtension * sender's share of voting power`. Every address can only call `MsgExtendVotingPeriod` once per proposal.
+
+So for example, if the `MaxVotingPeriodExtension` is set to 100 Days, then anyone with 1% of voting power can extend the voting period by 1 day. If 33% of voting power has sent the message, the voting period will be extended by 33 days. Thus, if absolutely everyone chooses to extend the voting period, the absolute maximum voting period will be `MinVotingPeriod + MaxVotingPeriodExtension`.
+
+This system acts as a sort of distributed coordination, where individual stakers choosing to extend or not allows the system to gauge the contentiousness/complexity of the proposal. Since it is extremely unlikely that many stakers will choose to extend at the exact same time, it allows stakers to view how long others have already extended thus far, to decide whether or not to extend further.
+
+### Dealing with Unbonding/Redelegation
+
+There is one thing that needs to be addressed. How to deal with redelegation/unbonding during the voting period. If a staker of 5% calls `MsgExtendVotingPeriod` and then unbonds, does the voting period then decrease by 5 days again? This is not good as it can give people a false sense of how long they have to make their decision. For this reason, we want to design it such that the voting period length can only be extended, not shortened. To do this, the current extension amount is based on the highest percent that voted extension at any time. This is best explained by example:
+
+1. Let's say 2 stakers of voting power 4% and 3% respectively vote to extend. The voting period will be extended by 7 days.
+2. Now the staker of 3% decides to unbond before the end of the voting period. The voting period extension remains 7 days.
+3. Now, let's say another staker of 2% voting power decides to extend the voting period. There is now 6% of active voting power choosing to extend. The voting period extension remains 7 days.
+4. If a fourth staker of 10% chooses to extend now, there is a total of 16% of active voting power wishing to extend. The voting period will be extended to 16 days.
+
+### Delegators
+
+Just like votes in the actual voting period, delegators automatically inherit the extension of their validators. If their validator chooses to extend, their voting power will be used in the validator's extension. However, the delegator is unable to override their validator and "unextend" as that would contradict the "voting period length can only be ratcheted up" principle described in the previous section. However, a delegator may choose to extend using their personal voting power, if their validator has not done so.
+
+## Status
+
+Proposed
+
+## Consequences
+
+### Positive
+
+* More complex/contentious governance proposals will have more time to properly digest and deliberate
+
+### Negative
+
+* Governance process becomes more complex and requires more understanding to interact with effectively
+* Can no longer predict when a governance proposal will end. Can't assume order in which governance proposals will end.
+
+### Neutral
+
+* The minimum voting period can be made shorter
+
+## References
+
+* [Cosmos Forum post where idea first originated](https://forum.cosmos.network/t/proposal-draft-reduce-governance-voting-period-to-7-days/3032/9)
diff --git a/sdk/next/build/architecture/adr-019-protobuf-state-encoding.mdx b/sdk/next/build/architecture/adr-019-protobuf-state-encoding.mdx
new file mode 100644
index 000000000..2b24ddec8
--- /dev/null
+++ b/sdk/next/build/architecture/adr-019-protobuf-state-encoding.mdx
@@ -0,0 +1,401 @@
+---
+title: 'ADR 019: Protocol Buffer State Encoding'
+---
+
+## Changelog
+
+* 2020 Feb 15: Initial Draft
+* 2020 Feb 24: Updates to handle messages with interface fields
+* 2020 Apr 27: Convert usages of `oneof` for interfaces to `Any`
+* 2020 May 15: Describe `cosmos_proto` extensions and amino compatibility
+* 2020 Dec 4: Move and rename `MarshalAny` and `UnmarshalAny` into the `codec.Codec` interface.
+* 2021 Feb 24: Remove mentions of `HybridCodec`, which has been abandoned in [#6843](https://github.com/cosmos/cosmos-sdk/pull/6843).
+
+## Status
+
+Accepted
+
+## Context
+
+Currently, the Cosmos SDK utilizes [go-amino](https://github.com/tendermint/go-amino/) for binary
+and JSON object encoding over the wire bringing parity between logical objects and persistence objects.
+
+From the Amino docs:
+
+> Amino is an object encoding specification. It is a subset of Proto3 with an extension for interface
+> support. See the [Proto3 spec](https://developers.google.com/protocol-buffers/docs/proto3) for more
+> information on Proto3, which Amino is largely compatible with (but not with Proto2).
+>
+> The goal of the Amino encoding protocol is to bring parity into logic objects and persistence objects.
+
+Amino also aims to have the following goals (not a complete list):
+
+* Binary bytes must be decode-able with a schema.
+* Schema must be upgradeable.
+* The encoder and decoder logic must be reasonably simple.
+
+However, we believe that Amino does not fulfill these goals completely and does not fully meet the
+needs of a truly flexible cross-language and multi-client compatible encoding protocol in the Cosmos SDK.
+Namely, Amino has proven to be a big pain-point in regards to supporting object serialization across
+clients written in various languages while providing little in the way of true backwards
+compatibility and upgradeability. Furthermore, through profiling and various benchmarks, Amino has
+been shown to be an extremely large performance bottleneck in the Cosmos SDK [1]. This is
+largely reflected in the performance of simulations and application transaction throughput.
+
+Thus, we need to adopt an encoding protocol that meets the following criteria for state serialization:
+
+* Language agnostic
+* Platform agnostic
+* Rich client support and thriving ecosystem
+* High performance
+* Minimal encoded message size
+* Codegen-based over reflection-based
+* Supports backward and forward compatibility
+
+Note, migrating away from Amino should be viewed as a two-pronged approach, state and client encoding.
+This ADR focuses on state serialization in the Cosmos SDK state machine. A corresponding ADR will be
+made to address client-side encoding.
+
+## Decision
+
+We will adopt [Protocol Buffers](https://developers.google.com/protocol-buffers) for serializing
+persisted structured data in the Cosmos SDK while providing a clean mechanism and developer UX for
+applications wishing to continue to use Amino. We will provide this mechanism by updating modules to
+accept a codec interface, `Marshaler`, instead of a concrete Amino codec. Furthermore, the Cosmos SDK
+will provide two concrete implementations of the `Marshaler` interface: `AminoCodec` and `ProtoCodec`.
+
+* `AminoCodec`: Uses Amino for both binary and JSON encoding.
+* `ProtoCodec`: Uses Protobuf for both binary and JSON encoding.
+
+Modules will use whichever codec that is instantiated in the app. By default, the Cosmos SDK's `simapp`
+instantiates a `ProtoCodec` as the concrete implementation of `Marshaler`, inside the `MakeTestEncodingConfig`
+function. This can be easily overwritten by app developers if they so desire.
+
+The ultimate goal will be to replace Amino JSON encoding with Protobuf encoding and thus have
+modules accept and/or extend `ProtoCodec`. Until then, Amino JSON is still provided for legacy use-cases.
+A handful of places in the Cosmos SDK still have Amino JSON hardcoded, such as the Legacy API REST endpoints
+and the `x/params` store. They are planned to be converted to Protobuf in a gradual manner.
+
+### Module Codecs
+
+For modules that do not require the ability to work with and serialize interfaces, the path to Protobuf
+migration is pretty straightforward. These modules are to simply migrate any existing types that
+are encoded and persisted via their concrete Amino codec to Protobuf and have their keeper accept a
+`Marshaler` that will be a `ProtoCodec`. This migration is simple as things will just work as-is.
+
+Note, any business logic that needs to encode primitive types like `bool` or `int64` should use
+[gogoprotobuf](https://github.com/cosmos/gogoproto) Value types.
+
+Example:
+
+```go
+ts, err := gogotypes.TimestampProto(completionTime)
+ if err != nil {
+ // ...
+}
+ bz := cdc.MustMarshal(ts)
+```
+
+However, modules can vary greatly in purpose and design and so we must support the ability for modules
+to be able to encode and work with interfaces (e.g. `Account` or `Content`). For these modules, they
+must define their own codec interface that extends `Marshaler`. These specific interfaces are unique
+to the module and will contain method contracts that know how to serialize the needed interfaces.
+
+Example:
+
+```go expandable
+// x/auth/types/codec.go
+
+type Codec interface {
+ codec.Codec
+
+ MarshalAccount(acc exported.Account) ([]byte, error)
+
+UnmarshalAccount(bz []byte) (exported.Account, error)
+
+MarshalAccountJSON(acc exported.Account) ([]byte, error)
+
+UnmarshalAccountJSON(bz []byte) (exported.Account, error)
+}
+```
+
+### Usage of `Any` to encode interfaces
+
+In general, module-level .proto files should define messages which encode interfaces
+using [`google.protobuf.Any`](https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/any.proto).
+After [extensive discussion](https://github.com/cosmos/cosmos-sdk/issues/6030),
+this was chosen as the preferred alternative to application-level `oneof`s
+as in our original protobuf design. The arguments in favor of `Any` can be
+summarized as follows:
+
+* `Any` provides a simpler, more consistent client UX for dealing with
+ interfaces than app-level `oneof`s that will need to be coordinated more
+ carefully across applications. Creating a generic transaction
+ signing library using `oneof`s may be cumbersome and critical logic may need
+ to be reimplemented for each chain
+* `Any` provides more resistance against human error than `oneof`
+* `Any` is generally simpler to implement for both modules and apps
+
+The main counter-argument to using `Any` centers around its additional space
+and possibly performance overhead. The space overhead could be dealt with using
+compression at the persistence layer in the future and the performance impact
+is likely to be small. Thus, not using `Any` is seen as a premature optimization,
+with user experience as the higher order concern.
+
+Note, that given the Cosmos SDK's decision to adopt the `Codec` interfaces described
+above, apps can still choose to use `oneof` to encode state and transactions
+but it is not the recommended approach. If apps do choose to use `oneof`s
+instead of `Any` they will likely lose compatibility with client apps that
+support multiple chains. Thus developers should think carefully about whether
+they care more about what is possibly a pre-mature optimization or end-user
+and client developer UX.
+
+### Safe usage of `Any`
+
+By default, the [gogo protobuf implementation of `Any`](https://pkg.go.dev/github.com/cosmos/gogoproto/types)
+uses [global type registration](https://github.com/cosmos/gogoproto/blob/master/proto/properties.go#L540)
+to decode values packed in `Any` into concrete
+go types. This introduces a vulnerability where any malicious module
+in the dependency tree could register a type with the global protobuf registry
+and cause it to be loaded and unmarshaled by a transaction that referenced
+it in the `type_url` field.
+
+To prevent this, we introduce a type registration mechanism for decoding `Any`
+values into concrete types through the `InterfaceRegistry` interface which
+bears some similarity to type registration with Amino:
+
+```go expandable
+type InterfaceRegistry interface {
+ // RegisterInterface associates protoName as the public name for the
+ // interface passed in as iface
+ // Ex:
+ // registry.RegisterInterface("cosmos_sdk.Msg", (*sdk.Msg)(nil))
+
+RegisterInterface(protoName string, iface interface{
+})
+
+ // RegisterImplementations registers impls as a concrete implementations of
+ // the interface iface
+ // Ex:
+ // registry.RegisterImplementations((*sdk.Msg)(nil), &MsgSend{
+}, &MsgMultiSend{
+})
+
+RegisterImplementations(iface interface{
+}, impls ...proto.Message)
+}
+```
+
+In addition to serving as a whitelist, `InterfaceRegistry` can also serve
+to communicate the list of concrete types that satisfy an interface to clients.
+
+In .proto files:
+
+* fields which accept interfaces should be annotated with `cosmos_proto.accepts_interface`
+ using the same full-qualified name passed as `protoName` to `InterfaceRegistry.RegisterInterface`
+* interface implementations should be annotated with `cosmos_proto.implements_interface`
+ using the same full-qualified name passed as `protoName` to `InterfaceRegistry.RegisterInterface`
+
+In the future, `protoName`, `cosmos_proto.accepts_interface`, `cosmos_proto.implements_interface`
+may be used via code generation, reflection &/or static linting.
+
+The same struct that implements `InterfaceRegistry` will also implement an
+interface `InterfaceUnpacker` to be used for unpacking `Any`s:
+
+```go
+type InterfaceUnpacker interface {
+ // UnpackAny unpacks the value in any to the interface pointer passed in as
+ // iface. Note that the type in any must have been registered with
+ // RegisterImplementations as a concrete type for that interface
+ // Ex:
+ // var msg sdk.Msg
+ // err := ctx.UnpackAny(any, &msg)
+ // ...
+ UnpackAny(any *Any, iface interface{
+})
+
+error
+}
+```
+
+Note that `InterfaceRegistry` usage does not deviate from standard protobuf
+usage of `Any`, it just introduces a security and introspection layer for
+golang usage.
+
+`InterfaceRegistry` will be a member of `ProtoCodec`
+described above. In order for modules to register interface types, app modules
+can optionally implement the following interface:
+
+```go
+type InterfaceModule interface {
+ RegisterInterfaceTypes(InterfaceRegistry)
+}
+```
+
+The module manager will include a method to call `RegisterInterfaceTypes` on
+every module that implements it in order to populate the `InterfaceRegistry`.
+
+### Using `Any` to encode state
+
+The Cosmos SDK will provide support methods `MarshalInterface` and `UnmarshalInterface` to hide a complexity of wrapping interface types into `Any` and allow easy serialization.
+
+```go expandable
+import "github.com/cosmos/cosmos-sdk/codec"
+
+// note: eviexported.Evidence is an interface type
+func MarshalEvidence(cdc codec.BinaryCodec, e eviexported.Evidence) ([]byte, error) {
+ return cdc.MarshalInterface(e)
+}
+
+func UnmarshalEvidence(cdc codec.BinaryCodec, bz []byte) (eviexported.Evidence, error) {
+ var evi eviexported.Evidence
+ err := cdc.UnmarshalInterface(&evi, bz)
+
+return evi, err
+}
+```
+
+### Using `Any` in `sdk.Msg`s
+
+A similar concept is to be applied for messages that contain interfaces fields.
+For example, we can define `MsgSubmitEvidence` as follows where `Evidence` is
+an interface:
+
+```protobuf
+// x/evidence/types/types.proto
+
+message MsgSubmitEvidence {
+ bytes submitter = 1
+ [
+ (gogoproto.casttype) = "github.com/cosmos/cosmos-sdk/types.AccAddress"
+ ];
+ google.protobuf.Any evidence = 2;
+}
+```
+
+Note that in order to unpack the evidence from `Any` we do need a reference to
+`InterfaceRegistry`. In order to reference evidence in methods like
+`ValidateBasic` which shouldn't have to know about the `InterfaceRegistry`, we
+introduce an `UnpackInterfaces` phase to deserialization which unpacks
+interfaces before they're needed.
+
+### Unpacking Interfaces
+
+To implement the `UnpackInterfaces` phase of deserialization which unpacks
+interfaces wrapped in `Any` before they're needed, we create an interface
+that `sdk.Msg`s and other types can implement:
+
+```go
+type UnpackInterfacesMessage interface {
+ UnpackInterfaces(InterfaceUnpacker)
+
+error
+}
+```
+
+We also introduce a private `cachedValue interface{}` field onto the `Any`
+struct itself with a public getter `GetCachedValue() interface{}`.
+
+The `UnpackInterfaces` method is to be invoked during message deserialization right
+after `Unmarshal` and any interface values packed in `Any`s will be decoded
+and stored in `cachedValue` for reference later.
+
+Then unpacked interface values can safely be used in any code afterwards
+without knowledge of the `InterfaceRegistry`
+and messages can introduce a simple getter to cast the cached value to the
+correct interface type.
+
+This has the added benefit that unmarshaling of `Any` values only happens once
+during initial deserialization rather than every time the value is read. Also,
+when `Any` values are first packed (for instance in a call to
+`NewMsgSubmitEvidence`), the original interface value is cached so that
+unmarshaling isn't needed to read it again.
+
+`MsgSubmitEvidence` could implement `UnpackInterfaces`, plus a convenience getter
+`GetEvidence` as follows:
+
+```go
+func (msg MsgSubmitEvidence)
+
+UnpackInterfaces(ctx sdk.InterfaceRegistry)
+
+error {
+ var evi eviexported.Evidence
+    return ctx.UnpackAny(msg.Evidence, &evi)
+}
+
+func (msg MsgSubmitEvidence)
+
+GetEvidence()
+
+eviexported.Evidence {
+ return msg.Evidence.GetCachedValue().(eviexported.Evidence)
+}
+```
+
+### Amino Compatibility
+
+Our custom implementation of `Any` can be used transparently with Amino if used
+with the proper codec instance. What this means is that interfaces packed within
+`Any`s will be amino marshaled like regular Amino interfaces (assuming they
+have been registered properly with Amino).
+
+In order for this functionality to work:
+
+* **all legacy code must use `*codec.LegacyAmino` instead of `*amino.Codec` which is
+ now a wrapper which properly handles `Any`**
+* **all new code should use `Marshaler` which is compatible with both amino and
+ protobuf**
+* Also, before v0.39, `codec.Codec` will be renamed to `codec.LegacyAmino`.
+
+### Why Wasn't X Chosen Instead
+
+For a more complete comparison to alternative protocols, see [here](https://codeburst.io/json-vs-protocol-buffers-vs-flatbuffers-a4247f8bda6f).
+
+### Cap'n Proto
+
+While [Cap’n Proto](https://capnproto.org/) does seem like an advantageous alternative to Protobuf
+due to its native support for interfaces/generics and built-in canonicalization, it does lack the
+rich client ecosystem compared to Protobuf and is a bit less mature.
+
+### FlatBuffers
+
+[FlatBuffers](https://google.github.io/flatbuffers/) is also a potentially viable alternative, with the
+primary difference being that FlatBuffers does not need a parsing/unpacking step to a secondary
+representation before you can access data, often coupled with per-object memory allocation.
+
+However, it would require great effort to research and fully understand the scope of the migration
+and the path forward -- which isn't immediately clear. In addition, FlatBuffers aren't designed for
+untrusted inputs.
+
+## Future Improvements & Roadmap
+
+In the future we may consider a compression layer right above the persistence
+layer which doesn't change tx or merkle tree hashes, but reduces the storage
+overhead of `Any`. In addition, we may adopt protobuf naming conventions which
+make type URLs a bit more concise while remaining descriptive.
+
+Additional code generation support around the usage of `Any` is something that
+could also be explored in the future to make the UX for go developers more
+seamless.
+
+## Consequences
+
+### Positive
+
+* Significant performance gains.
+* Supports backward and forward type compatibility.
+* Better support for cross-language clients.
+
+### Negative
+
+* Learning curve required to understand and implement Protobuf messages.
+* Slightly larger message size due to use of `Any`, although this could be offset
+ by a compression layer in the future
+
+### Neutral
+
+## References
+
+1. [Link](https://github.com/cosmos/cosmos-sdk/issues/4977)
+2. [Link](https://github.com/cosmos/cosmos-sdk/issues/5444)
diff --git a/sdk/next/build/architecture/adr-020-protobuf-transaction-encoding.mdx b/sdk/next/build/architecture/adr-020-protobuf-transaction-encoding.mdx
new file mode 100644
index 000000000..01b863abe
--- /dev/null
+++ b/sdk/next/build/architecture/adr-020-protobuf-transaction-encoding.mdx
@@ -0,0 +1,491 @@
+---
+title: 'ADR 020: Protocol Buffer Transaction Encoding'
+---
+
+## Changelog
+
+* 2020 March 06: Initial Draft
+* 2020 March 12: API Updates
+* 2020 April 13: Added details on interface `oneof` handling
+* 2020 April 30: Switch to `Any`
+* 2020 May 14: Describe public key encoding
+* 2020 June 08: Store `TxBody` and `AuthInfo` as bytes in `SignDoc`; Document `TxRaw` as broadcast and storage type.
+* 2020 August 07: Use ADR 027 for serializing `SignDoc`.
+* 2020 August 19: Move sequence field from `SignDoc` to `SignerInfo`, as discussed in [#6966](https://github.com/cosmos/cosmos-sdk/issues/6966).
+* 2020 September 25: Remove `PublicKey` type in favor of `secp256k1.PubKey`, `ed25519.PubKey` and `multisig.LegacyAminoPubKey`.
+* 2020 October 15: Add `GetAccount` and `GetAccountWithHeight` methods to the `AccountRetriever` interface.
+* 2021 Feb 24: The Cosmos SDK does not use Tendermint's `PubKey` interface anymore, but its own `cryptotypes.PubKey`. Updates to reflect this.
+* 2021 May 3: Rename `clientCtx.JSONMarshaler` to `clientCtx.JSONCodec`.
+* 2021 June 10: Add `clientCtx.Codec: codec.Codec`.
+
+## Status
+
+Accepted
+
+## Context
+
+This ADR is a continuation of the motivation, design, and context established in
+[ADR 019](/sdk/v0.50/build/architecture/adr-019-protobuf-state-encoding), namely, we aim to design the
+Protocol Buffer migration path for the client-side of the Cosmos SDK.
+
+Specifically, the client-side migration path primarily includes tx generation and
+signing, message construction and routing, in addition to CLI & REST handlers and
+business logic (i.e. queriers).
+
+With this in mind, we will tackle the migration path via two main areas, txs and
+querying. However, this ADR solely focuses on transactions. Querying should be
+addressed in a future ADR, but it should build off of these proposals.
+
+Based on detailed discussions ([#6030](https://github.com/cosmos/cosmos-sdk/issues/6030)
+and [#6078](https://github.com/cosmos/cosmos-sdk/issues/6078)), the original
+design for transactions was changed substantially from an `oneof` /JSON-signing
+approach to the approach described below.
+
+## Decision
+
+### Transactions
+
+Since interface values are encoded with `google.protobuf.Any` in state (see [ADR 019](/sdk/v0.53/build/architecture/adr-019-protobuf-state-encoding)),
+`sdk.Msg`s are encoded with `Any` in transactions.
+
+One of the main goals of using `Any` to encode interface values is to have a
+core set of types which is reused by apps so that
+clients can safely be compatible with as many chains as possible.
+
+It is one of the goals of this specification to provide a flexible cross-chain transaction
+format that can serve a wide variety of use cases without breaking client
+compatibility.
+
+In order to facilitate signing, transactions are separated into `TxBody`,
+which will be re-used by `SignDoc` below, and `signatures`:
+
+```protobuf expandable
+// types/types.proto
+package cosmos_sdk.v1;
+
+message Tx {
+ TxBody body = 1;
+ AuthInfo auth_info = 2;
+ // A list of signatures that matches the length and order of AuthInfo's signer_infos to
+ // allow connecting signature meta information like public key and signing mode by position.
+ repeated bytes signatures = 3;
+}
+
+// A variant of Tx that pins the signer's exact binary representation of body and
+// auth_info. This is used for signing, broadcasting and verification. The binary
+// `serialize(tx: TxRaw)` is stored in Tendermint and the hash `sha256(serialize(tx: TxRaw))`
+// becomes the "txhash", commonly used as the transaction ID.
+message TxRaw {
+ // A protobuf serialization of a TxBody that matches the representation in SignDoc.
+ bytes body = 1;
+ // A protobuf serialization of an AuthInfo that matches the representation in SignDoc.
+ bytes auth_info = 2;
+ // A list of signatures that matches the length and order of AuthInfo's signer_infos to
+ // allow connecting signature meta information like public key and signing mode by position.
+ repeated bytes signatures = 3;
+}
+
+message TxBody {
+ // A list of messages to be executed. The required signers of those messages define
+ // the number and order of elements in AuthInfo's signer_infos and Tx's signatures.
+ // Each required signer address is added to the list only the first time it occurs.
+ //
+ // By convention, the first required signer (usually from the first message) is referred
+ // to as the primary signer and pays the fee for the whole transaction.
+ repeated google.protobuf.Any messages = 1;
+ string memo = 2;
+ int64 timeout_height = 3;
+ repeated google.protobuf.Any extension_options = 1023;
+}
+
+message AuthInfo {
+ // This list defines the signing modes for the required signers. The number
+ // and order of elements must match the required signers from TxBody's messages.
+ // The first element is the primary signer and the one which pays the fee.
+ repeated SignerInfo signer_infos = 1;
+ // The fee can be calculated based on the cost of evaluating the body and doing signature verification of the signers. This can be estimated via simulation.
+ Fee fee = 2;
+}
+
+message SignerInfo {
+ // The public key is optional for accounts that already exist in state. If unset, the
+ // verifier can use the required signer address for this position and lookup the public key.
+ google.protobuf.Any public_key = 1;
+ // ModeInfo describes the signing mode of the signer and is a nested
+ // structure to support nested multisig pubkey's
+ ModeInfo mode_info = 2;
+ // sequence is the sequence of the account, which describes the
+ // number of committed transactions signed by a given address. It is used to prevent
+ // replay attacks.
+ uint64 sequence = 3;
+}
+
+message ModeInfo {
+ oneof sum {
+ Single single = 1;
+ Multi multi = 2;
+ }
+
+ // Single is the mode info for a single signer. It is structured as a message
+ // to allow for additional fields such as locale for SIGN_MODE_TEXTUAL in the future
+ message Single {
+ SignMode mode = 1;
+ }
+
+ // Multi is the mode info for a multisig public key
+ message Multi {
+ // bitarray specifies which keys within the multisig are signing
+ CompactBitArray bitarray = 1;
+ // mode_infos is the corresponding modes of the signers of the multisig
+ // which could include nested multisig public keys
+ repeated ModeInfo mode_infos = 2;
+ }
+}
+
+enum SignMode {
+ SIGN_MODE_UNSPECIFIED = 0;
+
+ SIGN_MODE_DIRECT = 1;
+
+ SIGN_MODE_TEXTUAL = 2;
+
+ SIGN_MODE_LEGACY_AMINO_JSON = 127;
+}
+```
+
+As will be discussed below, in order to include as much of the `Tx` as possible
+in the `SignDoc`, `SignerInfo` is separated from signatures so that only the
+raw signatures themselves live outside of what is signed over.
+
+Because we are aiming for a flexible, extensible cross-chain transaction
+format, new transaction processing options should be added to `TxBody` as soon
+as those use cases are discovered, even if they can't be implemented yet.
+
+Because there is coordination overhead in this, `TxBody` includes an
+`extension_options` field which can be used for any transaction processing
+options that are not already covered. App developers should, nevertheless,
+attempt to upstream important improvements to `Tx`.
+
+### Signing
+
+All of the signing modes below aim to provide the following guarantees:
+
+* **No Malleability**: `TxBody` and `AuthInfo` cannot change once the transaction
+ is signed
+* **Predictable Gas**: if I am signing a transaction where I am paying a fee,
+ the final gas is fully dependent on what I am signing
+
+These guarantees give the maximum amount of confidence to message signers that
+manipulation of `Tx`s by intermediaries can't result in any meaningful changes.
+
+#### `SIGN_MODE_DIRECT`
+
+The "direct" signing behavior is to sign the raw `TxBody` bytes as broadcast over
+the wire. This has the advantages of:
+
+* requiring the minimum additional client capabilities beyond a standard protocol
+ buffers implementation
+* leaving effectively zero holes for transaction malleability (i.e. there are no
+ subtle differences between the signing and encoding formats which could
+ potentially be exploited by an attacker)
+
+Signatures are structured using the `SignDoc` below which reuses the serialization of
+`TxBody` and `AuthInfo` and only adds the fields which are needed for signatures:
+
+```protobuf
+// types/types.proto
+message SignDoc {
+ // A protobuf serialization of a TxBody that matches the representation in TxRaw.
+ bytes body = 1;
+ // A protobuf serialization of an AuthInfo that matches the representation in TxRaw.
+ bytes auth_info = 2;
+ string chain_id = 3;
+ uint64 account_number = 4;
+}
+```
+
+In order to sign in the default mode, clients take the following steps:
+
+1. Serialize `TxBody` and `AuthInfo` using any valid protobuf implementation.
+2. Create a `SignDoc` and serialize it using [ADR 027](/sdk/v0.50/build/architecture/adr-027-deterministic-protobuf-serialization).
+3. Sign the encoded `SignDoc` bytes.
+4. Build a `TxRaw` and serialize it for broadcasting.
+
+Signature verification is based on comparing the raw `TxBody` and `AuthInfo`
+bytes encoded in `TxRaw` not based on any ["canonicalization"](https://github.com/regen-network/canonical-proto3)
+algorithm which creates added complexity for clients in addition to preventing
+some forms of upgradeability (to be addressed later in this document).
+
+Signature verifiers do:
+
+1. Deserialize a `TxRaw` and pull out `body` and `auth_info`.
+2. Create a list of required signer addresses from the messages.
+3. For each required signer:
+ * Pull account number and sequence from the state.
+ * Obtain the public key either from state or `AuthInfo`'s `signer_infos`.
+ * Create a `SignDoc` and serialize it using [ADR 027](/sdk/v0.50/build/architecture/adr-027-deterministic-protobuf-serialization).
+ * Verify the signature at the same list position against the serialized `SignDoc`.
+
+#### `SIGN_MODE_LEGACY_AMINO`
+
+In order to support legacy wallets and exchanges, Amino JSON will be temporarily
+supported for transaction signing. Once wallets and exchanges have had a
+chance to upgrade to protobuf based signing, this option will be disabled. In
+the meantime, it is foreseen that disabling the current Amino signing would cause
+too much breakage to be feasible. Note that this is mainly a requirement of the
+Cosmos Hub and other chains may choose to disable Amino signing immediately.
+
+Legacy clients will be able to sign a transaction using the current Amino
+JSON format and have it encoded to protobuf using the REST `/tx/encode`
+endpoint before broadcasting.
+
+#### `SIGN_MODE_TEXTUAL`
+
+As was discussed extensively in [#6078](https://github.com/cosmos/cosmos-sdk/issues/6078),
+there is a desire for a human-readable signing encoding, especially for hardware
+wallets like the [Ledger](https://www.ledger.com) which display
+transaction contents to users before signing. JSON was an attempt at this but
+falls short of the ideal.
+
+`SIGN_MODE_TEXTUAL` is intended as a placeholder for a human-readable
+encoding which will replace Amino JSON. This new encoding should be even more
+focused on readability than JSON, possibly based on formatting strings like
+[MessageFormat](http://userguide.icu-project.org/formatparse/messages).
+
+In order to ensure that the new human-readable format does not suffer from
+transaction malleability issues, `SIGN_MODE_TEXTUAL`
+requires that the *human-readable bytes are concatenated with the raw `SignDoc`*
+to generate sign bytes.
+
+Multiple human-readable formats (maybe even localized messages) may be supported
+by `SIGN_MODE_TEXTUAL` when it is implemented.
+
+### Unknown Field Filtering
+
+Unknown fields in protobuf messages should generally be rejected by transaction
+processors because:
+
+* important data may be present in the unknown fields, that if ignored, will
+ cause unexpected behavior for clients
+* they present a malleability vulnerability where attackers can bloat tx size
+ by adding random uninterpreted data to unsigned content (i.e. the master `Tx`,
+ not `TxBody`)
+
+There are also scenarios where we may choose to safely ignore unknown fields
+([Link](https://github.com/cosmos/cosmos-sdk/issues/6078#issuecomment-624400188)) to
+provide graceful forwards compatibility with newer clients.
+
+We propose that field numbers with bit 11 set (for most use cases this is
+the range of 1024-2047) be considered non-critical fields that can safely be
+ignored if unknown.
+
+To handle this we will need an unknown field filter that:
+
+* always rejects unknown fields in unsigned content (i.e. top-level `Tx` and
+ unsigned parts of `AuthInfo` if present based on the signing mode)
+* rejects unknown fields in all messages (including nested `Any`s) other than
+ fields with bit 11 set
+
+This will likely need to be a custom protobuf parser pass that takes message bytes
+and `FileDescriptor`s and returns a boolean result.
+
+### Public Key Encoding
+
+Public keys in the Cosmos SDK implement the `cryptotypes.PubKey` interface.
+We propose to use `Any` for protobuf encoding as we are doing with other interfaces (for example, in `BaseAccount.PubKey` and `SignerInfo.PublicKey`).
+The following public keys are implemented: secp256k1, secp256r1, ed25519 and legacy-multisignature.
+
+Ex:
+
+```protobuf
+message PubKey {
+ bytes key = 1;
+}
+```
+
+`multisig.LegacyAminoPubKey` has an array of `Any`'s member to support any
+protobuf public key type.
+
+Apps should only attempt to handle a registered set of public keys that they
+have tested. The provided signature verification ante handler decorators will
+enforce this.
+
+### CLI & REST
+
+Currently, the REST and CLI handlers encode and decode types and txs via Amino
+JSON encoding using a concrete Amino codec. Being that some of the types dealt with
+in the client can be interfaces, similar to how we described in [ADR 019](/sdk/v0.50/build/architecture/adr-019-protobuf-state-encoding),
+the client logic will now need to take a codec interface that knows not only how
+to handle all the types, but also knows how to generate transactions, signatures,
+and messages.
+
+```go expandable
+type AccountRetriever interface {
+    GetAccount(clientCtx Context, addr sdk.AccAddress) (client.Account, error)
+    GetAccountWithHeight(clientCtx Context, addr sdk.AccAddress) (client.Account, int64, error)
+    EnsureExists(clientCtx client.Context, addr sdk.AccAddress) error
+    GetAccountNumberSequence(clientCtx client.Context, addr sdk.AccAddress) (uint64, uint64, error)
+}
+
+type Generator interface {
+    NewTx() TxBuilder
+    NewFee() ClientFee
+    NewSignature() ClientSignature
+    MarshalTx(tx types.Tx) ([]byte, error)
+}
+
+type TxBuilder interface {
+    GetTx() sdk.Tx
+
+    SetMsgs(...sdk.Msg) error
+    GetSignatures() []sdk.Signature
+    SetSignatures(...sdk.Signature)
+
+    GetFee() sdk.Fee
+    SetFee(sdk.Fee)
+
+    GetMemo() string
+    SetMemo(string)
+}
+```
+
+We then update `Context` to have new fields: `Codec`, `TxGenerator`,
+and `AccountRetriever`, and we update `AppModuleBasic.GetTxCmd` to take
+a `Context` which should have all of these fields pre-populated.
+
+Each client method should then use one of the `Init` methods to re-initialize
+the pre-populated `Context`. `tx.GenerateOrBroadcastTx` can be used to
+generate or broadcast a transaction. For example:
+
+```go expandable
+import "github.com/spf13/cobra"
+import "github.com/cosmos/cosmos-sdk/client"
+import "github.com/cosmos/cosmos-sdk/client/tx"
+
+func NewCmdDoSomething(clientCtx client.Context) *cobra.Command {
+    return &cobra.Command{
+        RunE: func(cmd *cobra.Command, args []string) error {
+            clientCtx := ctx.InitWithInput(cmd.InOrStdin())
+            msg := NewSomeMsg{...}
+            tx.GenerateOrBroadcastTx(clientCtx, msg)
+        },
+    }
+}
+```
+
+## Future Improvements
+
+### `SIGN_MODE_TEXTUAL` specification
+
+A concrete specification and implementation of `SIGN_MODE_TEXTUAL` is intended
+as a near-term future improvement so that the ledger app and other wallets
+can gracefully transition away from Amino JSON.
+
+### `SIGN_MODE_DIRECT_AUX`
+
+(Documented as option (3) in [Link](https://github.com/cosmos/cosmos-sdk/issues/6078#issuecomment-628026933))
+
+We could add a mode `SIGN_MODE_DIRECT_AUX`
+to support scenarios where multiple signatures
+are being gathered into a single transaction but the message composer does not
+yet know which signatures will be included in the final transaction. For instance,
+I may have a 3/5 multisig wallet and want to send a `TxBody` to all 5
+signers to see who signs first. As soon as I have 3 signatures then I will go
+ahead and build the full transaction.
+
+With `SIGN_MODE_DIRECT`, each signer needs
+to sign the full `AuthInfo` which includes the full list of all signers and
+their signing modes, making the above scenario very hard.
+
+`SIGN_MODE_DIRECT_AUX` would allow "auxiliary" signers to create their signature
+using only `TxBody` and their own `PublicKey`. This allows the full list of
+signers in `AuthInfo` to be delayed until signatures have been collected.
+
+An "auxiliary" signer is any signer besides the primary signer who is paying
+the fee. For the primary signer, the full `AuthInfo` is actually needed to calculate gas and fees
+because that is dependent on how many signers and which key types and signing
+modes they are using. Auxiliary signers, however, do not need to worry about
+fees or gas and thus can just sign `TxBody`.
+
+To generate a signature in `SIGN_MODE_DIRECT_AUX` these steps would be followed:
+
+1. Encode `SignDocAux` (with the same requirement that fields must be serialized
+ in order):
+
+ ```protobuf expandable
+ // types/types.proto
+ message SignDocAux {
+ bytes body_bytes = 1;
+ // PublicKey is included in SignDocAux :
+ // 1. as a special case for multisig public keys. For multisig public keys,
+ // the signer should use the top-level multisig public key they are signing
+ // against, not their own public key. This is to prevent against a form
+ // of malleability where a signature could be taken out of context of the
+ // multisig key that was intended to be signed for
+ // 2. to guard against scenario where configuration information is encoded
+ // in public keys (it has been proposed) such that two keys can generate
+ // the same signature but have different security properties
+ //
+ // By including it here, the composer of AuthInfo cannot reference the
+ // a public key variant the signer did not intend to use
+ PublicKey public_key = 2;
+ string chain_id = 3;
+ uint64 account_number = 4;
+ }
+ ```
+
+2. Sign the encoded `SignDocAux` bytes
+
+3. Send their signature and `SignerInfo` to primary signer who will then
+ sign and broadcast the final transaction (with `SIGN_MODE_DIRECT` and `AuthInfo`
+ added) once enough signatures have been collected
+
+### `SIGN_MODE_DIRECT_RELAXED`
+
+(*Documented as option (1)(a) in [Link](https://github.com/cosmos/cosmos-sdk/issues/6078#issuecomment-628026933)*)
+
+This is a variation of `SIGN_MODE_DIRECT` where multiple signers wouldn't need to
+coordinate public keys and signing modes in advance. It would involve an alternate
+`SignDoc` similar to `SignDocAux` above with fee. This could be added in the future
+if client developers found the burden of collecting public keys and modes in advance
+too burdensome.
+
+## Consequences
+
+### Positive
+
+* Significant performance gains.
+* Supports backward and forward type compatibility.
+* Better support for cross-language clients.
+* Multiple signing modes allow for greater protocol evolution
+
+### Negative
+
+* `google.protobuf.Any` type URLs increase transaction size although the effect
+ may be negligible or compression may be able to mitigate it.
+
+### Neutral
+
+## References
diff --git a/sdk/next/build/architecture/adr-021-protobuf-query-encoding.mdx b/sdk/next/build/architecture/adr-021-protobuf-query-encoding.mdx
new file mode 100644
index 000000000..419ef0881
--- /dev/null
+++ b/sdk/next/build/architecture/adr-021-protobuf-query-encoding.mdx
@@ -0,0 +1,274 @@
+---
+title: 'ADR 021: Protocol Buffer Query Encoding'
+description: '2020 March 27: Initial Draft'
+---
+
+## Changelog
+
+* 2020 March 27: Initial Draft
+
+## Status
+
+Accepted
+
+## Context
+
+This ADR is a continuation of the motivation, design, and context established in
+[ADR 019](/sdk/v0.50/build/architecture/adr-019-protobuf-state-encoding) and
+[ADR 020](/sdk/v0.50/build/architecture/adr-020-protobuf-transaction-encoding), namely, we aim to design the
+Protocol Buffer migration path for the client-side of the Cosmos SDK.
+
+This ADR continues from [ADR 020](/sdk/v0.50/build/architecture/adr-020-protobuf-transaction-encoding)
+to specify the encoding of queries.
+
+## Decision
+
+### Custom Query Definition
+
+Modules define custom queries through a protocol buffers `service` definition.
+These `service` definitions are generally associated with and used by the
+GRPC protocol. However, the protocol buffers specification indicates that
+they can be used more generically by any request/response protocol that uses
+protocol buffer encoding. Thus, we can use `service` definitions for specifying
+custom ABCI queries and even reuse a substantial amount of the GRPC infrastructure.
+
+Each module with custom queries should define a service canonically named `Query`:
+
+```protobuf
+// x/bank/types/types.proto
+
+service Query {
+ rpc QueryBalance(QueryBalanceParams) returns (cosmos_sdk.v1.Coin) { }
+ rpc QueryAllBalances(QueryAllBalancesParams) returns (QueryAllBalancesResponse) { }
+}
+```
+
+#### Handling of Interface Types
+
+Modules that use interface types and need true polymorphism generally force a
+`oneof` up to the app-level that provides the set of concrete implementations of
+that interface that the app supports. While apps are welcome to do the same for
+queries and implement an app-level query service, it is recommended that modules
+provide query methods that expose these interfaces via `google.protobuf.Any`.
+There is a concern on the transaction level that the overhead of `Any` is too
+high to justify its usage. However for queries this is not a concern, and
+providing generic module-level queries that use `Any` does not preclude apps
+from also providing app-level queries that return the app-level `oneof`s.
+
+A hypothetical example for the `gov` module would look something like:
+
+```protobuf expandable
+// x/gov/types/types.proto
+
+import "google/protobuf/any.proto";
+
+service Query {
+ rpc GetProposal(GetProposalParams) returns (AnyProposal) { }
+}
+
+message AnyProposal {
+ ProposalBase base = 1;
+ google.protobuf.Any content = 2;
+}
+```
+
+### Custom Query Implementation
+
+In order to implement the query service, we can reuse the existing [gogo protobuf](https://github.com/cosmos/gogoproto)
+grpc plugin, which for a service named `Query` generates an interface named
+`QueryServer` as below:
+
+```go
+type QueryServer interface {
+    QueryBalance(context.Context, *QueryBalanceParams) (*types.Coin, error)
+    QueryAllBalances(context.Context, *QueryAllBalancesParams) (*QueryAllBalancesResponse, error)
+}
+```
+
+The custom queries for our module are implemented by implementing this interface.
+
+The first parameter in this generated interface is a generic `context.Context`,
+whereas querier methods generally need an instance of `sdk.Context` to read
+from the store. Since arbitrary values can be attached to `context.Context`
+using the `WithValue` and `Value` methods, the Cosmos SDK should provide a function
+`sdk.UnwrapSDKContext` to retrieve the `sdk.Context` from the provided
+`context.Context`.
+
+An example implementation of `QueryBalance` for the bank module as above would
+look something like:
+
+```go
+type Querier struct {
+    Keeper
+}
+
+func (q Querier) QueryBalance(ctx context.Context, params *types.QueryBalanceParams) (*sdk.Coin, error) {
+    balance := q.GetBalance(sdk.UnwrapSDKContext(ctx), params.Address, params.Denom)
+    return &balance, nil
+}
+```
+
+### Custom Query Registration and Routing
+
+Query server implementations as above would be registered with `AppModule`s using
+a new method `RegisterQueryService(grpc.Server)` which could be implemented simply
+as below:
+
+```go
+// x/bank/module.go
+func (am AppModule) RegisterQueryService(server grpc.Server) {
+    types.RegisterQueryServer(server, keeper.Querier{am.keeper})
+}
+```
+
+Underneath the hood, a new method `RegisterService(sd *grpc.ServiceDesc, handler interface{})`
+will be added to the existing `baseapp.QueryRouter` to add the queries to the custom
+query routing table (with the routing method being described below).
+The signature for this method matches the existing
+`RegisterServer` method on the GRPC `Server` type where `handler` is the custom
+query server implementation described above.
+
+GRPC-like requests are routed by the service name (ex. `cosmos_sdk.x.bank.v1.Query`)
+and method name (ex. `QueryBalance`) combined with `/`s to form a full
+method name (ex. `/cosmos_sdk.x.bank.v1.Query/QueryBalance`). This gets translated
+into an ABCI query as `custom/cosmos_sdk.x.bank.v1.Query/QueryBalance`. Service handlers
+registered with `QueryRouter.RegisterService` will be routed this way.
+
+Beyond the method name, GRPC requests carry a protobuf encoded payload, which maps naturally
+to `RequestQuery.Data`, and receive a protobuf encoded response or error. Thus
+there is a quite natural mapping of GRPC-like rpc methods to the existing
+`sdk.Query` and `QueryRouter` infrastructure.
+
+This basic specification allows us to reuse protocol buffer `service` definitions
+for ABCI custom queries substantially reducing the need for manual decoding and
+encoding in query methods.
+
+### GRPC Protocol Support
+
+In addition to providing an ABCI query pathway, we can easily provide a GRPC
+proxy server that routes requests in the GRPC protocol to ABCI query requests
+under the hood. In this way, clients could use their host languages' existing
+GRPC implementations to make direct queries against Cosmos SDK apps using
+these `service` definitions. In order for this server to work, the `QueryRouter`
+on `BaseApp` will need to expose the service handlers registered with
+`QueryRouter.RegisterService` to the proxy server implementation. Nodes could
+launch the proxy server on a separate port in the same process as the ABCI app
+with a command-line flag.
+
+### REST Queries and Swagger Generation
+
+[grpc-gateway](https://github.com/grpc-ecosystem/grpc-gateway) is a project that
+translates REST calls into GRPC calls using special annotations on service
+methods. Modules that want to expose REST queries should add `google.api.http`
+annotations to their `rpc` methods as in this example below.
+
+```protobuf expandable
+// x/bank/types/types.proto
+
+service Query {
+ rpc QueryBalance(QueryBalanceParams) returns (cosmos_sdk.v1.Coin) {
+ option (google.api.http) = {
+ get: "/x/bank/v1/balance/{address}/{denom}"
+ };
+ }
+ rpc QueryAllBalances(QueryAllBalancesParams) returns (QueryAllBalancesResponse) {
+ option (google.api.http) = {
+ get: "/x/bank/v1/balances/{address}"
+ };
+ }
+}
+```
+
+grpc-gateway will work directly against the GRPC proxy described above which will
+translate requests to ABCI queries under the hood. grpc-gateway can also
+generate Swagger definitions automatically.
+
+In the current implementation of REST queries, each module needs to implement
+REST queries manually in addition to ABCI querier methods. Using the grpc-gateway
+approach, there will be no need to generate separate REST query handlers, just
+query servers as described above as grpc-gateway handles the translation of protobuf
+to REST as well as Swagger definitions.
+
+The Cosmos SDK should provide CLI commands for apps to start GRPC gateway either in
+a separate process or the same process as the ABCI app, as well as provide a
+command for generating grpc-gateway proxy `.proto` files and the `swagger.json`
+file.
+
+### Client Usage
+
+The gogo protobuf grpc plugin generates client interfaces in addition to server
+interfaces. For the `Query` service defined above we would get a `QueryClient`
+interface like:
+
+```go
+type QueryClient interface {
+    QueryBalance(ctx context.Context, in *QueryBalanceParams, opts ...grpc.CallOption) (*types.Coin, error)
+    QueryAllBalances(ctx context.Context, in *QueryAllBalancesParams, opts ...grpc.CallOption) (*QueryAllBalancesResponse, error)
+}
+```
+
+Via a small patch to gogo protobuf ([gogo/protobuf#675](https://github.com/gogo/protobuf/pull/675))
+we have tweaked the grpc codegen to use an interface rather than concrete type
+for the generated client struct. This allows us to also reuse the GRPC infrastructure
+for ABCI client queries.
+
+`Context` will receive a new method `QueryConn` that returns a `ClientConn`
+that routes calls to ABCI queries.
+
+Clients (such as CLI methods) will then be able to call query methods like this:
+
+```go
+clientCtx := client.NewContext()
+queryClient := types.NewQueryClient(clientCtx.QueryConn())
+params := &types.QueryBalanceParams{addr, denom}
+
+result, err := queryClient.QueryBalance(gocontext.Background(), params)
+```
+
+### Testing
+
+Tests would be able to create a query client directly from keeper and `sdk.Context`
+references using a `QueryServerTestHelper` as below:
+
+```go
+queryHelper := baseapp.NewQueryServerTestHelper(ctx)
+
+types.RegisterQueryServer(queryHelper, keeper.Querier{app.BankKeeper})
+queryClient := types.NewQueryClient(queryHelper)
+```
+
+## Future Improvements
+
+## Consequences
+
+### Positive
+
+* greatly simplified querier implementation (no manual encoding/decoding)
+* easy query client generation (can use existing grpc and swagger tools)
+* no need for REST query implementations
+* type safe query methods (generated via grpc plugin)
+* going forward, there will be less breakage of query methods because of the
+ backwards compatibility guarantees provided by buf
+
+### Negative
+
+* all clients using the existing ABCI/REST queries will need to be refactored
+ for both the new GRPC/REST query paths as well as protobuf/proto-json encoded
+ data, but this is more or less unavoidable in the protobuf refactoring
+
+### Neutral
+
+## References
diff --git a/sdk/next/build/architecture/adr-022-custom-panic-handling.mdx b/sdk/next/build/architecture/adr-022-custom-panic-handling.mdx
new file mode 100644
index 000000000..d30fcfa09
--- /dev/null
+++ b/sdk/next/build/architecture/adr-022-custom-panic-handling.mdx
@@ -0,0 +1,265 @@
+---
+title: "ADR 022: Custom BaseApp panic handling"
+description: "2020 Apr 24: Initial Draft 2021 Sep 14: Superseded by ADR-045"
+---
+
+## Changelog
+
+- 2020 Apr 24: Initial Draft
+- 2021 Sep 14: Superseded by ADR-045
+
+## Status
+
+SUPERSEDED by ADR-045
+
+## Context
+
+The current implementation of BaseApp does not allow developers to write custom error handlers during panic recovery in the
+[runTx()](https://github.com/cosmos/cosmos-sdk/blob/bad4ca75f58b182f600396ca350ad844c18fc80b/baseapp/baseapp.go#L539)
+method. We think that this method can be more flexible and can give Cosmos SDK users more options for customization without
+the need to rewrite whole BaseApp. Also there's one special case for `sdk.ErrorOutOfGas` error handling, that case
+might be handled in a "standard" way (middleware) alongside the others.
+
+We propose middleware-solution, which could help developers implement the following cases:
+
+- add external logging (let's say sending reports to external services like [Sentry](https://sentry.io));
+- call panic for specific error cases;
+
+It will also make the `OutOfGas` case and the `default` case two of the middlewares.
+The `default` case wraps the recovery object into an error and logs it ([example middleware implementation](#Recovery-middleware)).
+
+Our project has a sidecar service running alongside the blockchain node (smart contracts virtual machine). It is
+essential that node `<->` sidecar connectivity stays stable for TXs processing. So when the communication breaks we need
+to crash the node and reboot it once the problem is solved. That behavior makes node's state machine execution
+deterministic. As all keeper panics are caught by runTx's `defer()` handler, we have to adjust the BaseApp code
+in order to customize it.
+
+## Decision
+
+### Design
+
+#### Overview
+
+Instead of hardcoding custom error handling into BaseApp we suggest using a set of middlewares which can be customized
+externally and will allow developers to use as many custom error handlers as they want. Implementation with tests
+can be found [here](https://github.com/cosmos/cosmos-sdk/pull/6053).
+
+#### Implementation details
+
+##### Recovery handler
+
+New `RecoveryHandler` type added. `recoveryObj` input argument is an object returned by the standard Go function
+`recover()` from the `builtin` package.
+
+```go
+type RecoveryHandler func(recoveryObj interface{}) error
+
+
+
+```
+
+Handler should type assert (or other methods) an object to define if object should be handled.
+`nil` should be returned if input object can't be handled by that `RecoveryHandler` (not a handler's target type).
+Not `nil` error should be returned if input object was handled and middleware chain execution should be stopped.
+
+An example:
+
+```go
+func exampleErrHandler(recoveryObj interface{}) error {
+    err, ok := recoveryObj.(error)
+    if !ok {
+        return nil
+    }
+
+    if someSpecificError.Is(err) {
+        panic(customPanicMsg)
+    } else {
+        return nil
+    }
+}
+
+
+
+
+```
+
+This example breaks the application execution, but it also might enrich the error's context like the `OutOfGas` handler.
+
+##### Recovery middleware
+
+We also add a middleware type (decorator). That function type wraps `RecoveryHandler` and returns the next middleware in
+execution chain and handler's `error`. Type is used to separate actual `recovery()` object handling from middleware
+chain processing.
+
+```go
+type recoveryMiddleware func(recoveryObj interface{}) (recoveryMiddleware, error)
+
+func newRecoveryMiddleware(handler RecoveryHandler, next recoveryMiddleware) recoveryMiddleware {
+    return func(recoveryObj interface{}) (recoveryMiddleware, error) {
+        if err := handler(recoveryObj); err != nil {
+            return nil, err
+        }
+
+        return next, nil
+    }
+}
+
+
+
+
+```
+
+Function receives a `recoveryObj` object and returns:
+
+- (next `recoveryMiddleware`, `nil`) if object wasn't handled (not a target type) by `RecoveryHandler`;
+- (`nil`, not nil `error`) if input object was handled and other middlewares in the chain should not be executed;
+- (`nil`, `nil`) in case of invalid behavior. Panic recovery might not have been properly handled;
+ this can be avoided by always using a `default` as the rightmost middleware in the chain (always returns an `error`);
+
+`OutOfGas` middleware example:
+
+```go expandable
+func newOutOfGasRecoveryMiddleware(gasWanted uint64, ctx sdk.Context, next recoveryMiddleware) recoveryMiddleware {
+    handler := func(recoveryObj interface{}) error {
+        err, ok := recoveryObj.(sdk.ErrorOutOfGas)
+        if !ok {
+            return nil
+        }
+
+        return errorsmod.Wrap(
+            sdkerrors.ErrOutOfGas, fmt.Sprintf(
+                "out of gas in location: %v; gasWanted: %d, gasUsed: %d", err.Descriptor, gasWanted, ctx.GasMeter().GasConsumed(),
+            ),
+        )
+    }
+
+    return newRecoveryMiddleware(handler, next)
+}
+
+
+
+
+
+```
+
+`Default` middleware example:
+
+```go
+func newDefaultRecoveryMiddleware() recoveryMiddleware {
+    handler := func(recoveryObj interface{}) error {
+        return errorsmod.Wrap(
+            sdkerrors.ErrPanic, fmt.Sprintf("recovered: %v\nstack:\n%v", recoveryObj, string(debug.Stack())),
+        )
+    }
+
+    return newRecoveryMiddleware(handler, nil)
+}
+
+
+
+
+
+```
+
+##### Recovery processing
+
+Basic chain of middlewares processing would look like:
+
+```go
+func processRecovery(recoveryObj interface{}, middleware recoveryMiddleware) error {
+    if middleware == nil {
+        return nil
+    }
+
+    next, err := middleware(recoveryObj)
+    if err != nil {
+        return err
+    }
+
+    if next == nil {
+        return nil
+    }
+
+    return processRecovery(recoveryObj, next)
+}
+
+
+```
+
+That way we can create a middleware chain which is executed from left to right, the rightmost middleware is a
+`default` handler which must return an `error`.
+
+##### BaseApp changes
+
+The `default` middleware chain must exist in a `BaseApp` object. `Baseapp` modifications:
+
+```go expandable
+type BaseApp struct {
+    // ...
+    runTxRecoveryMiddleware recoveryMiddleware
+}
+
+func NewBaseApp(...) {
+    // ...
+    app.runTxRecoveryMiddleware = newDefaultRecoveryMiddleware()
+}
+
+func (app *BaseApp) runTx(...) {
+    // ...
+    defer func() {
+        if r := recover(); r != nil {
+            recoveryMW := newOutOfGasRecoveryMiddleware(gasWanted, ctx, app.runTxRecoveryMiddleware)
+            err, result = processRecovery(r, recoveryMW), nil
+        }
+
+        gInfo = sdk.GasInfo{
+            GasWanted: gasWanted,
+            GasUsed:   ctx.GasMeter().GasConsumed(),
+        }
+    }()
+    // ...
+}
+
+
+
+
+```
+
+Developers can add their custom `RecoveryHandler`s by providing `AddRunTxRecoveryHandler` as a BaseApp option parameter to the `NewBaseapp` constructor:
+
+```go
+func (app *BaseApp) AddRunTxRecoveryHandler(handlers ...RecoveryHandler) {
+    for _, h := range handlers {
+        app.runTxRecoveryMiddleware = newRecoveryMiddleware(h, app.runTxRecoveryMiddleware)
+    }
+}
+
+
+```
+
+This method would prepend handlers to an existing chain.
+
+## Consequences
+
+### Positive
+
+- Developers of Cosmos SDK based projects can add custom panic handlers to:
+ - add error context for custom panic sources (panic inside of custom keepers);
+ - emit `panic()`: passthrough recovery object to the Tendermint core;
+ - other necessary handling;
+- Developers can use the standard Cosmos SDK `BaseApp` implementation, rather than rewriting it in their projects;
+- Proposed solution doesn't break the current "standard" `runTx()` flow;
+
+### Negative
+
+- Introduces changes to the execution model design.
+
+### Neutral
+
+- `OutOfGas` error handler becomes one of the middlewares;
+- Default panic handler becomes one of the middlewares;
+
+## References
+
+- [PR-6053 with proposed solution](https://github.com/cosmos/cosmos-sdk/pull/6053)
+- [Similar solution. ADR-010 Modular AnteHandler](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-010-modular-antehandler.md)
diff --git a/sdk/next/build/architecture/adr-023-protobuf-naming.mdx b/sdk/next/build/architecture/adr-023-protobuf-naming.mdx
new file mode 100644
index 000000000..219e5044e
--- /dev/null
+++ b/sdk/next/build/architecture/adr-023-protobuf-naming.mdx
@@ -0,0 +1,266 @@
+---
+title: 'ADR 023: Protocol Buffer Naming and Versioning Conventions'
+description: '2020 April 27: Initial Draft 2020 August 5: Update guidelines'
+---
+
+## Changelog
+
+* 2020 April 27: Initial Draft
+* 2020 August 5: Update guidelines
+
+## Status
+
+Accepted
+
+## Context
+
+Protocol Buffers provide a basic [style guide](https://developers.google.com/protocol-buffers/docs/style)
+and [Buf](https://buf.build/docs/style-guide) builds upon that. To the
+extent possible, we want to follow industry accepted guidelines and wisdom for
+the effective usage of protobuf, deviating from those only when there is clear
+rationale for our use case.
+
+### Adoption of `Any`
+
+The adoption of `google.protobuf.Any` as the recommended approach for encoding
+interface types (as opposed to `oneof`) makes package naming a central part
+of the encoding as fully-qualified message names now appear in encoded
+messages.
+
+### Current Directory Organization
+
+Thus far we have mostly followed [Buf's](https://buf.build) [DEFAULT](https://buf.build/docs/lint-checkers#default)
+recommendations, with the minor deviation of disabling [`PACKAGE_DIRECTORY_MATCH`](https://buf.build/docs/lint-checkers#file_layout)
+which although being convenient for developing code comes with the warning
+from Buf that:
+
+> you will have a very bad time with many Protobuf plugins across various languages if you do not do this
+
+### Adoption of gRPC Queries
+
+In [ADR 021](/sdk/v0.53/build/architecture/adr-021-protobuf-query-encoding), gRPC was adopted for Protobuf
+native queries. The full gRPC service path thus becomes a key part of ABCI query
+path. In the future, gRPC queries may be allowed from within persistent scripts
+by technologies such as CosmWasm and these query routes would be stored within
+script binaries.
+
+## Decision
+
+The goal of this ADR is to provide thoughtful naming conventions that:
+
+* encourage a good user experience for when users interact directly with
+ .proto files and fully-qualified protobuf names
+* balance conciseness against the possibility of either over-optimizing (making
+ names too short and cryptic) or under-optimizing (just accepting bloated names
+ with lots of redundant information)
+
+These guidelines are meant to act as a style guide for both the Cosmos SDK and
+third-party modules.
+
+As a starting point, we should adopt all of the [DEFAULT](https://buf.build/docs/lint-checkers#default)
+checkers in [Buf's](https://buf.build) including [`PACKAGE_DIRECTORY_MATCH`](https://buf.build/docs/lint-checkers#file_layout),
+except:
+
+* [PACKAGE\_VERSION\_SUFFIX](https://buf.build/docs/lint-checkers#package_version_suffix)
+* [SERVICE\_SUFFIX](https://buf.build/docs/lint-checkers#service_suffix)
+
+Further guidelines to be described below.
+
+### Principles
+
+#### Concise and Descriptive Names
+
+Names should be descriptive enough to convey their meaning and distinguish
+them from other names.
+
+Given that we are using fully-qualified names within
+`google.protobuf.Any` as well as within gRPC query routes, we should aim to
+keep names concise, without going overboard. The general rule of thumb should
+be if a shorter name would convey more or less the same thing, pick the shorter
+name.
+
+For instance, `cosmos.bank.MsgSend` (19 bytes) conveys roughly the same information
+as `cosmos_sdk.x.bank.v1.MsgSend` (28 bytes) but is more concise.
+
+Such conciseness makes names both more pleasant to work with and take up less
+space within transactions and on the wire.
+
+We should also resist the temptation to over-optimize, by making names
+cryptically short with abbreviations. For instance, we shouldn't try to
+reduce `cosmos.bank.MsgSend` to `csm.bk.MSnd` just to save a few bytes.
+
+The goal is to make names ***concise but not cryptic***.
+
+#### Names are for Clients First
+
+Package and type names should be chosen for the benefit of users, not
+necessarily because of legacy concerns related to the go code-base.
+
+#### Plan for Longevity
+
+In the interests of long-term support, we should plan on the names we do
+choose to be in usage for a long time, so now is the opportunity to make
+the best choices for the future.
+
+### Versioning
+
+#### Guidelines on Stable Package Versions
+
+In general, schema evolution is the way to update protobuf schemas. That means that new fields,
+messages, and RPC methods are *added* to existing schemas and old fields, messages and RPC methods
+are maintained as long as possible.
+
+Breaking things is often unacceptable in a blockchain scenario. For instance, immutable smart contracts
+may depend on certain data schemas on the host chain. If the host chain breaks those schemas, the smart
+contract may be irreparably broken. Even when things can be fixed (for instance in client software),
+this often comes at a high cost.
+
+Instead of breaking things, we should make every effort to evolve schemas rather than just breaking them.
+[Buf](https://buf.build) breaking change detection should be used on all stable (non-alpha or beta) packages
+to prevent such breakage.
+
+With that in mind, different stable versions (i.e. `v1` or `v2`) of a package should more or less be considered
+different packages and this should be last resort approach for upgrading protobuf schemas. Scenarios where creating
+a `v2` may make sense are:
+
+* we want to create a new module with similar functionality to an existing module and adding `v2` is the most natural
+ way to do this. In that case, there are really just two different, but similar modules with different APIs.
+* we want to add a new revamped API for an existing module and it's just too cumbersome to add it to the existing package,
+ so putting it in `v2` is cleaner for users. In this case, care should be made to not deprecate support for
+ `v1` if it is actively used in immutable smart contracts.
+
+#### Guidelines on unstable (alpha and beta) package versions
+
+The following guidelines are recommended for marking packages as alpha or beta:
+
+* marking something as `alpha` or `beta` should be a last resort and just putting something in the
+ stable package (i.e. `v1` or `v2`) should be preferred
+* a package *should* be marked as `alpha` *if and only if* there are active discussions to remove
+ or significantly alter the package in the near future
+* a package *should* be marked as `beta` *if and only if* there is an active discussion to
+ significantly refactor/rework the functionality in the near future but not remove it
+* modules *can and should* have types in both stable (i.e. `v1` or `v2`) and unstable (`alpha` or `beta`) packages.
+
+*`alpha` and `beta` should not be used to avoid responsibility for maintaining compatibility.*
+Whenever code is released into the wild, especially on a blockchain, there is a high cost to changing things. In some
+cases, for instance with immutable smart contracts, a breaking change may be impossible to fix.
+
+When marking something as `alpha` or `beta`, maintainers should ask the questions:
+
+* what is the cost of asking others to change their code vs the benefit of us maintaining the optionality to change it?
+* what is the plan for moving this to `v1` and how will that affect users?
+
+`alpha` or `beta` should really be used to communicate "changes are planned".
+
+As a case study, gRPC reflection is in the package `grpc.reflection.v1alpha`. It hasn't been changed since
+2017 and it is now used in other widely used software like gRPCurl. Some folks probably use it in production services
+and so if they actually went and changed the package to `grpc.reflection.v1`, some software would break and
+they probably don't want to do that... So now the `v1alpha` package is more or less the de-facto `v1`. Let's not do that.
+
+The following are guidelines for working with non-stable packages:
+
+* [Buf's recommended version suffix](https://buf.build/docs/lint-checkers#package_version_suffix)
+ (ex. `v1alpha1`) *should* be used for non-stable packages
+* non-stable packages should generally be excluded from breaking change detection
+* immutable smart contract modules (i.e. CosmWasm) *should* block smart contracts/persistent
+ scripts from interacting with `alpha`/`beta` packages
+
+#### Omit v1 suffix
+
+Instead of using [Buf's recommended version suffix](https://buf.build/docs/lint-checkers#package_version_suffix),
+we can omit `v1` for packages that don't actually have a second version. This
+allows for more concise names for common use cases like `cosmos.bank.Send`.
+Packages that do have a second or third version can indicate that with `.v2`
+or `.v3`.
+
+### Package Naming
+
+#### Adopt a short, unique top-level package name
+
+Top-level packages should adopt a short name that is known to not collide with
+other names in common usage within the Cosmos ecosystem. In the near future, a
+registry should be created to reserve and index top-level package names used
+within the Cosmos ecosystem. Because the Cosmos SDK is intended to provide
+the top-level types for the Cosmos project, the top-level package name `cosmos`
+is recommended for usage within the Cosmos SDK instead of the longer `cosmos_sdk`.
+[ICS](https://github.com/cosmos/ics) specifications could consider a
+short top-level package like `ics23` based upon the standard number.
+
+#### Limit sub-package depth
+
+Sub-package depth should be increased with caution. Generally a single
+sub-package is needed for a module or a library. Even though `x` or `modules`
+is used in source code to denote modules, this is often unnecessary for .proto
+files as modules are the primary thing sub-packages are used for. Only items which
+are known to be used infrequently should have deep sub-package depths.
+
+For the Cosmos SDK, it is recommended that we simply write `cosmos.bank`,
+`cosmos.gov`, etc. rather than `cosmos.x.bank`. In practice, most non-module
+types can go straight in the `cosmos` package or we can introduce a
+`cosmos.base` package if needed. Note that this naming *will not* change
+go package names, i.e. the `cosmos.bank` protobuf package will still live in
+`x/bank`.
+
+### Message Naming
+
+Message type names should be as concise as possible without losing clarity. `sdk.Msg`
+types which are used in transactions will retain the `Msg` prefix as that provides
+helpful context.
+
+### Service and RPC Naming
+
+[ADR 021](/sdk/v0.53/build/architecture/adr-021-protobuf-query-encoding) specifies that modules should
+implement a gRPC query service. We should consider the principle of conciseness
+for query service and RPC names as these may be called from persistent script
+modules such as CosmWasm. Also, users may use these query paths from tools like
+[gRPCurl](https://github.com/fullstorydev/grpcurl). As an example, we can shorten
+`/cosmos_sdk.x.bank.v1.QueryService/QueryBalance` to
+`/cosmos.bank.Query/Balance` without losing much useful information.
+
+RPC request and response types *should* follow the `ServiceNameMethodNameRequest`/
+`ServiceNameMethodNameResponse` naming convention. i.e. for an RPC method named `Balance`
+on the `Query` service, the request and response types would be `QueryBalanceRequest`
+and `QueryBalanceResponse`. This will be more self-explanatory than `BalanceRequest`
+and `BalanceResponse`.
+
+#### Use just `Query` for the query service
+
+Instead of [Buf's default service suffix recommendation](https://github.com/cosmos/cosmos-sdk/pull/6033),
+we should simply use the shorter `Query` for query services.
+
+For other types of gRPC services, we should consider sticking with Buf's
+default recommendation.
+
+#### Omit `Get` and `Query` from query service RPC names
+
+`Get` and `Query` should be omitted from `Query` service names because they are
+redundant in the fully-qualified name. For instance, `/cosmos.bank.Query/QueryBalance`
+just says `Query` twice without any new information.
+
+## Future Improvements
+
+A registry of top-level package names should be created to coordinate naming
+across the ecosystem, prevent collisions, and also help developers discover
+useful schemas. A simple starting point would be a git repository with
+community-based governance.
+
+## Consequences
+
+### Positive
+
+* names will be more concise and easier to read and type
+* all transactions using `Any` will be shorter (`_sdk.x` and `.v1` will be removed)
+* `.proto` file imports will be more standard (without `"third_party/proto"` in
+ the path)
+* code generation will be easier for clients because .proto files will be
+ in a single `proto/` directory which can be copied rather than scattered
+ throughout the Cosmos SDK
+
+### Negative
+
+### Neutral
+
+* `.proto` files will need to be reorganized and refactored
+* some modules may need to be marked as alpha or beta
+
+## References
diff --git a/sdk/next/build/architecture/adr-024-coin-metadata.mdx b/sdk/next/build/architecture/adr-024-coin-metadata.mdx
new file mode 100644
index 000000000..b6acb5d53
--- /dev/null
+++ b/sdk/next/build/architecture/adr-024-coin-metadata.mdx
@@ -0,0 +1,145 @@
+---
+title: 'ADR 024: Coin Metadata'
+description: '05/19/2020: Initial draft'
+---
+
+## Changelog
+
+* 05/19/2020: Initial draft
+
+## Status
+
+Proposed
+
+## Context
+
+Assets in the Cosmos SDK are represented via a `Coins` type that consists of an `amount` and a `denom`,
+where the `amount` can be any arbitrarily large or small value. In addition, the Cosmos SDK uses an
+account-based model where there are two types of primary accounts -- basic accounts and module accounts.
+All account types have a set of balances that are composed of `Coins`. The `x/bank` module keeps
+track of all balances for all accounts and also keeps track of the total supply of balances in an
+application.
+
+With regards to a balance `amount`, the Cosmos SDK assumes a static and fixed unit of denomination,
+regardless of the denomination itself. In other words, clients and apps built atop a Cosmos-SDK-based
+chain may choose to define and use arbitrary units of denomination to provide a richer UX, however, by
+the time a tx or operation reaches the Cosmos SDK state machine, the `amount` is treated as a single
+unit. For example, for the Cosmos Hub (Gaia), clients assume 1 ATOM = 10^6 uatom, and so all txs and
+operations in the Cosmos SDK work off of units of 10^6.
+
+This clearly provides a poor and limited UX especially as interoperability of networks increases and
+as a result the total amount of asset types increases. We propose to have `x/bank` additionally keep
+track of metadata per `denom` in order to help clients, wallet providers, and explorers improve their
+UX and remove the requirement for making any assumptions on the unit of denomination.
+
+## Decision
+
+The `x/bank` module will be updated to store and index metadata by `denom`, specifically the "base" or
+smallest unit -- the unit the Cosmos SDK state-machine works with.
+
+Metadata may also include a non-zero length list of denominations. Each entry contains the name of
+the denomination `denom`, the exponent to the base and a list of aliases. An entry is to be
+interpreted as `1 denom = 10^exponent base_denom` (e.g. `1 ETH = 10^18 wei` and `1 uatom = 10^0 uatom`).
+
+There are two denominations that are of high importance for clients: the `base`, which is the smallest
+possible unit and the `display`, which is the unit that is commonly referred to in human communication
+and on exchanges. The values in those fields link to an entry in the list of denominations.
+
+The list in `denom_units` and the `display` entry may be changed via governance.
+
+As a result, we can define the type as follows:
+
+```protobuf expandable
+message DenomUnit {
+ string denom = 1;
+ uint32 exponent = 2;
+ repeated string aliases = 3;
+}
+
+message Metadata {
+ string description = 1;
+ repeated DenomUnit denom_units = 2;
+ string base = 3;
+ string display = 4;
+}
+```
+
+As an example, the ATOM's metadata can be defined as follows:
+
+```json expandable
+{
+  "name": "atom",
+  "description": "The native staking token of the Cosmos Hub.",
+  "denom_units": [
+    {
+      "denom": "uatom",
+      "exponent": 0,
+      "aliases": [
+        "microatom"
+      ]
+    },
+    {
+      "denom": "matom",
+      "exponent": 3,
+      "aliases": [
+        "milliatom"
+      ]
+    },
+    {
+      "denom": "atom",
+      "exponent": 6
+    }
+  ],
+  "base": "uatom",
+  "display": "atom"
+}
+
+
+```
+
+Given the above metadata, a client may infer the following things:
+
+* 4.3atom = 4.3 \* (10^6) = 4,300,000uatom
+* The string "atom" can be used as a display name in a list of tokens.
+* The balance 4300000 can be displayed as 4,300,000uatom or 4,300matom or 4.3atom.
+ The `display` denomination 4.3atom is a good default if the authors of the client don't make
+ an explicit decision to choose a different representation.
+
+A client should be able to query for metadata by denom both via the CLI and REST interfaces. In
+addition, we will add handlers to these interfaces to convert from any unit to another given unit,
+as the base framework for this already exists in the Cosmos SDK.
+
+Finally, we need to ensure metadata exists in the `GenesisState` of the `x/bank` module which is also
+indexed by the base `denom`.
+
+```go
+type GenesisState struct {
+ SendEnabled bool `json:"send_enabled" yaml:"send_enabled"`
+ Balances []Balance `json:"balances" yaml:"balances"`
+ Supply sdk.Coins `json:"supply" yaml:"supply"`
+ DenomMetadata []Metadata `json:"denom_metadata" yaml:"denom_metadata"`
+}
+```
+
+## Future Work
+
+In order for clients to avoid having to convert assets to the base denomination -- either manually or
+via an endpoint, we may consider supporting automatic conversion of a given unit input.
+
+## Consequences
+
+### Positive
+
+* Provides clients, wallet providers and block explorers with additional data on
+ asset denomination to improve UX and remove any need to make assumptions on
+ denomination units.
+
+### Negative
+
+* A small amount of required additional storage in the `x/bank` module. The amount
+ of additional storage should be minimal as the amount of total assets should not
+ be large.
+
+### Neutral
+
+## References
diff --git a/sdk/next/build/architecture/adr-027-deterministic-protobuf-serialization.mdx b/sdk/next/build/architecture/adr-027-deterministic-protobuf-serialization.mdx
new file mode 100644
index 000000000..271c3ee1c
--- /dev/null
+++ b/sdk/next/build/architecture/adr-027-deterministic-protobuf-serialization.mdx
@@ -0,0 +1,317 @@
+---
+title: 'ADR 027: Deterministic Protobuf Serialization'
+description: '2020-08-07: Initial Draft 2020-09-01: Further clarify rules'
+---
+
+## Changelog
+
+* 2020-08-07: Initial Draft
+* 2020-09-01: Further clarify rules
+
+## Status
+
+Proposed
+
+## Abstract
+
+Fully deterministic structure serialization, which works across many languages and clients,
+is needed when signing messages. We need to be sure that whenever we serialize
+a data structure, no matter in which supported language, the raw bytes
+will stay the same.
+[Protobuf](https://developers.google.com/protocol-buffers/docs/proto3)
+serialization is not bijective (i.e. there exist a practically unlimited number of
+valid binary representations for a given protobuf document)1.
+
+This document describes a deterministic serialization scheme for
+a subset of protobuf documents, that covers this use case but can be reused in
+other cases as well.
+
+### Context
+
+For signature verification in Cosmos SDK, the signer and verifier need to agree on
+the same serialization of a `SignDoc` as defined in
+[ADR-020](/sdk/v0.50/build/architecture/adr-020-protobuf-transaction-encoding) without transmitting the
+serialization.
+
+Currently, for block signatures we are using a workaround: we create a new [TxRaw](https://github.com/cosmos/cosmos-sdk/blob/9e85e81e0e8140067dd893421290c191529c148c/proto/cosmos/tx/v1beta1/tx.proto#L30)
+instance (as defined in [adr-020-protobuf-transaction-encoding](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-020-protobuf-transaction-encoding.md#transactions))
+by converting all [Tx](https://github.com/cosmos/cosmos-sdk/blob/9e85e81e0e8140067dd893421290c191529c148c/proto/cosmos/tx/v1beta1/tx.proto#L13)
+fields to bytes on the client side. This adds an additional manual
+step when sending and signing transactions.
+
+### Decision
+
+The following encoding scheme is to be used by other ADRs,
+and in particular for `SignDoc` serialization.
+
+## Specification
+
+### Scope
+
+This ADR defines a protobuf3 serializer. The output is a valid protobuf
+serialization, such that every protobuf parser can parse it.
+
+No maps are supported in version 1 due to the complexity of defining a
+deterministic serialization. This might change in future. Implementations must
+reject documents containing maps as invalid input.
+
+### Background - Protobuf3 Encoding
+
+Most numeric types in protobuf3 are encoded as
+[varints](https://developers.google.com/protocol-buffers/docs/encoding#varints).
+Varints are at most 10 bytes, and since each varint byte has 7 bits of data,
+varints are a representation of `uint70` (70-bit unsigned integer). When
+encoding, numeric values are cast from their base type to `uint70`, and when
+decoding, the parsed `uint70` is cast to the appropriate numeric type.
+
+The maximum valid value for a varint that complies with protobuf3 is
+`FF FF FF FF FF FF FF FF FF 7F` (i.e. `2**70 -1`). If the field type is
+`{,u,s}int64`, the highest 6 bits of the 70 are dropped during decoding,
+introducing 6 bits of malleability. If the field type is `{,u,s}int32`, the
+highest 38 bits of the 70 are dropped during decoding, introducing 38 bits of
+malleability.
+
+Among other sources of non-determinism, this ADR eliminates the possibility of
+encoding malleability.
+
+### Serialization rules
+
+The serialization is based on the
+[protobuf3 encoding](https://developers.google.com/protocol-buffers/docs/encoding)
+with the following additions:
+
+1. Fields must be serialized only once in ascending order
+2. Extra fields or any extra data must not be added
+3. [Default values](https://developers.google.com/protocol-buffers/docs/proto3#default)
+ must be omitted
+4. `repeated` fields of scalar numeric types must use
+ [packed encoding](https://developers.google.com/protocol-buffers/docs/encoding#packed)
+5. Varint encoding must not be longer than needed:
+ * No trailing zero bytes (in little endian, i.e. no leading zeroes in big
+ endian). Per rule 3 above, the default value of `0` must be omitted, so
+ this rule does not apply in such cases.
+ * The maximum value for a varint must be `FF FF FF FF FF FF FF FF FF 01`.
+ In other words, when decoded, the highest 6 bits of the 70-bit unsigned
+ integer must be `0`. (10-byte varints are 10 groups of 7 bits, i.e.
+ 70 bits, of which only the lowest 70-6=64 are useful.)
+ * The maximum value for 32-bit values in varint encoding must be `FF FF FF FF 0F`
+ with one exception (below). In other words, when decoded, the highest 38
+ bits of the 70-bit unsigned integer must be `0`.
+ * The one exception to the above is *negative* `int32`, which must be
+ encoded using the full 10 bytes for sign extension2.
+ * The maximum value for Boolean values in varint encoding must be `01` (i.e.
+ it must be `0` or `1`). Per rule 3 above, the default value of `0` must
+ be omitted, so if a Boolean is included it must have a value of `1`.
+
+While rules 1 and 2 should be pretty straightforward and describe the
+default behavior of all protobuf encoders the author is aware of, the 3rd rule
+is more interesting. After a protobuf3 deserialization you cannot differentiate
+between unset fields and fields set to the default value3. At
+serialization level however, it is possible to set the fields with an empty
+value or omitting them entirely. This is a significant difference to e.g. JSON
+where a property can be empty (`""`, `0`), `null` or undefined, leading to 3
+different documents.
+
+Omitting fields set to default values is valid because the parser must assign
+the default value to fields missing in the serialization4. For scalar
+types, omitting defaults is required by the spec5. For `repeated`
+fields, not serializing them is the only way to express empty lists. Enums must
+have a first element of numeric value 0, which is the default6. And
+message fields default to unset7.
+
+Omitting defaults allows for some amount of forward compatibility: users of
+newer versions of a protobuf schema produce the same serialization as users of
+older versions as long as newly added fields are not used (i.e. set to their
+default value).
+
+### Implementation
+
+There are three main implementation strategies, ordered from the least to the
+most custom development:
+
+* **Use a protobuf serializer that follows the above rules by default.** E.g.
+ [gogoproto](https://pkg.go.dev/github.com/cosmos/gogoproto/gogoproto) is known to
+ be compliant in most cases, but not when certain annotations such as
+ `nullable = false` are used. It might also be an option to configure an
+ existing serializer accordingly.
+
+* **Normalize default values before encoding them.** If your serializer follows
+ rule 1. and 2. and allows you to explicitly unset fields for serialization,
+ you can normalize default values to unset. This can be done when working with
+ [protobuf.js](https://www.npmjs.com/package/protobufjs):
+
+ ```js
+ const bytes = SignDoc.encode({
+ bodyBytes: body.length > 0 ? body : null, // normalize empty bytes to unset
+ authInfoBytes: authInfo.length > 0 ? authInfo : null, // normalize empty bytes to unset
+ chainId: chainId || null, // normalize "" to unset
+ accountNumber: accountNumber || null, // normalize 0 to unset
+ accountSequence: accountSequence || null, // normalize 0 to unset
+ }).finish();
+ ```
+
+* **Use a hand-written serializer for the types you need.** If none of the above
+ ways works for you, you can write a serializer yourself. For SignDoc this
+ would look something like this in Go, building on existing protobuf utilities:
+
+ ```go expandable
+ if !signDoc.body_bytes.empty() {
+ buf.WriteUVarInt64(0xA) // wire type and field number for body_bytes
+ buf.WriteUVarInt64(signDoc.body_bytes.length())
+
+ buf.WriteBytes(signDoc.body_bytes)
+ }
+ if !signDoc.auth_info.empty() {
+ buf.WriteUVarInt64(0x12) // wire type and field number for auth_info
+ buf.WriteUVarInt64(signDoc.auth_info.length())
+
+ buf.WriteBytes(signDoc.auth_info)
+ }
+ if !signDoc.chain_id.empty() {
+ buf.WriteUVarInt64(0x1a) // wire type and field number for chain_id
+ buf.WriteUVarInt64(signDoc.chain_id.length())
+
+ buf.WriteBytes(signDoc.chain_id)
+ }
+ if signDoc.account_number != 0 {
+ buf.WriteUVarInt64(0x20) // wire type and field number for account_number
+ buf.WriteUVarInt(signDoc.account_number)
+ }
+ if signDoc.account_sequence != 0 {
+ buf.WriteUVarInt64(0x28) // wire type and field number for account_sequence
+ buf.WriteUVarInt(signDoc.account_sequence)
+ }
+ ```
+
+### Test vectors
+
+Given the protobuf definition `Article.proto`
+
+```protobuf expandable
+package blog;
+syntax = "proto3";
+
+enum Type {
+ UNSPECIFIED = 0;
+ IMAGES = 1;
+ NEWS = 2;
+};
+
+enum Review {
+ UNSPECIFIED = 0;
+ ACCEPTED = 1;
+ REJECTED = 2;
+};
+
+message Article {
+ string title = 1;
+ string description = 2;
+ uint64 created = 3;
+ uint64 updated = 4;
+ bool public = 5;
+ bool promoted = 6;
+ Type type = 7;
+ Review review = 8;
+ repeated string comments = 9;
+ repeated string backlinks = 10;
+};
+```
+
+serializing the values
+
+```yaml
+title: "The world needs change 🌳"
+description: ""
+created: 1596806111080
+updated: 0
+public: true
+promoted: false
+type: Type.NEWS
+review: Review.UNSPECIFIED
+comments: ["Nice one", "Thank you"]
+backlinks: []
+```
+
+must result in the serialization
+
+```text
+0a1b54686520776f726c64206e65656473206368616e676520f09f8cb318e8bebec8bc2e280138024a084e696365206f6e654a095468616e6b20796f75
+```
+
+When inspecting the serialized document, you see that every second field is
+omitted:
+
+```shell
+$ echo 0a1b54686520776f726c64206e65656473206368616e676520f09f8cb318e8bebec8bc2e280138024a084e696365206f6e654a095468616e6b20796f75 | xxd -r -p | protoc --decode_raw
+1: "The world needs change \360\237\214\263"
+3: 1596806111080
+5: 1
+7: 2
+9: "Nice one"
+9: "Thank you"
+```
+
+## Consequences
+
+Having such an encoding available allows us to get deterministic serialization
+for all protobuf documents we need in the context of Cosmos SDK signing.
+
+### Positive
+
+* Well defined rules that can be verified independent of a reference
+ implementation
+* Simple enough to keep the barrier to implement transaction signing low
+* It allows us to continue to use 0 and other empty values in SignDoc, avoiding
+ the need to work around 0 sequences. This does not imply the change from
+ [Link](https://github.com/cosmos/cosmos-sdk/pull/6949) should not be merged, but not
+ too important anymore.
+
+### Negative
+
+* When implementing transaction signing, the encoding rules above must be
+ understood and implemented.
+* The need for rule number 3. adds some complexity to implementations.
+* Some data structures may require custom code for serialization. Thus
+ the code is not very portable - it will require additional work for each
+ client implementing serialization to properly handle custom data structures.
+
+### Neutral
+
+### Usage in Cosmos SDK
+
+For the reasons mentioned above ("Negative" section) we prefer to keep workarounds
+for shared data structure. Example: the aforementioned `TxRaw` is using raw bytes
+as a workaround. This allows them to use any valid Protobuf library without
+the need of implementing a custom serializer that adheres to this standard (and related risks of bugs).
+
+## References
+
+* 1 *When a message is serialized, there is no guaranteed order for
+ how its known or unknown fields should be written. Serialization order is an
+ implementation detail and the details of any particular implementation may
+ change in the future. Therefore, protocol buffer parsers must be able to parse
+ fields in any order.* from
+ [Link](https://developers.google.com/protocol-buffers/docs/encoding#order)
+* 2 [Link](https://developers.google.com/protocol-buffers/docs/encoding#signed_integers)
+* 3 *Note that for scalar message fields, once a message is parsed
+ there's no way of telling whether a field was explicitly set to the default
+ value (for example whether a boolean was set to false) or just not set at all:
+ you should bear this in mind when defining your message types. For example,
+ don't have a boolean that switches on some behavior when set to false if you
+ don't want that behavior to also happen by default.* from
+ [Link](https://developers.google.com/protocol-buffers/docs/proto3#default)
+* 4 *When a message is parsed, if the encoded message does not
+ contain a particular singular element, the corresponding field in the parsed
+ object is set to the default value for that field.* from
+ [Link](https://developers.google.com/protocol-buffers/docs/proto3#default)
+* 5 *Also note that if a scalar message field is set to its default,
+ the value will not be serialized on the wire.* from
+ [Link](https://developers.google.com/protocol-buffers/docs/proto3#default)
+* 6 *For enums, the default value is the first defined enum value,
+ which must be 0.* from
+ [Link](https://developers.google.com/protocol-buffers/docs/proto3#default)
+* 7 *For message fields, the field is not set. Its exact value is
+ language-dependent.* from
+ [Link](https://developers.google.com/protocol-buffers/docs/proto3#default)
+* Encoding rules and parts of the reasoning taken from
+ [canonical-proto3 Aaron Craelius](https://github.com/regen-network/canonical-proto3)
diff --git a/sdk/next/build/architecture/adr-028-public-key-addresses.mdx b/sdk/next/build/architecture/adr-028-public-key-addresses.mdx
new file mode 100644
index 000000000..abceb92bb
--- /dev/null
+++ b/sdk/next/build/architecture/adr-028-public-key-addresses.mdx
@@ -0,0 +1,358 @@
+---
+title: 'ADR 028: Public Key Addresses'
+description: '2020/08/18: Initial version 2021/01/15: Analysis and algorithm update'
+---
+
+## Changelog
+
+* 2020/08/18: Initial version
+* 2021/01/15: Analysis and algorithm update
+
+## Status
+
+Proposed
+
+## Abstract
+
+This ADR defines an address format for all addressable Cosmos SDK accounts. That includes: new public key algorithms, multisig public keys, and module accounts.
+
+## Context
+
+Issue [#3685](https://github.com/cosmos/cosmos-sdk/issues/3685) identified that public key
+address spaces are currently overlapping. We confirmed that it significantly decreases security of Cosmos SDK.
+
+### Problem
+
+An attacker can control an input for an address generation function. This leads to a birthday attack, which significantly decreases the security space.
+To overcome this, we need to separate the inputs for different kind of account types:
+a security break of one account type shouldn't impact the security of other account types.
+
+### Initial proposals
+
+One initial proposal was extending the address length and
+adding prefixes for different types of addresses.
+
+@ethanfrey explained an alternate approach originally used in [Link](https://github.com/iov-one/weave):
+
+> I spent quite a bit of time thinking about this issue while building weave... The other cosmos Sdk.
+> Basically I define a condition to be a type and format as human readable string with some binary data appended. This condition is hashed into an Address (again at 20 bytes). The use of this prefix makes it impossible to find a preimage for a given address with a different condition (eg ed25519 vs secp256k1).
+> This is explained in depth here [Link](https://weave.readthedocs.io/en/latest/design/permissions.html)
+> And the code is here, look mainly at the top where we process conditions. [Link](https://github.com/iov-one/weave/blob/master/conditions.go)
+
+And explained how this approach should be sufficiently collision resistant:
+
+> Yeah, AFAIK, 20 bytes should be collision resistance when the preimages are unique and not malleable. A space of 2^160 would expect some collision to be likely around 2^80 elements (birthday paradox). And if you want to find a collision for some existing element in the database, it is still 2^160. 2^80 only is if all these elements are written to state.
+> The good example you brought up was eg. a public key bytes being a valid public key on two algorithms supported by the codec. Meaning if either was broken, you would break accounts even if they were secured with the safer variant. This is only as the issue when no differentiating type info is present in the preimage (before hashing into an address).
+> I would like to hear an argument if the 20 bytes space is an actual issue for security, as I would be happy to increase my address sizes in weave. I just figured cosmos and ethereum and bitcoin all use 20 bytes, it should be good enough. And the arguments above which made me feel it was secure. But I have not done a deeper analysis.
+
+This led to the first proposal (which we proved to be not good enough):
+we concatenate a key type with a public key, hash it and take the first 20 bytes of that hash, summarized as `sha256(keyTypePrefix || keybytes)[:20]`.
+
+### Review and Discussions
+
+In [#5694](https://github.com/cosmos/cosmos-sdk/issues/5694) we discussed various solutions.
+We agreed that 20 bytes is not future proof, and extending the address length is the only way to allow addresses of different types, various signature types, etc.
+This disqualifies the initial proposal.
+
+In the issue we discussed various modifications:
+
+* Choice of the hash function.
+* Move the prefix out of the hash function: `keyTypePrefix + sha256(keybytes)[:20]` \[post-hash-prefix-proposal].
+* Use double hashing: `sha256(keyTypePrefix + sha256(keybytes)[:20])`.
+* Increase the keybytes hash slice from 20 bytes to 32 or 40 bytes. We concluded that 32 bytes, produced by a good hash function, is future secure.
+
+### Requirements
+
+* Support currently used tools - we don't want to break an ecosystem, or add a long adaptation period. Ref: [Link](https://github.com/cosmos/cosmos-sdk/issues/8041)
+* Try to keep the address length small - addresses are widely used in state, both as part of a key and object value.
+
+### Scope
+
+This ADR only defines a process for the generation of address bytes. For end-user interactions with addresses (through the API, or CLI, etc.), we still use bech32 to format these addresses as strings. This ADR doesn't change that.
+Using Bech32 for string encoding gives us support for checksum error codes and handling of user typos.
+
+## Decision
+
+We define the following account types, for which we define the address function:
+
+1. simple accounts: represented by a regular public key (ie: secp256k1, ed25519)
+2. naive multisig: accounts composed by other addressable objects (ie: naive multisig)
+3. composed accounts with a native address key (ie: bls12_381, group module accounts)
+4. module accounts: basically any accounts which cannot sign transactions and which are managed internally by modules
+
+### Legacy Public Key Addresses Don't Change
+
+Currently (Jan 2021), the only officially supported Cosmos SDK user accounts are `secp256k1` basic accounts and legacy amino multisig.
+They are used in existing Cosmos SDK zones. They use the following address formats:
+
+* secp256k1: `ripemd160(sha256(pk_bytes))[:20]`
+* legacy amino multisig: `sha256(aminoCdc.Marshal(pk))[:20]`
+
+We don't want to change existing addresses. So the addresses for these two key types will remain the same.
+
+The current multisig public keys use amino serialization to generate the address. We will retain
+those public keys and their address formatting, and call them "legacy amino" multisig public keys
+in protobuf. We will also create multisig public keys without amino addresses to be described below.
+
+### Hash Function Choice
+
+As in other parts of the Cosmos SDK, we will use `sha256`.
+
+### Basic Address
+
+We start with defining a base algorithm for generating addresses which we will call `Hash`. Notably, it's used for accounts represented by a single key pair. For each public key schema we have to have an associated `typ` string, explained in the next section. `hash` is the cryptographic hash function defined in the previous section.
+
+```go
+const A_LEN = 32
+
+func Hash(typ string, key []byte) []byte {
+ return hash(hash(typ) + key)[:A_LEN]
+}
+```
+
+The `+` is bytes concatenation, which doesn't use any separator.
+
+This algorithm is the outcome of a consultation session with a professional cryptographer.
+Motivation: this algorithm keeps the address relatively small (length of the `typ` doesn't impact the length of the final address)
+and it's more secure than \[post-hash-prefix-proposal] (which uses the first 20 bytes of a pubkey hash, significantly reducing the address space).
+Moreover the cryptographer motivated the choice of adding `typ` in the hash to protect against a switch table attack.
+
+`address.Hash` is a low level function to generate *base* addresses for new key types. Example:
+
+* BLS: `address.Hash("bls", pubkey)`
+
+### Composed Addresses
+
+For simple composed accounts (like a new naive multisig) we generalize the `address.Hash`. The address is constructed by recursively creating addresses for the sub accounts, sorting the addresses and composing them into a single address. It ensures that the ordering of keys doesn't impact the resulting address.
+
+```go
+// We don't need a PubKey interface - we need anything which is addressable.
+type Addressable interface {
+ Address() []byte
+}
+
+func Composed(typ string, subaccounts []Addressable) []byte {
+ addresses = map(subaccounts, \a -> LengthPrefix(a.Address()))
+
+addresses = sort(addresses)
+
+return address.Hash(typ, addresses[0] + ... + addresses[n])
+}
+```
+
+The `typ` parameter should be a schema descriptor, containing all significant attributes with deterministic serialization (eg: utf8 string).
+`LengthPrefix` is a function which prepends 1 byte to the address. The value of that byte is the length of the address bytes before prepending. The address must be at most 255 bytes long.
+We are using `LengthPrefix` to eliminate conflicts - it assures that, for 2 lists of addresses `as = {a1, a2, ..., an}` and `bs = {b1, b2, ..., bm}` such that every `ai` and `bi` is at most 255 bytes long, `concatenate(map(as, (a) => LengthPrefix(a))) = concatenate(map(bs, (b) => LengthPrefix(b)))` if and only if `as = bs`.
+
+Implementation Tip: account implementations should cache addresses.
+
+#### Multisig Addresses
+
+For a new multisig public keys, we define the `typ` parameter not based on any encoding scheme (amino or protobuf). This avoids issues with non-determinism in the encoding scheme.
+
+Example:
+
+```protobuf
+package cosmos.crypto.multisig;
+
+message PubKey {
+ uint32 threshold = 1;
+ repeated google.protobuf.Any pubkeys = 2;
+}
+```
+
+```go expandable
+func (multisig PubKey)
+
+Address() {
+ // first gather all nested pub keys
+ var keys []address.Addressable // cryptotypes.PubKey implements Addressable
+ for _, key := range multisig.Pubkeys {
+ keys = append(keys, key.GetCachedValue().(cryptotypes.PubKey))
+}
+
+ // form the type from the message name (cosmos.crypto.multisig.PubKey)
+
+and the threshold joined together
+ prefix := fmt.Sprintf("%s/%d", proto.MessageName(multisig), multisig.Threshold)
+
+ // use the Composed function defined above
+ return address.Composed(prefix, keys)
+}
+```
+
+### Derived Addresses
+
+We must be able to cryptographically derive one address from another one. The derivation process must guarantee hash properties, hence we use the already defined `Hash` function:
+
+```go
+func Derive(address, derivationKey []byte) []byte {
+ return Hash(address, derivationKey)
+}
+```
+
+### Module Account Addresses
+
+A module account will have `"module"` type. Module accounts can have sub accounts. The submodule account will be created based on module name, and sequence of derivation keys. Typically, the first derivation key should be a class of the derived accounts. The derivation process has a defined order: module name, submodule key, subsubmodule key... An example module account is created using:
+
+```go
+address.Module(moduleName, key)
+```
+
+An example sub-module account is created using:
+
+```go
+groupPolicyAddresses := []byte{1
+}
+
+address.Module(moduleName, groupPolicyAddresses, policyID)
+```
+
+The `address.Module` function is using `address.Hash` with `"module"` as the type argument, and byte representation of the module name concatenated with submodule key. The two last component must be uniquely separated to avoid potential clashes (example: modulename="ab" & submodulekey="bc" will have the same derivation key as modulename="a" & submodulekey="bbc").
+We use a null byte (`'\x00'`) to separate module name from the submodule key. This works, because null byte is not a part of a valid module name. Finally, the sub-submodule accounts are created by applying the `Derive` function recursively.
+We could use `Derive` function also in the first step (rather than concatenating module name with zero byte and the submodule key). We decided to do concatenation to avoid one level of derivation and speed up computation.
+
+For backward compatibility with the existing `authtypes.NewModuleAddress`, we add a special case in `Module` function: when no derivation key is provided, we fallback to the "legacy" implementation.
+
+```go
+func Module(moduleName string, derivationKeys ...[]byte) []byte{
+ if len(derivationKeys) == 0 {
+ return authtypes.NewModuleAddress(moduleName) // legacy case
+}
+ submoduleAddress := Hash("module", []byte(moduleName) + 0 + key)
+
+return fold((a, k) => Derive(a, k), subsubKeys, submoduleAddress)
+}
+```
+
+**Example 1** A lending BTC pool address would be:
+
+```go
+btcPool := address.Module("lending", btc.Address())
+```
+
+If we want to create an address for a module account depending on more than one key, we can concatenate them:
+
+```go
+btcAtomAMM := address.Module("amm", btc.Address() + atom.Address())
+```
+
+**Example 2** a smart-contract address could be constructed by:
+
+```go
+smartContractAddr = Module("mySmartContractVM", smartContractsNamespace, smartContractKey)
+
+// which equals to:
+smartContractAddr = Derived(
+ Module("mySmartContractVM", smartContractsNamespace),
+ []{smartContractKey})
+```
+
+### Schema Types
+
+A `typ` parameter used in `Hash` function SHOULD be unique for each account type.
+Since all Cosmos SDK account types are serialized in the state, we propose to use the protobuf message name string.
+
+Example: all public key types have a unique protobuf message type similar to:
+
+```protobuf
+package cosmos.crypto.secp256k1;
+
+message PubKey {
+ bytes key = 1;
+}
+```
+
+All protobuf messages have unique fully qualified names, in this example `cosmos.crypto.secp256k1.PubKey`.
+These names are derived directly from .proto files in a standardized way and used
+in other places such as the type URL in `Any`s. We can easily obtain the name using
+`proto.MessageName(msg)`.
+
+## Consequences
+
+### Backwards Compatibility
+
+This ADR is compatible with what was committed and directly supported in the Cosmos SDK repository.
+
+### Positive
+
+* a simple algorithm for generating addresses for new public keys, complex accounts and modules
+* the algorithm generalizes *native composed keys*
+* increased security and collision resistance of addresses
+* the approach is extensible for future use-cases - one can use other address types, as long as they don't conflict with the address length specified here (20 or 32 bytes).
+* support new account types.
+
+### Negative
+
+* addresses do not communicate key type, a prefixed approach would have done this
+* addresses are 60% longer and will consume more storage space
+* requires a refactor of KVStore store keys to handle variable length addresses
+
+### Neutral
+
+* protobuf message names are used as key type prefixes
+
+## Further Discussions
+
+Some accounts can have a fixed name or may be constructed in other way (eg: modules). We were discussing an idea of an account with a predefined name (eg: `me.regen`), which could be used by institutions.
+Without going into details, these kinds of addresses are compatible with the hash based addresses described here as long as they don't have the same length.
+More specifically, any special account address must not have a length equal to 20 or 32 bytes.
+
+## Appendix: Consulting session
+
+End of Dec 2020 we had a session with [Alan Szepieniec](https://scholar.google.be/citations?user=4LyZn8oAAAAJ\&hl=en) to consult the approach presented above.
+
+Alan general observations:
+
+* we don’t need 2-preimage resistance
+* we need 32bytes address space for collision resistance
+* when an attacker can control an input for an object with an address then we have a problem with a birthday attack
+* there is an issue with smart-contracts for hashing
+* sha2 mining can be used to break address pre-images
+
+Hashing algorithm
+
+* any attack breaking blake3 will break blake2
+* Alan is pretty confident about the current security analysis of the blake hash algorithm. It was a finalist, and the author is well known in security analysis.
+
+Algorithm:
+
+* Alan recommends to hash the prefix: `address(pub_key) = hash(hash(key_type) + pub_key)[:32]`, main benefits:
+ * we are free to use arbitrarily long prefix names
+ * we still don’t risk collisions
+ * switch tables
+* discussion about penalization -> about adding prefix post hash
+* Aaron asked about post hash prefixes (`address(pub_key) = key_type + hash(pub_key)`) and differences. Alan noted that this approach has longer address space and it’s stronger.
+
+Algorithm for complex / composed keys:
+
+* merging tree like addresses with same algorithm are fine
+
+Module addresses: Should module addresses have different size to differentiate it?
+
+* we will need to set a pre-image prefix for module addresses to keep them in 32-byte space: `hash(hash('module') + module_key)`
+* Aaron observation: we already need to deal with variable length (to not break secp256k1 keys).
+
+Discussion about arithmetic hash functions for ZKP
+
+* Poseidon / Rescue
+* Problem: much bigger risk because we don’t know much techniques and history of crypto-analysis of arithmetic constructions. It’s still a new ground and area of active research.
+
+Post quantum signature size
+
+* Alan suggestion: Falcon: speed / size ration - very good.
+* Aaron - should we think about it?
+ Alan: based on early extrapolation this thing will be able to break EC cryptography in 2050. But that’s a lot of uncertainty. But there is magic happening with recursion / linking / simulation and that can speed up the progress.
+
+Other ideas
+
+* Let’s say we use same key and two different address algorithms for 2 different use cases. Is it still safe to use it? Alan: if we want to hide the public key (which is not our use case), then it’s less secure but there are fixes.
+
+### References
+
+* [Notes](https://hackmd.io/_NGWI4xZSbKzj1BkCqyZMw)
diff --git a/sdk/next/build/architecture/adr-029-fee-grant-module.mdx b/sdk/next/build/architecture/adr-029-fee-grant-module.mdx
new file mode 100644
index 000000000..7358777c9
--- /dev/null
+++ b/sdk/next/build/architecture/adr-029-fee-grant-module.mdx
@@ -0,0 +1,161 @@
+---
+title: 'ADR 029: Fee Grant Module'
+description: >-
+ 2020/08/18: Initial Draft 2021/05/05: Removed height based expiration support
+ and simplified naming.
+---
+
+## Changelog
+
+* 2020/08/18: Initial Draft
+* 2021/05/05: Removed height based expiration support and simplified naming.
+
+## Status
+
+Accepted
+
+## Context
+
+In order to make blockchain transactions, the signing account must possess a sufficient balance of the right denomination
+in order to pay fees. There are classes of transactions where needing to maintain a wallet with sufficient fees is a
+barrier to adoption.
+
+For instance, when proper permissions are setup, someone may temporarily delegate the ability to vote on proposals to
+a "burner" account that is stored on a mobile phone with only minimal security.
+
+Other use cases include workers tracking items in a supply chain or farmers submitting field data for analytics
+or compliance purposes.
+
+For all of these use cases, UX would be significantly enhanced by obviating the need for these accounts to always
+maintain the appropriate fee balance. This is especially true if we wanted to achieve enterprise adoption for something
+like supply chain tracking.
+
+While one solution would be to have a service that fills up these accounts automatically with the appropriate fees, a better UX
+would be provided by allowing these accounts to pull from a common fee pool account with proper spending limits.
+A single pool would reduce the churn of making lots of small "fill up" transactions and also more effectively leverages
+the resources of the organization setting up the pool.
+
+## Decision
+
+As a solution we propose a module, `x/feegrant` which allows one account, the "granter" to grant another account, the "grantee"
+an allowance to spend the granter's account balance for fees within certain well-defined limits.
+
+Fee allowances are defined by the extensible `FeeAllowanceI` interface:
+
+```go expandable
+type FeeAllowanceI {
+ // Accept can use fee payment requested as well as timestamp of the current block
+ // to determine whether or not to process this. This is checked in
+ // Keeper.UseGrantedFees and the return values should match how it is handled there.
+ //
+ // If it returns an error, the fee payment is rejected, otherwise it is accepted.
+ // The FeeAllowance implementation is expected to update its internal state
+ // and will be saved again after an acceptance.
+ //
+ // If remove is true (regardless of the error), the FeeAllowance will be deleted from storage
+ // (eg. when it is used up). (See call to RevokeFeeAllowance in Keeper.UseGrantedFees)
+
+Accept(ctx sdk.Context, fee sdk.Coins, msgs []sdk.Msg) (remove bool, err error)
+
+ // ValidateBasic should evaluate this FeeAllowance for internal consistency.
+ // Don't allow negative amounts, or negative periods for example.
+ ValidateBasic()
+
+error
+}
+```
+
+Two basic fee allowance types, `BasicAllowance` and `PeriodicAllowance` are defined to support known use cases:
+
+```protobuf expandable
+// BasicAllowance implements FeeAllowanceI with a one-time grant of tokens
+// that optionally expires. The delegatee can use up to SpendLimit to cover fees.
+message BasicAllowance {
+ // spend_limit specifies the maximum amount of tokens that can be spent
+ // by this allowance and will be updated as tokens are spent. If it is
+ // empty, there is no spend limit and any amount of coins can be spent.
+ repeated cosmos_sdk.v1.Coin spend_limit = 1;
+
+ // expiration specifies an optional time when this allowance expires
+ google.protobuf.Timestamp expiration = 2;
+}
+
+// PeriodicAllowance extends FeeAllowanceI to allow for both a maximum cap,
+// as well as a limit per time period.
+message PeriodicAllowance {
+ BasicAllowance basic = 1;
+
+ // period specifies the time duration in which period_spend_limit coins can
+ // be spent before that allowance is reset
+ google.protobuf.Duration period = 2;
+
+ // period_spend_limit specifies the maximum number of coins that can be spent
+ // in the period
+ repeated cosmos_sdk.v1.Coin period_spend_limit = 3;
+
+ // period_can_spend is the number of coins left to be spent before the period_reset time
+ repeated cosmos_sdk.v1.Coin period_can_spend = 4;
+
+ // period_reset is the time at which this period resets and a new one begins,
+ // it is calculated from the start time of the first transaction after the
+ // last period ended
+ google.protobuf.Timestamp period_reset = 5;
+}
+
+```
+
+Allowances can be granted and revoked using `MsgGrantAllowance` and `MsgRevokeAllowance`:
+
+```protobuf expandable
+// MsgGrantAllowance adds permission for Grantee to spend up to Allowance
+// of fees from the account of Granter.
+message MsgGrantAllowance {
+ string granter = 1;
+ string grantee = 2;
+ google.protobuf.Any allowance = 3;
+ }
+
+ // MsgRevokeAllowance removes any existing FeeAllowance from Granter to Grantee.
+ message MsgRevokeAllowance {
+ string granter = 1;
+ string grantee = 2;
+ }
+```
+
+In order to use allowances in transactions, we add a new field `granter` to the transaction `Fee` type:
+
+```protobuf
+package cosmos.tx.v1beta1;
+
+message Fee {
+ repeated cosmos.base.v1beta1.Coin amount = 1;
+ uint64 gas_limit = 2;
+ string payer = 3;
+ string granter = 4;
+}
+```
+
+`granter` must either be left empty or must correspond to an account which has granted
+a fee allowance to fee payer (either the first signer or the value of the `payer` field).
+
+A new `AnteDecorator` named `DeductGrantedFeeDecorator` will be created in order to process transactions with `fee_payer`
+set and correctly deduct fees based on fee allowances.
+
+## Consequences
+
+### Positive
+
+* improved UX for use cases where it is cumbersome to maintain an account balance just for fees
+
+### Negative
+
+### Neutral
+
+* a new field must be added to the transaction `Fee` message and a new `AnteDecorator` must be
+ created to use it
+
+## References
+
+* Blog article describing initial work: [Link](https://medium.com/regen-network/hacking-the-cosmos-cosmwasm-and-key-management-a08b9f561d1b)
+* Initial public specification: [Link](https://gist.github.com/aaronc/b60628017352df5983791cad30babe56)
+* Original subkeys proposal from B-harvest which influenced this design: [Link](https://github.com/cosmos/cosmos-sdk/issues/4480)
diff --git a/sdk/next/build/architecture/adr-030-authz-module.mdx b/sdk/next/build/architecture/adr-030-authz-module.mdx
new file mode 100644
index 000000000..1c328b39b
--- /dev/null
+++ b/sdk/next/build/architecture/adr-030-authz-module.mdx
@@ -0,0 +1,287 @@
+---
+title: 'ADR 030: Authorization Module'
+---
+
+## Changelog
+
+* 2019-11-06: Initial Draft
+* 2020-10-12: Updated Draft
+* 2020-11-13: Accepted
+* 2020-05-06: proto API updates, use `sdk.Msg` instead of `sdk.ServiceMsg` (the latter concept was removed from Cosmos SDK)
+* 2022-04-20: Updated the `SendAuthorization` proto docs to clarify the `SpendLimit` is a required field. (Generic authorization can be used with bank msg type url to create limit less bank authorization)
+
+## Status
+
+Accepted
+
+## Abstract
+
+This ADR defines the `x/authz` module which allows accounts to grant authorizations to perform actions
+on behalf of that account to other accounts.
+
+## Context
+
+The concrete use cases which motivated this module include:
+
+* the desire to delegate the ability to vote on proposals to other accounts besides the account which one has
+ delegated stake
+* "sub-keys" functionality, as originally proposed in [#4480](https://github.com/cosmos/cosmos-sdk/issues/4480) which
+ is a term used to describe the functionality provided by this module together with
+ the `fee_grant` module from [ADR 029](/sdk/v0.50/build/architecture/adr-029-fee-grant-module) and the [group module](https://github.com/cosmos/cosmos-sdk/tree/main/x/group).
+
+The "sub-keys" functionality roughly refers to the ability for one account to grant some subset of its capabilities to
+other accounts with possibly less robust, but easier to use security measures. For instance, a master account representing
+an organization could grant the ability to spend small amounts of the organization's funds to individual employee accounts.
+Or an individual (or group) with a multisig wallet could grant the ability to vote on proposals to any one of the member
+keys.
+
+The current implementation is based on work done by the [Gaian's team at Hackatom Berlin 2019](https://github.com/cosmos-gaians/cosmos-sdk/tree/hackatom/x/delegation).
+
+## Decision
+
+We will create a module named `authz` which provides functionality for
+granting arbitrary privileges from one account (the *granter*) to another account (the *grantee*). Authorizations
+must be granted for a particular `Msg` service methods one by one using an implementation
+of `Authorization` interface.
+
+### Types
+
+Authorizations determine exactly what privileges are granted. They are extensible
+and can be defined for any `Msg` service method even outside of the module where
+the `Msg` method is defined. `Authorization`s reference `Msg`s using their TypeURL.
+
+#### Authorization
+
+```go expandable
+type Authorization interface {
+ proto.Message
+
+ // MsgTypeURL returns the fully-qualified Msg TypeURL (as described in ADR 020),
+ // which will process and accept or reject a request.
+ MsgTypeURL()
+
+string
+
+ // Accept determines whether this grant permits the provided sdk.Msg to be performed, and if
+ // so provides an upgraded authorization instance.
+ Accept(ctx sdk.Context, msg sdk.Msg) (AcceptResponse, error)
+
+ // ValidateBasic does a simple validation check that
+ // doesn't require access to any other information.
+ ValidateBasic()
+
+error
+}
+
+// AcceptResponse instruments the controller of an authz message if the request is accepted
+// and if it should be updated or deleted.
+type AcceptResponse struct {
+ // If Accept=true, the controller can accept an authorization and handle the update.
+ Accept bool
+ // If Delete=true, the controller must delete the authorization object and release
+ // storage resources.
+ Delete bool
+ // Controller, who is calling Authorization.Accept must check if `Updated != nil`. If yes,
+ // it must use the updated version and handle the update on the storage level.
+ Updated Authorization
+}
+```
+
+For example a `SendAuthorization` like this is defined for `MsgSend` that takes
+a `SpendLimit` and updates it down to zero:
+
+```go expandable
+type SendAuthorization struct {
+ // SpendLimit specifies the maximum amount of tokens that can be spent
+ // by this authorization and will be updated as tokens are spent. This field is required. (Generic authorization
+ // can be used with bank msg type url to create limit less bank authorization).
+ SpendLimit sdk.Coins
+}
+
+func (a SendAuthorization)
+
+MsgTypeURL()
+
+string {
+ return sdk.MsgTypeURL(&MsgSend{
+})
+}
+
+func (a SendAuthorization)
+
+Accept(ctx sdk.Context, msg sdk.Msg) (authz.AcceptResponse, error) {
+ mSend, ok := msg.(*MsgSend)
+ if !ok {
+ return authz.AcceptResponse{
+}, sdkerrors.ErrInvalidType.Wrap("type mismatch")
+}
+
+limitLeft, isNegative := a.SpendLimit.SafeSub(mSend.Amount)
+ if isNegative {
+ return authz.AcceptResponse{
+}, sdkerrors.ErrInsufficientFunds.Wrapf("requested amount is more than spend limit")
+}
+ if limitLeft.IsZero() {
+ return authz.AcceptResponse{
+ Accept: true,
+ Delete: true
+}, nil
+}
+
+return authz.AcceptResponse{
+ Accept: true,
+ Delete: false,
+ Updated: &SendAuthorization{
+ SpendLimit: limitLeft
+}}, nil
+}
+```
+
+A different type of capability for `MsgSend` could be implemented
+using the `Authorization` interface with no need to change the underlying
+`bank` module.
+
+##### Small notes on `AcceptResponse`
+
+* The `AcceptResponse.Accept` field will be set to `true` if the authorization is accepted.
+ However, if it is rejected, the function `Accept` will raise an error (without setting `AcceptResponse.Accept` to `false`).
+
+* The `AcceptResponse.Updated` field will be set to a non-nil value only if there is a real change to the authorization.
+ If authorization remains the same (as is, for instance, always the case for a [`GenericAuthorization`](#genericauthorization)),
+ the field will be `nil`.
+
+### `Msg` Service
+
+```protobuf expandable
+service Msg {
+ // Grant grants the provided authorization to the grantee on the granter's
+ // account with the provided expiration time.
+ rpc Grant(MsgGrant) returns (MsgGrantResponse);
+
+ // Exec attempts to execute the provided messages using
+ // authorizations granted to the grantee. Each message should have only
+ // one signer corresponding to the granter of the authorization.
+ rpc Exec(MsgExec) returns (MsgExecResponse);
+
+ // Revoke revokes any authorization corresponding to the provided method name on the
+ // granter's account that has been granted to the grantee.
+ rpc Revoke(MsgRevoke) returns (MsgRevokeResponse);
+}
+
+// Grant gives permissions to execute
+// the provided method with expiration time.
+message Grant {
+ google.protobuf.Any authorization = 1 [(cosmos_proto.accepts_interface) = "cosmos.authz.v1beta1.Authorization"];
+ google.protobuf.Timestamp expiration = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+}
+
+message MsgGrant {
+ string granter = 1;
+ string grantee = 2;
+
+ Grant grant = 3 [(gogoproto.nullable) = false];
+}
+
+message MsgExecResponse {
+ cosmos.base.abci.v1beta1.Result result = 1;
+}
+
+message MsgExec {
+ string grantee = 1;
+ // Authorization Msg requests to execute. Each msg must implement Authorization interface
+ repeated google.protobuf.Any msgs = 2 [(cosmos_proto.accepts_interface) = "cosmos.base.v1beta1.Msg"];
+}
+```
+
+### Router Middleware
+
+The `authz` `Keeper` will expose a `DispatchActions` method which allows other modules to send `Msg`s
+to the router based on `Authorization` grants:
+
+```go
+type Keeper interface {
+ // DispatchActions routes the provided msgs to their respective handlers if the grantee was granted an authorization
+ // to send those messages by the first (and only)
+
+signer of each msg.
+ DispatchActions(ctx sdk.Context, grantee sdk.AccAddress, msgs []sdk.Msg)
+
+sdk.Result
+}
+```
+
+### CLI
+
+#### `tx exec` Method
+
+When a CLI user wants to run a transaction on behalf of another account using `MsgExec`, they
+can use the `exec` method. For instance `gaiacli tx gov vote 1 yes --from <granter> --generate-only | gaiacli tx authz exec --send-as <grantee> --from <grantee>`
+would send a transaction like this:
+
+```go
+MsgExec {
+ Grantee: mykey,
+ Msgs: []sdk.Msg{
+ MsgVote {
+ ProposalID: 1,
+ Voter: cosmos3thsdgh983egh823
+ Option: Yes
+}
+
+}
+}
+```
+
+#### `tx grant <grantee> <authorization> --from <granter>`
+
+This CLI command will send a `MsgGrant` transaction. `authorization` should be encoded as
+JSON on the CLI.
+
+#### `tx revoke <grantee> <msg-type-url> --from <granter>`
+
+This CLI command will send a `MsgRevoke` transaction.
+
+### Built-in Authorizations
+
+#### `SendAuthorization`
+
+```protobuf
+// SendAuthorization allows the grantee to spend up to spend_limit coins from
+// the granter's account.
+message SendAuthorization {
+ repeated cosmos.base.v1beta1.Coin spend_limit = 1;
+}
+```
+
+#### `GenericAuthorization`
+
+```protobuf
+// GenericAuthorization gives the grantee unrestricted permissions to execute
+// the provided method on behalf of the granter's account.
+message GenericAuthorization {
+ option (cosmos_proto.implements_interface) = "Authorization";
+
+ // Msg, identified by its type URL, to grant unrestricted permissions to execute
+ string msg = 1;
+}
+```
+
+## Consequences
+
+### Positive
+
+* Users will be able to authorize arbitrary actions on behalf of their accounts to other
+ users, improving key management for many use cases
+* The solution is more generic than previously considered approaches and the
+ `Authorization` interface approach can be extended to cover other use cases by
+ SDK users
+
+### Negative
+
+### Neutral
+
+## References
+
+* Initial Hackatom implementation: [Link](https://github.com/cosmos-gaians/cosmos-sdk/tree/hackatom/x/delegation)
+* Post-Hackatom spec: [Link](https://gist.github.com/aaronc/b60628017352df5983791cad30babe56#delegation-module)
+* B-Harvest subkeys spec: [Link](https://github.com/cosmos/cosmos-sdk/issues/4480)
diff --git a/sdk/next/build/architecture/adr-031-msg-service.mdx b/sdk/next/build/architecture/adr-031-msg-service.mdx
new file mode 100644
index 000000000..1e9bdb4eb
--- /dev/null
+++ b/sdk/next/build/architecture/adr-031-msg-service.mdx
@@ -0,0 +1,217 @@
+---
+title: 'ADR 031: Protobuf Msg Services'
+description: >-
+ 2020-10-05: Initial Draft 2021-04-21: Remove ServiceMsgs to follow Protobuf
+ Any's spec, see #9063.
+---
+
+## Changelog
+
+* 2020-10-05: Initial Draft
+* 2021-04-21: Remove `ServiceMsg`s to follow Protobuf `Any`'s spec, see [#9063](https://github.com/cosmos/cosmos-sdk/issues/9063).
+
+## Status
+
+Accepted
+
+## Abstract
+
+We want to leverage protobuf `service` definitions for defining `Msg`s which will give us significant developer UX
+improvements in terms of the code that is generated and the fact that return types will now be well defined.
+
+## Context
+
+Currently `Msg` handlers in the Cosmos SDK do have return values that are placed in the `data` field of the response.
+These return values, however, are not specified anywhere except in the golang handler code.
+
+In early conversations [it was proposed](https://docs.google.com/document/d/1eEgYgvgZqLE45vETjhwIw4VOqK-5hwQtZtjVbiXnIGc/edit)
+that `Msg` return types be captured using a protobuf extension field, ex:
+
+```protobuf
+package cosmos.gov;
+
+message MsgSubmitProposal
+ option (cosmos_proto.msg_return) = “uint64”;
+ string delegator_address = 1;
+ string validator_address = 2;
+ repeated sdk.Coin amount = 3;
+}
+```
+
+This was never adopted, however.
+
+Having a well-specified return value for `Msg`s would improve client UX. For instance,
+in `x/gov`, `MsgSubmitProposal` returns the proposal ID as a big-endian `uint64`.
+This isn’t really documented anywhere and clients would need to know the internals
+of the Cosmos SDK to parse that value and return it to users.
+
+Also, there may be cases where we want to use these return values programmatically.
+For instance, [Link](https://github.com/cosmos/cosmos-sdk/issues/7093) proposes a method for
+doing inter-module Ocaps using the `Msg` router. A well-defined return type would
+improve the developer UX for this approach.
+
+In addition, handler registration of `Msg` types tends to add a bit of
+boilerplate on top of keepers and is usually done through manual type switches.
+This isn't necessarily bad, but it does add overhead to creating modules.
+
+## Decision
+
+We decide to use protobuf `service` definitions for defining `Msg`s as well as
+the code generated by them as a replacement for `Msg` handlers.
+
+Below we define how this will look for the `SubmitProposal` message from `x/gov` module.
+We start with a `Msg` `service` definition:
+
+```protobuf expandable
+package cosmos.gov;
+
+service Msg {
+ rpc SubmitProposal(MsgSubmitProposal) returns (MsgSubmitProposalResponse);
+}
+
+// Note that for backwards compatibility this uses MsgSubmitProposal as the request
+// type instead of the more canonical MsgSubmitProposalRequest
+message MsgSubmitProposal {
+ google.protobuf.Any content = 1;
+ string proposer = 2;
+}
+
+message MsgSubmitProposalResponse {
+ uint64 proposal_id;
+}
+```
+
+While this is most commonly used for gRPC, overloading protobuf `service` definitions like this does not violate
+the intent of the [protobuf spec](https://developers.google.com/protocol-buffers/docs/proto3#services) which says:
+
+> If you don’t want to use gRPC, it’s also possible to use protocol buffers with your own RPC implementation.
+
+With this approach, we would get an auto-generated `MsgServer` interface:
+
+In addition to clearly specifying return types, this has the benefit of generating client and server code. On the server
+side, this is almost like an automatically generated keeper method and could maybe be used instead of keepers eventually
+(see [#7093](https://github.com/cosmos/cosmos-sdk/issues/7093)):
+
+```go
+package gov
+
+type MsgServer interface {
+ SubmitProposal(context.Context, *MsgSubmitProposal) (*MsgSubmitProposalResponse, error)
+}
+```
+
+On the client side, developers could take advantage of this by creating RPC implementations that encapsulate transaction
+logic. Protobuf libraries that use asynchronous callbacks, like [protobuf.js](https://github.com/protobufjs/protobuf.js#using-services)
+could use this to register callbacks for specific messages even for transactions that include multiple `Msg`s.
+
+Each `Msg` service method should have exactly one request parameter: its corresponding `Msg` type. For example, the `Msg` service method `/cosmos.gov.v1beta1.Msg/SubmitProposal` above has exactly one request parameter, namely the `Msg` type `/cosmos.gov.v1beta1.MsgSubmitProposal`. It is important the reader understands clearly the nomenclature difference between a `Msg` service (a Protobuf service) and a `Msg` type (a Protobuf message), and the differences in their fully-qualified name.
+
+This convention has been decided over the more canonical `Msg...Request` names mainly for backwards compatibility, but also for better readability in `TxBody.messages` (see [Encoding section](#encoding) below): transactions containing `/cosmos.gov.MsgSubmitProposal` read better than those containing `/cosmos.gov.v1beta1.MsgSubmitProposalRequest`.
+
+One consequence of this convention is that each `Msg` type can be the request parameter of only one `Msg` service method. However, we consider this limitation a good practice in explicitness.
+
+### Encoding
+
+Encoding of transactions generated with `Msg` services do not differ from current Protobuf transaction encoding as defined in [ADR-020](/sdk/v0.50/build/architecture/adr-020-protobuf-transaction-encoding). We are encoding `Msg` types (which are exactly `Msg` service methods' request parameters) as `Any` in `Tx`s which involves packing the
+binary-encoded `Msg` with its type URL.
+
+### Decoding
+
+Since `Msg` types are packed into `Any`, decoding transactions messages are done by unpacking `Any`s into `Msg` types. For more information, please refer to [ADR-020](/sdk/v0.50/build/architecture/adr-020-protobuf-transaction-encoding#transactions).
+
+### Routing
+
+We propose to add a `msg_service_router` in BaseApp. This router is a key/value map which maps `Msg` types' `type_url`s to their corresponding `Msg` service method handler. Since there is a 1-to-1 mapping between `Msg` types and `Msg` service method, the `msg_service_router` has exactly one entry per `Msg` service method.
+
+When a transaction is processed by BaseApp (in CheckTx or in DeliverTx), its `TxBody.messages` are decoded as `Msg`s. Each `Msg`'s `type_url` is matched against an entry in the `msg_service_router`, and the respective `Msg` service method handler is called.
+
+For backward compatibility, the old handlers are not removed yet. If BaseApp receives a legacy `Msg` with no corresponding entry in the `msg_service_router`, it will be routed via its legacy `Route()` method into the legacy handler.
+
+### Module Configuration
+
+In [ADR 021](/sdk/v0.50/build/architecture/adr-021-protobuf-query-encoding), we introduced a method `RegisterQueryService`
+to `AppModule` which allows for modules to register gRPC queriers.
+
+To register `Msg` services, we attempt a more extensible approach by converting `RegisterQueryService`
+to a more generic `RegisterServices` method:
+
+```go expandable
+type AppModule interface {
+ RegisterServices(Configurator)
+ ...
+}
+
+type Configurator interface {
+ QueryServer()
+
+grpc.Server
+ MsgServer()
+
+grpc.Server
+}
+
+// example module:
+func (am AppModule)
+
+RegisterServices(cfg Configurator) {
+ types.RegisterQueryServer(cfg.QueryServer(), keeper)
+
+types.RegisterMsgServer(cfg.MsgServer(), keeper)
+}
+```
+
+The `RegisterServices` method and the `Configurator` interface are intended to
+evolve to satisfy the use cases discussed in [#7093](https://github.com/cosmos/cosmos-sdk/issues/7093)
+and [#7421](https://github.com/cosmos/cosmos-sdk/issues/7421).
+
+When `Msg` services are registered, the framework *should* verify that all `Msg` types
+implement the `sdk.Msg` interface and throw an error during initialization rather
+than later when transactions are processed.
+
+### `Msg` Service Implementation
+
+Just like query services, `Msg` service methods can retrieve the `sdk.Context`
+from the `context.Context` parameter method using the `sdk.UnwrapSDKContext`
+method:
+
+```go
+package gov
+
+func (k Keeper)
+
+SubmitProposal(goCtx context.Context, params *types.MsgSubmitProposal) (*MsgSubmitProposalResponse, error) {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ ...
+}
+```
+
+The `sdk.Context` should have an `EventManager` already attached by BaseApp's `msg_service_router`.
+
+Separate handler definition is no longer needed with this approach.
+
+## Consequences
+
+This design changes how a module functionality is exposed and accessed. It deprecates the existing `Handler` interface and `AppModule.Route` in favor of [Protocol Buffer Services](https://developers.google.com/protocol-buffers/docs/proto3#services) and Service Routing described above. This dramatically simplifies the code. We don't need to create handlers and keepers any more. Use of Protocol Buffer auto-generated clients clearly separates the communication interfaces between the module and a modules user. The control logic (aka handlers and keepers) is not exposed any more. A module interface can be seen as a black box accessible through a client API. It's worth noting that the client interfaces are also generated by Protocol Buffers.
+
+This also allows us to change how we perform functional tests. Instead of mocking AppModules and Router, we will mock a client (server will stay hidden). More specifically: we will never mock `moduleA.MsgServer` in `moduleB`, but rather `moduleA.MsgClient`. One can think about it as working with external services (eg DBs, or online servers...). We assume that the transmission between clients and servers is correctly handled by generated Protocol Buffers.
+
+Finally, closing a module to client API opens desirable OCAP patterns discussed in ADR-033. Since server implementation and interface is hidden, nobody can hold "keepers"/servers and will be forced to rely on the client interface, which will drive developers for correct encapsulation and software engineering patterns.
+
+### Pros
+
+* communicates return type clearly
+* manual handler registration and return type marshaling is no longer needed, just implement the interface and register it
+* communication interface is automatically generated, the developer can now focus only on the state transition methods - this would improve the UX of [#7093](https://github.com/cosmos/cosmos-sdk/issues/7093) approach (1) if we chose to adopt that
+* generated client code could be useful for clients and tests
+* dramatically reduces and simplifies the code
+
+### Cons
+
+* using `service` definitions outside the context of gRPC could be confusing (but doesn’t violate the proto3 spec)
+
+## References
+
+* [Initial Github Issue #7122](https://github.com/cosmos/cosmos-sdk/issues/7122)
+* [proto 3 Language Guide: Defining Services](https://developers.google.com/protocol-buffers/docs/proto3#services)
+* [Initial pre-`Any` `Msg` designs](https://docs.google.com/document/d/1eEgYgvgZqLE45vETjhwIw4VOqK-5hwQtZtjVbiXnIGc)
+* [ADR 020](/sdk/v0.50/build/architecture/adr-020-protobuf-transaction-encoding)
+* [ADR 021](/sdk/v0.50/build/architecture/adr-021-protobuf-query-encoding)
diff --git a/sdk/next/build/architecture/adr-032-typed-events.mdx b/sdk/next/build/architecture/adr-032-typed-events.mdx
new file mode 100644
index 000000000..d72607679
--- /dev/null
+++ b/sdk/next/build/architecture/adr-032-typed-events.mdx
@@ -0,0 +1,351 @@
+---
+title: 'ADR 032: Typed Events'
+description: '28-Sept-2020: Initial Draft'
+---
+
+## Changelog
+
+* 28-Sept-2020: Initial Draft
+
+## Authors
+
+* Anil Kumar (@anilcse)
+* Jack Zampolin (@jackzampolin)
+* Adam Bozanich (@boz)
+
+## Status
+
+Proposed
+
+## Abstract
+
+Currently in the Cosmos SDK, events are defined in the handlers for each message as well as `BeginBlock` and `EndBlock`. Each module doesn't have types defined for each event; they are implemented as `map[string]string`. Above all else this makes these events difficult to consume as it requires a great deal of raw string matching and parsing. This proposal focuses on updating the events to use **typed events** defined in each module such that emitting and subscribing to events will be much easier. This workflow comes from the experience of the Akash Network team.
+
+## Context
+
+Currently in the Cosmos SDK, events are defined in the handlers for each message, meaning each module doesn't have a canonical set of types for each event. Above all else this makes these events difficult to consume as it requires a great deal of raw string matching and parsing. This proposal focuses on updating the events to use **typed events** defined in each module such that emitting and subscribing to events will be much easier. This workflow comes from the experience of the Akash Network team.
+
+[Our platform](http://github.com/ovrclk/akash) requires a number of programmatic on-chain interactions both on the provider (datacenter - to bid on new orders and listen for leases created) and user (application developer - to send the app manifest to the provider) side. In addition the Akash team is now maintaining the IBC [`relayer`](https://github.com/ovrclk/relayer), another very event driven process. In working on these core pieces of infrastructure, and integrating lessons learned from Kubernetes development, our team has developed a standard method for defining and consuming typed events in Cosmos SDK modules. We have found that it is extremely useful in building this type of event driven application.
+
+As the Cosmos SDK gets used more extensively for apps like `peggy`, other peg zones, IBC, DeFi, etc... there will be an exploding demand for event driven applications to support new features desired by users. We propose upstreaming our findings into the Cosmos SDK to enable all Cosmos SDK applications to quickly and easily build event driven apps to aid their core application. Wallets, exchanges, explorers, and defi protocols all stand to benefit from this work.
+
+If this proposal is accepted, users will be able to build event driven Cosmos SDK apps in go by just writing `EventHandler`s for their specific event types and passing them to `EventEmitters` that are defined in the Cosmos SDK.
+
+The end of this proposal contains a detailed example of how to consume events after this refactor.
+
+This proposal is specifically about how to consume these events as a client of the blockchain, not for intermodule communication.
+
+## Decision
+
+**Step-1**: Implement additional functionality in the `types` package: `EmitTypedEvent` and `ParseTypedEvent` functions
+
+```go expandable
+// types/events.go
+
+// EmitTypedEvent takes typed event and emits converting it into sdk.Event
+func (em *EventManager)
+
+EmitTypedEvent(event proto.Message)
+
+error {
+ evtType := proto.MessageName(event)
+
+evtJSON, err := codec.ProtoMarshalJSON(event)
+ if err != nil {
+ return err
+}
+
+var attrMap map[string]json.RawMessage
+ err = json.Unmarshal(evtJSON, &attrMap)
+ if err != nil {
+ return err
+}
+
+var attrs []abci.EventAttribute
+ for k, v := range attrMap {
+ attrs = append(attrs, abci.EventAttribute{
+ Key: []byte(k),
+ Value: v,
+})
+}
+
+em.EmitEvent(Event{
+ Type: evtType,
+ Attributes: attrs,
+})
+
+return nil
+}
+
+// ParseTypedEvent converts abci.Event back to typed event
+func ParseTypedEvent(event abci.Event) (proto.Message, error) {
+ concreteGoType := proto.MessageType(event.Type)
+ if concreteGoType == nil {
+ return nil, fmt.Errorf("failed to retrieve the message of type %q", event.Type)
+}
+
+var value reflect.Value
+ if concreteGoType.Kind() == reflect.Ptr {
+ value = reflect.New(concreteGoType.Elem())
+}
+
+else {
+ value = reflect.Zero(concreteGoType)
+}
+
+protoMsg, ok := value.Interface().(proto.Message)
+ if !ok {
+ return nil, fmt.Errorf("%q does not implement proto.Message", event.Type)
+}
+ attrMap := make(map[string]json.RawMessage)
+ for _, attr := range event.Attributes {
+ attrMap[string(attr.Key)] = attr.Value
+}
+
+attrBytes, err := json.Marshal(attrMap)
+ if err != nil {
+ return nil, err
+}
+
+err = jsonpb.Unmarshal(strings.NewReader(string(attrBytes)), protoMsg)
+ if err != nil {
+ return nil, err
+}
+
+return protoMsg, nil
+}
+```
+
+Here, `EmitTypedEvent` is a method on `EventManager` which takes a typed event as input and applies JSON serialization to it. Then it maps the JSON key/value pairs to `event.Attributes` and emits it in form of `sdk.Event`. `Event.Type` will be the type URL of the proto message.
+
+When we subscribe to emitted events on the CometBFT websocket, they are emitted in the form of an `abci.Event`. `ParseTypedEvent` parses the event back to its original proto message.
+
+**Step-2**: Add proto definitions for typed events for msgs in each module:
+
+For example, let's take `MsgSubmitProposal` of `gov` module and implement this event's type.
+
+```protobuf
+// proto/cosmos/gov/v1beta1/gov.proto
+// Add typed event definition
+
+package cosmos.gov.v1beta1;
+
+message EventSubmitProposal {
+ string from_address = 1;
+ uint64 proposal_id = 2;
+ TextProposal proposal = 3;
+}
+```
+
+**Step-3**: Refactor event emission to use the typed event created and emit using `sdk.EmitTypedEvent`:
+
+```go expandable
+// x/gov/handler.go
+func handleMsgSubmitProposal(ctx sdk.Context, keeper keeper.Keeper, msg types.MsgSubmitProposalI) (*sdk.Result, error) {
+ ...
+ types.Context.EventManager().EmitTypedEvent(
+ &EventSubmitProposal{
+ FromAddress: fromAddress,
+ ProposalId: id,
+ Proposal: proposal,
+},
+ )
+ ...
+}
+```
+
+### How to subscribe to these typed events in `Client`
+
+> NOTE: Full code example below
+
+Users will be able to subscribe using `client.Context.Client.Subscribe` and consume events which are emitted using `EventHandler`s.
+
+Akash Network has built a simple [`pubsub`](https://github.com/ovrclk/akash/blob/90d258caeb933b611d575355b8df281208a214f8/pubsub/bus.go#L20). This can be used to subscribe to `abci.Events` and [publish](https://github.com/ovrclk/akash/blob/90d258caeb933b611d575355b8df281208a214f8/events/publish.go#L21) them as typed events.
+
+Please see the code sample below for more detail on how this flow looks for clients.
+
+## Consequences
+
+### Positive
+
+* Improves consistency of implementation for the events currently in the Cosmos SDK
+* Provides a much more ergonomic way to handle events and facilitates writing event driven applications
+* This implementation will support a middleware ecosystem of `EventHandler`s
+
+### Negative
+
+## Detailed code example of publishing events
+
+This ADR also proposes adding affordances to emit and consume these events. This way developers will only need to write
+`EventHandler`s which define the actions they desire to take.
+
+```go expandable
+// EventEmitter is a type that describes event emitter functions
+// This should be defined in `types/events.go`
+type EventEmitter func(context.Context, client.Context, ...EventHandler)
+
+error
+
+// EventHandler is a type of function that handles events coming out of the event bus
+// This should be defined in `types/events.go`
+type EventHandler func(proto.Message)
+
+error
+
+// Sample use of the functions below
+func main() {
+ ctx, cancel := context.WithCancel(context.Background())
+ if err := TxEmitter(ctx, client.Context{
+}.WithNodeURI("tcp://localhost:26657"), SubmitProposalEventHandler); err != nil {
+ cancel()
+
+panic(err)
+}
+
+return
+}
+
+// SubmitProposalEventHandler is an example of an event handler that prints proposal details
+// when any EventSubmitProposal is emitted.
+func SubmitProposalEventHandler(ev proto.Message) (err error) {
+ switch event := ev.(type) {
+ // Handle governance proposal events creation events
+ case govtypes.EventSubmitProposal:
+ // Users define business logic here e.g.
+ fmt.Println(event.FromAddress, event.ProposalId, event.Proposal)
+
+return nil
+ default:
+ return nil
+}
+}
+
+// TxEmitter is an example of an event emitter that emits just transaction events. This can and
+// should be implemented somewhere in the Cosmos SDK. The Cosmos SDK can include an EventEmitters for tm.event='Tx'
+// and/or tm.event='NewBlock' (the new block events may contain typed events)
+
+func TxEmitter(ctx context.Context, cliCtx client.Context, ehs ...EventHandler) (err error) {
+ // Instantiate and start CometBFT RPC client
+ client, err := cliCtx.GetNode()
+ if err != nil {
+ return err
+}
+ if err = client.Start(); err != nil {
+ return err
+}
+
+ // Start the pubsub bus
+ bus := pubsub.NewBus()
+
+defer bus.Close()
+
+ // Initialize a new error group
+ eg, ctx := errgroup.WithContext(ctx)
+
+ // Publish chain events to the pubsub bus
+ eg.Go(func()
+
+error {
+ return PublishChainTxEvents(ctx, client, bus, simapp.ModuleBasics)
+})
+
+ // Subscribe to the bus events
+ subscriber, err := bus.Subscribe()
+ if err != nil {
+ return err
+}
+
+ // Handle all the events coming out of the bus
+ eg.Go(func()
+
+error {
+ var err error
+ for {
+ select {
+ case <-ctx.Done():
+ return nil
+ case <-subscriber.Done():
+ return nil
+ case ev := <-subscriber.Events():
+ for _, eh := range ehs {
+ if err = eh(ev); err != nil {
+ break
+}
+
+}
+
+}
+
+}
+
+return nil
+})
+
+return eg.Wait()
+}
+
+// PublishChainTxEvents events using cmtclient. Waits on context shutdown signals to exit.
+func PublishChainTxEvents(ctx context.Context, client cmtclient.EventsClient, bus pubsub.Bus, mb module.BasicManager) (err error) {
+ // Subscribe to transaction events
+ txch, err := client.Subscribe(ctx, "txevents", "tm.event='Tx'", 100)
+ if err != nil {
+ return err
+}
+
+ // Unsubscribe from transaction events on function exit
+ defer func() {
+ err = client.UnsubscribeAll(ctx, "txevents")
+}()
+
+ // Use errgroup to manage concurrency
+ g, ctx := errgroup.WithContext(ctx)
+
+ // Publish transaction events in a goroutine
+ g.Go(func()
+
+error {
+ var err error
+ for {
+ select {
+ case <-ctx.Done():
+ break
+ case ed := <-txch:
+ switch evt := ed.Data.(type) {
+ case cmttypes.EventDataTx:
+ if !evt.Result.IsOK() {
+ continue
+}
+ // range over events, parse them using the basic manager and
+ // send them to the pubsub bus
+ for _, abciEv := range evt.Result.Events {
+ typedEvent, err := sdk.ParseTypedEvent(abciEv)
+ if err != nil {
+ return err
+}
+ if err := bus.Publish(typedEvent); err != nil {
+ bus.Close()
+
+return
+}
+
+continue
+}
+
+}
+
+}
+
+}
+
+return err
+})
+
+ // Exit on error or context cancelation
+ return g.Wait()
+}
+```
+
+## References
+
+* [Publish Custom Events via a bus](https://github.com/ovrclk/akash/blob/90d258caeb933b611d575355b8df281208a214f8/events/publish.go#L19-L58)
+* [Consuming the events in `Client`](https://github.com/ovrclk/deploy/blob/bf6c633ab6c68f3026df59efd9982d6ca1bf0561/cmd/event-handlers.go#L57)
diff --git a/sdk/next/build/architecture/adr-033-protobuf-inter-module-comm.mdx b/sdk/next/build/architecture/adr-033-protobuf-inter-module-comm.mdx
new file mode 100644
index 000000000..140803f28
--- /dev/null
+++ b/sdk/next/build/architecture/adr-033-protobuf-inter-module-comm.mdx
@@ -0,0 +1,455 @@
+---
+title: 'ADR 033: Protobuf-based Inter-Module Communication'
+description: '2020-10-05: Initial Draft'
+---
+
+## Changelog
+
+* 2020-10-05: Initial Draft
+
+## Status
+
+Proposed
+
+## Abstract
+
+This ADR introduces a system for permissioned inter-module communication leveraging the protobuf `Query` and `Msg`
+service definitions defined in [ADR 021](/sdk/v0.50/build/architecture/adr-021-protobuf-query-encoding) and
+[ADR 031](/sdk/v0.50/build/architecture/adr-031-msg-service) which provides:
+
+* stable protobuf based module interfaces to potentially later replace the keeper paradigm
+* stronger inter-module object capabilities (OCAPs) guarantees
+* module accounts and sub-account authorization
+
+## Context
+
+In the current Cosmos SDK documentation on the [Object-Capability Model](/sdk/v0.50/learn/advanced/ocap), it is stated that:
+
+> We assume that a thriving ecosystem of Cosmos SDK modules that are easy to compose into a blockchain application will contain faulty or malicious modules.
+
+There is currently not a thriving ecosystem of Cosmos SDK modules. We hypothesize that this is in part due to:
+
+1. lack of a stable v1.0 Cosmos SDK to build modules off of. Module interfaces are changing, sometimes dramatically, from
+ point release to point release, often for good reasons, but this does not create a stable foundation to build on.
+2. lack of a properly implemented object capability or even object-oriented encapsulation system which makes refactors
+ of module keeper interfaces inevitable because the current interfaces are poorly constrained.
+
+### `x/bank` Case Study
+
+Currently the `x/bank` keeper gives pretty much unrestricted access to any module which references it. For instance, the
+`SetBalance` method allows the caller to set the balance of any account to anything, bypassing even proper tracking of supply.
+
+There appears to have been some later attempts to implement some semblance of OCAPs using module-level minting, staking
+and burning permissions. These permissions allow a module to mint, burn or delegate tokens with reference to the module’s
+own account. These permissions are actually stored as a `[]string` array on the `ModuleAccount` type in state.
+
+However, these permissions don’t really do much. They control what modules can be referenced in the `MintCoins`,
+`BurnCoins` and `DelegateCoins***` methods, but for one there is no unique object capability token that controls access —
+just a simple string. So the `x/upgrade` module could mint tokens for the `x/staking` module simply by calling
+`MintCoins(“staking”)`. Furthermore, all modules which have access to these keeper methods, also have access to
+`SetBalance` negating any other attempt at OCAPs and breaking even basic object-oriented encapsulation.
+
+## Decision
+
+Based on [ADR-021](/sdk/v0.50/build/architecture/adr-021-protobuf-query-encoding) and [ADR-031](/sdk/v0.50/build/architecture/adr-031-msg-service), we introduce the
+Inter-Module Communication framework for secure module authorization and OCAPs.
+When implemented, this could also serve as an alternative to the existing paradigm of passing keepers between
+modules. The approach outlined here-in is intended to form the basis of a Cosmos SDK v1.0 that provides the necessary
+stability and encapsulation guarantees that allow a thriving module ecosystem to emerge.
+
+Of particular note — the decision is to *enable* this functionality for modules to adopt at their own discretion.
+Proposals to migrate existing modules to this new paradigm will have to be a separate conversation, potentially
+addressed as amendments to this ADR.
+
+### New "Keeper" Paradigm
+
+In [ADR 021](/sdk/v0.50/build/architecture/adr-021-protobuf-query-encoding), a mechanism for using protobuf service definitions to define queriers
+was introduced and in [ADR 31](/sdk/v0.50/build/architecture/adr-031-msg-service), a mechanism for using protobuf service to define `Msg`s was added.
+Protobuf service definitions generate two golang interfaces representing the client and server sides of a service plus
+some helper code. Here is a minimal example for the bank `cosmos.bank.Msg/Send` message type:
+
+```go
+package bank
+
+type MsgClient interface {
+ Send(context.Context, *MsgSend, opts ...grpc.CallOption) (*MsgSendResponse, error)
+}
+
+type MsgServer interface {
+ Send(context.Context, *MsgSend) (*MsgSendResponse, error)
+}
+```
+
+[ADR 021](/sdk/v0.50/build/architecture/adr-021-protobuf-query-encoding) and [ADR 31](/sdk/v0.50/build/architecture/adr-031-msg-service) specifies how modules can implement the generated `QueryServer`
+and `MsgServer` interfaces as replacements for the legacy queriers and `Msg` handlers respectively.
+
+In this ADR we explain how modules can make queries and send `Msg`s to other modules using the generated `QueryClient`
+and `MsgClient` interfaces and propose this mechanism as a replacement for the existing `Keeper` paradigm. To be clear,
+this ADR does not necessitate the creation of new protobuf definitions or services. Rather, it leverages the same proto
+based service interfaces already used by clients for inter-module communication.
+
+Using this `QueryClient`/`MsgClient` approach has the following key benefits over exposing keepers to external modules:
+
+1. Protobuf types are checked for breaking changes using [buf](https://buf.build/docs/breaking-overview) and because of
+ the way protobuf is designed this will give us strong backwards compatibility guarantees while allowing for forward
+ evolution.
+2. The separation between the client and server interfaces will allow us to insert permission checking code in between
+ the two which checks if one module is authorized to send the specified `Msg` to the other module providing a proper
+ object capability system (see below).
+3. The router for inter-module communication gives us a convenient place to handle rollback of transactions,
+ enabling atomicity of operations ([currently a problem](https://github.com/cosmos/cosmos-sdk/issues/8030)). Any failure within a module-to-module call would result in a failure of the entire
+ transaction
+
+This mechanism has the added benefits of:
+
+* reducing boilerplate through code generation, and
+* allowing for modules in other languages either via a VM like CosmWasm or sub-processes using gRPC
+
+### Inter-module Communication
+
+To use the `Client` generated by the protobuf compiler we need a `grpc.ClientConn` [interface](https://github.com/grpc/grpc-go/blob/v1.49.x/clientconn.go#L441-L450)
+implementation. For this we introduce
+a new type, `ModuleKey`, which implements the `grpc.ClientConn` interface. `ModuleKey` can be thought of as the "private
+key" corresponding to a module account, where authentication is provided through use of a special `Invoker()` function,
+described in more detail below.
+
+Blockchain users (external clients) use their account's private key to sign transactions containing `Msg`s where they are listed as signers (each
+message specifies required signers with `Msg.GetSigner`). The authentication check is performed by the `AnteHandler`.
+
+Here, we extend this process, by allowing modules to be identified in `Msg.GetSigners`. When a module wants to trigger the execution of a `Msg` in another module,
+its `ModuleKey` acts as the sender (through the `ClientConn` interface we describe below) and is set as the sole "signer". It's worth noting
+that we don't use any cryptographic signature in this case.
+For example, module `A` could use its `A.ModuleKey` to create `MsgSend` object for `/cosmos.bank.Msg/Send` transaction. `MsgSend` validation
+will assure that the `from` account (`A.ModuleKey` in this case) is the signer.
+
+Here's an example of a hypothetical module `foo` interacting with `x/bank`:
+
+```go expandable
+package foo
+
+type FooMsgServer {
+ // ...
+
+ bankQuery bank.QueryClient
+ bankMsg bank.MsgClient
+}
+
+func NewFooMsgServer(moduleKey RootModuleKey, ...)
+
+FooMsgServer {
+ // ...
+
+ return FooMsgServer {
+ // ...
+ moduleKey: moduleKey,
+ bankQuery: bank.NewQueryClient(moduleKey),
+ bankMsg: bank.NewMsgClient(moduleKey),
+}
+}
+
+func (foo *FooMsgServer)
+
+Bar(ctx context.Context, req *MsgBarRequest) (*MsgBarResponse, error) {
+ balance, err := foo.bankQuery.Balance(&bank.QueryBalanceRequest{
+ Address: fooMsgServer.moduleKey.Address(),
+ Denom: "foo"
+})
+
+ ...
+
+ res, err := foo.bankMsg.Send(ctx, &bank.MsgSendRequest{
+ FromAddress: fooMsgServer.moduleKey.Address(), ...
+})
+
+ ...
+}
+```
+
+This design is also intended to be extensible to cover use cases of more fine grained permissioning like minting by
+denom prefix being restricted to certain modules (as discussed in
+[#7459](https://github.com/cosmos/cosmos-sdk/pull/7459#discussion_r529545528)).
+
+### `ModuleKey`s and `ModuleID`s
+
+A `ModuleKey` can be thought of as a "private key" for a module account and a `ModuleID` can be thought of as the
+corresponding "public key". From the [ADR 028](/sdk/v0.50/build/architecture/adr-028-public-key-addresses), modules can have both a root module account and any number of sub-accounts
+or derived accounts that can be used for different pools (ex. staking pools) or managed accounts (ex. group
+accounts). We can also think of module sub-accounts as similar to derived keys - there is a root key and then some
+derivation path. `ModuleID` is a simple struct which contains the module name and optional "derivation" path,
+and forms its address based on the `AddressHash` method from [the ADR-028](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-028-public-key-addresses.md):
+
+```go
+type ModuleID struct {
+ ModuleName string
+ Path []byte
+}
+
+func (key ModuleID)
+
+Address() []byte {
+ return AddressHash(key.ModuleName, key.Path)
+}
+```
+
+In addition to being able to generate a `ModuleID` and address, a `ModuleKey` contains a special function called
+`Invoker` which is the key to safe inter-module access. The `Invoker` creates an `InvokeFn` closure which is used as an `Invoke` method in
+the `grpc.ClientConn` interface and under the hood is able to route messages to the appropriate `Msg` and `Query` handlers
+performing appropriate security checks on `Msg`s. This allows for even safer inter-module access than keepers, whose
+private member variables could be manipulated through reflection. Golang does not support reflection on a function
+closure's captured variables and direct manipulation of memory would be needed for a truly malicious module to bypass
+the `ModuleKey` security.
+
+The two `ModuleKey` types are `RootModuleKey` and `DerivedModuleKey`:
+
+```go expandable
+type Invoker func(callInfo CallInfo)
+
+func(ctx context.Context, request, response interface{
+}, opts ...interface{
+})
+
+error
+
+type CallInfo {
+ Method string
+ Caller ModuleID
+}
+
+type RootModuleKey struct {
+ moduleName string
+ invoker Invoker
+}
+
+func (rm RootModuleKey)
+
+Derive(path []byte)
+
+DerivedModuleKey { /* ... */
+}
+
+type DerivedModuleKey struct {
+ moduleName string
+ path []byte
+ invoker Invoker
+}
+```
+
+A module can get access to a `DerivedModuleKey`, using the `Derive(path []byte)` method on `RootModuleKey` and then
+would use this key to authenticate `Msg`s from a sub-account. Ex:
+
+```go
+package foo
+
+func (fooMsgServer *MsgServer)
+
+Bar(ctx context.Context, req *MsgBar) (*MsgBarResponse, error) {
+ derivedKey := fooMsgServer.moduleKey.Derive(req.SomePath)
+ bankMsgClient := bank.NewMsgClient(derivedKey)
+
+res, err := bankMsgClient.Balance(ctx, &bank.MsgSend{
+ FromAddress: derivedKey.Address(), ...
+})
+ ...
+}
+```
+
+In this way, a module can gain permissioned access to a root account and any number of sub-accounts and send
+authenticated `Msg`s from these accounts. The `Invoker` `callInfo.Caller` parameter is used under the hood to
+distinguish between different module accounts, but either way the function returned by `Invoker` only allows `Msg`s
+from either the root or a derived module account to pass through.
+
+Note that `Invoker` itself returns a function closure based on the `CallInfo` passed in. This will allow client implementations
+in the future that cache the invoke function for each method type avoiding the overhead of hash table lookup.
+This would reduce the performance overhead of this inter-module communication method to the bare minimum required for
+checking permissions.
+
+To re-iterate, the closure only allows access to authorized calls. There is no access to anything else regardless of any
+name impersonation.
+
+Below is a rough sketch of the implementation of `grpc.ClientConn.Invoke` for `RootModuleKey`:
+
+```go
+func (key RootModuleKey)
+
+Invoke(ctx context.Context, method string, args, reply interface{
+}, opts ...grpc.CallOption)
+
+error {
+ f := key.invoker(CallInfo {
+ Method: method,
+ Caller: ModuleID {
+ ModuleName: key.moduleName
+}})
+
+return f(ctx, args, reply)
+}
+```
+
+### `AppModule` Wiring and Requirements
+
+In [ADR 031](/sdk/v0.50/build/architecture/adr-031-msg-service), the `AppModule.RegisterService(Configurator)` method was introduced. To support
+inter-module communication, we extend the `Configurator` interface to pass in the `ModuleKey` and to allow modules to
+specify their dependencies on other modules using `RequireServer()`:
+
+```go
+type Configurator interface {
+ MsgServer()
+
+grpc.Server
+ QueryServer()
+
+grpc.Server
+
+ ModuleKey()
+
+ModuleKey
+ RequireServer(msgServer interface{
+})
+}
+```
+
+The `ModuleKey` is passed to modules in the `RegisterService` method itself so that `RegisterServices` serves as a single
+entry point for configuring module services. This is intended to also have the side-effect of greatly reducing boilerplate in
+`app.go`. For now, `ModuleKey`s will be created based on `AppModuleBasic.Name()`, but a more flexible system may be
+introduced in the future. The `ModuleManager` will handle creation of module accounts behind the scenes.
+
+Because modules do not get direct access to each other anymore, modules may have unfulfilled dependencies. To make sure
+that module dependencies are resolved at startup, the `Configurator.RequireServer` method should be added. The `ModuleManager`
+will make sure that all dependencies declared with `RequireServer` can be resolved before the app starts. An example
+module `foo` could declare its dependency on `x/bank` like this:
+
+```go
+package foo
+
+func (am AppModule)
+
+RegisterServices(cfg Configurator) {
+ cfg.RequireServer((*bank.QueryServer)(nil))
+
+cfg.RequireServer((*bank.MsgServer)(nil))
+}
+```
+
+### Security Considerations
+
+In addition to checking for `ModuleKey` permissions, a few additional security precautions will need to be taken by
+the underlying router infrastructure.
+
+#### Recursion and Re-entry
+
+Recursive or re-entrant method invocations pose a potential security threat. This can be a problem if Module A
+calls Module B and Module B calls module A again in the same call.
+
+One basic way for the router system to deal with this is to maintain a call stack which prevents a module from
+being referenced more than once in the call stack so that there is no re-entry. A `map[string]interface{}` table
+in the router could be used to perform this security check.
+
+#### Queries
+
+Queries in Cosmos SDK are generally un-permissioned so allowing one module to query another module should not pose
+any major security threats assuming basic precautions are taken. The basic precaution that the router system will
+need to take is making sure that the `sdk.Context` passed to query methods does not allow writing to the store. This
+can be done for now with a `CacheMultiStore` as is currently done for `BaseApp` queries.
+
+### Internal Methods
+
+In many cases, we may wish for modules to call methods on other modules which are not exposed to clients at all. For this
+purpose, we add the `InternalServer` method to `Configurator`:
+
+```go
+type Configurator interface {
+ MsgServer()
+
+grpc.Server
+ QueryServer()
+
+grpc.Server
+ InternalServer()
+
+grpc.Server
+}
+```
+
+As an example, x/slashing's Slash must call x/staking's Slash, but we don't want to expose x/staking's Slash to end users
+and clients.
+
+Internal protobuf services will be defined in a corresponding `internal.proto` file in the given module's
+proto package.
+
+Services registered against `InternalServer` will be callable from other modules but not by external clients.
+
+An alternative solution to internal-only methods could involve hooks / plugins as discussed [here](https://github.com/cosmos/cosmos-sdk/pull/7459#issuecomment-733807753).
+A more detailed evaluation of a hooks / plugin system will be addressed later in follow-ups to this ADR or as a separate
+ADR.
+
+### Authorization
+
+By default, the inter-module router requires that messages are sent by the first signer returned by `GetSigners`. The
+inter-module router should also accept authorization middleware such as that provided by [ADR 030](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-030-authz-module.md).
+This middleware will allow accounts to authorize specific module accounts to perform actions on their behalf.
+Authorization middleware should take into account the need to grant certain modules effectively "admin" privileges to
+other modules. This will be addressed in separate ADRs or updates to this ADR.
+
+### Future Work
+
+Other future improvements may include:
+
+* custom code generation that:
+ * simplifies interfaces (ex. generates code with `sdk.Context` instead of `context.Context`)
+ * optimizes inter-module calls - for instance caching resolved methods after first invocation
+* combining `StoreKey`s and `ModuleKey`s into a single interface so that modules have a single OCAPs handle
+* code generation which makes inter-module communication more performant
+* decoupling `ModuleKey` creation from `AppModuleBasic.Name()` so that app's can override root module account names
+* inter-module hooks and plugins
+
+## Alternatives
+
+### MsgServices vs `x/capability`
+
+The `x/capability` module does provide a proper object-capability implementation that can be used by any module in the
+Cosmos SDK and could even be used for inter-module OCAPs as described in [#5931](https://github.com/cosmos/cosmos-sdk/issues/5931).
+
+The advantages of the approach described in this ADR are mostly around how it integrates with other parts of the Cosmos SDK,
+specifically:
+
+* protobuf so that:
+ * code generation of interfaces can be leveraged for a better dev UX
+ * module interfaces are versioned and checked for breakage using [buf](https://docs.buf.build/breaking-overview)
+* sub-module accounts as per ADR 028
+* the general `Msg` passing paradigm and the way signers are specified by `GetSigners`
+
+Also, this is a complete replacement for keepers and could be applied to *all* inter-module communication whereas the
+`x/capability` approach in #5931 would need to be applied method by method.
+
+## Consequences
+
+### Backwards Compatibility
+
+This ADR is intended to provide a pathway to a scenario where there is greater long term compatibility between modules.
+In the short-term, this will likely result in breaking certain `Keeper` interfaces which are too permissive and/or
+replacing `Keeper` interfaces altogether.
+
+### Positive
+
+* an alternative to keepers which can more easily lead to stable inter-module interfaces
+* proper inter-module OCAPs
+* improved module developer DevX, as commented on by several participants on
+ [Architecture Review Call, Dec 3](https://hackmd.io/E0wxxOvRQ5qVmTf6N_k84Q)
+* lays the groundwork for what can be a greatly simplified `app.go`
+* router can be setup to enforce atomic transactions for module-to-module calls
+
+### Negative
+
+* modules which adopt this will need significant refactoring
+
+### Neutral
+
+## Test Cases \[optional]
+
+## References
+
+* [ADR 021](/sdk/v0.50/build/architecture/adr-021-protobuf-query-encoding)
+* [ADR 031](/sdk/v0.50/build/architecture/adr-031-msg-service)
+* [ADR 028](/sdk/v0.50/build/architecture/adr-028-public-key-addresses)
+* [ADR 030 draft](https://github.com/cosmos/cosmos-sdk/pull/7105)
+* [Object-Capability Model](https://docs.cosmos.network/main/core/ocap)
diff --git a/sdk/next/build/architecture/adr-034-account-rekeying.mdx b/sdk/next/build/architecture/adr-034-account-rekeying.mdx
new file mode 100644
index 000000000..61151d065
--- /dev/null
+++ b/sdk/next/build/architecture/adr-034-account-rekeying.mdx
@@ -0,0 +1,79 @@
+---
+title: 'ADR 034: Account Rekeying'
+description: '30-09-2020: Initial Draft'
+---
+
+## Changelog
+
+* 30-09-2020: Initial Draft
+
+## Status
+
+PROPOSED
+
+## Abstract
+
+Account rekeying is a process that allows an account to replace its authentication pubkey with a new one.
+
+## Context
+
+Currently, in the Cosmos SDK, the address of an auth `BaseAccount` is based on the hash of the public key. Once an account is created, the public key for the account is set in stone, and cannot be changed. This can be a problem for users, as key rotation is a useful security practice, but is not possible currently. Furthermore, as multisigs are a type of pubkey, once a multisig for an account is set, it can not be updated. This is problematic, as multisigs are often used by organizations or companies, who may need to change their set of multisig signers for internal reasons.
+
+Transferring all the assets of an account to a new account with the updated pubkey is not sufficient, because some "engagements" of an account are not easily transferable. For example, in staking, to transfer bonded Atoms, an account would have to unbond all delegations and wait the three week unbonding period. Even more significantly, for validator operators, ownership over a validator is not transferrable at all, meaning that the operator key for a validator can never be updated, leading to poor operational security for validators.
+
+## Decision
+
+We propose the addition of a new feature to `x/auth` that allows accounts to update the public key associated with their account, while keeping the address the same.
+
+This is possible because the Cosmos SDK `BaseAccount` stores the public key for an account in state, instead of making the assumption that the public key is included in the transaction (whether explicitly or implicitly through the signature) as in other blockchains such as Bitcoin and Ethereum. Because the public key is stored on chain, it is okay for the public key to not hash to the address of an account, as the address is not pertinent to the signature checking process.
+
+To build this system, we design a new Msg type as follows:
+
+```protobuf
+service Msg {
+ rpc ChangePubKey(MsgChangePubKey) returns (MsgChangePubKeyResponse);
+}
+
+message MsgChangePubKey {
+ string address = 1;
+ google.protobuf.Any pub_key = 2;
+}
+
+message MsgChangePubKeyResponse {}
+```
+
+The MsgChangePubKey transaction needs to be signed by the existing pubkey in state.
+
+Once approved, the handler for this message type, which takes in the AccountKeeper, will update the in-state pubkey for the account and replace it with the pubkey from the Msg.
+
+An account that has had its pubkey changed cannot be automatically pruned from state. This is because if pruned, the original pubkey of the account would be needed to recreate the same address, but the owner of the address may not have the original pubkey anymore. Currently, we do not automatically prune any accounts anyways, but we would like to keep this option open down the road (this is the purpose of account numbers). To resolve this, we charge an additional gas fee for this operation to compensate for this externality (this bound gas amount is configured as parameter `PubKeyChangeCost`). The bonus gas is charged inside the handler, using the `ConsumeGas` function. Furthermore, in the future, we can allow accounts that have rekeyed to manually prune themselves using a new Msg type such as `MsgDeleteAccount`. Manually pruning accounts can give a gas refund as an incentive for performing the action.
+
+```go
+amount := ak.GetParams(ctx).PubKeyChangeCost
+ ctx.GasMeter().ConsumeGas(amount, "pubkey change fee")
+```
+
+Every time a key for an address is changed, we will store a log of this change in the state of the chain, thus creating a stack of all previous keys for an address and the time intervals for which they were active. This allows dapps and clients to easily query past keys for an account which may be useful for features such as verifying timestamped off-chain signed messages.
+
+## Consequences
+
+### Positive
+
+* Will allow users and validator operators to employ better operational security practices with key rotation.
+* Will allow organizations or groups to easily change and add/remove multisig signers.
+
+### Negative
+
+Breaks the current assumed relationship between address and pubkeys as H(pubkey) = address. This has a couple of consequences.
+
+* This makes wallets that support this feature more complicated. For example, if an address on chain was updated, the corresponding key in the CLI wallet also needs to be updated.
+* Cannot automatically prune accounts with 0 balance that have had their pubkey changed.
+
+### Neutral
+
+* While the purpose of this is intended to allow the owner of an account to update to a new pubkey they own, this could technically also be used to transfer ownership of an account to a new owner. For example, this could be used to sell a staked position without unbonding or an account that has vesting tokens. However, the friction of this is very high as this would essentially have to be done as a very specific OTC trade. Furthermore, additional constraints could be added to prevent accounts with Vesting tokens from using this feature.
+* Will require that PubKeys for an account are included in the genesis exports.
+
+## References
+
+* [Link](https://www.algorand.com/resources/blog/announcing-rekeying)
diff --git a/sdk/next/build/architecture/adr-035-rosetta-api-support.mdx b/sdk/next/build/architecture/adr-035-rosetta-api-support.mdx
new file mode 100644
index 000000000..8ea06bf2d
--- /dev/null
+++ b/sdk/next/build/architecture/adr-035-rosetta-api-support.mdx
@@ -0,0 +1,227 @@
+---
+title: 'ADR 035: Rosetta API Support'
+description: >-
+ Jonathan Gimeno (@jgimeno) David Grierson (@senormonito) Alessio Treglia
+ (@alessio) Frojdy Dymylja (@fdymylja)
+---
+
+## Authors
+
+* Jonathan Gimeno (@jgimeno)
+* David Grierson (@senormonito)
+* Alessio Treglia (@alessio)
+* Frojdy Dymylja (@fdymylja)
+
+## Changelog
+
+* 2021-05-12: the external library [cosmos-rosetta-gateway](https://github.com/tendermint/cosmos-rosetta-gateway) has been moved within the Cosmos SDK.
+
+## Context
+
+[Rosetta API](https://www.rosetta-api.org/) is an open-source specification and set of tools developed by Coinbase to
+standardise blockchain interactions.
+
+Through the use of a standard API for integrating blockchain applications it will
+
+* Be easier for a user to interact with a given blockchain
+* Allow exchanges to integrate new blockchains quickly and easily
+* Enable application developers to build cross-blockchain applications such as block explorers, wallets and dApps at
+ considerably lower cost and effort.
+
+## Decision
+
+It is clear that adding Rosetta API support to the Cosmos SDK will bring value to all the developers and
+Cosmos SDK based chains in the ecosystem. How it is implemented is key.
+
+The driving principles of the proposed design are:
+
+1. **Extensibility:** it must be as riskless and painless as possible for application developers to set-up network
+ configurations to expose Rosetta API-compliant services.
+2. **Long term support:** This proposal aims to provide support for all the supported Cosmos SDK release series.
+3. **Cost-efficiency:** Backporting changes to Rosetta API specifications from `master` to the various stable
+ branches of Cosmos SDK is a cost that needs to be reduced.
+
+We will achieve these delivering on these principles by the following:
+
+1. There will be a package `rosetta/lib`
+ for the implementation of the core Rosetta API features, particularly:
+ a. The types and interfaces (`Client`, `OfflineClient`...), this separates design from implementation detail.
+ b. The `Server` functionality as this is independent of the Cosmos SDK version.
+ c. The `Online/OfflineNetwork`, which is not exported, and implements the rosetta API using the `Client` interface to query the node, build tx and so on.
+ d. The `errors` package to extend rosetta errors.
+2. Due to differences between the Cosmos release series, each series will have its own specific implementation of `Client` interface.
+3. There will be two options for starting an API service in applications:
+ a. API shares the application process
+ b. API-specific process.
+
+## Architecture
+
+### The External Repo
+
+This section will describe the proposed external library, including the service implementation, plus the defined types and interfaces.
+
+#### Server
+
+`Server` is a simple `struct` that is started and listens to the port specified in the settings. This is meant to be used across all the Cosmos SDK versions that are actively supported.
+
+The constructor follows:
+
+`func NewServer(settings Settings) (Server, error)`
+
+`Settings`, which are used to construct a new server, are the following:
+
+```go expandable
+// Settings define the rosetta server settings
+type Settings struct {
+ // Network contains the information regarding the network
+ Network *types.NetworkIdentifier
+ // Client is the online API handler
+ Client crgtypes.Client
+ // Listen is the address the handler will listen at
+ Listen string
+ // Offline defines if the rosetta service should be exposed in offline mode
+ Offline bool
+ // Retries is the number of readiness checks that will be attempted when instantiating the handler
+ // valid only for online API
+ Retries int
+ // RetryWait is the time that will be waited between retries
+ RetryWait time.Duration
+}
+```
+
+#### Types
+
+Package types uses a mixture of rosetta types and custom defined type wrappers, that the client must parse and return while executing operations.
+
+##### Interfaces
+
+Every SDK version uses a different format to connect (rpc, gRPC, etc), query and build transactions, we have abstracted this in what is the `Client` interface.
+The client uses rosetta types, while the `Online/OfflineNetwork` takes care of returning correctly parsed rosetta responses and errors.
+
+Each Cosmos SDK release series will have their own `Client` implementations.
+Developers can implement their own custom `Client`s as required.
+
+```go expandable
+// Client defines the API the client implementation should provide.
+type Client interface {
+ // Needed if the client needs to perform some action before connecting.
+ Bootstrap()
+
+error
+ // Ready checks if the servicer constraints for queries are satisfied
+ // for example the node might still not be ready, it's useful in process
+ // when the rosetta instance might come up before the node itself
+ // the servicer must return nil if the node is ready
+ Ready()
+
+error
+
+ // Data API
+
+ // Balances fetches the balance of the given address
+ // if height is not nil, then the balance will be displayed
+ // at the provided height, otherwise last block balance will be returned
+ Balances(ctx context.Context, addr string, height *int64) ([]*types.Amount, error)
+ // BlockByHashAlt gets a block and its transaction at the provided height
+ BlockByHash(ctx context.Context, hash string) (BlockResponse, error)
+ // BlockByHeightAlt gets a block given its height, if height is nil then last block is returned
+ BlockByHeight(ctx context.Context, height *int64) (BlockResponse, error)
+ // BlockTransactionsByHash gets the block, parent block and transactions
+ // given the block hash.
+ BlockTransactionsByHash(ctx context.Context, hash string) (BlockTransactionsResponse, error)
+ // BlockTransactionsByHash gets the block, parent block and transactions
+ // given the block hash.
+ BlockTransactionsByHeight(ctx context.Context, height *int64) (BlockTransactionsResponse, error)
+ // GetTx gets a transaction given its hash
+ GetTx(ctx context.Context, hash string) (*types.Transaction, error)
+ // GetUnconfirmedTx gets an unconfirmed Tx given its hash
+ // NOTE(fdymylja): NOT IMPLEMENTED YET!
+ GetUnconfirmedTx(ctx context.Context, hash string) (*types.Transaction, error)
+ // Mempool returns the list of the current non confirmed transactions
+ Mempool(ctx context.Context) ([]*types.TransactionIdentifier, error)
+ // Peers gets the peers currently connected to the node
+ Peers(ctx context.Context) ([]*types.Peer, error)
+ // Status returns the node status, such as sync data, version etc
+ Status(ctx context.Context) (*types.SyncStatus, error)
+
+ // Construction API
+
+ // PostTx posts txBytes to the node and returns the transaction identifier plus metadata related
+ // to the transaction itself.
+ PostTx(txBytes []byte) (res *types.TransactionIdentifier, meta map[string]interface{
+}, err error)
+ // ConstructionMetadataFromOptions
+ ConstructionMetadataFromOptions(ctx context.Context, options map[string]interface{
+}) (meta map[string]interface{
+}, err error)
+
+OfflineClient
+}
+
+// OfflineClient defines the functionalities supported without having access to the node
+type OfflineClient interface {
+ NetworkInformationProvider
+ // SignedTx returns the signed transaction given the tx bytes (msgs)
+
+plus the signatures
+ SignedTx(ctx context.Context, txBytes []byte, sigs []*types.Signature) (signedTxBytes []byte, err error)
+ // TxOperationsAndSignersAccountIdentifiers returns the operations related to a transaction and the account
+ // identifiers if the transaction is signed
+ TxOperationsAndSignersAccountIdentifiers(signed bool, hexBytes []byte) (ops []*types.Operation, signers []*types.AccountIdentifier, err error)
+ // ConstructionPayload returns the construction payload given the request
+ ConstructionPayload(ctx context.Context, req *types.ConstructionPayloadsRequest) (resp *types.ConstructionPayloadsResponse, err error)
+ // PreprocessOperationsToOptions returns the options given the preprocess operations
+ PreprocessOperationsToOptions(ctx context.Context, req *types.ConstructionPreprocessRequest) (options map[string]interface{
+}, err error)
+ // AccountIdentifierFromPublicKey returns the account identifier given the public key
+ AccountIdentifierFromPublicKey(pubKey *types.PublicKey) (*types.AccountIdentifier, error)
+}
+```
+
+### 2. Cosmos SDK Implementation
+
+The Cosmos SDK implementation, based on version, takes care of satisfying the `Client` interface.
+In Stargate, Launchpad and 0.37, we have introduced the concept of rosetta.Msg, this message is not in the shared repository as the sdk.Msg type differs between Cosmos SDK versions.
+
+The rosetta.Msg interface follows:
+
+```go
+// Msg represents a cosmos-sdk message that can be converted from and to a rosetta operation.
+type Msg interface {
+ sdk.Msg
+ ToOperations(withStatus, hasError bool) []*types.Operation
+ FromOperations(ops []*types.Operation) (sdk.Msg, error)
+}
+```
+
+Hence developers who want to extend the rosetta set of supported operations just need to extend their module's sdk.Msgs with the `ToOperations` and `FromOperations` methods.
+
+### 3. API service invocation
+
+As stated at the start, application developers will have two methods for invocation of the Rosetta API service:
+
+1. Shared process for both application and API
+2. Standalone API service
+
+#### Shared Process (Only Stargate)
+
+Rosetta API service could run within the same execution process as the application. This would be enabled via app.toml settings, and if gRPC is not enabled the rosetta instance would be spun up in offline mode (tx building capabilities only).
+
+#### Separate API service
+
+Client application developers can write a new command to launch a Rosetta API server as a separate process too, using the rosetta command contained in the `/server/rosetta` package. Construction of the command depends on Cosmos SDK version. Examples can be found inside `simd` for stargate, and `contrib/rosetta/simapp` for other release series.
+
+## Status
+
+Proposed
+
+## Consequences
+
+### Positive
+
+* Out-of-the-box Rosetta API support within Cosmos SDK.
+* Blockchain interface standardisation
+
+## References
+
+* [Link](https://www.rosetta-api.org/)
diff --git a/sdk/next/build/architecture/adr-036-arbitrary-signature.mdx b/sdk/next/build/architecture/adr-036-arbitrary-signature.mdx
new file mode 100644
index 000000000..58a72c9bf
--- /dev/null
+++ b/sdk/next/build/architecture/adr-036-arbitrary-signature.mdx
@@ -0,0 +1,135 @@
+---
+title: 'ADR 036: Arbitrary Message Signature Specification'
+description: 28/10/2020 - Initial draft
+---
+
+## Changelog
+
+* 28/10/2020 - Initial draft
+
+## Authors
+
+* Antoine Herzog (@antoineherzog)
+* Zaki Manian (@zmanian)
+* Aleksandr Bezobchuk (alexanderbez) \[1]
+* Frojdi Dymylja (@fdymylja)
+
+## Status
+
+Draft
+
+## Abstract
+
+Currently, in the Cosmos SDK, there is no convention to sign arbitrary messages like on Ethereum. We propose with this specification, for the Cosmos SDK ecosystem, a way to sign and validate off-chain arbitrary messages.
+
+This specification serves the purpose of covering every use case, this means that cosmos-sdk applications developers decide how to serialize and represent `Data` to users.
+
+## Context
+
+Having the ability to sign messages off-chain has proven to be a fundamental aspect of nearly any blockchain. The notion of signing messages off-chain has many added benefits such as saving on computational costs and reducing transaction throughput and overhead. Within the context of the Cosmos, some of the major applications of signing such data includes, but is not limited to, providing a cryptographic secure and verifiable means of proving validator identity and possibly associating it with some other framework or organization. In addition, having the ability to sign Cosmos messages with a Ledger or similar HSM device.
+
+Further context and use cases can be found in the references links.
+
+## Decision
+
+The aim is being able to sign arbitrary messages, even using Ledger or similar HSM devices.
+
+As a result signed messages should look roughly like Cosmos SDK messages but **must not** be a valid on-chain transaction. `chain-id`, `account_number` and `sequence` can all be assigned invalid values.
+
+Cosmos SDK 0.40 also introduces a concept of “auth\_info”; this can specify SIGN\_MODES.
+
+A spec should include an `auth_info` that supports SIGN\_MODE\_DIRECT and SIGN\_MODE\_LEGACY\_AMINO.
+
+Create the `offchain` proto definitions, we extend the auth module with `offchain` package to offer functionalities to verify and sign offline messages.
+
+An offchain transaction follows these rules:
+
+* the memo must be empty
+* nonce, sequence number must be equal to 0
+* chain-id must be equal to “”
+* fee gas must be equal to 0
+* fee amount must be an empty array
+
+Verification of an offchain transaction follows the same rules as an onchain one, except for the spec differences highlighted above.
+
+The first message added to the `offchain` package is `MsgSignData`.
+
+`MsgSignData` allows developers to sign arbitrary bytes valid offchain only. Where `Signer` is the account address of the signer. `Data` is arbitrary bytes which can represent `text`, `files`, `object`s. It is the application developers' decision how `Data` should be deserialized, serialized and the object it can represent in their context.
+
+It is the application developers' decision how `Data` should be treated; by treated we mean the serialization and deserialization process and the Object `Data` should represent.
+
+Proto definition:
+
+```protobuf
+// MsgSignData defines an arbitrary, general-purpose, off-chain message
+message MsgSignData {
+ // Signer is the sdk.AccAddress of the message signer
+ bytes Signer = 1 [(gogoproto.jsontag) = "signer", (gogoproto.casttype) = "github.com/cosmos/cosmos-sdk/types.AccAddress"];
+ // Data represents the raw bytes of the content that is signed (text, json, etc)
+ bytes Data = 2 [(gogoproto.jsontag) = "data"];
+}
+```
+
+Signed MsgSignData json example:
+
+```json expandable
+{
+ "type": "cosmos-sdk/StdTx",
+ "value": {
+ "msg": [
+ {
+ "type": "sign/MsgSignData",
+ "value": {
+ "signer": "cosmos1hftz5ugqmpg9243xeegsqqav62f8hnywsjr4xr",
+ "data": "cmFuZG9t"
+ }
+ }
+ ],
+ "fee": {
+ "amount": [],
+ "gas": "0"
+ },
+ "signatures": [
+ {
+ "pub_key": {
+ "type": "tendermint/PubKeySecp256k1",
+ "value": "AqnDSiRoFmTPfq97xxEb2VkQ/Hm28cPsqsZm9jEVsYK9"
+ },
+ "signature": "8y8i34qJakkjse9pOD2De+dnlc4KvFgh0wQpes4eydN66D9kv7cmCEouRrkka9tlW9cAkIL52ErB+6ye7X5aEg=="
+ }
+ ],
+ "memo": ""
+ }
+}
+```
+
+## Consequences
+
+There is a specification on how messages, that are not meant to be broadcast to a live chain, should be formed.
+
+### Backwards Compatibility
+
+Backwards compatibility is maintained as this is a new message spec definition.
+
+### Positive
+
+* A common format that can be used by multiple applications to sign and verify off-chain messages.
+* The specification is primitive which means it can cover every use case without limiting what is possible to fit inside it.
+* It gives room for other off-chain messages specifications that aim to target more specific and common use cases such as off-chain-based authN/authZ layers \[2].
+
+### Negative
+
+* Current proposal requires a fixed relationship between an account address and a public key.
+* Doesn't work with multisig accounts.
+
+## Further discussion
+
+* Regarding security in `MsgSignData`, the developer using `MsgSignData` is in charge of making the content laying in `Data` non-replayable when, and if, needed.
+* the offchain package will be further extended with extra messages that target specific use cases such as, but not limited to, authentication in applications, payment channels, L2 solutions in general.
+
+## References
+
+1. [Link](https://github.com/cosmos/ics/pull/33)
+2. [Link](https://github.com/cosmos/cosmos-sdk/pull/7727#discussion_r515668204)
+3. [Link](https://github.com/cosmos/cosmos-sdk/pull/7727#issuecomment-722478477)
+4. [Link](https://github.com/cosmos/cosmos-sdk/pull/7727#issuecomment-721062923)
diff --git a/sdk/next/build/architecture/adr-037-gov-split-vote.mdx b/sdk/next/build/architecture/adr-037-gov-split-vote.mdx
new file mode 100644
index 000000000..88cac8578
--- /dev/null
+++ b/sdk/next/build/architecture/adr-037-gov-split-vote.mdx
@@ -0,0 +1,114 @@
+---
+title: 'ADR 037: Governance split votes'
+description: '2020/10/28: Initial draft'
+---
+
+## Changelog
+
+* 2020/10/28: Initial draft
+
+## Status
+
+Accepted
+
+## Abstract
+
+This ADR defines a modification to the governance module that would allow a staker to split their votes into several voting options. For example, it could use 70% of its voting power to vote Yes and 30% of its voting power to vote No.
+
+## Context
+
+Currently, an address can cast a vote with only one option (Yes/No/Abstain/NoWithVeto) and use their full voting power behind that choice.
+
+However, often the entity owning that address might not be a single individual. For example, a company might have different stakeholders who want to vote differently, and so it makes sense to allow them to split their voting power. Another example use case is exchanges. Many centralized exchanges often stake a portion of their users' tokens in their custody. Currently, it is not possible for them to do "passthrough voting" and give their users voting rights over their tokens. However, with this system, exchanges can poll their users for voting preferences, and then vote on-chain proportionally to the results of the poll.
+
+## Decision
+
+We modify the vote structs to be
+
+```go
+type WeightedVoteOption struct {
+ Option string
+ Weight sdk.Dec
+}
+
+type Vote struct {
+ ProposalID int64
+ Voter sdk.Address
+ Options []WeightedVoteOption
+}
+```
+
+And for backwards compatibility, we introduce `MsgVoteWeighted` while keeping `MsgVote`.
+
+```go expandable
+type MsgVote struct {
+ ProposalID int64
+ Voter sdk.Address
+ Option Option
+}
+
+type MsgVoteWeighted struct {
+ ProposalID int64
+ Voter sdk.Address
+ Options []WeightedVoteOption
+}
+```
+
+The `ValidateBasic` of a `MsgVoteWeighted` struct would require that
+
+1. The sum of all the Rates is equal to 1.0
+2. No Option is repeated
+
+The governance tally function will iterate over all the options in a vote and add to the tally the result of the voter's voting power \* the rate for that option.
+
+```go
+tally() {
+ results := map[types.VoteOption]sdk.Dec
+ for _, vote := range votes {
+ for i, weightedOption := range vote.Options {
+ results[weightedOption.Option] += getVotingPower(vote.voter) * weightedOption.Weight
+}
+
+}
+}
+```
+
+The CLI command for creating a multi-option vote would be as such:
+
+```shell
+simd tx gov vote 1 "yes=0.6,no=0.3,abstain=0.05,no_with_veto=0.05" --from mykey
+```
+
+To create a single-option vote a user can do either
+
+```shell
+simd tx gov vote 1 "yes=1" --from mykey
+```
+
+or
+
+```shell
+simd tx gov vote 1 yes --from mykey
+```
+
+to maintain backwards compatibility.
+
+## Consequences
+
+### Backwards Compatibility
+
+* Previous VoteMsg types will remain the same and so clients will not have to update their procedure unless they want to support the WeightedVoteMsg feature.
+* When querying a Vote struct from state, its structure will be different, and so clients wanting to display all voters and their respective votes will have to handle the new format and the fact that a single voter can have split votes.
+* The result of querying the tally function should have the same API for clients.
+
+### Positive
+
+* Can make the voting process more accurate for addresses representing multiple stakeholders, often some of the largest addresses.
+
+### Negative
+
+* Is more complex than simple voting, and so may be harder to explain to users. However, this is mostly mitigated because the feature is opt-in.
+
+### Neutral
+
+* Relatively minor change to governance tally function.
diff --git a/sdk/next/build/architecture/adr-038-state-listening.mdx b/sdk/next/build/architecture/adr-038-state-listening.mdx
new file mode 100644
index 000000000..eb1e42b0e
--- /dev/null
+++ b/sdk/next/build/architecture/adr-038-state-listening.mdx
@@ -0,0 +1,858 @@
+---
+title: 'ADR 038: KVStore state listening'
+---
+
+## Changelog
+
+* 11/23/2020: Initial draft
+* 10/06/2022: Introduce plugin system based on hashicorp/go-plugin
+* 10/14/2022:
+ * Add `ListenCommit`, flatten the state writes in a block to a single batch.
+ * Remove listeners from cache stores, should only listen to `rootmulti.Store`.
+  * Remove `HaltAppOnDeliveryError()`, the errors are propagated by default, the implementations should return nil if they don't want to propagate errors.
+* 26/05/2023: Update with ABCI 2.0
+
+## Status
+
+Proposed
+
+## Abstract
+
+This ADR defines a set of changes to enable listening to state changes of individual KVStores and exposing these data to consumers.
+
+## Context
+
+Currently, KVStore data can be remotely accessed through [Queries](https://github.com/cosmos/cosmos-sdk/blob/master/docs/building-modules/messages-and-queries.md#queries)
+which proceed either through Tendermint and the ABCI, or through the gRPC server.
+In addition to these request/response queries, it would be beneficial to have a means of listening to state changes as they occur in real time.
+
+## Decision
+
+We will modify the `CommitMultiStore` interface and its concrete (`rootmulti`) implementations and introduce a new `listenkv.Store` to allow listening to state changes in underlying KVStores. We don't need to listen to cache stores, because we can't be sure that the writes will be committed eventually, and the writes are duplicated in `rootmulti.Store` eventually, so we should only listen to `rootmulti.Store`.
+We will introduce a plugin system for configuring and running streaming services that write these state changes and their surrounding ABCI message context to different destinations.
+
+### Listening
+
+In a new file, `store/types/listening.go`, we will create a `MemoryListener` struct for streaming out protobuf encoded KV pairs state changes from a KVStore.
+The `MemoryListener` will be used internally by the concrete `rootmulti` implementation to collect state changes from KVStores.
+
+```go expandable
+// MemoryListener listens to the state writes and accumulate the records in memory.
+type MemoryListener struct {
+ stateCache []StoreKVPair
+}
+
+// NewMemoryListener creates a listener that accumulate the state writes in memory.
+func NewMemoryListener() *MemoryListener {
+ return &MemoryListener{
+}
+}
+
+// OnWrite writes state change events to the internal cache
+func (fl *MemoryListener)
+
+OnWrite(storeKey StoreKey, key []byte, value []byte, delete bool) {
+ fl.stateCache = append(fl.stateCache, StoreKVPair{
+ StoreKey: storeKey.Name(),
+ Delete: delete,
+ Key: key,
+ Value: value,
+})
+}
+
+// PopStateCache returns the current state caches and set to nil
+func (fl *MemoryListener)
+
+PopStateCache() []StoreKVPair {
+ res := fl.stateCache
+ fl.stateCache = nil
+ return res
+}
+```
+
+We will also define a protobuf type for the KV pairs. In addition to the key and value fields this message
+will include the StoreKey for the originating KVStore so that we can collect information from separate KVStores and determine the source of each KV pair.
+
+```protobuf
+message StoreKVPair {
+ optional string store_key = 1; // the store key for the KVStore this pair originates from
+ required bool set = 2; // true indicates a set operation, false indicates a delete operation
+ required bytes key = 3;
+ required bytes value = 4;
+}
+```
+
+### ListenKVStore
+
+We will create a new `Store` type `listenkv.Store` that the `rootmulti` store will use to wrap a `KVStore` to enable state listening.
+We will configure the `Store` with a `MemoryListener` which will collect state changes for output to specific destinations.
+
+```go expandable
+// Store implements the KVStore interface with listening enabled.
+// Operations are traced on each core KVStore call and written to any of the
+// underlying listeners with the proper key and operation permissions
+type Store struct {
+ parent types.KVStore
+ listener *types.MemoryListener
+ parentStoreKey types.StoreKey
+}
+
+// NewStore returns a reference to a new traceKVStore given a parent
+// KVStore implementation and a buffered writer.
+func NewStore(parent types.KVStore, psk types.StoreKey, listener *types.MemoryListener) *Store {
+ return &Store{
+ parent: parent, listener: listener, parentStoreKey: psk
+}
+}
+
+// Set implements the KVStore interface. It traces a write operation and
+// delegates the Set call to the parent KVStore.
+func (s *Store)
+
+Set(key []byte, value []byte) {
+ types.AssertValidKey(key)
+
+s.parent.Set(key, value)
+
+s.listener.OnWrite(s.parentStoreKey, key, value, false)
+}
+
+// Delete implements the KVStore interface. It traces a write operation and
+// delegates the Delete call to the parent KVStore.
+func (s *Store)
+
+Delete(key []byte) {
+ s.parent.Delete(key)
+
+s.listener.OnWrite(s.parentStoreKey, key, nil, true)
+}
+```
+
+### MultiStore interface updates
+
+We will update the `CommitMultiStore` interface to allow us to wrap a `MemoryListener` to a specific `KVStore`.
+Note that the `MemoryListener` will be attached internally by the concrete `rootmulti` implementation.
+
+```go
+type CommitMultiStore interface {
+ ...
+
+ // AddListeners adds a listener for the KVStore belonging to the provided StoreKey
+ AddListeners(keys []StoreKey)
+
+ // PopStateCache returns the accumulated state change messages from MemoryListener
+ PopStateCache() []StoreKVPair
+}
+```
+
+### MultiStore implementation updates
+
+We will adjust the `rootmulti` `GetKVStore` method to wrap the returned `KVStore` with a `listenkv.Store` if listening is turned on for that `Store`.
+
+```go expandable
+func (rs *Store)
+
+GetKVStore(key types.StoreKey)
+
+types.KVStore {
+ store := rs.stores[key].(types.KVStore)
+ if rs.TracingEnabled() {
+ store = tracekv.NewStore(store, rs.traceWriter, rs.traceContext)
+}
+ if rs.ListeningEnabled(key) {
+ store = listenkv.NewStore(store, key, rs.listeners[key])
+}
+
+return store
+}
+```
+
+We will implement `AddListeners` to manage KVStore listeners internally and implement `PopStateCache`
+for a means of retrieving the current state.
+
+```go
+// AddListeners adds state change listener for a specific KVStore
+func (rs *Store)
+
+AddListeners(keys []types.StoreKey) {
+ listener := types.NewMemoryListener()
+ for i := range keys {
+ rs.listeners[keys[i]] = listener
+}
+}
+```
+
+```go
+func (rs *Store)
+
+PopStateCache() []types.StoreKVPair {
+ var cache []types.StoreKVPair
+ for _, ls := range rs.listeners {
+ cache = append(cache, ls.PopStateCache()...)
+}
+
+sort.SliceStable(cache, func(i, j int)
+
+bool {
+ return cache[i].StoreKey < cache[j].StoreKey
+})
+
+return cache
+}
+```
+
+We will also adjust the `rootmulti` `CacheMultiStore` and `CacheMultiStoreWithVersion` methods to enable listening in
+the cache layer.
+
+```go expandable
+func (rs *Store)
+
+CacheMultiStore()
+
+types.CacheMultiStore {
+ stores := make(map[types.StoreKey]types.CacheWrapper)
+ for k, v := range rs.stores {
+ store := v.(types.KVStore)
+ // Wire the listenkv.Store to allow listeners to observe the writes from the cache store,
+ // set same listeners on cache store will observe duplicated writes.
+ if rs.ListeningEnabled(k) {
+ store = listenkv.NewStore(store, k, rs.listeners[k])
+}
+
+stores[k] = store
+}
+
+return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.getTracingContext())
+}
+```
+
+```go expandable
+func (rs *Store)
+
+CacheMultiStoreWithVersion(version int64) (types.CacheMultiStore, error) {
+ // ...
+
+ // Wire the listenkv.Store to allow listeners to observe the writes from the cache store,
+ // set same listeners on cache store will observe duplicated writes.
+ if rs.ListeningEnabled(key) {
+ cacheStore = listenkv.NewStore(cacheStore, key, rs.listeners[key])
+}
+
+cachedStores[key] = cacheStore
+}
+
+return cachemulti.NewStore(rs.db, cachedStores, rs.keysByName, rs.traceWriter, rs.getTracingContext()), nil
+}
+```
+
+### Exposing the data
+
+#### Streaming Service
+
+We will introduce a new `ABCIListener` interface that plugs into the BaseApp and relays ABCI requests and responses
+so that the service can group the state changes with the ABCI requests.
+
+```go
+// baseapp/streaming.go
+
+// ABCIListener is the interface that we're exposing as a streaming service.
+type ABCIListener interface {
+ // ListenFinalizeBlock updates the streaming service with the latest FinalizeBlock messages
+ ListenFinalizeBlock(ctx context.Context, req abci.RequestFinalizeBlock, res abci.ResponseFinalizeBlock)
+
+error
+    // ListenCommit updates the streaming service with the latest Commit messages and state changes
+ ListenCommit(ctx context.Context, res abci.ResponseCommit, changeSet []*StoreKVPair)
+
+error
+}
+```
+
+#### BaseApp Registration
+
+We will add a new method to the `BaseApp` to enable the registration of `StreamingService`s:
+
+```go
+// SetStreamingService is used to set a streaming service into the BaseApp hooks and load the listeners into the multistore
+func (app *BaseApp)
+
+SetStreamingService(s ABCIListener) {
+ // register the StreamingService within the BaseApp
+ // BaseApp will pass BeginBlock, DeliverTx, and EndBlock requests and responses to the streaming services to update their ABCI context
+ app.abciListeners = append(app.abciListeners, s)
+}
+```
+
+We will add two new fields to the `BaseApp` struct:
+
+```go expandable
+type BaseApp struct {
+
+ ...
+
+ // abciListenersAsync for determining if abciListeners will run asynchronously.
+ // When abciListenersAsync=false and stopNodeOnABCIListenerErr=false listeners will run synchronized but will not stop the node.
+ // When abciListenersAsync=true stopNodeOnABCIListenerErr will be ignored.
+ abciListenersAsync bool
+
+ // stopNodeOnABCIListenerErr halts the node when ABCI streaming service listening results in an error.
+ // stopNodeOnABCIListenerErr=true must be paired with abciListenersAsync=false.
+ stopNodeOnABCIListenerErr bool
+}
+```
+
+#### ABCI Event Hooks
+
+We will modify the `FinalizeBlock` and `Commit` methods to pass ABCI requests and responses
+to any streaming service hooks registered with the `BaseApp`.
+
+```go expandable
+func (app *BaseApp)
+
+FinalizeBlock(req abci.RequestFinalizeBlock)
+
+abci.ResponseFinalizeBlock {
+ var abciRes abci.ResponseFinalizeBlock
+ defer func() {
+ // call the streaming service hook with the FinalizeBlock messages
+ for _, abciListener := range app.abciListeners {
+ ctx := app.finalizeState.ctx
+ blockHeight := ctx.BlockHeight()
+ if app.abciListenersAsync {
+ go func(req abci.RequestFinalizeBlock, res abci.ResponseFinalizeBlock) {
+ if err := app.abciListener.FinalizeBlock(blockHeight, req, res); err != nil {
+ app.logger.Error("FinalizeBlock listening hook failed", "height", blockHeight, "err", err)
+}
+
+}(req, abciRes)
+}
+
+else {
+ if err := app.abciListener.ListenFinalizeBlock(blockHeight, req, res); err != nil {
+ app.logger.Error("FinalizeBlock listening hook failed", "height", blockHeight, "err", err)
+ if app.stopNodeOnABCIListenerErr {
+ os.Exit(1)
+}
+
+}
+
+}
+
+}
+
+}()
+
+ ...
+
+ return abciRes
+}
+```
+
+```go expandable
+func (app *BaseApp)
+
+Commit()
+
+abci.ResponseCommit {
+
+ ...
+ res := abci.ResponseCommit{
+ Data: commitID.Hash,
+ RetainHeight: retainHeight,
+}
+
+ // call the streaming service hook with the Commit messages
+ for _, abciListener := range app.abciListeners {
+ ctx := app.deliverState.ctx
+ blockHeight := ctx.BlockHeight()
+ changeSet := app.cms.PopStateCache()
+ if app.abciListenersAsync {
+ go func(res abci.ResponseCommit, changeSet []store.StoreKVPair) {
+ if err := app.abciListener.ListenCommit(ctx, res, changeSet); err != nil {
+ app.logger.Error("ListenCommit listening hook failed", "height", blockHeight, "err", err)
+}
+
+}(res, changeSet)
+}
+
+else {
+ if err := app.abciListener.ListenCommit(ctx, res, changeSet); err != nil {
+ app.logger.Error("ListenCommit listening hook failed", "height", blockHeight, "err", err)
+ if app.stopNodeOnABCIListenerErr {
+ os.Exit(1)
+}
+
+}
+
+}
+
+}
+
+ ...
+
+ return res
+}
+```
+
+#### Go Plugin System
+
+We propose a plugin architecture to load and run `Streaming` plugins and other types of implementations. We will introduce a plugin
+system over gRPC that is used to load and run Cosmos-SDK plugins. The plugin system uses [hashicorp/go-plugin](https://github.com/hashicorp/go-plugin).
+Each plugin must have a struct that implements the `plugin.Plugin` interface and an `Impl` interface for processing messages over gRPC.
+Each plugin must also have a message protocol defined for the gRPC service:
+
+```go expandable
+// streaming/plugins/abci/{
+ plugin_version
+}/interface.go
+
+// Handshake is a common handshake that is shared by streaming and host.
+// This prevents users from executing bad plugins or executing a plugin
+// directory. It is a UX feature, not a security feature.
+var Handshake = plugin.HandshakeConfig{
+ ProtocolVersion: 1,
+ MagicCookieKey: "ABCI_LISTENER_PLUGIN",
+ MagicCookieValue: "ef78114d-7bdf-411c-868f-347c99a78345",
+}
+
+// ListenerPlugin is the base struct for all kinds of go-plugin implementations
+// It will be included in interfaces of different Plugins
+type ABCIListenerPlugin struct {
+ // GRPCPlugin must still implement the Plugin interface
+ plugin.Plugin
+ // Concrete implementation, written in Go. This is only used for plugins
+ // that are written in Go.
+ Impl baseapp.ABCIListener
+}
+
+func (p *ListenerGRPCPlugin)
+
+GRPCServer(_ *plugin.GRPCBroker, s *grpc.Server)
+
+error {
+ RegisterABCIListenerServiceServer(s, &GRPCServer{
+ Impl: p.Impl
+})
+
+return nil
+}
+
+func (p *ListenerGRPCPlugin)
+
+GRPCClient(
+ _ context.Context,
+ _ *plugin.GRPCBroker,
+ c *grpc.ClientConn,
+) (interface{
+}, error) {
+ return &GRPCClient{
+ client: NewABCIListenerServiceClient(c)
+}, nil
+}
+```
+
+The `plugin.Plugin` interface has two methods `Client` and `Server`. For our GRPC service these are `GRPCClient` and `GRPCServer`.
+The `Impl` field holds the concrete implementation of our `baseapp.ABCIListener` interface written in Go.
+Note: this is only used for plugin implementations written in Go.
+
+The advantage of having such a plugin system is that within each plugin authors can define the message protocol in a way that fits their use case.
+For example, when state change listening is desired, the `ABCIListener` message protocol can be defined as below (*for illustrative purposes only*).
+When state change listening is not desired, `ListenCommit` can be omitted from the protocol.
+
+```protobuf expandable
+syntax = "proto3";
+
+...
+
+message Empty {}
+
+message ListenFinalizeBlockRequest {
+ RequestFinalizeBlock req = 1;
+ ResponseFinalizeBlock res = 2;
+}
+message ListenCommitRequest {
+ int64 block_height = 1;
+ ResponseCommit res = 2;
+ repeated StoreKVPair changeSet = 3;
+}
+
+// plugin that listens to state changes
+service ABCIListenerService {
+ rpc ListenFinalizeBlock(ListenFinalizeBlockRequest) returns (Empty);
+ rpc ListenCommit(ListenCommitRequest) returns (Empty);
+}
+```
+
+```protobuf
+...
+// plugin that doesn't listen to state changes
+service ABCIListenerService {
+  rpc ListenFinalizeBlock(ListenFinalizeBlockRequest) returns (Empty);
+}
+```
+
+Implementing the service above:
+
+```go expandable
+// streaming/plugins/abci/{
+ plugin_version
+}/grpc.go
+
+var (
+ _ baseapp.ABCIListener = (*GRPCClient)(nil)
+)
+
+// GRPCClient is an implementation of the ABCIListener and ABCIListenerPlugin interfaces that talks over RPC.
+type GRPCClient struct {
+ client ABCIListenerServiceClient
+}
+
+func (m *GRPCClient)
+
+ListenFinalizeBlock(goCtx context.Context, req abci.RequestFinalizeBlock, res abci.ResponseFinalizeBlock)
+
+error {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ _, err := m.client.ListenDeliverTx(ctx, &ListenDeliverTxRequest{
+ BlockHeight: ctx.BlockHeight(),
+ Req: req,
+ Res: res
+})
+
+return err
+}
+
+func (m *GRPCClient)
+
+ListenCommit(goCtx context.Context, res abci.ResponseCommit, changeSet []store.StoreKVPair)
+
+error {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ _, err := m.client.ListenCommit(ctx, &ListenCommitRequest{
+ BlockHeight: ctx.BlockHeight(),
+ Res: res,
+ ChangeSet: changeSet
+})
+
+return err
+}
+
+// GRPCServer is the gRPC server that GRPCClient talks to.
+type GRPCServer struct {
+ // This is the real implementation
+ Impl baseapp.ABCIListener
+}
+
+func (m *GRPCServer)
+
+ListenFinalizeBlock(ctx context.Context, req *ListenFinalizeBlockRequest) (*Empty, error) {
+ return &Empty{
+}, m.Impl.ListenFinalizeBlock(ctx, req.Req, req.Res)
+}
+
+func (m *GRPCServer)
+
+ListenCommit(ctx context.Context, req *ListenCommitRequest) (*Empty, error) {
+ return &Empty{
+}, m.Impl.ListenCommit(ctx, req.Res, req.ChangeSet)
+}
+```
+
+And the pre-compiled Go plugin `Impl`(*this is only used for plugins that are written in Go*):
+
+```go expandable
+// streaming/plugins/abci/{
+ plugin_version
+}/impl/plugin.go
+
+// Plugins are pre-compiled and loaded by the plugin system
+
+// ABCIListener is the implementation of the baseapp.ABCIListener interface
+type ABCIListener struct{
+}
+
+func (m *ABCIListenerPlugin)
+
+ListenFinalizeBlock(ctx context.Context, req abci.RequestFinalizeBlock, res abci.ResponseFinalizeBlock)
+
+error {
+ // send data to external system
+}
+
+func (m *ABCIListenerPlugin)
+
+ListenCommit(ctx context.Context, res abci.ResponseCommit, changeSet []store.StoreKVPair)
+
+error {
+ // send data to external system
+}
+
+func main() {
+ plugin.Serve(&plugin.ServeConfig{
+ HandshakeConfig: grpc_abci_v1.Handshake,
+ Plugins: map[string]plugin.Plugin{
+ "grpc_plugin_v1": &grpc_abci_v1.ABCIListenerGRPCPlugin{
+ Impl: &ABCIListenerPlugin{
+}},
+},
+
+ // A non-nil value here enables gRPC serving for this streaming...
+ GRPCServer: plugin.DefaultGRPCServer,
+})
+}
+```
+
+We will introduce a plugin loading system that will return `(interface{}, error)`.
+This provides the advantage of using versioned plugins where the plugin interface and gRPC protocol change over time.
+In addition, it allows for building independent plugins that can expose different parts of the system over gRPC.
+
+```go expandable
+func NewStreamingPlugin(name string, logLevel string) (interface{
+}, error) {
+ logger := hclog.New(&hclog.LoggerOptions{
+ Output: hclog.DefaultOutput,
+ Level: toHclogLevel(logLevel),
+ Name: fmt.Sprintf("plugin.%s", name),
+})
+
+ // We're a host. Start by launching the streaming process.
+ env := os.Getenv(GetPluginEnvKey(name))
+ client := plugin.NewClient(&plugin.ClientConfig{
+ HandshakeConfig: HandshakeMap[name],
+ Plugins: PluginMap,
+ Cmd: exec.Command("sh", "-c", env),
+ Logger: logger,
+ AllowedProtocols: []plugin.Protocol{
+ plugin.ProtocolNetRPC, plugin.ProtocolGRPC
+},
+})
+
+ // Connect via RPC
+ rpcClient, err := client.Client()
+ if err != nil {
+ return nil, err
+}
+
+ // Request streaming plugin
+ return rpcClient.Dispense(name)
+}
+```
+
+We propose a `RegisterStreamingPlugin` function for the App to register `NewStreamingPlugin`s with the App's BaseApp.
+Streaming plugins can be of `Any` type; therefore, the function takes in an interface vs a concrete type.
+For example, we could have plugins of `ABCIListener`, `WasmListener` or `IBCListener`. Note that the `RegisterStreamingPlugin` function
+is a helper function and not a requirement. Plugin registration can easily be moved from the App to the BaseApp directly.
+
+```go expandable
+// baseapp/streaming.go
+
+// RegisterStreamingPlugin registers streaming plugins with the App.
+// This method returns an error if a plugin is not supported.
+func RegisterStreamingPlugin(
+ bApp *BaseApp,
+ appOpts servertypes.AppOptions,
+ keys map[string]*types.KVStoreKey,
+ streamingPlugin interface{
+},
+)
+
+error {
+ switch t := streamingPlugin.(type) {
+ case ABCIListener:
+ registerABCIListenerPlugin(bApp, appOpts, keys, t)
+
+default:
+ return fmt.Errorf("unexpected plugin type %T", t)
+}
+
+return nil
+}
+```
+
+```go expandable
+func registerABCIListenerPlugin(
+ bApp *BaseApp,
+ appOpts servertypes.AppOptions,
+ keys map[string]*store.KVStoreKey,
+ abciListener ABCIListener,
+) {
+ asyncKey := fmt.Sprintf("%s.%s.%s", StreamingTomlKey, StreamingABCITomlKey, StreamingABCIAsync)
+ async := cast.ToBool(appOpts.Get(asyncKey))
+ stopNodeOnErrKey := fmt.Sprintf("%s.%s.%s", StreamingTomlKey, StreamingABCITomlKey, StreamingABCIStopNodeOnErrTomlKey)
+ stopNodeOnErr := cast.ToBool(appOpts.Get(stopNodeOnErrKey))
+ keysKey := fmt.Sprintf("%s.%s.%s", StreamingTomlKey, StreamingABCITomlKey, StreamingABCIKeysTomlKey)
+ exposeKeysStr := cast.ToStringSlice(appOpts.Get(keysKey))
+ exposedKeys := exposeStoreKeysSorted(exposeKeysStr, keys)
+
+bApp.cms.AddListeners(exposedKeys)
+
+app.SetStreamingManager(
+ storetypes.StreamingManager{
+ ABCIListeners: []storetypes.ABCIListener{
+ abciListener
+},
+ StopNodeOnErr: stopNodeOnErr,
+},
+ )
+}
+```
+
+```go expandable
+func exposeAll(list []string)
+
+bool {
+ for _, ele := range list {
+ if ele == "*" {
+ return true
+}
+
+}
+
+return false
+}
+
+func exposeStoreKeys(keysStr []string, keys map[string]*types.KVStoreKey) []types.StoreKey {
+ var exposeStoreKeys []types.StoreKey
+ if exposeAll(keysStr) {
+ exposeStoreKeys = make([]types.StoreKey, 0, len(keys))
+ for _, storeKey := range keys {
+ exposeStoreKeys = append(exposeStoreKeys, storeKey)
+}
+
+}
+
+else {
+ exposeStoreKeys = make([]types.StoreKey, 0, len(keysStr))
+ for _, keyStr := range keysStr {
+ if storeKey, ok := keys[keyStr]; ok {
+ exposeStoreKeys = append(exposeStoreKeys, storeKey)
+}
+
+}
+
+}
+ // sort storeKeys for deterministic output
+ sort.SliceStable(exposeStoreKeys, func(i, j int)
+
+bool {
+ return exposeStoreKeys[i].Name() < exposeStoreKeys[j].Name()
+})
+
+return exposeStoreKeys
+}
+```
+
+The `NewStreamingPlugin` and `RegisterStreamingPlugin` functions are used to register a plugin with the App's BaseApp.
+
+e.g. in `NewSimApp`:
+
+```go expandable
+func NewSimApp(
+ logger log.Logger,
+ db dbm.DB,
+ traceStore io.Writer,
+ loadLatest bool,
+ appOpts servertypes.AppOptions,
+ baseAppOptions ...func(*baseapp.BaseApp),
+) *SimApp {
+
+ ...
+ keys := sdk.NewKVStoreKeys(
+ authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey,
+ minttypes.StoreKey, distrtypes.StoreKey, slashingtypes.StoreKey,
+ govtypes.StoreKey, paramstypes.StoreKey, ibchost.StoreKey, upgradetypes.StoreKey,
+ evidencetypes.StoreKey, ibctransfertypes.StoreKey, capabilitytypes.StoreKey,
+ )
+
+ ...
+
+ // register streaming services
+ streamingCfg := cast.ToStringMap(appOpts.Get(baseapp.StreamingTomlKey))
+ for service := range streamingCfg {
+ pluginKey := fmt.Sprintf("%s.%s.%s", baseapp.StreamingTomlKey, service, baseapp.StreamingPluginTomlKey)
+ pluginName := strings.TrimSpace(cast.ToString(appOpts.Get(pluginKey)))
+ if len(pluginName) > 0 {
+ logLevel := cast.ToString(appOpts.Get(flags.FlagLogLevel))
+
+plugin, err := streaming.NewStreamingPlugin(pluginName, logLevel)
+ if err != nil {
+ tmos.Exit(err.Error())
+}
+ if err := baseapp.RegisterStreamingPlugin(bApp, appOpts, keys, plugin); err != nil {
+ tmos.Exit(err.Error())
+}
+
+}
+
+}
+
+return app
+```
+
+#### Configuration
+
+The plugin system will be configured within an App's TOML configuration files.
+
+```toml expandable
+# gRPC streaming
+[streaming]
+
+# ABCI streaming service
+[streaming.abci]
+
+# The plugin version to use for ABCI listening
+plugin = "abci_v1"
+
+# List of kv store keys to listen to for state changes.
+# Set to ["*"] to expose all keys.
+keys = ["*"]
+
+# Enable abciListeners to run asynchronously.
+# When abciListenersAsync=false and stopNodeOnABCIListenerErr=false listeners will run synchronized but will not stop the node.
+# When abciListenersAsync=true stopNodeOnABCIListenerErr will be ignored.
+async = false
+
+# Whether to stop the node on message deliver error.
+stop-node-on-err = true
+```
+
+There will be four parameters for configuring `ABCIListener` plugin: `streaming.abci.plugin`, `streaming.abci.keys`, `streaming.abci.async` and `streaming.abci.stop-node-on-err`.
+`streaming.abci.plugin` is the name of the plugin we want to use for streaming, `streaming.abci.keys` is a set of store keys for stores it listens to,
+`streaming.abci.async` is bool enabling asynchronous listening and `streaming.abci.stop-node-on-err` is a bool that stops the node when true and when operating
+on synchronized mode `streaming.abci.async=false`. Note that `streaming.abci.stop-node-on-err=true` will be ignored if `streaming.abci.async=true`.
+
+The configuration above supports additional streaming plugins by adding the plugin to the `[streaming]` configuration section
+and registering the plugin with `RegisterStreamingPlugin` helper function.
+
+Note that each plugin must include the `streaming.{service}.plugin` property as it is a requirement for doing the lookup and registration of the plugin
+with the App. All other properties are unique to the individual services.
+
+#### Encoding and decoding streams
+
+ADR-038 introduces the interfaces and types for streaming state changes out from KVStores, associating this
+data with their related ABCI requests and responses, and registering a service for consuming this data and streaming it to some destination in a final format.
+Instead of prescribing a final data format in this ADR, it is left to a specific plugin implementation to define and document this format.
+We take this approach because flexibility in the final format is necessary to support a wide range of streaming service plugins. For example,
+the data format for a streaming service that writes the data out to a set of files will differ from the data format that is written to a Kafka topic.
+
+## Consequences
+
+These changes will provide a means of subscribing to KVStore state changes in real time.
+
+### Backwards Compatibility
+
+* This ADR changes the `CommitMultiStore` interface, implementations supporting the previous version of this interface will not support the new one
+
+### Positive
+
+* Ability to listen to KVStore state changes in real time and expose these events to external consumers
+
+### Negative
+
+* Changes `CommitMultiStore` interface and its implementations
+
+### Neutral
+
+* Introduces additional- but optional- complexity to configuring and running a cosmos application
+* If an application developer opts to use these features to expose data, they need to be aware of the ramifications/risks of that data exposure as it pertains to the specifics of their application
diff --git a/sdk/next/build/architecture/adr-039-epoched-staking.mdx b/sdk/next/build/architecture/adr-039-epoched-staking.mdx
new file mode 100644
index 000000000..7111cf41d
--- /dev/null
+++ b/sdk/next/build/architecture/adr-039-epoched-staking.mdx
@@ -0,0 +1,125 @@
+---
+title: 'ADR 039: Epoched Staking'
+description: '10-Feb-2021: Initial Draft'
+---
+
+## Changelog
+
+* 10-Feb-2021: Initial Draft
+
+## Authors
+
+* Dev Ojha (@valardragon)
+* Sunny Aggarwal (@sunnya97)
+
+## Status
+
+Proposed
+
+## Abstract
+
+This ADR updates the proof of stake module to buffer the staking weight updates for a number of blocks before updating the consensus' staking weights. The length of the buffer is dubbed an epoch. The prior functionality of the staking module is then a special case of the abstracted module, with the epoch being set to 1 block.
+
+## Context
+
+The current proof of stake module takes the design decision to apply staking weight changes to the consensus engine immediately. This means that delegations and unbonds get applied immediately to the validator set. This decision was primarily done as it was implementationally simplest, and because we at the time believed that this would lead to better UX for clients.
+
+An alternative design choice is to allow buffering staking updates (delegations, unbonds, validators joining) for a number of blocks. This 'epoch'd proof of stake consensus provides the guarantee that the consensus weights for validators will not change mid-epoch, except in the event of a slash condition.
+
+Additionally, the UX hurdle may not be as significant as was previously thought. This is because it is possible to provide users immediate acknowledgement that their bond was recorded and will be executed.
+
+Furthermore, it has become clearer over time that immediate execution of staking events comes with limitations, such as:
+
+* Threshold based cryptography. One of the main limitations is that because the validator set can change so regularly, it makes the running of multiparty computation by a fixed validator set difficult. Many threshold-based cryptographic features for blockchains such as randomness beacons and threshold decryption require a computationally-expensive DKG process (will take much longer than 1 block to create). To productively use these, we need to guarantee that the result of the DKG will be used for a reasonably long time. It wouldn't be feasible to rerun the DKG every block. By epoching staking, it guarantees we'll only need to run a new DKG once every epoch.
+
+* Light client efficiency. This would lessen the overhead for IBC when there is high churn in the validator set. In the Tendermint light client bisection algorithm, the number of headers you need to verify is related to bounding the difference in validator sets between a trusted header and the latest header. If the difference is too great, you verify more headers in between the two. By limiting the frequency of validator set changes, we can reduce the worst case size of IBC lite client proofs, which occurs when a validator set has high churn.
+
+* Fairness of deterministic leader election. Currently we have no way of reasoning about the fairness of deterministic leader election in the presence of staking changes without epochs (tendermint/spec#217). Breaking fairness of leader election is profitable for validators, as they earn additional rewards from being the proposer. Adding epochs at least makes it easier for our deterministic leader election to match something we can prove secure. (Albeit, we still haven’t proven if our current algorithm is fair with > 2 validators in the presence of stake changes)
+
+* Staking derivative design. Currently, reward distribution is done lazily using the F1 fee distribution. While saving computational complexity, lazy accounting requires a more stateful staking implementation. Right now, each delegation entry has to track the time of last withdrawal. Handling this can be a challenge for some staking derivatives designs that seek to provide fungibility for all tokens staked to a single validator. Force-withdrawing rewards to users can help solve this, however it is infeasible to force-withdraw rewards to users on a per block basis. With epochs, a chain could more easily alter the design to have rewards be forcefully withdrawn (iterating over delegator accounts only once per-epoch), and can thus remove delegation timing from state. This may be useful for certain staking derivative designs.
+
+## Design considerations
+
+### Slashing
+
+There is a design consideration for whether to apply a slash immediately or at the end of an epoch. A slash event should apply to only members who are actually staked during the time of the infraction, namely during the epoch the slash event occurred.
+
+Applying it immediately can be viewed as offering greater consensus layer security, at potential costs to the aforementioned usecases. The benefits of immediate slashing for consensus layer security can all be obtained by executing the validator jailing immediately (thus removing it from the validator set), and delaying the actual slash change to the validator's weight until the epoch boundary. For the use cases mentioned above, workarounds can be integrated to avoid problems, as follows:
+
+* For threshold based cryptography, this setting will have the threshold cryptography use the original epoch weights, while consensus has an update that lets it more rapidly benefit from additional security. If the threshold based cryptography blocks liveness of the chain, then we have effectively raised the liveness threshold of the remaining validators for the rest of the epoch. (Alternatively, jailed nodes could still contribute shares) This plan will fail in the extreme case that more than 1/3rd of the validators have been jailed within a single epoch. For such an extreme scenario, the chain already has its own custom incident response plan, and defining how to handle the threshold cryptography should be a part of that.
+* For light client efficiency, there can be a bit included in the header indicating an intra-epoch slash (ala [Link](https://github.com/tendermint/spec/issues/199)).
+* For fairness of deterministic leader election, applying a slash or jailing within an epoch would break the guarantee we were seeking to provide. This then re-introduces a new (but significantly simpler) problem for trying to provide fairness guarantees. Namely, that validators can adversarially elect to remove themselves from the set of proposers. From a security perspective, this could potentially be handled by two different mechanisms (or prove to still be too difficult to achieve). One is making a security statement acknowledging the ability for an adversary to force an ahead-of-time fixed threshold of users to drop out of the proposer set within an epoch. The second method would be to parameterize such that the cost of a slash within the epoch far outweighs the benefits due to being a proposer. However, this latter criterion is quite dubious, since being a proposer can have many advantageous side-effects in chains with complex state machines. (Namely, DeFi games such as Fomo3D)
+* For staking derivative design, there is no issue introduced. This does not increase the state size of staking records, since whether a slash has occurred is fully queryable given the validator address.
+
+### Token lockup
+
+When someone makes a transaction to delegate, even though they are not immediately staked, their tokens should be moved into a pool managed by the staking module which will then be used at the end of an epoch. This prevents concerns where they stake, and then spend those tokens not realizing they were already allocated for staking, and thus having their staking tx fail.
+
+### Pipelining the epochs
+
+For threshold based cryptography in particular, we need a pipeline for epoch changes. This is because when we are in epoch N, we want the epoch N+1 weights to be fixed so that the validator set can do the DKG accordingly. So if we are currently in epoch N, the stake weights for epoch N+1 should already be fixed, and new stake changes should be getting applied to epoch N + 2.
+
+This can be handled by making a parameter for the epoch pipeline length. This parameter should not be alterable except during hard forks, to mitigate implementation complexity of switching the pipeline length.
+
+With pipeline length 1, if I redelegate during epoch N, then my redelegation is applied prior to the beginning of epoch N+1.
+With pipeline length 2, if I redelegate during epoch N, then my redelegation is applied prior to the beginning of epoch N+2.
+
+### Rewards
+
+Even though all staking updates are applied at epoch boundaries, rewards can still be distributed immediately when they are claimed. This is because they do not affect the current stake weights, as we do not implement auto-bonding of rewards. If such a feature were to be implemented, it would have to be setup so that rewards are auto-bonded at the epoch boundary.
+
+### Parameterizing the epoch length
+
+When choosing the epoch length, there is a trade-off between queued state/computation buildup and countering the previously discussed limitations of immediate execution if they apply to a given chain.
+
+Until an ABCI mechanism for variable block times is introduced, it is ill-advised to be using high epoch lengths due to the computation buildup. This is because when a block's execution time is greater than the expected block time from Tendermint, rounds may increment.
+
+## Decision
+
+**Step-1**: Implement buffering of all staking and slashing messages.
+
+First we create a pool for storing tokens that are being bonded, but should be applied at the epoch boundary called the `EpochDelegationPool`. Then, we have two separate queues, one for staking, one for slashing. We describe what happens on each message being delivered below:
+
+### Staking messages
+
+* **MsgCreateValidator**: Move user's self-bond to `EpochDelegationPool` immediately. Queue a message for the epoch boundary to handle the self-bond, taking the funds from the `EpochDelegationPool`. If epoch execution fails, return funds from `EpochDelegationPool` to the user's account.
+* **MsgEditValidator**: Validate message and if valid queue the message for execution at the end of the Epoch.
+* **MsgDelegate**: Move user's funds to `EpochDelegationPool` immediately. Queue a message for the epoch boundary to handle the delegation, taking the funds from the `EpochDelegationPool`. If epoch execution fails, return funds from `EpochDelegationPool` to the user's account.
+* **MsgBeginRedelegate**: Validate message and if valid queue the message for execution at the end of the Epoch.
+* **MsgUndelegate**: Validate message and if valid queue the message for execution at the end of the Epoch.
+
+### Slashing messages
+
+* **MsgUnjail**: Validate message and if valid queue the message for execution at the end of the Epoch.
+* **Slash Event**: Whenever a slash event is created, it gets queued in the slashing module to apply at the end of the epoch. The queues should be set up such that this slash applies immediately.
+
+### Evidence Messages
+
+* **MsgSubmitEvidence**: This gets executed immediately, and the validator gets jailed immediately. However in slashing, the actual slash event gets queued.
+
+Then we add methods to the end blockers, to ensure that at the epoch boundary the queues are cleared and delegation updates are applied.
+
+**Step-2**: Implement querying of queued staking txs.
+
+When querying the staking activity of a given address, the status should return not only the amount of tokens staked, but also if there are any queued stake events for that address. This will require more work to be done in the querying logic, to trace the queued upcoming staking events.
+
+As an initial implementation, this can be implemented as a linear search over all queued staking events. However, for chains that need long epochs, they should eventually build additional support for nodes that support querying to be able to produce results in constant time. (This is do-able by maintaining an auxiliary hashmap for indexing upcoming staking events by address)
+
+**Step-3**: Adjust gas
+
+Currently gas represents the cost of executing a transaction when it's done immediately. (Merging together costs of p2p overhead, state access overhead, and computational overhead) However, now a transaction can cause computation in a future block, namely at the epoch boundary.
+
+To handle this, we should initially include parameters for estimating the amount of future computation (denominated in gas), and add that as a flat charge needed for the message.
+We leave it as out of scope for how to weight future computation versus current computation in gas pricing, and have it set such that they are weighted equally for now.
+
+## Consequences
+
+### Positive
+
+* Abstracts the proof of stake module that allows retaining the existing functionality
+* Enables new features such as validator-set based threshold cryptography
+
+### Negative
+
+* Increases complexity of integrating more complex gas pricing mechanisms, as they now have to consider future execution costs as well.
+* When epoch > 1, validators can no longer leave the network immediately, and must wait until an epoch boundary.
diff --git a/sdk/next/build/architecture/adr-040-storage-and-smt-state-commitments.mdx b/sdk/next/build/architecture/adr-040-storage-and-smt-state-commitments.mdx
new file mode 100644
index 000000000..cce1c598d
--- /dev/null
+++ b/sdk/next/build/architecture/adr-040-storage-and-smt-state-commitments.mdx
@@ -0,0 +1,297 @@
+---
+title: 'ADR 040: Storage and SMT State Commitments'
+description: '2020-01-15: Draft'
+---
+
+## Changelog
+
+* 2020-01-15: Draft
+
+## Status
+
+DRAFT Not Implemented
+
+## Abstract
+
+Sparse Merkle Tree ([SMT](https://osf.io/8mcnh/)) is a version of a Merkle Tree with various storage and performance optimizations. This ADR defines a separation of state commitments from data storage and the Cosmos SDK transition from IAVL to SMT.
+
+## Context
+
+Currently, Cosmos SDK uses IAVL for both state [commitments](https://cryptography.fandom.com/wiki/Commitment_scheme) and data storage.
+
+IAVL has effectively become an orphaned project within the Cosmos ecosystem and it's proven to be an inefficient state commitment data structure.
+In the current design, IAVL is used for both data storage and as a Merkle Tree for state commitments. IAVL is meant to be a standalone Merkelized key/value database, however it's using a KV DB engine to store all tree nodes. So, each node is stored in a separate record in the KV DB. This causes many inefficiencies and problems:
+
+* Each object query requires a tree traversal from the root. Subsequent queries for the same object are cached on the Cosmos SDK level.
+* Each edge traversal requires a DB query.
+* Creating snapshots is [expensive](https://github.com/cosmos/cosmos-sdk/issues/7215#issuecomment-684804950). It takes about 30 seconds to export less than 100 MB of state (as of March 2020).
+* Updates in IAVL may trigger tree reorganization and possible O(log(n)) hashes re-computation, which can become a CPU bottleneck.
+* The node structure is pretty expensive - it contains a standard tree node elements (key, value, left and right element) and additional metadata such as height, version (which is not required by the Cosmos SDK). The entire node is hashed, and that hash is used as the key in the underlying database, [ref](https://github.com/cosmos/iavl/blob/master/docs/node/node.md).
+
+Moreover, the IAVL project lacks support and a maintainer and we already see better and well-established alternatives. Instead of optimizing the IAVL, we are looking into other solutions for both storage and state commitments.
+
+## Decision
+
+We propose to separate the concerns of state commitment (**SC**), needed for consensus, and state storage (**SS**), needed for state machine. Finally we replace IAVL with [Celestia's SMT](https://github.com/lazyledger/smt). Celestia SMT is based on Diem (called jellyfish) design \[\*] - it uses a compute-optimized SMT by replacing subtrees with only default values with a single node (same approach is used by Ethereum2) and implements compact proofs.
+
+The storage model presented here doesn't deal with data structure nor serialization. It's a Key-Value database, where both key and value are binaries. The storage user is responsible for data serialization.
+
+### Decouple state commitment from storage
+
+Separation of storage and commitment (by the SMT) will allow the optimization of different components according to their usage and access patterns.
+
+`SC` (SMT) is used to commit to data and compute Merkle proofs. `SS` is used to directly access data. To avoid collisions, both `SS` and `SC` will use a separate storage namespace (they could use the same database underneath). `SS` will store each record directly (mapping `(key, value)` as `key → value`).
+
+SMT is a merkle tree structure: we don't store keys directly. For every `(key, value)` pair, `hash(key)` is used as leaf path (we hash a key to uniformly distribute leaves in the tree) and `hash(value)` as the leaf contents. The tree structure is specified in more depth [below](#smt-for-state-commitment).
+
+For data access we propose 2 additional KV buckets (implemented as namespaces for the key-value pairs, sometimes called [column family](https://github.com/facebook/rocksdb/wiki/Terminology)):
+
+1. B1: `key → value`: the principal object storage, used by a state machine, behind the Cosmos SDK `KVStore` interface: provides direct access by key and allows prefix iteration (KV DB backend must support it).
+2. B2: `hash(key) → key`: a reverse index to get a key from an SMT path. Internally the SMT will store `(key, value)` as `prefix || hash(key) || hash(value)`. So, we can get an object value by composing `hash(key) → B2 → B1`.
+3. We could use more buckets to optimize the app usage if needed.
+
+We propose to use a KV database for both `SS` and `SC`. The store interface will allow to use the same physical DB backend for both `SS` and `SC` as well as two separate DBs. The latter option allows for the separation of `SS` and `SC` into different hardware units, providing support for more complex setup scenarios and improving overall performance: one can use different backends (eg RocksDB and Badger) as well as independently tuning the underlying DB configuration.
+
+### Requirements
+
+State Storage requirements:
+
+* range queries
+* quick (key, value) access
+* creating a snapshot
+* historical versioning
+* pruning (garbage collection)
+
+State Commitment requirements:
+
+* fast updates
+* tree path should be short
+* query historical commitment proofs using ICS-23 standard
+* pruning (garbage collection)
+
+### SMT for State Commitment
+
+A Sparse Merkle tree is based on the idea of a complete Merkle tree of an intractable size. The assumption here is that as the size of the tree is intractable, there would only be a few leaf nodes with valid data blocks relative to the tree size, rendering a sparse tree.
+
+The full specification can be found at [Celestia](https://github.com/celestiaorg/celestia-specs/blob/ec98170398dfc6394423ee79b00b71038879e211/src/specs/data_structures.md#sparse-merkle-tree). In summary:
+
+* The SMT consists of a binary Merkle tree, constructed in the same fashion as described in [Certificate Transparency (RFC-6962)](https://tools.ietf.org/html/rfc6962), but using as the hashing function SHA-2-256 as defined in [FIPS 180-4](https://doi.org/10.6028/NIST.FIPS.180-4).
+* Leaves and internal nodes are hashed differently: the one-byte `0x00` is prepended for leaf nodes while `0x01` is prepended for internal nodes.
+* Default values are given to leaf nodes with empty leaves.
+* While the above rule is sufficient to pre-compute the values of intermediate nodes that are roots of empty subtrees, a further simplification is to extend this default value to all nodes that are roots of empty subtrees. The 32-byte zero is used as the default value. This rule takes precedence over the above one.
+* An internal node that is the root of a subtree that contains exactly one non-empty leaf is replaced by that leaf's leaf node.
+
+### Snapshots for storage sync and state versioning
+
+Below, with simple *snapshot* we refer to a database snapshot mechanism, not to an *ABCI snapshot sync*. The latter will be referred as *snapshot sync* (which will directly use DB snapshot as described below).
+
+Database snapshot is a view of DB state at a certain time or transaction. It's not a full copy of a database (it would be too big). Usually a snapshot mechanism is based on a *copy on write* and it allows DB state to be efficiently delivered at a certain stage.
+Some DB engines support snapshotting. Hence, we propose to reuse that functionality for the state sync and versioning (described below). We limit the supported DB engines to ones which efficiently implement snapshots. In a final section we discuss the evaluated DBs.
+
+One of the Stargate core features is a *snapshot sync* delivered in the `/snapshot` package. It provides a way to trustlessly sync a blockchain without repeating all transactions from the genesis. This feature is implemented in Cosmos SDK and requires storage support. Currently IAVL is the only supported backend. It works by streaming to a client a snapshot of a `SS` at a certain version together with a header chain.
+
+A new database snapshot will be created in every `EndBlocker` and identified by a block height. The `root` store keeps track of the available snapshots to offer `SS` at a certain version. The `root` store implements the `RootStore` interface described below. In essence, `RootStore` encapsulates a `Committer` interface. `Committer` has a `Commit`, `SetPruning`, `GetPruning` functions which will be used for creating and removing snapshots. The `rootStore.Commit` function creates a new snapshot and increments the version on each call, and checks if it needs to remove old versions. We will need to update the SMT interface to implement the `Committer` interface.
+NOTE: `Commit` must be called exactly once per block. Otherwise we risk going out of sync for the version number and block height.
+NOTE: For the Cosmos SDK storage, we may consider splitting that interface into `Committer` and `PruningCommitter` - only the multiroot should implement `PruningCommitter` (cache and prefix store don't need pruning).
+
+Number of historical versions for `abci.RequestQuery` and state sync snapshots is part of a node configuration, not a chain configuration (configuration implied by the blockchain consensus). A configuration should allow to specify number of past blocks and number of past blocks modulo some number (eg: 100 past blocks and one snapshot every 100 blocks for past 2000 blocks). Archival nodes can keep all past versions.
+
+Pruning old snapshots is effectively done by a database. Whenever we update a record in `SC`, SMT won't update nodes - instead it creates new nodes on the update path, without removing the old one. Since we are snapshotting each block, we need to change that mechanism to immediately remove orphaned nodes from the database. This is a safe operation - snapshots will keep track of the records and make it available when accessing past versions.
+
+To manage the active snapshots we will either use a DB *max number of snapshots* option (if available), or we will remove DB snapshots in the `EndBlocker`. The latter option can be done efficiently by identifying snapshots with block height and calling a store function to remove past versions.
+
+#### Accessing old state versions
+
+One of the functional requirements is to access old state. This is done through `abci.RequestQuery` structure. The version is specified by a block height (so we query for an object by a key `K` at block height `H`). The number of old versions supported for `abci.RequestQuery` is configurable. Accessing an old state is done by using available snapshots.
+`abci.RequestQuery` doesn't need old state of `SC` unless the `prove=true` parameter is set. The SMT merkle proof must be included in the `abci.ResponseQuery` only if both `SC` and `SS` have a snapshot for requested version.
+
+Moreover, Cosmos SDK could provide a way to directly access a historical state. However, a state machine shouldn't do that - since the number of snapshots is configurable, it would lead to nondeterministic execution.
+
+We positively [validated](https://github.com/cosmos/cosmos-sdk/discussions/8297) a versioning and snapshot mechanism for querying old state with regards to the database we evaluated.
+
+### State Proofs
+
+For any object stored in State Store (SS), we have a corresponding object in `SC`. A proof for object `V` identified by a key `K` is a branch of `SC`, where the path corresponds to the key `hash(K)`, and the leaf is `hash(K, V)`.
+
+### Rollbacks
+
+We need to be able to process transactions and roll-back state updates if a transaction fails. This can be done in the following way: during transaction processing, we keep all state change requests (writes) in a `CacheWrapper` abstraction (as it's done today). Once we finish the block processing, in the `Endblocker`, we commit a root store - at that time, all changes are written to the SMT and to the `SS` and a snapshot is created.
+
+### Committing to an object without saving it
+
+We identified use-cases, where modules will need to save an object commitment without storing an object itself. Sometimes clients are receiving complex objects, and they have no way to prove a correctness of that object without knowing the storage layout. For those use cases it would be easier to commit to the object without storing it directly.
+
+### Refactor MultiStore
+
+The Stargate `/store` implementation (store/v1) adds an additional layer in the SDK store construction - the `MultiStore` structure. The multistore exists to support the modularity of the Cosmos SDK - each module is using its own instance of IAVL, but in the current implementation, all instances share the same database. The latter indicates, however, that the implementation doesn't provide true modularity. Instead it causes problems related to race condition and atomic DB commits (see: [#6370](https://github.com/cosmos/cosmos-sdk/issues/6370) and [discussion](https://github.com/cosmos/cosmos-sdk/discussions/8297#discussioncomment-757043)).
+
+We propose to reduce the multistore concept from the SDK, and to use a single instance of `SC` and `SS` in a `RootStore` object. To avoid confusion, we should rename the `MultiStore` interface to `RootStore`. The `RootStore` will have the following interface; the methods for configuring tracing and listeners are omitted for brevity.
+
+```go expandable
+// Used where read-only access to versions is needed.
+type BasicRootStore interface {
+    Store
+    GetKVStore(StoreKey) KVStore
+    CacheRootStore() CacheRootStore
+}
+
+// Used as the main app state, replacing CommitMultiStore.
+type CommitRootStore interface {
+    BasicRootStore
+    Committer
+    Snapshotter
+
+    GetVersion(uint64) (BasicRootStore, error)
+    SetInitialVersion(uint64) error
+
+    ... // Trace and Listen methods
+}
+
+// Replaces CacheMultiStore for branched state.
+type CacheRootStore interface {
+    BasicRootStore
+    Write()
+
+    ... // Trace and Listen methods
+}
+
+// Example of constructor parameters for the concrete type.
+type RootStoreConfig struct {
+    Upgrades       *StoreUpgrades
+    InitialVersion uint64
+
+    ReservePrefix(StoreKey, StoreType)
+}
+
+
+
+
+
+
+
+```
+
+{/* TODO: Review whether these types can be further reduced or simplified */}
+{/* TODO: RootStorePersistentCache type */}
+
+In contrast to `MultiStore`, `RootStore` doesn't allow to dynamically mount sub-stores or provide an arbitrary backing DB for individual sub-stores.
+
+NOTE: modules will be able to use a special commitment and their own DBs. For example: a module which will use ZK proofs for state can store and commit this proof in the `RootStore` (usually as a single record) and manage the specialized store privately or using the `SC` low level interface.
+
+#### Compatibility support
+
+To ease the transition to this new interface for users, we can create a shim which wraps a `CommitMultiStore` but provides a `CommitRootStore` interface, and expose functions to safely create and access the underlying `CommitMultiStore`.
+
+The new `RootStore` and supporting types can be implemented in a `store/v2alpha1` package to avoid breaking existing code.
+
+#### Merkle Proofs and IBC
+
+Currently, an IBC (v1.0) Merkle proof path consists of two elements (`["", ""]`), with each key corresponding to a separate proof. These are each verified according to individual [ICS-23 specs](https://github.com/cosmos/ibc-go/blob/f7051429e1cf833a6f65d51e6c3df1609290a549/modules/core/23-commitment/types/merkle.go#L17), and the result hash of each step is used as the committed value of the next step, until a root commitment hash is obtained.
+The root hash of the proof for `""` is hashed with the `""` to validate against the App Hash.
+
+This is not compatible with the `RootStore`, which stores all records in a single Merkle tree structure, and won't produce separate proofs for the store- and record-key. Ideally, the store-key component of the proof could just be omitted, and updated to use a "no-op" spec, so only the record-key is used. However, because the IBC verification code hardcodes the `"ibc"` prefix and applies it to the SDK proof as a separate element of the proof path, this isn't possible without a breaking change. Breaking this behavior would severely impact the Cosmos ecosystem which already widely adopts the IBC module. Requesting an update of the IBC module across the chains is a time consuming effort and not easily feasible.
+
+As a workaround, the `RootStore` will have to use two separate SMTs (they could use the same underlying DB): one for IBC state and one for everything else. A simple Merkle map that references these SMTs will act as a Merkle Tree to create a final App hash. The Merkle map is not stored in a DB - it's constructed in the runtime. The IBC substore key must be `"ibc"`.
+
+The workaround can still guarantee atomic syncs: the [proposed DB backends](#evaluated-kv-databases) support atomic transactions and efficient rollbacks, which will be used in the commit phase.
+
+The presented workaround can be used until the IBC module is fully upgraded to support single-element commitment proofs.
+
+### Optimization: compress module key prefixes
+
+We consider a compression of prefix keys by creating a mapping from module key to an integer, and serializing the integer using varint coding. Varint coding assures that different values don't have common byte prefix. For Merkle Proofs we can't use prefix compression - so it should only apply for the `SS` keys. Moreover, the prefix compression should be only applied for the module namespace. More precisely:
+
+* each module has its own namespace;
+* when accessing a module namespace we create a KVStore with embedded prefix;
+* that prefix will be compressed only when accessing and managing `SS`.
+
+We need to assure that the codes won't change. We can fix the mapping in a static variable (provided by an app) or SS state under a special key.
+
+TODO: need to make decision about the key compression.
+
+## Optimization: SS key compression
+
+Some objects may be saved with key, which contains a Protobuf message type. Such keys are long. We could save a lot of space if we can map Protobuf message types in varints.
+
+TODO: finalize this or move to another ADR.
+
+## Migration
+
+Using the new store will require a migration. Two migrations are proposed:
+
+1. Genesis export -- it will reset the blockchain history.
+2. In place migration: we can reuse `UpgradeKeeper.SetUpgradeHandler` to provide the migration logic:
+
+```go
+app.UpgradeKeeper.SetUpgradeHandler("adr-40", func(ctx sdk.Context, plan upgradetypes.Plan, vm module.VersionMap) (module.VersionMap, error) {
+ storev2.Migrate(iavlstore, v2.store)
+
+ // RunMigrations returns the VersionMap
+ // with the updated module ConsensusVersions
+ return app.mm.RunMigrations(ctx, vm)
+})
+```
+
+The `Migrate` function will read all entries from a store/v1 DB and save them to the ADR-40 combined KV store.
+Cache layer should not be used and the operation must finish with a single Commit call.
+
+Inserting records to the `SC` (SMT) component is the bottleneck. Unfortunately SMT doesn't support batch transactions.
+Adding batch transactions to `SC` layer is considered as a feature after the main release.
+
+## Consequences
+
+### Backwards Compatibility
+
+This ADR doesn't introduce any Cosmos SDK level API changes.
+
+We change the storage layout of the state machine, a storage hard fork and network upgrade is required to incorporate these changes. SMT provides a merkle proof functionality, however it is not compatible with ICS23. Updating the proofs for ICS23 compatibility is required.
+
+### Positive
+
+* Decoupling state from state commitment introduces better engineering opportunities for further optimizations and better storage patterns.
+* Performance improvements.
+* Joining SMT based camp which has wider and proven adoption than IAVL. Example projects which decided on SMT: Ethereum2, Diem (Libra), Trillian, Tezos, Celestia.
+* Multistore removal fixes a longstanding issue with the current MultiStore design.
+* Simplifies merkle proofs - all modules, except IBC, have only one pass for merkle proof.
+
+### Negative
+
+* Storage migration
+* LL SMT doesn't support pruning - we will need to add and test that functionality.
+* `SS` keys will have an overhead of a key prefix. This doesn't impact `SC` because all keys in `SC` have same size (they are hashed).
+
+### Neutral
+
+* Deprecating IAVL, which is one of the core proposals of Cosmos Whitepaper.
+
+## Alternative designs
+
+Most of the alternative designs were evaluated in [state commitments and storage report](https://paper.dropbox.com/published/State-commitments-and-storage-review--BDvA1MLwRtOx55KRihJ5xxLbBw-KeEB7eOd11pNrZvVtqUgL3h).
+
+Ethereum research published [Verkle Trie](https://dankradfeist.de/ethereum/2021/06/18/verkle-trie-for-eth1.html) - an idea of combining polynomial commitments with merkle tree in order to reduce the tree height. This concept has a very good potential, but we think it's too early to implement it. The current, SMT based design could be easily updated to the Verkle Trie once other research implement all necessary libraries. The main advantage of the design described in this ADR is the separation of state commitments from the data storage and designing a more powerful interface.
+
+## Further Discussions
+
+### Evaluated KV Databases
+
+We verified existing KV databases to evaluate snapshot support. The following databases provide efficient snapshot mechanism: Badger, RocksDB, [Pebble](https://github.com/cockroachdb/pebble). Databases which don't provide such support or are not production ready: boltdb, leveldb, goleveldb, membdb, lmdb.
+
+### RDBMS
+
+Use of RDBMS instead of simple KV store for state. Use of RDBMS will require a Cosmos SDK API breaking change (`KVStore` interface) and will allow better data extraction and indexing solutions. Instead of saving an object as a single blob of bytes, we could save it as a record in a table in the state storage layer, and as a `hash(key, protobuf(object))` in the SMT as outlined above. To verify that an object registered in RDBMS is same as the one committed to SMT, one will need to load it from RDBMS, marshal using protobuf, hash and do SMT search.
+
+### Off Chain Store
+
+We were discussing a use case where modules can use a supporting database, which is not automatically committed. The module will be responsible for having a sound storage model and can optionally use the feature discussed in the *Committing to an object without saving it* section.
+
+## References
+
+* [IAVL What's Next?](https://github.com/cosmos/cosmos-sdk/issues/7100)
+* [IAVL overview](https://docs.google.com/document/d/16Z_hW2rSAmoyMENO-RlAhQjAG3mSNKsQueMnKpmcBv0/edit#heading=h.yd2th7x3o1iv) of its state as of v0.15
+* [State commitments and storage report](https://paper.dropbox.com/published/State-commitments-and-storage-review--BDvA1MLwRtOx55KRihJ5xxLbBw-KeEB7eOd11pNrZvVtqUgL3h)
+* [Celestia (LazyLedger) SMT](https://github.com/lazyledger/smt)
+* Facebook Diem (Libra) SMT [design](https://developers.diem.com/papers/jellyfish-merkle-tree/2021-01-14.pdf)
+* [Trillian Revocation Transparency](https://github.com/google/trillian/blob/master/docs/papers/RevocationTransparency.pdf), [Trillian Verifiable Data Structures](https://github.com/google/trillian/blob/master/docs/papers/VerifiableDataStructures.pdf).
+* Design and implementation [discussion](https://github.com/cosmos/cosmos-sdk/discussions/8297).
+* [How to Upgrade IBC Chains and their Clients](https://github.com/cosmos/ibc-go/blob/main/ibc/upgrades/quick-guide.md)
+* [ADR-40 Effect on IBC](https://github.com/cosmos/ibc-go/discussions/256)
diff --git a/sdk/next/build/architecture/adr-041-in-place-store-migrations.mdx b/sdk/next/build/architecture/adr-041-in-place-store-migrations.mdx
new file mode 100644
index 000000000..85215874e
--- /dev/null
+++ b/sdk/next/build/architecture/adr-041-in-place-store-migrations.mdx
@@ -0,0 +1,183 @@
+---
+title: 'ADR 041: In-Place Store Migrations'
+description: '17.02.2021: Initial Draft'
+---
+
+## Changelog
+
+* 17.02.2021: Initial Draft
+
+## Status
+
+Accepted
+
+## Abstract
+
+This ADR introduces a mechanism to perform in-place state store migrations during chain software upgrades.
+
+## Context
+
+When a chain upgrade introduces state-breaking changes inside modules, the current procedure consists of exporting the whole state into a JSON file (via the `simd export` command), running migration scripts on the JSON file (`simd genesis migrate` command), clearing the stores (`simd unsafe-reset-all` command), and starting a new chain with the migrated JSON file as new genesis (optionally with a custom initial block height). An example of such a procedure can be seen [in the Cosmos Hub 3->4 migration guide](https://github.com/cosmos/gaia/blob/v4.0.3/docs/migration/cosmoshub-3.md#upgrade-procedure).
+
+This procedure is cumbersome for multiple reasons:
+
+* The procedure takes time. It can take hours to run the `export` command, plus some additional hours to run `InitChain` on the fresh chain using the migrated JSON.
+* The exported JSON file can be heavy (\~100MB-1GB), making it difficult to view, edit and transfer, which in turn introduces additional work to solve these problems (such as [streaming genesis](https://github.com/cosmos/cosmos-sdk/issues/6936)).
+
+## Decision
+
+We propose a migration procedure based on modifying the KV store in-place without involving the JSON export-process-import flow described above.
+
+### Module `ConsensusVersion`
+
+We introduce a new method on the `AppModule` interface:
+
+```go
+type AppModule interface {
+  // --snip--
+  ConsensusVersion() uint64
+}
+```
+
+This method returns a `uint64` which serves as state-breaking version of the module. It MUST be incremented on each consensus-breaking change introduced by the module. To avoid potential errors with default values, the initial version of a module MUST be set to 1. In the Cosmos SDK, version 1 corresponds to the modules in the v0.41 series.
+
+### Module-Specific Migration Functions
+
+For each consensus-breaking change introduced by the module, a migration script from ConsensusVersion `N` to version `N+1` MUST be registered in the `Configurator` using its newly-added `RegisterMigration` method. All modules receive a reference to the configurator in their `RegisterServices` method on `AppModule`, and this is where the migration functions should be registered. The migration functions should be registered in increasing order.
+
+```go
+func (am AppModule) RegisterServices(cfg module.Configurator) {
+  // --snip--
+  cfg.RegisterMigration(types.ModuleName, 1, func(ctx sdk.Context) error {
+    // Perform in-place store migrations from ConsensusVersion 1 to 2.
+  })
+  cfg.RegisterMigration(types.ModuleName, 2, func(ctx sdk.Context) error {
+    // Perform in-place store migrations from ConsensusVersion 2 to 3.
+  })
+  // etc.
+}
+```
+
+For example, if the new ConsensusVersion of a module is `N` , then `N-1` migration functions MUST be registered in the configurator.
+
+In the Cosmos SDK, the migration functions are handled by each module's keeper, because the keeper holds the `sdk.StoreKey` used to perform in-place store migrations. To not overload the keeper, a `Migrator` wrapper is used by each module to handle the migration functions:
+
+```go
+// Migrator is a struct for handling in-place store migrations.
+type Migrator struct {
+ BaseKeeper
+}
+```
+
+Migration functions should live inside the `migrations/` folder of each module, and be called by the Migrator's methods. We propose the format `Migrate{M}to{N}` for method names.
+
+```go
+// Migrate1to2 migrates from version 1 to 2.
+func (m Migrator) Migrate1to2(ctx sdk.Context) error {
+  return v2bank.MigrateStore(ctx, m.keeper.storeKey) // v2bank is package `x/bank/migrations/v2`.
+}
+```
+
+Each module's migration functions are specific to the module's store evolutions, and are not described in this ADR. An example of x/bank store key migrations after the introduction of ADR-028 length-prefixed addresses can be seen in this [store.go code](https://github.com/cosmos/cosmos-sdk/blob/36f68eb9e041e20a5bb47e216ac5eb8b91f95471/x/bank/legacy/v043/store.go#L41-L62).
+
+### Tracking Module Versions in `x/upgrade`
+
+We introduce a new prefix store in `x/upgrade`'s store. This store will track each module's current version, it can be modelized as a `map[string]uint64` of module name to module ConsensusVersion, and will be used when running the migrations (see next section for details). The key prefix used is `0x1`, and the key/value format is:
+
+```text
+0x1 | {bytes(module_name)} => BigEndian(module_consensus_version)
+```
+
+The initial state of the store is set from `app.go`'s `InitChainer` method.
+
+The UpgradeHandler signature needs to be updated to take a `VersionMap`, as well as return an upgraded `VersionMap` and an error:
+
+```diff
+- type UpgradeHandler func(ctx sdk.Context, plan Plan)
++ type UpgradeHandler func(ctx sdk.Context, plan Plan, versionMap VersionMap) (VersionMap, error)
+```
+
+To apply an upgrade, we query the `VersionMap` from the `x/upgrade` store and pass it into the handler. The handler runs the actual migration functions (see next section), and if successful, returns an updated `VersionMap` to be stored in state.
+
+```diff expandable
+func (k UpgradeKeeper) ApplyUpgrade(ctx sdk.Context, plan types.Plan) {
+ // --snip--
+- handler(ctx, plan)
++ updatedVM, err := handler(ctx, plan, k.GetModuleVersionMap(ctx)) // k.GetModuleVersionMap() fetches the VersionMap stored in state.
++ if err != nil {
++ return err
++ }
++
++ // Set the updated consensus versions to state
++ k.SetModuleVersionMap(ctx, updatedVM)
+}
+```
+
+A gRPC query endpoint to query the `VersionMap` stored in `x/upgrade`'s state will also be added, so that app developers can double-check the `VersionMap` before the upgrade handler runs.
+
+### Running Migrations
+
+Once all the migration handlers are registered inside the configurator (which happens at startup), running migrations can happen by calling the `RunMigrations` method on `module.Manager`. This function will loop through all modules, and for each module:
+
+* Get the old ConsensusVersion of the module from its `VersionMap` argument (let's call it `M`).
+* Fetch the new ConsensusVersion of the module from the `ConsensusVersion()` method on `AppModule` (call it `N`).
+* If `N>M`, run all registered migrations for the module sequentially `M -> M+1 -> M+2...` until `N`.
+ * There is a special case where there is no ConsensusVersion for the module, as this means that the module has been newly added during the upgrade. In this case, no migration function is run, and the module's current ConsensusVersion is saved to `x/upgrade`'s store.
+
+If a required migration is missing (e.g. if it has not been registered in the `Configurator`), then the `RunMigrations` function will error.
+
+In practice, the `RunMigrations` method should be called from inside an `UpgradeHandler`.
+
+```go
+app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx sdk.Context, plan upgradetypes.Plan, vm module.VersionMap) (module.VersionMap, error) {
+ return app.mm.RunMigrations(ctx, vm)
+})
+```
+
+Assuming a chain upgrades at block `N`, the procedure should run as follows:
+
+* the old binary will halt in `BeginBlock` when starting block `N`. In its store, the ConsensusVersions of the old binary's modules are stored.
+* the new binary will start at block `N`. The UpgradeHandler is set in the new binary, so will run at `BeginBlock` of the new binary. Inside `x/upgrade`'s `ApplyUpgrade`, the `VersionMap` will be retrieved from the (old binary's) store, and passed into the `RunMigrations` function, migrating all module stores in-place before the modules' own `BeginBlock`s.
+
+## Consequences
+
+### Backwards Compatibility
+
+This ADR introduces a new method `ConsensusVersion()` on `AppModule`, which all modules need to implement. It also alters the UpgradeHandler function signature. As such, it is not backwards-compatible.
+
+While modules MUST register their migration functions when bumping ConsensusVersions, running those scripts using an upgrade handler is optional. An application may perfectly well decide to not call the `RunMigrations` inside its upgrade handler, and continue using the legacy JSON migration path.
+
+### Positive
+
+* Perform chain upgrades without manipulating JSON files.
+* While no benchmark has been made yet, it is probable that in-place store migrations will take less time than JSON migrations. The main reason supporting this claim is that both the `simd export` command on the old binary and the `InitChain` function on the new binary will be skipped.
+
+### Negative
+
+* Module developers MUST correctly track consensus-breaking changes in their modules. If a consensus-breaking change is introduced in a module without its corresponding `ConsensusVersion()` bump, then the `RunMigrations` function won't detect the migration, and the chain upgrade might be unsuccessful. Documentation should clearly reflect this.
+
+### Neutral
+
+* The Cosmos SDK will continue to support JSON migrations via the existing `simd export` and `simd genesis migrate` commands.
+* The current ADR does not allow creating, renaming or deleting stores, only modifying existing store keys and values. The Cosmos SDK already has the `StoreLoader` for those operations.
+
+## Further Discussions
+
+## References
+
+* Initial discussion: [Link](https://github.com/cosmos/cosmos-sdk/discussions/8429)
+* Implementation of `ConsensusVersion` and `RunMigrations`: [Link](https://github.com/cosmos/cosmos-sdk/pull/8485)
+* Issue discussing `x/upgrade` design: [Link](https://github.com/cosmos/cosmos-sdk/issues/8514)
diff --git a/sdk/next/build/architecture/adr-042-group-module.mdx b/sdk/next/build/architecture/adr-042-group-module.mdx
new file mode 100644
index 000000000..ca6575fec
--- /dev/null
+++ b/sdk/next/build/architecture/adr-042-group-module.mdx
@@ -0,0 +1,289 @@
+---
+title: 'ADR 042: Group Module'
+description: '2020/04/09: Initial Draft'
+---
+
+## Changelog
+
+* 2020/04/09: Initial Draft
+
+## Status
+
+Draft
+
+## Abstract
+
+This ADR defines the `x/group` module which allows the creation and management of on-chain multi-signature accounts and enables voting for message execution based on configurable decision policies.
+
+## Context
+
+The legacy amino multi-signature mechanism of the Cosmos SDK has certain limitations:
+
+* Key rotation is not possible, although this can be solved with [account rekeying](/sdk/v0.53/build/architecture/adr-034-account-rekeying).
+* Thresholds can't be changed.
+* UX is cumbersome for non-technical users ([#5661](https://github.com/cosmos/cosmos-sdk/issues/5661)).
+* It requires `legacy_amino` sign mode ([#8141](https://github.com/cosmos/cosmos-sdk/issues/8141)).
+
+While the group module is not meant to be a total replacement for the current multi-signature accounts, it provides a solution to the limitations described above, with a more flexible key management system where keys can be added, updated or removed, as well as configurable thresholds.
+It's meant to be used with other access control modules such as [`x/feegrant`](/sdk/v0.50/build/architecture/adr-029-fee-grant-module) and [`x/authz`](/sdk/v0.53/build/architecture/adr-030-authz-module) to simplify key management for individuals and organizations.
+
+The proof of concept of the group module can be found in [Link](https://github.com/regen-network/regen-ledger/tree/master/proto/regen/group/v1alpha1) and [Link](https://github.com/regen-network/regen-ledger/tree/master/x/group).
+
+## Decision
+
+We propose merging the `x/group` module with its supporting [ORM/Table Store package](https://github.com/regen-network/regen-ledger/tree/master/orm) ([#7098](https://github.com/cosmos/cosmos-sdk/issues/7098)) into the Cosmos SDK and continuing development here. There will be a dedicated ADR for the ORM package.
+
+### Group
+
+A group is a composition of accounts with associated weights. It is not
+an account and doesn't have a balance. It doesn't in and of itself have any
+sort of voting or decision weight.
+Group members can create proposals and vote on them through group accounts using different decision policies.
+
+It has an `admin` account which can manage members in the group, update the group
+metadata and set a new admin.
+
+```protobuf expandable
+message GroupInfo {
+
+ // group_id is the unique ID of this group.
+ uint64 group_id = 1;
+
+ // admin is the account address of the group's admin.
+ string admin = 2;
+
+ // metadata is any arbitrary metadata to attached to the group.
+ bytes metadata = 3;
+
+ // version is used to track changes to a group's membership structure that
+ // would break existing proposals. Whenever a member weight has changed,
+ // or any member is added or removed, the version is incremented and will
+ // invalidate all proposals from older versions.
+ uint64 version = 4;
+
+ // total_weight is the sum of the group members' weights.
+ string total_weight = 5;
+}
+```
+
+```protobuf expandable
+message GroupMember {
+
+ // group_id is the unique ID of the group.
+ uint64 group_id = 1;
+
+ // member is the member data.
+ Member member = 2;
+}
+
+// Member represents a group member with an account address,
+// non-zero weight and metadata.
+message Member {
+
+ // address is the member's account address.
+ string address = 1;
+
+ // weight is the member's voting weight that should be greater than 0.
+ string weight = 2;
+
+ // metadata is any arbitrary metadata to attached to the member.
+ bytes metadata = 3;
+}
+```
+
+### Group Account
+
+A group account is an account associated with a group and a decision policy.
+A group account does have a balance.
+
+Group accounts are abstracted from groups because a single group may have
+multiple decision policies for different types of actions. Managing group
+membership separately from decision policies results in the least overhead
+and keeps membership consistent across different policies. The pattern that
+is recommended is to have a single master group account for a given group,
+and then to create separate group accounts with different decision policies
+and delegate the desired permissions from the master account to
+those "sub-accounts" using the [`x/authz` module](/sdk/v0.53/build/architecture/adr-030-authz-module).
+
+```protobuf expandable
+message GroupAccountInfo {
+
+ // address is the group account address.
+ string address = 1;
+
+ // group_id is the ID of the Group the GroupAccount belongs to.
+ uint64 group_id = 2;
+
+ // admin is the account address of the group admin.
+ string admin = 3;
+
+ // metadata is any arbitrary metadata of this group account.
+ bytes metadata = 4;
+
+ // version is used to track changes to a group's GroupAccountInfo structure that
+ // invalidates active proposal from old versions.
+ uint64 version = 5;
+
+ // decision_policy specifies the group account's decision policy.
+ google.protobuf.Any decision_policy = 6 [(cosmos_proto.accepts_interface) = "cosmos.group.v1.DecisionPolicy"];
+}
+```
+
+Similarly to a group admin, a group account admin can update its metadata, decision policy or set a new group account admin.
+
+A group account can also be an admin or a member of a group.
+For instance, a group admin could be another group account which could "elect" the members or it could be the same group that elects itself.
+
+### Decision Policy
+
+A decision policy is the mechanism by which members of a group can vote on
+proposals.
+
+All decision policies should have a minimum and maximum voting window.
+The minimum voting window is the minimum duration that must pass in order
+for a proposal to potentially pass, and it may be set to 0. The maximum voting
+window is the maximum time that a proposal may be voted on and executed if
+it reached enough support before it is closed.
+Both of these values must be less than a chain-wide max voting window parameter.
+
+We define the `DecisionPolicy` interface that all decision policies must implement:
+
+```go expandable
+type DecisionPolicy interface {
+  codec.ProtoMarshaler
+
+  ValidateBasic() error
+  GetTimeout() types.Duration
+  Allow(tally Tally, totalPower string, votingDuration time.Duration) (DecisionPolicyResult, error)
+  Validate(g GroupInfo) error
+}
+
+type DecisionPolicyResult struct {
+ Allow bool
+ Final bool
+}
+```
+
+#### Threshold decision policy
+
+A threshold decision policy defines a minimum number of support votes (*yes*), based on a tally
+of voter weights, for a proposal to pass. For
+this decision policy, abstain and veto are treated as no support (*no*).
+
+```protobuf
+message ThresholdDecisionPolicy {
+
+ // threshold is the minimum weighted sum of support votes for a proposal to succeed.
+ string threshold = 1;
+
+ // voting_period is the duration from submission of a proposal to the end of voting period
+ // Within this period, votes and exec messages can be submitted.
+ google.protobuf.Duration voting_period = 2 [(gogoproto.nullable) = false];
+}
+```
+
+### Proposal
+
+Any member of a group can submit a proposal for a group account to decide upon.
+A proposal consists of a set of `sdk.Msg`s that will be executed if the proposal
+passes as well as any metadata associated with the proposal. These `sdk.Msg`s get validated as part of the `Msg/CreateProposal` request validation. They should also have their signer set as the group account.
+
+Internally, a proposal also tracks:
+
+* its current `Status`: submitted, closed or aborted
+* its `Result`: unfinalized, accepted or rejected
+* its `VoteState` in the form of a `Tally`, which is calculated on new votes and when executing the proposal.
+
+```protobuf expandable
+// Tally represents the sum of weighted votes.
+message Tally {
+ option (gogoproto.goproto_getters) = false;
+
+ // yes_count is the weighted sum of yes votes.
+ string yes_count = 1;
+
+ // no_count is the weighted sum of no votes.
+ string no_count = 2;
+
+ // abstain_count is the weighted sum of abstainers.
+ string abstain_count = 3;
+
+ // veto_count is the weighted sum of vetoes.
+ string veto_count = 4;
+}
+```
+
+### Voting
+
+Members of a group can vote on proposals. There are four voting options: yes, no, abstain and veto. Not
+all decision policies will support them. Votes can contain some optional metadata.
+In the current implementation, the voting window begins as soon as a proposal
+is submitted.
+
+Voting internally updates the proposal `VoteState` as well as `Status` and `Result` if needed.
+
+### Executing Proposals
+
+Proposals will not be automatically executed by the chain in this current design,
+but rather a user must submit a `Msg/Exec` transaction to attempt to execute the
+proposal based on the current votes and decision policy. A future upgrade could
+automate this and have the group account (or a fee granter) pay.
+
+#### Changing Group Membership
+
+In the current implementation, updating a group or a group account after submitting a proposal will make it invalid. It will simply fail if someone calls `Msg/Exec` and will eventually be garbage collected.
+
+### Notes on current implementation
+
+This section outlines the current implementation used in the proof of concept of the group module but this could be subject to changes and iterated on.
+
+#### ORM
+
+The [ORM package](https://github.com/cosmos/cosmos-sdk/discussions/9156) defines tables, sequences and secondary indexes which are used in the group module.
+
+Groups are stored in state as part of a `groupTable`, the `group_id` being an auto-increment integer. Group members are stored in a `groupMemberTable`.
+
+Group accounts are stored in a `groupAccountTable`. The group account address is generated based on an auto-increment integer which is used to derive the group module `RootModuleKey` into a `DerivedModuleKey`, as stated in [ADR-033](/sdk/v0.53/build/architecture/adr-033-protobuf-inter-module-comm#modulekeys-and-moduleids). The group account is added as a new `ModuleAccount` through `x/auth`.
+
+Proposals are stored as part of the `proposalTable` using the `Proposal` type. The `proposal_id` is an auto-increment integer.
+
+Votes are stored in the `voteTable`. The primary key is based on the vote's `proposal_id` and `voter` account address.
+
+#### ADR-033 to route proposal messages
+
+Inter-module communication introduced by [ADR-033](/sdk/v0.53/build/architecture/adr-033-protobuf-inter-module-comm) can be used to route a proposal's messages using the `DerivedModuleKey` corresponding to the proposal's group account.
+
+## Consequences
+
+### Positive
+
+* Improved UX for multi-signature accounts allowing key rotation and custom decision policies.
+
+### Negative
+
+### Neutral
+
+* It uses ADR 033 so it will need to be implemented within the Cosmos SDK, but this doesn't imply necessarily any large refactoring of existing Cosmos SDK modules.
+* The current implementation of the group module uses the ORM package.
+
+## Further Discussions
+
+* Convergence of `/group` and `x/gov` as both support proposals and voting: [Link](https://github.com/cosmos/cosmos-sdk/discussions/9066)
+* `x/group` possible future improvements:
+ * Execute proposals on submission ([Link](https://github.com/regen-network/regen-ledger/issues/288))
+ * Withdraw a proposal ([Link](https://github.com/regen-network/cosmos-modules/issues/41))
+ * Make `Tally` more flexible and support non-binary choices
+
+## References
+
+* Initial specification:
+ * [Link](https://gist.github.com/aaronc/b60628017352df5983791cad30babe56#group-module)
+ * [#5236](https://github.com/cosmos/cosmos-sdk/pull/5236)
+* Proposal to add `x/group` into the Cosmos SDK: [#7633](https://github.com/cosmos/cosmos-sdk/issues/7633)
diff --git a/sdk/next/build/architecture/adr-043-nft-module.mdx b/sdk/next/build/architecture/adr-043-nft-module.mdx
new file mode 100644
index 000000000..1f589f7e8
--- /dev/null
+++ b/sdk/next/build/architecture/adr-043-nft-module.mdx
@@ -0,0 +1,381 @@
+---
+title: 'ADR 43: NFT Module'
+description: >-
+ 2021-05-01: Initial Draft 2021-07-02: Review updates 2022-06-15: Add batch
+ operation 2022-11-11: Remove strict validation of classID and tokenID
+---
+
+## Changelog
+
+* 2021-05-01: Initial Draft
+* 2021-07-02: Review updates
+* 2022-06-15: Add batch operation
+* 2022-11-11: Remove strict validation of classID and tokenID
+
+## Status
+
+PROPOSED
+
+## Abstract
+
+This ADR defines the `x/nft` module which is a generic implementation of NFTs, roughly "compatible" with ERC721. **Applications using the `x/nft` module must implement the following functions**:
+
+* `MsgNewClass` - Receive the user's request to create a class, and call the `NewClass` of the `x/nft` module.
+* `MsgUpdateClass` - Receive the user's request to update a class, and call the `UpdateClass` of the `x/nft` module.
+* `MsgMintNFT` - Receive the user's request to mint an NFT, and call the `MintNFT` of the `x/nft` module.
+* `BurnNFT` - Receive the user's request to burn an NFT, and call the `BurnNFT` of the `x/nft` module.
+* `UpdateNFT` - Receive the user's request to update an NFT, and call the `UpdateNFT` of the `x/nft` module.
+
+## Context
+
+NFTs are more than just crypto art, which is very helpful for accruing value to the Cosmos ecosystem. As a result, Cosmos Hub should implement NFT functions and enable a unified mechanism for storing and sending the ownership representative of NFTs as discussed in [Link](https://github.com/cosmos/cosmos-sdk/discussions/9065).
+
+As discussed in [#9065](https://github.com/cosmos/cosmos-sdk/discussions/9065), several potential solutions can be considered:
+
+* irismod/nft and modules/incubator/nft
+* CW721
+* DID NFTs
+* interNFT
+
+Since functions/use cases of NFTs are tightly connected with their logic, it is almost impossible to support all the NFTs' use cases in one Cosmos SDK module by defining and implementing different transaction types.
+
+Considering generic usage and compatibility of interchain protocols including IBC and Gravity Bridge, it is preferred to have a generic NFT module design which handles the generic NFTs logic.
+This design idea can enable composability that application-specific functions should be managed by other modules on Cosmos Hub or on other Zones by importing the NFT module.
+
+The current design is based on the work done by [IRISnet team](https://github.com/irisnet/irismod/tree/master/modules/nft) and an older implementation in the [Cosmos repository](https://github.com/cosmos/modules/tree/master/incubator/nft).
+
+## Decision
+
+We create a `x/nft` module, which contains the following functionality:
+
+* Store NFTs and track their ownership.
+* Expose `Keeper` interface for composing modules to transfer, mint and burn NFTs.
+* Expose external `Message` interface for users to transfer ownership of their NFTs.
+* Query NFTs and their supply information.
+
+The proposed module is a base module for NFT app logic. Its goal is to provide a common layer for storage, basic transfer functionality and IBC. The module should not be used as a standalone.
+Instead an app should create a specialized module to handle app specific logic (eg: NFT ID construction, royalty), user level minting and burning. Moreover an app specialized module should handle auxiliary data to support the app logic (eg indexes, ORM, business data).
+
+All data carried over IBC must be part of the `NFT` or `Class` type described below. The app specific NFT data should be encoded in `NFT.data` for cross-chain integrity. Other objects related to NFT, which are not important for integrity can be part of the app specific module.
+
+### Types
+
+We propose two main types:
+
+* `Class` -- describes NFT class. We can think about it as a smart contract address.
+* `NFT` -- object representing unique, non fungible asset. Each NFT is associated with a Class.
+
+#### Class
+
+NFT **Class** is comparable to an ERC-721 smart contract (provides description of a smart contract), under which a collection of NFTs can be created and managed.
+
+```protobuf
+message Class {
+ string id = 1;
+ string name = 2;
+ string symbol = 3;
+ string description = 4;
+ string uri = 5;
+ string uri_hash = 6;
+ google.protobuf.Any data = 7;
+}
+```
+
+* `id` is used as the primary index for storing the class; *required*
+* `name` is a descriptive name of the NFT class; *optional*
+* `symbol` is the symbol usually shown on exchanges for the NFT class; *optional*
+* `description` is a detailed description of the NFT class; *optional*
+* `uri` is a URI for the class metadata stored off chain. It should be a JSON file that contains metadata about the NFT class and NFT data schema ([OpenSea example](https://docs.opensea.io/docs/contract-level-metadata)); *optional*
+* `uri_hash` is a hash of the document pointed by uri; *optional*
+* `data` is app specific metadata of the class; *optional*
+
+#### NFT
+
+We define a general model for `NFT` as follows.
+
+```protobuf
+message NFT {
+ string class_id = 1;
+ string id = 2;
+ string uri = 3;
+ string uri_hash = 4;
+ google.protobuf.Any data = 10;
+}
+```
+
+* `class_id` is the identifier of the NFT class where the NFT belongs; *required*
+
+* `id` is an identifier of the NFT, unique within the scope of its class. It is specified by the creator of the NFT and may be expanded to use DID in the future. `class_id` combined with `id` uniquely identifies an NFT and is used as the primary index for storing the NFT; *required*
+
+ ```text
+ {class_id}/{id} --> NFT (bytes)
+ ```
+
+* `uri` is a URI for the NFT metadata stored off chain. Should point to a JSON file that contains metadata about this NFT (Ref: [ERC721 standard and OpenSea extension](https://docs.opensea.io/docs/metadata-standards)); *required*
+
+* `uri_hash` is a hash of the document pointed by uri; *optional*
+
+* `data` is an app specific data of the NFT. CAN be used by composing modules to specify additional properties of the NFT; *optional*
+
+This ADR doesn't specify values that `data` can take; however, best practices recommend upper-level NFT modules clearly specify their contents. Although the value of this field doesn't provide the additional context required to manage NFT records, which means that the field can technically be removed from the specification, the field's existence allows basic informational/UI functionality.
+
+### `Keeper` Interface
+
+```go expandable
+type Keeper interface {
+ NewClass(ctx sdk.Context,class Class)
+
+UpdateClass(ctx sdk.Context,class Class)
+
+Mint(ctx sdk.Context,nft NFT,receiver sdk.AccAddress) // updates totalSupply
+ BatchMint(ctx sdk.Context, tokens []NFT,receiver sdk.AccAddress)
+
+error
+
+ Burn(ctx sdk.Context, classId string, nftId string) // updates totalSupply
+ BatchBurn(ctx sdk.Context, classID string, nftIDs []string)
+
+error
+
+ Update(ctx sdk.Context, nft NFT)
+
+BatchUpdate(ctx sdk.Context, tokens []NFT)
+
+error
+
+ Transfer(ctx sdk.Context, classId string, nftId string, receiver sdk.AccAddress)
+
+BatchTransfer(ctx sdk.Context, classID string, nftIDs []string, receiver sdk.AccAddress)
+
+error
+
+ GetClass(ctx sdk.Context, classId string)
+
+Class
+ GetClasses(ctx sdk.Context) []Class
+
+ GetNFT(ctx sdk.Context, classId string, nftId string)
+
+NFT
+ GetNFTsOfClassByOwner(ctx sdk.Context, classId string, owner sdk.AccAddress) []NFT
+ GetNFTsOfClass(ctx sdk.Context, classId string) []NFT
+
+ GetOwner(ctx sdk.Context, classId string, nftId string)
+
+sdk.AccAddress
+ GetBalance(ctx sdk.Context, classId string, owner sdk.AccAddress)
+
+uint64
+ GetTotalSupply(ctx sdk.Context, classId string)
+
+uint64
+}
+```
+
+Other business logic implementations should be defined in composing modules that import `x/nft` and use its `Keeper`.
+
+### `Msg` Service
+
+```protobuf expandable
+service Msg {
+ rpc Send(MsgSend) returns (MsgSendResponse);
+}
+
+message MsgSend {
+ string class_id = 1;
+ string id = 2;
+ string sender = 3;
+ string reveiver = 4;
+}
+message MsgSendResponse {}
+```
+
+`MsgSend` can be used to transfer the ownership of an NFT to another address.
+
+The implementation outline of the server is as follows:
+
+```go expandable
+type msgServer struct{
+ k Keeper
+}
+
+func (m msgServer)
+
+Send(ctx context.Context, msg *types.MsgSend) (*types.MsgSendResponse, error) {
+ // check current ownership
+ assertEqual(msg.Sender, m.k.GetOwner(msg.ClassId, msg.Id))
+
+ // transfer ownership
+ m.k.Transfer(msg.ClassId, msg.Id, msg.Receiver)
+
+return &types.MsgSendResponse{
+}, nil
+}
+```
+
+The query service methods for the `x/nft` module are:
+
+```protobuf expandable
+service Query {
+ // Balance queries the number of NFTs of a given class owned by the owner, same as balanceOf in ERC721
+ rpc Balance(QueryBalanceRequest) returns (QueryBalanceResponse) {
+ option (google.api.http).get = "/cosmos/nft/v1beta1/balance/{owner}/{class_id}";
+ }
+
+ // Owner queries the owner of the NFT based on its class and id, same as ownerOf in ERC721
+ rpc Owner(QueryOwnerRequest) returns (QueryOwnerResponse) {
+ option (google.api.http).get = "/cosmos/nft/v1beta1/owner/{class_id}/{id}";
+ }
+
+ // Supply queries the number of NFTs from the given class, same as totalSupply of ERC721.
+ rpc Supply(QuerySupplyRequest) returns (QuerySupplyResponse) {
+ option (google.api.http).get = "/cosmos/nft/v1beta1/supply/{class_id}";
+ }
+
+ // NFTs queries all NFTs of a given class or owner,choose at least one of the two, similar to tokenByIndex in ERC721Enumerable
+ rpc NFTs(QueryNFTsRequest) returns (QueryNFTsResponse) {
+ option (google.api.http).get = "/cosmos/nft/v1beta1/nfts";
+ }
+
+ // NFT queries an NFT based on its class and id.
+ rpc NFT(QueryNFTRequest) returns (QueryNFTResponse) {
+ option (google.api.http).get = "/cosmos/nft/v1beta1/nfts/{class_id}/{id}";
+ }
+
+ // Class queries an NFT class based on its id
+ rpc Class(QueryClassRequest) returns (QueryClassResponse) {
+ option (google.api.http).get = "/cosmos/nft/v1beta1/classes/{class_id}";
+ }
+
+ // Classes queries all NFT classes
+ rpc Classes(QueryClassesRequest) returns (QueryClassesResponse) {
+ option (google.api.http).get = "/cosmos/nft/v1beta1/classes";
+ }
+}
+
+// QueryBalanceRequest is the request type for the Query/Balance RPC method
+message QueryBalanceRequest {
+ string class_id = 1;
+ string owner = 2;
+}
+
+// QueryBalanceResponse is the response type for the Query/Balance RPC method
+message QueryBalanceResponse {
+ uint64 amount = 1;
+}
+
+// QueryOwnerRequest is the request type for the Query/Owner RPC method
+message QueryOwnerRequest {
+ string class_id = 1;
+ string id = 2;
+}
+
+// QueryOwnerResponse is the response type for the Query/Owner RPC method
+message QueryOwnerResponse {
+ string owner = 1;
+}
+
+// QuerySupplyRequest is the request type for the Query/Supply RPC method
+message QuerySupplyRequest {
+ string class_id = 1;
+}
+
+// QuerySupplyResponse is the response type for the Query/Supply RPC method
+message QuerySupplyResponse {
+ uint64 amount = 1;
+}
+
+// QueryNFTstRequest is the request type for the Query/NFTs RPC method
+message QueryNFTsRequest {
+ string class_id = 1;
+ string owner = 2;
+ cosmos.base.query.v1beta1.PageRequest pagination = 3;
+}
+
+// QueryNFTsResponse is the response type for the Query/NFTs RPC methods
+message QueryNFTsResponse {
+ repeated cosmos.nft.v1beta1.NFT nfts = 1;
+ cosmos.base.query.v1beta1.PageResponse pagination = 2;
+}
+
+// QueryNFTRequest is the request type for the Query/NFT RPC method
+message QueryNFTRequest {
+ string class_id = 1;
+ string id = 2;
+}
+
+// QueryNFTResponse is the response type for the Query/NFT RPC method
+message QueryNFTResponse {
+ cosmos.nft.v1beta1.NFT nft = 1;
+}
+
+// QueryClassRequest is the request type for the Query/Class RPC method
+message QueryClassRequest {
+ string class_id = 1;
+}
+
+// QueryClassResponse is the response type for the Query/Class RPC method
+message QueryClassResponse {
+ cosmos.nft.v1beta1.Class class = 1;
+}
+
+// QueryClassesRequest is the request type for the Query/Classes RPC method
+message QueryClassesRequest {
+ // pagination defines an optional pagination for the request.
+ cosmos.base.query.v1beta1.PageRequest pagination = 1;
+}
+
+// QueryClassesResponse is the response type for the Query/Classes RPC method
+message QueryClassesResponse {
+ repeated cosmos.nft.v1beta1.Class classes = 1;
+ cosmos.base.query.v1beta1.PageResponse pagination = 2;
+}
+```
+
+### Interoperability
+
+Interoperability is all about reusing assets between modules and chains. The former one is achieved by ADR-33: Protobuf client - server communication. At the time of writing ADR-33 is not finalized. The latter is achieved by IBC. Here we will focus on the IBC side.
+IBC is implemented per module. Here, we aligned that NFTs will be recorded and managed in the x/nft. This requires creation of a new IBC standard and implementation of it.
+
+For IBC interoperability, NFT custom modules MUST use the NFT object type understood by the IBC client. So, for x/nft interoperability, custom NFT implementations (example: x/cryptokitty) should use the canonical x/nft module and proxy all NFT balance keeping functionality to x/nft or else re-implement all functionality using the NFT object type understood by the IBC client. In other words: x/nft becomes the standard NFT registry for all Cosmos NFTs (example: x/cryptokitty will register a kitty NFT in x/nft and use x/nft for book keeping). This was [discussed](https://github.com/cosmos/cosmos-sdk/discussions/9065#discussioncomment-873206) in the context of using x/bank as a general asset balance book. Not using x/nft will require implementing another module for IBC.
+
+## Consequences
+
+### Backward Compatibility
+
+No backward incompatibilities.
+
+### Forward Compatibility
+
+This specification conforms to the ERC-721 smart contract specification for NFT identifiers. Note that ERC-721 defines uniqueness based on (contract address, uint256 tokenId), and we conform to this implicitly because a single module is currently aimed to track NFT identifiers. Note: use of the (mutable) data field to determine uniqueness is not safe.
+
+### Positive
+
+* NFT identifiers available on Cosmos Hub.
+* Ability to build different NFT modules for the Cosmos Hub, e.g., ERC-721.
+* NFT module which supports interoperability with IBC and other cross-chain infrastructures like Gravity Bridge
+
+### Negative
+
+* New IBC app is required for x/nft
+* CW721 adapter is required
+
+### Neutral
+
+* Other functions need more modules. For example, a custody module is needed for NFT trading function, a collectible module is needed for defining NFT properties.
+
+## Further Discussions
+
+For other kinds of applications on the Hub, more app-specific modules can be developed in the future:
+
+* `x/nft/custody`: custody of NFTs to support trading functionality.
+* `x/nft/marketplace`: selling and buying NFTs using sdk.Coins.
+* `x/fractional`: a module to split an ownership of an asset (NFT or other assets) for multiple stakeholders. `x/group` should work for most of the cases.
+
+Other networks in the Cosmos ecosystem could design and implement their own NFT modules for specific NFT applications and use cases.
+
+## References
+
+* Initial discussion: [Link](https://github.com/cosmos/cosmos-sdk/discussions/9065)
+* x/nft: initialize module: [Link](https://github.com/cosmos/cosmos-sdk/pull/9174)
+* [ADR 033](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-033-protobuf-inter-module-comm.md)
diff --git a/sdk/next/build/architecture/adr-044-protobuf-updates-guidelines.mdx b/sdk/next/build/architecture/adr-044-protobuf-updates-guidelines.mdx
new file mode 100644
index 000000000..b906e02c9
--- /dev/null
+++ b/sdk/next/build/architecture/adr-044-protobuf-updates-guidelines.mdx
@@ -0,0 +1,134 @@
+---
+title: 'ADR 044: Guidelines for Updating Protobuf Definitions'
+description: >-
+ 28.06.2021: Initial Draft 02.12.2021: Add Since: comment for new fields
+ 21.07.2022: Remove the rule of no new Msg in the same proto version.
+---
+
+## Changelog
+
+* 28.06.2021: Initial Draft
+* 02.12.2021: Add `Since:` comment for new fields
+* 21.07.2022: Remove the rule of no new `Msg` in the same proto version.
+
+## Status
+
+Draft
+
+## Abstract
+
+This ADR provides guidelines and recommended practices when updating Protobuf definitions. These guidelines are targeting module developers.
+
+## Context
+
+The Cosmos SDK maintains a set of [Protobuf definitions](https://github.com/cosmos/cosmos-sdk/tree/main/proto/cosmos). It is important to correctly design Protobuf definitions to avoid any breaking changes within the same version. The reasons are to not break tooling (including indexers and explorers), wallets and other third-party integrations.
+
+When making changes to these Protobuf definitions, the Cosmos SDK currently only follows [Buf's](https://docs.buf.build/) recommendations. We noticed however that Buf's recommendations might still result in breaking changes in the SDK in some cases. For example:
+
+* Adding fields to `Msg`s. Adding fields is not a Protobuf spec-breaking operation. However, when adding new fields to `Msg`s, the unknown field rejection will throw an error when sending the new `Msg` to an older node.
+* Marking fields as `reserved`. Protobuf proposes the `reserved` keyword for removing fields without the need to bump the package version. However, by doing so, client backwards compatibility is broken as Protobuf doesn't generate anything for `reserved` fields. See [#9446](https://github.com/cosmos/cosmos-sdk/issues/9446) for more details on this issue.
+
+Moreover, module developers often face other questions around Protobuf definitions such as "Can I rename a field?" or "Can I deprecate a field?" This ADR aims to answer all these questions by providing clear guidelines about allowed updates for Protobuf definitions.
+
+## Decision
+
+We decide to keep [Buf's](https://docs.buf.build/) recommendations with the following exceptions:
+
+* `UNARY_RPC`: the Cosmos SDK currently does not support streaming RPCs.
+* `COMMENT_FIELD`: the Cosmos SDK allows fields with no comments.
+* `SERVICE_SUFFIX`: we use the `Query` and `Msg` service naming convention, which doesn't use the `-Service` suffix.
+* `PACKAGE_VERSION_SUFFIX`: some packages, such as `cosmos.crypto.ed25519`, don't use a version suffix.
+* `RPC_REQUEST_STANDARD_NAME`: Requests for the `Msg` service don't have the `-Request` suffix to keep backwards compatibility.
+
+On top of Buf's recommendations we add the following guidelines that are specific to the Cosmos SDK.
+
+### Updating Protobuf Definition Without Bumping Version
+
+#### 1. Module developers MAY add new Protobuf definitions
+
+Module developers MAY add new `message`s, new `Service`s, new `rpc` endpoints, and new fields to existing messages. This recommendation follows the Protobuf specification, but is added in this document for clarity, as the SDK requires one additional change.
+
+The SDK requires the Protobuf comment of the new addition to contain one line with the following format:
+
+```protobuf
+// Since: cosmos-sdk {, ...}
+```
+
+Where each `version` denotes a minor ("0.45") or patch ("0.44.5") version from which the field is available. This will greatly help client libraries, who can optionally use reflection or custom code generation to show/hide these fields depending on the targeted node version.
+
+As examples, the following comments are valid:
+
+```protobuf
+// Since: cosmos-sdk 0.44
+
+// Since: cosmos-sdk 0.42.11, 0.44.5
+```
+
+and the following ones are NOT valid:
+
+```protobuf
+// Since cosmos-sdk v0.44
+
+// since: cosmos-sdk 0.44
+
+// Since: cosmos-sdk 0.42.11 0.44.5
+
+// Since: Cosmos SDK 0.42.11, 0.44.5
+```
+
+#### 2. Fields MAY be marked as `deprecated`, and nodes MAY implement a protocol-breaking change for handling these fields
+
+Protobuf supports the [`deprecated` field option](https://developers.google.com/protocol-buffers/docs/proto#options), and this option MAY be used on any field, including `Msg` fields. If a node handles a Protobuf message with a non-empty deprecated field, the node MAY change its behavior upon processing it, even in a protocol-breaking way. When possible, the node MUST handle backwards compatibility without breaking the consensus (unless we increment the proto version).
+
+As an example, the Cosmos SDK v0.42 to v0.43 update contained two Protobuf-breaking changes, listed below. Instead of bumping the package versions from `v1beta1` to `v1`, the SDK team decided to follow this guideline, by reverting the breaking changes, marking those changes as deprecated, and modifying the node implementation when processing messages with deprecated fields. More specifically:
+
+* The Cosmos SDK recently removed support for [time-based software upgrades](https://github.com/cosmos/cosmos-sdk/pull/8849). As such, the `time` field has been marked as deprecated in `cosmos.upgrade.v1beta1.Plan`. Moreover, the node will reject any proposal containing an upgrade Plan whose `time` field is non-empty.
+* The Cosmos SDK now supports [governance split votes](/sdk/v0.50/build/architecture/adr-037-gov-split-vote). When querying for votes, the returned `cosmos.gov.v1beta1.Vote` message has its `option` field (used for 1 vote option) deprecated in favor of its `options` field (allowing multiple vote options). Whenever possible, the SDK still populates the deprecated `option` field, that is, if and only if the `len(options) == 1` and `options[0].Weight == 1.0`.
+
+#### 3. Fields MUST NOT be renamed
+
+Whereas the official Protobuf recommendations do not prohibit renaming fields, as it does not break the Protobuf binary representation, the SDK explicitly forbids renaming fields in Protobuf structs. The main reason for this choice is to avoid introducing breaking changes for clients, which often rely on hard-coded fields from generated types. Moreover, renaming fields will lead to client-breaking JSON representations of Protobuf definitions, used in REST endpoints and in the CLI.
+
+### Incrementing Protobuf Package Version
+
+TODO, needs architecture review. Some topics:
+
+* Bumping versions frequency
+* When bumping versions, should the Cosmos SDK support both versions?
+ * i.e. v1beta1 -> v1, should we have two folders in the Cosmos SDK, and handlers for both versions?
+* mention ADR-023 Protobuf naming
+
+## Consequences
+
+> This section describes the resulting context, after applying the decision. All consequences should be listed here, not just the "positive" ones. A particular decision may have positive, negative, and neutral consequences, but all of them affect the team and project in the future.
+
+### Backwards Compatibility
+
+> All ADRs that introduce backwards incompatibilities must include a section describing these incompatibilities and their severity. The ADR must explain how the author proposes to deal with these incompatibilities. ADR submissions without a sufficient backwards compatibility treatise may be rejected outright.
+
+### Positive
+
+* less pain to tool developers
+* more compatibility in the ecosystem
+* ...
+
+### Negative
+
+`{negative consequences}`
+
+### Neutral
+
+* more rigor in Protobuf review
+
+## Further Discussions
+
+This ADR is still in the DRAFT stage, and the "Incrementing Protobuf Package Version" will be filled in once we make a decision on how to correctly do it.
+
+## Test Cases \[optional]
+
+Test cases for an implementation are mandatory for ADRs that are affecting consensus changes. Other ADRs can choose to include links to test cases if applicable.
+
+## References
+
+* [#9445](https://github.com/cosmos/cosmos-sdk/issues/9445) Release proto definitions v1
+* [#9446](https://github.com/cosmos/cosmos-sdk/issues/9446) Address v1beta1 proto breaking changes
diff --git a/sdk/next/build/architecture/adr-045-check-delivertx-middlewares.mdx b/sdk/next/build/architecture/adr-045-check-delivertx-middlewares.mdx
new file mode 100644
index 000000000..72b2d6522
--- /dev/null
+++ b/sdk/next/build/architecture/adr-045-check-delivertx-middlewares.mdx
@@ -0,0 +1,344 @@
+---
+description: >-
+ 20.08.2021: Initial draft. 07.12.2021: Update tx.Handler interface (\#10693).
+ 17.05.2022: ADR is abandoned, as middlewares are deemed too hard to reason
+ about.
+---
+
+## Changelog
+
+* 20.08.2021: Initial draft.
+* 07.12.2021: Update `tx.Handler` interface ([#10693](https://github.com/cosmos/cosmos-sdk/pull/10693)).
+* 17.05.2022: ADR is abandoned, as middlewares are deemed too hard to reason about.
+
+## Status
+
+ABANDONED. Replacement is being discussed in [#11955](https://github.com/cosmos/cosmos-sdk/issues/11955).
+
+## Abstract
+
+This ADR replaces the current BaseApp `runTx` and antehandlers design with a middleware-based design.
+
+## Context
+
+BaseApp's implementation of ABCI `{Check,Deliver}Tx()` and its own `Simulate()` method call the `runTx` method under the hood, which first runs antehandlers, then executes `Msg`s. However, the [transaction Tips](https://github.com/cosmos/cosmos-sdk/issues/9406) and [refunding unused gas](https://github.com/cosmos/cosmos-sdk/issues/2150) use cases require custom logic to be run after the `Msg`s execution. There is currently no way to achieve this.
+
+A naive solution would be to add post-`Msg` hooks to BaseApp. However, the Cosmos SDK team thinks in parallel about the bigger picture of making app wiring simpler ([#9182](https://github.com/cosmos/cosmos-sdk/discussions/9182)), which includes making BaseApp more lightweight and modular.
+
+## Decision
+
+We decide to transform Baseapp's implementation of ABCI `{Check,Deliver}Tx` and its own `Simulate` methods to use a middleware-based design.
+
+The two following interfaces are the base of the middleware design, and are defined in `types/tx`:
+
+```go
+type Handler interface {
+ CheckTx(ctx context.Context, req Request, checkReq RequestCheckTx) (Response, ResponseCheckTx, error)
+
+DeliverTx(ctx context.Context, req Request) (Response, error)
+
+SimulateTx(ctx context.Context, req Request (Response, error)
+}
+
+type Middleware func(Handler)
+
+Handler
+```
+
+where we define the following arguments and return types:
+
+```go expandable
+type Request struct {
+ Tx sdk.Tx
+ TxBytes []byte
+}
+
+type Response struct {
+ GasWanted uint64
+ GasUsed uint64
+ // MsgResponses is an array containing each Msg service handler's response
+ // type, packed in an Any. This will get proto-serialized into the `Data` field
+ // in the ABCI Check/DeliverTx responses.
+ MsgResponses []*codectypes.Any
+ Log string
+ Events []abci.Event
+}
+
+type RequestCheckTx struct {
+ Type abci.CheckTxType
+}
+
+type ResponseCheckTx struct {
+ Priority int64
+}
+```
+
+Please note that because CheckTx handles separate logic related to mempool prioritization, its signature is different from that of DeliverTx and SimulateTx.
+
+BaseApp holds a reference to a `tx.Handler`:
+
+```go
+type BaseApp struct {
+ // other fields
+ txHandler tx.Handler
+}
+```
+
+Baseapp's ABCI `{Check,Deliver}Tx()` and `Simulate()` methods simply call `app.txHandler.{Check,Deliver,Simulate}Tx()` with the relevant arguments. For example, for `DeliverTx`:
+
+```go expandable
+func (app *BaseApp)
+
+DeliverTx(req abci.RequestDeliverTx)
+
+abci.ResponseDeliverTx {
+ var abciRes abci.ResponseDeliverTx
+ ctx := app.getContextForTx(runTxModeDeliver, req.Tx)
+
+res, err := app.txHandler.DeliverTx(ctx, tx.Request{
+ TxBytes: req.Tx
+})
+ if err != nil {
+ abciRes = sdkerrors.ResponseDeliverTx(err, uint64(res.GasUsed), uint64(res.GasWanted), app.trace)
+
+return abciRes
+}
+
+abciRes, err = convertTxResponseToDeliverTx(res)
+ if err != nil {
+ return sdkerrors.ResponseDeliverTx(err, uint64(res.GasUsed), uint64(res.GasWanted), app.trace)
+}
+
+return abciRes
+}
+
+// convertTxResponseToDeliverTx converts a tx.Response into a abci.ResponseDeliverTx.
+func convertTxResponseToDeliverTx(txRes tx.Response) (abci.ResponseDeliverTx, error) {
+ data, err := makeABCIData(txRes)
+ if err != nil {
+ return abci.ResponseDeliverTx{
+}, nil
+}
+
+return abci.ResponseDeliverTx{
+ Data: data,
+ Log: txRes.Log,
+ Events: txRes.Events,
+}, nil
+}
+
+// makeABCIData generates the Data field to be sent to ABCI Check/DeliverTx.
+func makeABCIData(txRes tx.Response) ([]byte, error) {
+ return proto.Marshal(&sdk.TxMsgData{
+ MsgResponses: txRes.MsgResponses
+})
+}
+```
+
+The implementations are similar for `BaseApp.CheckTx` and `BaseApp.Simulate`.
+
+`baseapp.txHandler`'s three methods' implementations can obviously be monolithic functions, but for modularity we propose a middleware composition design, where a middleware is simply a function that takes a `tx.Handler`, and returns another `tx.Handler` wrapped around the previous one.
+
+### Implementing a Middleware
+
+In practice, middlewares are created by a Go function that takes as arguments some parameters needed for the middleware, and returns a `tx.Middleware`.
+
+For example, for creating an arbitrary `MyMiddleware`, we can implement:
+
+```go expandable
+// myTxHandler is the tx.Handler of this middleware. Note that it holds a
+// reference to the next tx.Handler in the stack.
+type myTxHandler struct {
+ // next is the next tx.Handler in the middleware stack.
+ next tx.Handler
+ // some other fields that are relevant to the middleware can be added here
+}
+
+// NewMyMiddleware returns a middleware that does this and that.
+func NewMyMiddleware(arg1, arg2)
+
+tx.Middleware {
+ return func (txh tx.Handler)
+
+tx.Handler {
+ return myTxHandler{
+ next: txh,
+ // optionally, set arg1, arg2... if they are needed in the middleware
+}
+
+}
+}
+
+// Assert myTxHandler is a tx.Handler.
+var _ tx.Handler = myTxHandler{
+}
+
+func (h myTxHandler)
+
+CheckTx(ctx context.Context, req Request, checkReq RequestcheckTx) (Response, ResponseCheckTx, error) {
+ // CheckTx specific pre-processing logic
+
+ // run the next middleware
+ res, checkRes, err := txh.next.CheckTx(ctx, req, checkReq)
+
+ // CheckTx specific post-processing logic
+
+ return res, checkRes, err
+}
+
+func (h myTxHandler)
+
+DeliverTx(ctx context.Context, req Request) (Response, error) {
+ // DeliverTx specific pre-processing logic
+
+ // run the next middleware
+ res, err := txh.next.DeliverTx(ctx, tx, req)
+
+ // DeliverTx specific post-processing logic
+
+ return res, err
+}
+
+func (h myTxHandler)
+
+SimulateTx(ctx context.Context, req Request) (Response, error) {
+ // SimulateTx specific pre-processing logic
+
+ // run the next middleware
+ res, err := txh.next.SimulateTx(ctx, tx, req)
+
+ // SimulateTx specific post-processing logic
+
+ return res, err
+}
+```
+
+### Composing Middlewares
+
+While BaseApp simply holds a reference to a `tx.Handler`, this `tx.Handler` itself is defined using a middleware stack. The Cosmos SDK exposes a base (i.e. innermost) `tx.Handler` called `RunMsgsTxHandler`, which executes messages.
+
+Then, the app developer can compose multiple middlewares on top of the base `tx.Handler`. Each middleware can run pre-and-post-processing logic around its next middleware, as described in the section above. Conceptually, as an example, given the middlewares `A`, `B`, and `C` and the base `tx.Handler` `H` the stack looks like:
+
+```text
+A.pre
+ B.pre
+ C.pre
+ H # The base tx.handler, for example `RunMsgsTxHandler`
+ C.post
+ B.post
+A.post
+```
+
+We define a `ComposeMiddlewares` function for composing middlewares. It takes the base handler as first argument, and middlewares in the "outer to inner" order. For the above stack, the final `tx.Handler` is:
+
+```go
+txHandler := middleware.ComposeMiddlewares(H, A, B, C)
+```
+
+The middleware is set in BaseApp via its `SetTxHandler` setter:
+
+```go
+// simapp/app.go
+ txHandler := middleware.ComposeMiddlewares(...)
+
+app.SetTxHandler(txHandler)
+```
+
+The app developer can define their own middlewares, or use the Cosmos SDK's pre-defined middlewares from `middleware.NewDefaultTxHandler()`.
+
+### Middlewares Maintained by the Cosmos SDK
+
+While the app developer can define and compose the middlewares of their choice, the Cosmos SDK provides a set of middlewares that caters for the ecosystem's most common use cases. These middlewares are:
+
+| Middleware | Description |
+| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| RunMsgsTxHandler | This is the base `tx.Handler`. It replaces the old baseapp's `runMsgs`, and executes a transaction's `Msg`s. |
+| TxDecoderMiddleware | This middleware takes in transaction raw bytes, and decodes them into a `sdk.Tx`. It replaces the `baseapp.txDecoder` field, so that BaseApp stays as thin as possible. Since most middlewares read the contents of the `sdk.Tx`, the TxDecoderMiddleware should be run first in the middleware stack. |
+| `{Antehandlers}` | Each antehandler is converted to its own middleware. These middlewares perform signature verification, fee deductions and other validations on the incoming transaction. |
+| IndexEventsTxMiddleware | This is a simple middleware that chooses which events to index in Tendermint. Replaces `baseapp.indexEvents` (which unfortunately still exists in baseapp too, because it's used to index Begin/EndBlock events) |
+| RecoveryTxMiddleware | This middleware recovers from panics. It replaces baseapp.runTx's panic recovery described in [ADR-022](/sdk/v0.50/build/architecture/adr-022-custom-panic-handling). |
+| GasTxMiddleware | This replaces the [`Setup`](https://github.com/cosmos/cosmos-sdk/blob/v0.43.0/x/auth/ante/setup.go) Antehandler. It sets a GasMeter on sdk.Context. Note that before, GasMeter was set on sdk.Context inside the antehandlers, and there was some mess around the fact that antehandlers had their own panic recovery system so that the GasMeter could be read by baseapp's recovery system. Now, this mess is all removed: one middleware sets GasMeter, another one handles recovery. |
+
+### Similarities and Differences between Antehandlers and Middlewares
+
+The middleware-based design builds upon the existing antehandlers design described in [ADR-010](/sdk/v0.50/build/architecture/adr-010-modular-antehandler). Even though the final decision of ADR-010 was to go with the "Simple Decorators" approach, the middleware design is actually very similar to the other [Decorator Pattern](/sdk/v0.50/build/architecture/adr-010-modular-antehandler#decorator-pattern) proposal, also used in [weave](https://github.com/iov-one/weave).
+
+#### Similarities with Antehandlers
+
+* Designed as chaining/composing small modular pieces.
+* Allow code reuse for `{Check,Deliver}Tx` and for `Simulate`.
+* Set up in `app.go`, and easily customizable by app developers.
+* Order is important.
+
+#### Differences with Antehandlers
+
+* The Antehandlers are run before `Msg` execution, whereas middlewares can run before and after.
+* The middleware approach uses separate methods for `{Check,Deliver,Simulate}Tx`, whereas the antehandlers pass a `simulate bool` flag and use the `sdkCtx.Is{Check,Recheck}Tx()` flags to determine in which transaction mode we are.
+* The middleware design lets each middleware hold a reference to the next middleware, whereas the antehandlers pass a `next` argument in the `AnteHandle` method.
+* The middleware design uses Go's standard `context.Context`, whereas the antehandlers use `sdk.Context`.
+
+## Consequences
+
+### Backwards Compatibility
+
+Since this refactor removes some logic away from BaseApp and into middlewares, it introduces API-breaking changes for app developers. Most notably, instead of creating an antehandler chain in `app.go`, app developers need to create a middleware stack:
+
+```diff expandable
+- anteHandler, err := ante.NewAnteHandler(
+- ante.HandlerOptions{
+- AccountKeeper: app.AccountKeeper,
+- BankKeeper: app.BankKeeper,
+- SignModeHandler: encodingConfig.TxConfig.SignModeHandler(),
+- FeegrantKeeper: app.FeeGrantKeeper,
+- SigGasConsumer: ante.DefaultSigVerificationGasConsumer,
+- },
+-)
++txHandler, err := authmiddleware.NewDefaultTxHandler(authmiddleware.TxHandlerOptions{
++ Debug: app.Trace(),
++ IndexEvents: indexEvents,
++ LegacyRouter: app.legacyRouter,
++ MsgServiceRouter: app.msgSvcRouter,
++ LegacyAnteHandler: anteHandler,
++ TxDecoder: encodingConfig.TxConfig.TxDecoder,
++})
+if err != nil {
+ panic(err)
+}
+- app.SetAnteHandler(anteHandler)
++ app.SetTxHandler(txHandler)
+```
+
+Other more minor API breaking changes will also be provided in the CHANGELOG. As usual, the Cosmos SDK will provide a release migration document for app developers.
+
+This ADR does not introduce any state-machine-, client- or CLI-breaking changes.
+
+### Positive
+
+* Allow custom logic to be run before and after `Msg` execution. This enables the [tips](https://github.com/cosmos/cosmos-sdk/issues/9406) and [gas refund](https://github.com/cosmos/cosmos-sdk/issues/2150) use cases, and possibly other ones.
+* Make BaseApp more lightweight, and defer complex logic to small modular components.
+* Separate paths for `{Check,Deliver,Simulate}Tx` with different return types. This allows for improved readability (replace `if sdkCtx.IsRecheckTx() && !simulate {...}` with separate methods) and more flexibility (e.g. returning a `priority` in `ResponseCheckTx`).
+
+### Negative
+
+* It is hard to understand at first glance the state updates that would occur after a middleware runs given the `sdk.Context` and `tx`. A middleware can have an arbitrary number of nested middleware being called within its function body, each possibly doing some pre- and post-processing before calling the next middleware on the chain. Thus to understand what a middleware is doing, one must also understand what every other middleware further along the chain is also doing, and the order of middlewares matters. This can get quite complicated to understand.
+* API-breaking changes for app developers.
+
+### Neutral
+
+No neutral consequences.
+
+## Further Discussions
+
+* [#9934](https://github.com/cosmos/cosmos-sdk/discussions/9934) Decomposing BaseApp's other ABCI methods into middlewares.
+* Replace `sdk.Tx` interface with the concrete protobuf Tx type in the `tx.Handler` methods signature.
+
+## Test Cases
+
+We update the existing baseapp and antehandlers tests to use the new middleware API, but keep the same test cases and logic, to avoid introducing regressions. Existing CLI tests will also be left untouched.
+
+For new middlewares, we introduce unit tests. Since middlewares are purposefully small, unit tests suit well.
+
+## References
+
+* Initial discussion: [Link](https://github.com/cosmos/cosmos-sdk/issues/9585)
+* Implementation: [#9920 BaseApp refactor](https://github.com/cosmos/cosmos-sdk/pull/9920) and [#10028 Antehandlers migration](https://github.com/cosmos/cosmos-sdk/pull/10028)
diff --git a/sdk/next/build/architecture/adr-046-module-params.mdx b/sdk/next/build/architecture/adr-046-module-params.mdx
new file mode 100644
index 000000000..eadad62e7
--- /dev/null
+++ b/sdk/next/build/architecture/adr-046-module-params.mdx
@@ -0,0 +1,191 @@
+---
+title: 'ADR 046: Module Params'
+description: 'Sep 22, 2021: Initial Draft'
+---
+
+## Changelog
+
+* Sep 22, 2021: Initial Draft
+
+## Status
+
+Proposed
+
+## Abstract
+
+This ADR describes an alternative approach to how Cosmos SDK modules use, interact,
+and store their respective parameters.
+
+## Context
+
+Currently, in the Cosmos SDK, modules that require the use of parameters use the
+`x/params` module. The `x/params` works by having modules define parameters,
+typically via a simple `Params` structure, and registering that structure in
+the `x/params` module via a unique `Subspace` that belongs to the respective
+registering module. The registering module then has unique access to its respective
+`Subspace`. Through this `Subspace`, the module can get and set its `Params`
+structure.
+
+In addition, the Cosmos SDK's `x/gov` module has direct support for changing
+parameters on-chain via a `ParamChangeProposal` governance proposal type, where
+stakeholders can vote on suggested parameter changes.
+
+There are various tradeoffs to using the `x/params` module to manage individual
+module parameters. Namely, managing parameters essentially comes for "free" in
+that developers only need to define the `Params` struct, the `Subspace`, and the
+various auxiliary functions, e.g. `ParamSetPairs`, on the `Params` type. However,
+there are some notable drawbacks. These drawbacks include the fact that parameters
+are serialized in state via JSON which is extremely slow. In addition, parameter
+changes via `ParamChangeProposal` governance proposals have no way of reading from
+or writing to state. In other words, it is currently not possible to have any
+state transitions in the application during an attempt to change param(s).
+
+## Decision
+
+We will build off of the alignment of `x/gov` and `x/authz` work per
+[#9810](https://github.com/cosmos/cosmos-sdk/pull/9810). Namely, module developers
+will create one or more unique parameter data structures that must be serialized
+to state. The Param data structures must implement `sdk.Msg` interface with respective
+Protobuf Msg service method which will validate and update the parameters with all
+necessary changes. The `x/gov` module via the work done in
+[#9810](https://github.com/cosmos/cosmos-sdk/pull/9810), will dispatch Param
+messages, which will be handled by Protobuf Msg services.
+
+Note, it is up to developers to decide how to structure their parameters and
+the respective `sdk.Msg` messages. Consider the parameters currently defined in
+`x/auth` using the `x/params` module for parameter management:
+
+```protobuf
+message Params {
+ uint64 max_memo_characters = 1;
+ uint64 tx_sig_limit = 2;
+ uint64 tx_size_cost_per_byte = 3;
+ uint64 sig_verify_cost_ed25519 = 4;
+ uint64 sig_verify_cost_secp256k1 = 5;
+}
+```
+
+Developers can choose to either create a unique data structure for every field in
+`Params` or they can create a single `Params` structure as outlined above in the
+case of `x/auth`.
+
+In the former approach, a `sdk.Msg` would need to be created for every single
+field along with a handler. This can become burdensome if there are a lot of
+parameter fields. In the latter case, there is only a single data structure and
+thus only a single message handler, however, the message handler might have to be
+more sophisticated in that it might need to understand what parameters are being
+changed vs what parameters are untouched.
+
+Params change proposals are made using the `x/gov` module. Execution is done through
+`x/authz` authorization to the root `x/gov` module's account.
+
+Continuing to use `x/auth`, we demonstrate a more complete example:
+
+```go expandable
+type Params struct {
+ MaxMemoCharacters uint64
+ TxSigLimit uint64
+ TxSizeCostPerByte uint64
+ SigVerifyCostED25519 uint64
+ SigVerifyCostSecp256k1 uint64
+}
+
+type MsgUpdateParams struct {
+ MaxMemoCharacters uint64
+ TxSigLimit uint64
+ TxSizeCostPerByte uint64
+ SigVerifyCostED25519 uint64
+ SigVerifyCostSecp256k1 uint64
+}
+
+type MsgUpdateParamsResponse struct {
+}
+
+func (ms msgServer)
+
+UpdateParams(goCtx context.Context, msg *types.MsgUpdateParams) (*types.MsgUpdateParamsResponse, error) {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+ // verification logic...
+
+ // persist params
+ params := ParamsFromMsg(msg)
+
+ms.SaveParams(ctx, params)
+
+return &types.MsgUpdateParamsResponse{
+}, nil
+}
+
+func ParamsFromMsg(msg *types.MsgUpdateParams)
+
+Params {
+ // ...
+}
+```
+
+A gRPC `Service` query should also be provided, for example:
+
+```protobuf expandable
+service Query {
+ // ...
+
+ rpc Params(QueryParamsRequest) returns (QueryParamsResponse) {
+    option (google.api.http).get = "/cosmos/{module}/v1beta1/params";
+ }
+}
+
+message QueryParamsResponse {
+ Params params = 1 [(gogoproto.nullable) = false];
+}
+```
+
+## Consequences
+
+As a result of implementing the module parameter methodology, we gain the ability
+for module parameter changes to be stateful and extensible to fit nearly every
+application's use case. We will be able to emit events (and trigger hooks registered
+to those events using the work proposed in [event hooks](https://github.com/cosmos/cosmos-sdk/discussions/9656)),
+call other Msg service methods or perform migration.
+In addition, there will be significant gains in performance when it comes to reading
+and writing parameters from and to state, especially if a specific set of parameters
+are read on a consistent basis.
+
+However, this methodology will require developers to implement more types and
+Msg service methods which can become burdensome if many parameters exist. In addition,
+developers are required to implement persistence logic for module parameters.
+However, this should be trivial.
+
+### Backwards Compatibility
+
+The new method for working with module parameters is naturally not backwards
+compatible with the existing `x/params` module. However, the `x/params` will
+remain in the Cosmos SDK and will be marked as deprecated with no additional
+functionality being added apart from potential bug fixes. Note, the `x/params`
+module may be removed entirely in a future release.
+
+### Positive
+
+* Module parameters are serialized more efficiently
+* Modules are able to react to parameter changes and perform additional actions.
+* Special events can be emitted, allowing hooks to be triggered.
+
+### Negative
+
+* Module parameters becomes slightly more burdensome for module developers:
+ * Modules are now responsible for persisting and retrieving parameter state
+ * Modules are now required to have unique message handlers to handle parameter
+ changes per unique parameter data structure.
+
+### Neutral
+
+* Requires [#9810](https://github.com/cosmos/cosmos-sdk/pull/9810) to be reviewed
+ and merged.
+
+{/* ## Further Discussions While an ADR is in the DRAFT or PROPOSED stage, this section should contain a summary of issues to be solved in future iterations (usually referencing comments from a pull-request discussion). Later, this section can optionally list ideas or improvements the author or reviewers found during the analysis of this ADR. */}
+
+## References
+
+* [Link](https://github.com/cosmos/cosmos-sdk/pull/9810)
+* [Link](https://github.com/cosmos/cosmos-sdk/issues/9438)
+* [Link](https://github.com/cosmos/cosmos-sdk/discussions/9913)
diff --git a/sdk/next/build/architecture/adr-047-extend-upgrade-plan.mdx b/sdk/next/build/architecture/adr-047-extend-upgrade-plan.mdx
new file mode 100644
index 000000000..dd4740e96
--- /dev/null
+++ b/sdk/next/build/architecture/adr-047-extend-upgrade-plan.mdx
@@ -0,0 +1,259 @@
+---
+title: 'ADR 047: Extend Upgrade Plan'
+description: >-
+ Nov, 23, 2021: Initial Draft May, 16, 2023: Proposal ABANDONED. prerun and
+ postrun are not necessary anymore and adding the artifacts brings minor
+ benefits.
+---
+
+## Changelog
+
+* Nov, 23, 2021: Initial Draft
+* May, 16, 2023: Proposal ABANDONED. `pre_run` and `post_run` are not necessary anymore and adding the `artifacts` brings minor benefits.
+
+## Status
+
+ABANDONED
+
+## Abstract
+
+This ADR expands the existing x/upgrade `Plan` proto message to include new fields for defining pre-run and post-run processes within upgrade tooling.
+It also defines a structure for providing downloadable artifacts involved in an upgrade.
+
+## Context
+
+The `upgrade` module in conjunction with Cosmovisor are designed to facilitate and automate a blockchain's transition from one version to another.
+
+Users submit a software upgrade governance proposal containing an upgrade `Plan`.
+The [Plan](https://github.com/cosmos/cosmos-sdk/blob/v0.44.5/proto/cosmos/upgrade/v1beta1/upgrade.proto#L12) currently contains the following fields:
+
+* `name`: A short string identifying the new version.
+* `height`: The chain height at which the upgrade is to be performed.
+* `info`: A string containing information about the upgrade.
+
+The `info` string can be anything.
+However, Cosmovisor will try to use the `info` field to automatically download a new version of the blockchain executable.
+For the auto-download to work, Cosmovisor expects it to be either a stringified JSON object (with a specific structure defined through documentation), or a URL that will return such JSON.
+The JSON object identifies URLs used to download the new blockchain executable for different platforms (OS and Architecture, e.g. "linux/amd64").
+Such a URL can either return the executable file directly or can return an archive containing the executable and possibly other assets.
+
+If the URL returns an archive, it is decompressed into `{DAEMON_HOME}/cosmovisor/{upgrade name}`.
+Then, if `{DAEMON_HOME}/cosmovisor/{upgrade name}/bin/{DAEMON_NAME}` does not exist, but `{DAEMON_HOME}/cosmovisor/{upgrade name}/{DAEMON_NAME}` does, the latter is copied to the former.
+If the URL returns something other than an archive, it is downloaded to `{DAEMON_HOME}/cosmovisor/{upgrade name}/bin/{DAEMON_NAME}`.
+
+If an upgrade height is reached and the new version of the executable isn't available, Cosmovisor will stop running.
+
+Both `DAEMON_HOME` and `DAEMON_NAME` are [environment variables used to configure Cosmovisor](https://github.com/cosmos/cosmos-sdk/blob/cosmovisor/v1.0.0/cosmovisor/README.md#command-line-arguments-and-environment-variables).
+
+Currently, there is no mechanism that makes Cosmovisor run a command after the upgraded chain has been restarted.
+
+The current upgrade process has this timeline:
+
+1. An upgrade governance proposal is submitted and approved.
+2. The upgrade height is reached.
+3. The `x/upgrade` module writes the `upgrade_info.json` file.
+4. The chain halts.
+5. Cosmovisor backs up the data directory (if set up to do so).
+6. Cosmovisor downloads the new executable (if not already in place).
+7. Cosmovisor executes the `${DAEMON_NAME} pre-upgrade`.
+8. Cosmovisor restarts the app using the new version and same args originally provided.
+
+## Decision
+
+### Protobuf Updates
+
+We will update the `x/upgrade.Plan` message for providing upgrade instructions.
+The upgrade instructions will contain a list of artifacts available for each platform.
+It allows for the definition of a pre-run and post-run commands.
+These commands are not consensus guaranteed; they will be executed by Cosmovisor (or other) during its upgrade handling.
+
+```protobuf
+message Plan {
+ // ... (existing fields)
+
+ UpgradeInstructions instructions = 6;
+}
+```
+
+The new `UpgradeInstructions instructions` field MUST be optional.
+
+```protobuf
+message UpgradeInstructions {
+ string pre_run = 1;
+ string post_run = 2;
+ repeated Artifact artifacts = 3;
+ string description = 4;
+}
+```
+
+All fields in the `UpgradeInstructions` are optional.
+
+* `pre_run` is a command to run prior to the upgraded chain restarting.
+ If defined, it will be executed after halting and downloading the new artifact but before restarting the upgraded chain.
+ The working directory this command runs from MUST be `{DAEMON_HOME}/cosmovisor/{upgrade name}`.
+ This command MUST behave the same as the current [pre-upgrade](https://github.com/cosmos/cosmos-sdk/blob/v0.44.5/docs/migrations/pre-upgrade.md) command.
+ It does not take in any command-line arguments and is expected to terminate with the following exit codes:
+
+  | Exit status code                                                                    | How it is handled in Cosmovisor                                                                                      |
+ | ---------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- |
+ | `0` | Assumes `pre-upgrade` command executed successfully and continues the upgrade. |
+ | `1` | Default exit code when `pre-upgrade` command has not been implemented. |
+ | `30` | `pre-upgrade` command was executed but failed. This fails the entire upgrade. |
+ | `31` | `pre-upgrade` command was executed but failed. But the command is retried until exit code `1` or `30` are returned. |
+ | If defined, then the app supervisors (e.g. Cosmovisor) MUST NOT run `app pre-run`. | |
+* `post_run` is a command to run after the upgraded chain has been started. If defined, this command MUST be only executed at most once by an upgrading node.
+ The output and exit code SHOULD be logged but SHOULD NOT affect the running of the upgraded chain.
+ The working directory this command runs from MUST be `{DAEMON_HOME}/cosmovisor/{upgrade name}`.
+* `artifacts` define items to be downloaded.
+ It SHOULD have only one entry per platform.
+* `description` contains human-readable information about the upgrade and might contain references to external resources.
+ It SHOULD NOT be used for structured processing information.
+
+```protobuf
+message Artifact {
+ string platform = 1;
+ string url = 2;
+ string checksum = 3;
+ string checksum_algo = 4;
+}
+```
+
+* `platform` is a required string that SHOULD be in the format `{OS}/{CPU}`, e.g. `"linux/amd64"`.
+ The string `"any"` SHOULD also be allowed.
+ An `Artifact` with a `platform` of `"any"` SHOULD be used as a fallback when a specific `{OS}/{CPU}` entry is not found.
+ That is, if an `Artifact` exists with a `platform` that matches the system's OS and CPU, that should be used;
+ otherwise, if an `Artifact` exists with a `platform` of `any`, that should be used;
+ otherwise no artifact should be downloaded.
+* `url` is a required URL string that MUST conform to [RFC 1738: Uniform Resource Locators](https://www.ietf.org/rfc/rfc1738.txt).
+ A request to this `url` MUST return either an executable file or an archive containing either `bin/{DAEMON_NAME}` or `{DAEMON_NAME}`.
+ The URL should not contain checksum - it should be specified by the `checksum` attribute.
+* `checksum` is a checksum of the expected result of a request to the `url`.
+ It is not required, but is recommended.
+ If provided, it MUST be a hex encoded checksum string.
+ Tools utilizing these `UpgradeInstructions` MUST fail if a `checksum` is provided but is different from the checksum of the result returned by the `url`.
+* `checksum_algo` is a string identifying the algorithm used to generate the `checksum`.
+ Recommended algorithms: `sha256`, `sha512`.
+ Algorithms also supported (but not recommended): `sha1`, `md5`.
+ If a `checksum` is provided, a `checksum_algo` MUST also be provided.
+
+A `url` is not required to contain a `checksum` query parameter.
+If the `url` does contain a `checksum` query parameter, the `checksum` and `checksum_algo` fields MUST also be populated, and their values MUST match the value of the query parameter.
+For example, if the `url` is `"https://example.com?checksum=md5:d41d8cd98f00b204e9800998ecf8427e"`, then the `checksum` field must be `"d41d8cd98f00b204e9800998ecf8427e"` and the `checksum_algo` field must be `"md5"`.
+
+### Upgrade Module Updates
+
+If an upgrade `Plan` does not use the new `UpgradeInstructions` field, existing functionality will be maintained.
+The parsing of the `info` field as either a URL or `binaries` JSON will be deprecated.
+During validation, if the `info` field is used as such, a warning will be issued, but not an error.
+
+We will update the creation of the `upgrade-info.json` file to include the `UpgradeInstructions`.
+
+We will update the optional validation available via CLI to account for the new `Plan` structure.
+We will add the following validation:
+
+1. If `UpgradeInstructions` are provided:
+ 1. There MUST be at least one entry in `artifacts`.
+ 2. All of the `artifacts` MUST have a unique `platform`.
+ 3. For each `Artifact`, if the `url` contains a `checksum` query parameter:
+ 1. The `checksum` query parameter value MUST be in the format of `{checksum_algo}:{checksum}`.
+ 2. The `{checksum}` from the query parameter MUST equal the `checksum` provided in the `Artifact`.
+ 3. The `{checksum_algo}` from the query parameter MUST equal the `checksum_algo` provided in the `Artifact`.
+2. The following validation is currently done using the `info` field. We will apply similar validation to the `UpgradeInstructions`.
+ For each `Artifact`:
+ 1. The `platform` MUST have the format `{OS}/{CPU}` or be `"any"`.
+ 2. The `url` field MUST NOT be empty.
+ 3. The `url` field MUST be a proper URL.
+ 4. A `checksum` MUST be provided either in the `checksum` field or as a query parameter in the `url`.
+ 5. If the `checksum` field has a value and the `url` also has a `checksum` query parameter, the two values MUST be equal.
+ 6. The `url` MUST return either a file or an archive containing either `bin/{DAEMON_NAME}` or `{DAEMON_NAME}`.
+ 7. If a `checksum` is provided (in the field or as a query param), the checksum of the result of the `url` MUST equal the provided checksum.
+
+Downloading of an `Artifact` will happen the same way that URLs from `info` are currently downloaded.
+
+### Cosmovisor Updates
+
+If the `upgrade-info.json` file does not contain any `UpgradeInstructions`, existing functionality will be maintained.
+
+We will update Cosmovisor to look for and handle the new `UpgradeInstructions` in `upgrade-info.json`.
+If the `UpgradeInstructions` are provided, we will do the following:
+
+1. The `info` field will be ignored.
+2. The `artifacts` field will be used to identify the artifact to download based on the `platform` that Cosmovisor is running in.
+3. If a `checksum` is provided (either in the field or as a query param in the `url`), and the downloaded artifact has a different checksum, the upgrade process will be interrupted and Cosmovisor will exit with an error.
+4. If a `pre_run` command is defined, it will be executed at the same point in the process where the `app pre-upgrade` command would have been executed.
+ It will be executed using the same environment as other commands run by Cosmovisor.
+5. If a `post_run` command is defined, it will be executed after executing the command that restarts the chain.
+ It will be executed in a background process using the same environment as the other commands.
+ Any output generated by the command will be logged.
+ Once complete, the exit code will be logged.
+
+We will deprecate the use of the `info` field for anything other than human readable information.
+A warning will be logged if the `info` field is used to define the assets (either by URL or JSON).
+
+The new upgrade timeline is very similar to the current one. Changes are in bold:
+
+1. An upgrade governance proposal is submitted and approved.
+2. The upgrade height is reached.
+3. The `x/upgrade` module writes the `upgrade_info.json` file **(now possibly with `UpgradeInstructions`)**.
+4. The chain halts.
+5. Cosmovisor backs up the data directory (if set up to do so).
+6. Cosmovisor downloads the new executable (if not already in place).
+7. Cosmovisor executes **the `pre_run` command if provided**, or else the `${DAEMON_NAME} pre-upgrade` command.
+8. Cosmovisor restarts the app using the new version and same args originally provided.
+9. **Cosmovisor immediately runs the `post_run` command in a detached process.**
+
+## Consequences
+
+### Backwards Compatibility
+
+Since the only change to existing definitions is the addition of the `instructions` field to the `Plan` message, and that field is optional, there are no backwards incompatibilities with respect to the proto messages.
+Additionally, current behavior will be maintained when no `UpgradeInstructions` are provided, so there are no backwards incompatibilities with respect to either the upgrade module or Cosmovisor.
+
+### Forwards Compatibility
+
+In order to utilize the `UpgradeInstructions` as part of a software upgrade, both of the following must be true:
+
+1. The chain must already be using a sufficiently advanced version of the Cosmos SDK.
+2. The chain's nodes must be using a sufficiently advanced version of Cosmovisor.
+
+### Positive
+
+1. The structure for defining artifacts is clearer since it is now defined in the proto instead of in documentation.
+2. Availability of a pre-run command becomes more obvious.
+3. A post-run command becomes possible.
+
+### Negative
+
+1. The `Plan` message becomes larger. This is negligible because A) the `x/upgrades` module only stores at most one upgrade plan, and B) upgrades are rare enough that the increased gas cost isn't a concern.
+2. There is no option for providing a URL that will return the `UpgradeInstructions`.
+3. The only way to provide multiple assets (executables and other files) for a platform is to use an archive as the platform's artifact.
+
+### Neutral
+
+1. Existing functionality of the `info` field is maintained when the `UpgradeInstructions` aren't provided.
+
+## Further Discussions
+
+1. [Draft PR #10032 Comment](https://github.com/cosmos/cosmos-sdk/pull/10032/files?authenticity_token=pLtzpnXJJB%2Fif2UWiTp9Td3MvRrBF04DvjSuEjf1azoWdLF%2BSNymVYw9Ic7VkqHgNLhNj6iq9bHQYnVLzMXd4g%3D%3D\&file-filters%5B%5D=.go\&file-filters%5B%5D=.proto#r698708349):
+ Consider different names for `UpgradeInstructions instructions` (either the message type or field name).
+2. [Draft PR #10032 Comment](https://github.com/cosmos/cosmos-sdk/pull/10032/files?authenticity_token=pLtzpnXJJB%2Fif2UWiTp9Td3MvRrBF04DvjSuEjf1azoWdLF%2BSNymVYw9Ic7VkqHgNLhNj6iq9bHQYnVLzMXd4g%3D%3D\&file-filters%5B%5D=.go\&file-filters%5B%5D=.proto#r754655072):
+ 1. Consider putting the `string platform` field inside `UpgradeInstructions` and make `UpgradeInstructions` a repeated field in `Plan`.
+ 2. Consider using a `oneof` field in the `Plan` which could either be `UpgradeInstructions` or else a URL that should return the `UpgradeInstructions`.
+ 3. Consider allowing `info` to either be a JSON serialized version of `UpgradeInstructions` or else a URL that returns that.
+3. [Draft PR #10032 Comment](https://github.com/cosmos/cosmos-sdk/pull/10032/files?authenticity_token=pLtzpnXJJB%2Fif2UWiTp9Td3MvRrBF04DvjSuEjf1azoWdLF%2BSNymVYw9Ic7VkqHgNLhNj6iq9bHQYnVLzMXd4g%3D%3D\&file-filters%5B%5D=.go\&file-filters%5B%5D=.proto#r755462876):
+ Consider not including the `UpgradeInstructions.description` field, using the `info` field for that purpose instead.
+4. [Draft PR #10032 Comment](https://github.com/cosmos/cosmos-sdk/pull/10032/files?authenticity_token=pLtzpnXJJB%2Fif2UWiTp9Td3MvRrBF04DvjSuEjf1azoWdLF%2BSNymVYw9Ic7VkqHgNLhNj6iq9bHQYnVLzMXd4g%3D%3D\&file-filters%5B%5D=.go\&file-filters%5B%5D=.proto#r754643691):
+ Consider allowing multiple artifacts to be downloaded for any given `platform` by adding a `name` field to the `Artifact` message.
+5. [PR #10502 Comment](https://github.com/cosmos/cosmos-sdk/pull/10602#discussion_r781438288)
+ Allow the new `UpgradeInstructions` to be provided via URL.
+6. [PR #10502 Comment](https://github.com/cosmos/cosmos-sdk/pull/10602#discussion_r781438288)
+ Allow definition of a `signer` for assets (as an alternative to using a `checksum`).
+
+## References
+
+* [Current upgrade.proto](https://github.com/cosmos/cosmos-sdk/blob/v0.44.5/proto/cosmos/upgrade/v1beta1/upgrade.proto)
+* [Upgrade Module README](https://github.com/cosmos/cosmos-sdk/blob/v0.44.5/x/upgrade/spec/README.md)
+* [Cosmovisor README](https://github.com/cosmos/cosmos-sdk/blob/cosmovisor/v1.0.0/cosmovisor/README.md)
+* [Pre-upgrade README](https://github.com/cosmos/cosmos-sdk/blob/v0.44.5/docs/migrations/pre-upgrade.md)
+* [Draft/POC PR #10032](https://github.com/cosmos/cosmos-sdk/pull/10032)
+* [RFC 1738: Uniform Resource Locators](https://www.ietf.org/rfc/rfc1738.txt)
diff --git a/sdk/next/build/architecture/adr-048-consensus-fees.mdx b/sdk/next/build/architecture/adr-048-consensus-fees.mdx
new file mode 100644
index 000000000..b4b738c91
--- /dev/null
+++ b/sdk/next/build/architecture/adr-048-consensus-fees.mdx
@@ -0,0 +1,207 @@
+---
+title: 'ADR 048: Multi Tier Gas Price System'
+description: 'Dec 1, 2021: Initial Draft'
+---
+
+## Changelog
+
+* Dec 1, 2021: Initial Draft
+
+## Status
+
+Rejected
+
+## Abstract
+
+This ADR describes a flexible mechanism to maintain consensus-level gas prices, in which one can choose a multi-tier gas price system or an EIP-1559-like one through configuration.
+
+## Context
+
+Currently, each validator configures its own `minimal-gas-prices` in `app.yaml`. But setting a proper minimal gas price is critical to protect the network from DDoS attacks, and it's hard for all the validators to pick a sensible value, so we propose to maintain a gas price at the consensus level.
+
+Since tendermint 0.34.20 has supported mempool prioritization, we can take advantage of that to implement a more sophisticated gas fee system.
+
+## Multi-Tier Price System
+
+We propose a multi-tier price system on consensus to provide maximum flexibility:
+
+* Tier 1: a constant gas price, which could only be modified occasionally through governance proposal.
+* Tier 2: a dynamic gas price which is adjusted according to previous block load.
+* Tier 3: a dynamic gas price which is adjusted according to previous block load at a higher speed.
+
+The gas price of a higher tier should be bigger than that of a lower tier.
+
+The transaction fees are charged with the exact gas price calculated on consensus.
+
+The parameter schema is like this:
+
+```protobuf expandable
+message TierParams {
+ uint32 priority = 1 // priority in tendermint mempool
+ Coin initial_gas_price = 2 //
+ uint32 parent_gas_target = 3 // the target saturation of block
+ uint32 change_denominator = 4 // decides the change speed
+ Coin min_gas_price = 5 // optional lower bound of the price adjustment
+ Coin max_gas_price = 6 // optional upper bound of the price adjustment
+}
+
+message Params {
+ repeated TierParams tiers = 1;
+}
+```
+
+### Extension Options
+
+We need to allow user to specify the tier of service for the transaction, to support it in an extensible way, we add an extension option in `AuthInfo`:
+
+```protobuf
+message ExtensionOptionsTieredTx {
+ uint32 fee_tier = 1
+}
+```
+
+The value of `fee_tier` is just the index to the `tiers` parameter list.
+
+We also change the semantic of existing `fee` field of `Tx`, instead of charging user the exact `fee` amount, we treat it as a fee cap, while the actual amount of fee charged is decided dynamically. If the `fee` is smaller than dynamic one, the transaction won't be included in current block and ideally should stay in the mempool until the consensus gas price drop. The mempool can eventually prune old transactions.
+
+### Tx Prioritization
+
+Transactions are prioritized based on the tier, the higher the tier, the higher the priority.
+
+Within the same tier, follow the default Tendermint order (currently FIFO). Be aware of that the mempool tx ordering logic is not part of consensus and can be modified by malicious validator.
+
+This mechanism can be easily composed with prioritization mechanisms:
+
+* we can add extra tiers out of a user control:
+ * Example 1: user can set tier 0, 10 or 20, but the protocol will create tiers 0, 1, 2 ... 29. For example IBC transactions will go to tier `user_tier + 5`: if user selected tier 1, then the transaction will go to tier 15.
+  * Example 2: we can reserve tier 4, 5, ... only for special transaction types. For example, tier 5 is reserved for evidence tx. So if a user submits a bank.Send transaction and sets tier 5, it will be delegated to tier 3 (the max tier level available for any transaction).
+  * Example 3: we can enforce that all transactions of a specific type will go to a specific tier. For example, tier 100 will be reserved for evidence transactions and all evidence transactions will always go to that tier.
+
+### `min-gas-prices`
+
+Deprecate the current per-validator `min-gas-prices` configuration, since it would be confusing for it to work together with the consensus gas price.
+
+### Adjust For Block Load
+
+For tier 2 and tier 3 transactions, the gas price is adjusted according to previous block load, the logic could be similar to EIP-1559:
+
+```python expandable
+def adjust_gas_price(gas_price, parent_gas_used, tier):
+ if parent_gas_used == tier.parent_gas_target:
+ return gas_price
+ elif parent_gas_used > tier.parent_gas_target:
+ gas_used_delta = parent_gas_used - tier.parent_gas_target
+ gas_price_delta = max(gas_price * gas_used_delta // tier.parent_gas_target // tier.change_speed, 1)
+ return gas_price + gas_price_delta
+ else:
+        gas_used_delta = tier.parent_gas_target - parent_gas_used
+        gas_price_delta = gas_price * gas_used_delta // tier.parent_gas_target // tier.change_speed
+ return gas_price - gas_price_delta
+```
+
+### Block Segment Reservation
+
+Ideally we should reserve block segments for each tier, so the lower tiered transactions won't be completely squeezed out by higher tier transactions, which will force user to use higher tier, and the system degraded to a single tier.
+
+We need help from tendermint to implement this.
+
+## Implementation
+
+We can make each tier's gas price strategy fully configurable in protocol parameters, while providing a sensible default one.
+
+Pseudocode in python-like syntax:
+
+```python expandable
+interface TieredTx:
+ def tier(self) -> int:
+ pass
+
+def tx_tier(tx):
+ if isinstance(tx, TieredTx):
+ return tx.tier()
+ else:
+ # default tier for custom transactions
+ return 0
+ # NOTE: we can add more rules here per "Tx Prioritization" section
+
+class TierParams:
+ 'gas price strategy parameters of one tier'
+ priority: int # priority in tendermint mempool
+ initial_gas_price: Coin
+ parent_gas_target: int
+ change_speed: Decimal # 0 means don't adjust for block load.
+
+class Params:
+ 'protocol parameters'
+ tiers: List[TierParams]
+
+class State:
+ 'consensus state'
+ # total gas used in last block, None when it's the first block
+ parent_gas_used: Optional[int]
+ # gas prices of last block for all tiers
+ gas_prices: List[Coin]
+
+def begin_block():
+ 'Adjust gas prices'
+ for i, tier in enumerate(Params.tiers):
+ if State.parent_gas_used is None:
+ # initialized gas price for the first block
+ State.gas_prices[i] = tier.initial_gas_price
+ else:
+ # adjust gas price according to gas used in previous block
+ State.gas_prices[i] = adjust_gas_price(State.gas_prices[i], State.parent_gas_used, tier)
+
+def mempoolFeeTxHandler_checkTx(ctx, tx):
+ # the minimal-gas-price configured by validator, zero in deliver_tx context
+ validator_price = ctx.MinGasPrice()
+ consensus_price = State.gas_prices[tx_tier(tx)]
+ min_price = max(validator_price, consensus_price)
+
+ # zero means infinity for gas price cap
+ if tx.gas_price() > 0 and tx.gas_price() < min_price:
+ return 'insufficient fees'
+ return next_CheckTx(ctx, tx)
+
+def txPriorityHandler_checkTx(ctx, tx):
+ res, err := next_CheckTx(ctx, tx)
+ # pass priority to tendermint
+ res.Priority = Params.tiers[tx_tier(tx)].priority
+ return res, err
+
+def end_block():
+ 'Update block gas used'
+ State.parent_gas_used = block_gas_meter.consumed()
+```
+
+### DDoS attack protection
+
+To fully saturate the blocks and prevent other transactions from executing, an attacker needs to use transactions of the highest tier, and the cost would be significantly higher than the default tier.
+
+If attackers spam with lower tier transactions, users can mitigate by sending higher tier transactions.
+
+## Consequences
+
+### Backwards Compatibility
+
+* New protocol parameters.
+* New consensus states.
+* New/changed fields in transaction body.
+
+### Positive
+
+* The default tier keeps the same predictable gas price experience for client.
+* The higher tier's gas price can adapt to block load.
+* No priority conflict with custom priority based on transaction types, since this proposal only occupy three priority levels.
+* Possibility to compose different priority rules with tiers
+
+### Negative
+
+* Wallets & tools need to update to support the new `tier` parameter, and semantic of `fee` field is changed.
+
+### Neutral
+
+## References
+
+* [Link](https://eips.ethereum.org/EIPS/eip-1559)
+* [Link](https://iohk.io/en/blog/posts/2021/11/26/network-traffic-and-tiered-pricing/)
diff --git a/sdk/next/build/architecture/adr-049-state-sync-hooks.mdx b/sdk/next/build/architecture/adr-049-state-sync-hooks.mdx
new file mode 100644
index 000000000..6b7fd5a90
--- /dev/null
+++ b/sdk/next/build/architecture/adr-049-state-sync-hooks.mdx
@@ -0,0 +1,197 @@
+---
+title: 'ADR 049: State Sync Hooks'
+description: >-
+ Jan 19, 2022: Initial Draft Apr 29, 2022: Safer extension snapshotter
+ interface
+---
+
+## Changelog
+
+* Jan 19, 2022: Initial Draft
+* Apr 29, 2022: Safer extension snapshotter interface
+
+## Status
+
+Implemented
+
+## Abstract
+
+This ADR outlines a hooks-based mechanism for application modules to provide additional state (outside of the IAVL tree) to be used
+during state sync.
+
+## Context
+
+New clients use state-sync to download snapshots of module state from peers. Currently, the snapshot consists of a
+stream of `SnapshotStoreItem` and `SnapshotIAVLItem`, which means that application modules that define their state outside of the IAVL
+tree cannot include their state as part of the state-sync process.
+
+Note, even though the module state data is outside of the tree, for determinism we require that the hash of the external data should
+be posted in the IAVL tree.
+
+## Decision
+
+A simple proposal based on our existing implementation is that, we can add two new message types: `SnapshotExtensionMeta`
+and `SnapshotExtensionPayload`, and they are appended to the existing multi-store stream with `SnapshotExtensionMeta`
+acting as a delimiter between extensions. As the chunk hashes should be able to ensure data integrity, we don't need
+a delimiter to mark the end of the snapshot stream.
+
+Besides, we provide `Snapshotter` and `ExtensionSnapshotter` interface for modules to implement snapshotters, which will handle both taking
+snapshot and the restoration. Each module could have multiple snapshotters, and for modules with additional state, they should
+implement `ExtensionSnapshotter` as extension snapshotters. When setting up the application, the snapshot `Manager` should call
+`RegisterExtensions([]ExtensionSnapshotter…)` to register all the extension snapshotters.
+
+```protobuf expandable
+// SnapshotItem is an item contained in a rootmulti.Store snapshot.
+// On top of the existing SnapshotStoreItem and SnapshotIAVLItem, we add two new options for the item.
+message SnapshotItem {
+ // item is the specific type of snapshot item.
+ oneof item {
+ SnapshotStoreItem store = 1;
+ SnapshotIAVLItem iavl = 2 [(gogoproto.customname) = "IAVL"];
+ SnapshotExtensionMeta extension = 3;
+ SnapshotExtensionPayload extension_payload = 4;
+ }
+}
+
+// SnapshotExtensionMeta contains metadata about an external snapshotter.
+// One module may need multiple snapshotters, so each module may have multiple SnapshotExtensionMeta.
+message SnapshotExtensionMeta {
+ // the name of the ExtensionSnapshotter, and it is registered to snapshotter manager when setting up the application
+ // name should be unique for each ExtensionSnapshotter as we need to alphabetically order their snapshots to get
+ // deterministic snapshot stream.
+ string name = 1;
+ // this is used by each ExtensionSnapshotter to decide the format of payloads included in SnapshotExtensionPayload message
+ // it is used within the snapshotter/namespace, not global one for all modules
+ uint32 format = 2;
+}
+
+// SnapshotExtensionPayload contains payloads of an external snapshotter.
+message SnapshotExtensionPayload {
+ bytes payload = 1;
+}
+```
+
+When we create a snapshot stream, the `multistore` snapshot is always placed at the beginning of the binary stream, and other extension snapshots are alphabetically ordered by the name of the corresponding `ExtensionSnapshotter`.
+
+The snapshot stream would look like as follows:
+
+```go
+// multi-store snapshot
+{
+ SnapshotStoreItem | SnapshotIAVLItem, ...
+}
+// extension1 snapshot
+SnapshotExtensionMeta
+{
+ SnapshotExtensionPayload, ...
+}
+// extension2 snapshot
+SnapshotExtensionMeta
+{
+ SnapshotExtensionPayload, ...
+}
+```
+
+We add an `extensions` field to snapshot `Manager` for extension snapshotters. The `multistore` snapshotter is a special one and it doesn't need a name because it is always placed at the beginning of the binary stream.
+
+```go expandable
+type Manager struct {
+ store *Store
+ multistore types.Snapshotter
+ extensions map[string]types.ExtensionSnapshotter
+ mtx sync.Mutex
+ operation operation
+ chRestore chan<- io.ReadCloser
+ chRestoreDone <-chan restoreDone
+ restoreChunkHashes [][]byte
+ restoreChunkIndex uint32
+}
+```
+
+For extension snapshotters that implement the `ExtensionSnapshotter` interface, their names should be registered to the snapshot `Manager` by
+calling `RegisterExtensions` when setting up the application. The snapshotters will handle both taking snapshot and restoration.
+
+```go
+// RegisterExtensions register extension snapshotters to manager
+func (m *Manager)
+
+RegisterExtensions(extensions ...types.ExtensionSnapshotter)
+
+error
+```
+
+On top of the existing `Snapshotter` interface for the `multistore`, we add `ExtensionSnapshotter` interface for the extension snapshotters. Three more function signatures: `SnapshotFormat()`, `SupportedFormats()` and `SnapshotName()` are added to `ExtensionSnapshotter`.
+
+```go expandable
+// ExtensionPayloadReader read extension payloads,
+// it returns io.EOF when reached either end of stream or the extension boundaries.
+type ExtensionPayloadReader = func() ([]byte, error)
+
+// ExtensionPayloadWriter is a helper to write extension payloads to underlying stream.
+type ExtensionPayloadWriter = func([]byte)
+
+error
+
+// ExtensionSnapshotter is an extension Snapshotter that is appended to the snapshot stream.
+// ExtensionSnapshotter has a unique name and manages its own internal formats.
+type ExtensionSnapshotter interface {
+ // SnapshotName returns the name of snapshotter, it should be unique in the manager.
+ SnapshotName()
+
+string
+
+ // SnapshotFormat returns the default format used to take a snapshot.
+ SnapshotFormat()
+
+uint32
+
+ // SupportedFormats returns a list of formats it can restore from.
+ SupportedFormats() []uint32
+
+ // SnapshotExtension writes extension payloads into the underlying protobuf stream.
+ SnapshotExtension(height uint64, payloadWriter ExtensionPayloadWriter)
+
+error
+
+ // RestoreExtension restores an extension state snapshot,
+ // the payload reader returns `io.EOF` when reached the extension boundaries.
+ RestoreExtension(height uint64, format uint32, payloadReader ExtensionPayloadReader)
+
+error
+}
+```
+
+## Consequences
+
+As a result of this implementation, we are able to create snapshots of binary chunk stream for the state that we maintain outside of the IAVL Tree, CosmWasm blobs for example. And new clients are able to fetch snapshots of state for all modules that have implemented the corresponding interface from peer nodes.
+
+### Backwards Compatibility
+
+This ADR introduces new proto message types, add an `extensions` field in snapshot `Manager`, and add new `ExtensionSnapshotter` interface, so this is not backwards compatible if we have extensions.
+
+But for applications that do not have the state data outside of the IAVL tree for any module, the snapshot stream is backwards-compatible.
+
+### Positive
+
+* State maintained outside of IAVL tree like CosmWasm blobs can create snapshots by implementing extension snapshotters, and being fetched by new clients via state-sync.
+
+### Negative
+
+### Neutral
+
+* All modules that maintain state outside of IAVL tree need to implement `ExtensionSnapshotter` and the snapshot `Manager` need to call `RegisterExtensions` when setting up the application.
+
+## Further Discussions
+
+While an ADR is in the DRAFT or PROPOSED stage, this section should contain a summary of issues to be solved in future iterations (usually referencing comments from a pull-request discussion).
+Later, this section can optionally list ideas or improvements the author or reviewers found during the analysis of this ADR.
+
+## Test Cases \[optional]
+
+Test cases for an implementation are mandatory for ADRs that are affecting consensus changes. Other ADRs can choose to include links to test cases if applicable.
+
+## References
+
+* [Link](https://github.com/cosmos/cosmos-sdk/pull/10961)
+* [Link](https://github.com/cosmos/cosmos-sdk/issues/7340)
+* [Link](https://hackmd.io/gJoyev6DSmqqkO667WQlGw)
diff --git a/sdk/next/build/architecture/adr-050-sign-mode-textual-annex1.mdx b/sdk/next/build/architecture/adr-050-sign-mode-textual-annex1.mdx
new file mode 100644
index 000000000..b253f7d73
--- /dev/null
+++ b/sdk/next/build/architecture/adr-050-sign-mode-textual-annex1.mdx
@@ -0,0 +1,364 @@
+---
+title: 'ADR 050: SIGN_MODE_TEXTUAL: Annex 1 Value Renderers'
+---
+
+## Changelog
+
+* Dec 06, 2021: Initial Draft
+* Feb 07, 2022: Draft read and concept-ACKed by the Ledger team.
+* Dec 01, 2022: Remove `Object: ` prefix on Any header screen.
+* Dec 13, 2022: Sign over bytes hash when bytes length > 32.
+* Mar 27, 2023: Update `Any` value renderer to omit message header screen.
+
+## Status
+
+Accepted. Implementation started. Small value renderers details still need to be polished.
+
+## Abstract
+
+This Annex describes value renderers, which are used for displaying Protobuf values in a human-friendly way using a string array.
+
+## Value Renderers
+
+Value Renderers describe how values of different Protobuf types should be encoded as a string array. Value renderers can be formalized as a set of bijective functions `func renderT(value T) []string`, where `T` is one of the below Protobuf types for which this spec is defined.
+
+### Protobuf `number`
+
+* Applies to:
+ * protobuf numeric integer types (`int{32,64}`, `uint{32,64}`, `sint{32,64}`, `fixed{32,64}`, `sfixed{32,64}`)
+ * strings whose `customtype` is `github.com/cosmos/cosmos-sdk/types.Int` or `github.com/cosmos/cosmos-sdk/types.Dec`
+ * bytes whose `customtype` is `github.com/cosmos/cosmos-sdk/types.Int` or `github.com/cosmos/cosmos-sdk/types.Dec`
+* Trailing decimal zeroes are always removed
+* Formatting with `'`s for every three integral digits.
+* Usage of `.` to denote the decimal delimiter.
+
+#### Examples
+
+* `1000` (uint64) -> `1'000`
+* `"1000000.00"` (string representing a Dec) -> `1'000'000`
+* `"1000000.10"` (string representing a Dec) -> `1'000'000.1`
+
+### `coin`
+
+* Applies to `cosmos.base.v1beta1.Coin`.
+* Denoms are converted to `display` denoms using `Metadata` (if available). **This requires a state query**. The definition of `Metadata` can be found in the [bank protobuf definition](https://buf.build/cosmos/cosmos-sdk/docs/main:cosmos.bank.v1beta1#cosmos.bank.v1beta1.Metadata). If the `display` field is empty or nil, then we do not perform any denom conversion.
+* Amounts are converted to `display` denom amounts and rendered as `number`s above
+ * We do not change the capitalization of the denom. In practice, `display` denoms are stored in lowercase in state (e.g. `10 atom`), however they are often showed in UPPERCASE in everyday life (e.g. `10 ATOM`). Value renderers keep the case used in state, but we may recommend chains changing the denom metadata to be uppercase for better user display.
+* One space between the denom and amount (e.g. `10 atom`).
+* In the future, IBC denoms could maybe be converted to DID/IIDs, if we can find a robust way for doing this (ex. `cosmos:cosmos:hub:bank:denom:atom`)
+
+#### Examples
+
+* `1000000000uatom` -> `["1'000 atom"]`, because atom is the metadata's display denom.
+
+### `coins`
+
+* an array of `coin` is displayed as the concatenation of each `coin` encoded per the specification above, then joined together with the delimiter `", "` (a comma and a space, no quotes around).
+* the list of coins is ordered by unicode code point of the display denom: `A-Z` < `a-z`. For example, the string `aAbBcC` would be sorted `ABCabc`.
+ * if the coins list had 0 items in it then it'll be rendered as `zero`
+
+### Example
+
+* `["3cosm", "2000000uatom"]` -> `2 atom, 3 COSM` (assuming the display denoms are `atom` and `COSM`)
+* `["10atom", "20Acoin"]` -> `20 Acoin, 10 atom` (assuming the display denoms are `atom` and `Acoin`)
+* `[]` -> `zero`
+
+### `repeated`
+
+* Applies to all `repeated` fields, except `cosmos.tx.v1beta1.TxBody#Messages`, which has a particular encoding (see [ADR-050](/sdk/v0.50/build/architecture/adr-050-sign-mode-textual)).
+* A repeated type has the following template:
+
+```
+:
+ (/):
+
+ (/):
+
+End of .
+```
+
+where:
+
+* `field_name` is the Protobuf field name of the repeated field
+* `field_kind`:
+ * if the type of the repeated field is a message, `field_kind` is the message name
+ * if the type of the repeated field is an enum, `field_kind` is the enum name
+ * in any other case, `field_kind` is the protobuf primitive type (e.g. "string" or "bytes")
+* `int` is the length of the array
+* `index` is the one-based index of the repeated field
+
+#### Examples
+
+Given the proto definition:
+
+```protobuf
+message AllowedMsgAllowance {
+ repeated string allowed_messages = 1;
+}
+```
+
+and initializing with:
+
+```go
+x := []AllowedMsgAllowance{"cosmos.bank.v1beta1.MsgSend", "cosmos.gov.v1.MsgVote"
+}
+```
+
+we have the following value-rendered encoding:
+
+```
+Allowed messages: 2 strings
+Allowed messages (1/2): cosmos.bank.v1beta1.MsgSend
+Allowed messages (2/2): cosmos.gov.v1.MsgVote
+End of Allowed messages
+```
+
+### `message`
+
+* Applies to all Protobuf messages that do not have a custom encoding.
+* Field names follow [sentence case](https://en.wiktionary.org/wiki/sentence_case)
+ * replace each `_` with a space
+ * capitalize first letter of the sentence
+* Field names are ordered by their Protobuf field number
+* Screen title is the field name, and screen content is the value.
+* Nesting:
+
+ * if a field contains a nested message, we value-render the underlying message using the template:
+
+ ```
+ : <1st line of value-rendered message>
+ > // Notice the `>` prefix.
+ ```
+
+ * `>` character is used to denote nesting. For each additional level of nesting, add `>`.
+
+#### Examples
+
+Given the following Protobuf messages:
+
+```protobuf expandable
+enum VoteOption {
+ VOTE_OPTION_UNSPECIFIED = 0;
+ VOTE_OPTION_YES = 1;
+ VOTE_OPTION_ABSTAIN = 2;
+ VOTE_OPTION_NO = 3;
+ VOTE_OPTION_NO_WITH_VETO = 4;
+}
+
+message WeightedVoteOption {
+ VoteOption option = 1;
+ string weight = 2 [(cosmos_proto.scalar) = "cosmos.Dec"];
+}
+
+message Vote {
+ uint64 proposal_id = 1;
+ string voter = 2 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+ reserved 3;
+ repeated WeightedVoteOption options = 4;
+}
+```
+
+we get the following encoding for the `Vote` message:
+
+```
+Vote object
+> Proposal id: 4
+> Voter: cosmos1abc...def
+> Options: 2 WeightedVoteOptions
+> Options (1/2): WeightedVoteOption object
+>> Option: VOTE_OPTION_YES
+>> Weight: 0.7
+> Options (2/2): WeightedVoteOption object
+>> Option: VOTE_OPTION_NO
+>> Weight: 0.3
+> End of Options
+```
+
+### Enums
+
+* Show the enum variant name as string.
+
+#### Examples
+
+See example above with `message Vote{}`.
+
+### `google.protobuf.Any`
+
+* Applies to `google.protobuf.Any`
+* Rendered as:
+
+```
+
+>
+```
+
+There is however one exception: when the underlying message is a Protobuf message that does not have a custom encoding, then the message header screen is omitted, and one level of indentation is removed.
+
+Messages that have a custom encoding, including `google.protobuf.Timestamp`, `google.protobuf.Duration`, `google.protobuf.Any`, `cosmos.base.v1beta1.Coin`, and messages that have an app-defined custom encoding, will preserve their header and indentation level.
+
+#### Examples
+
+Message header screen is stripped, one-level of indentation removed:
+
+```
+/cosmos.gov.v1.Vote
+> Proposal id: 4
+> Vote: cosmos1abc...def
+> Options: 2 WeightedVoteOptions
+> Options (1/2): WeightedVoteOption object
+>> Option: Yes
+>> Weight: 0.7
+> Options (2/2): WeightedVoteOption object
+>> Option: No
+>> Weight: 0.3
+> End of Options
+```
+
+Message with custom encoding:
+
+```
+/cosmos.base.v1beta1.Coin
+> 10uatom
+```
+
+### `google.protobuf.Timestamp`
+
+Rendered using [RFC 3339](https://www.rfc-editor.org/rfc/rfc3339) (a
+simplification of ISO 8601), which is the current recommendation for portable
+time values. The rendering always uses "Z" (UTC) as the timezone. It uses only
+the necessary fractional digits of a second, omitting the fractional part
+entirely if the timestamp has no fractional seconds. (The resulting timestamps
+are not automatically sortable by standard lexicographic order, but we favor
+the legibility of the shorter string.)
+
+#### Examples
+
+The timestamp with 1136214245 seconds and 700000000 nanoseconds is rendered
+as `2006-01-02T15:04:05.7Z`.
+The timestamp with 1136214245 seconds and zero nanoseconds is rendered
+as `2006-01-02T15:04:05Z`.
+
+### `google.protobuf.Duration`
+
+The duration proto expresses a raw number of seconds and nanoseconds.
+This will be rendered as longer time units of days, hours, and minutes,
+plus any remaining seconds, in that order.
+Leading and trailing zero-quantity units will be omitted, but all
+units in between nonzero units will be shown, e.g. ` 3 days, 0 hours, 0 minutes, 5 seconds`.
+
+Even longer time units such as months or years are imprecise.
+Weeks are precise, but not commonly used - `91 days` is more immediately
+legible than `13 weeks`. Although `days` can be problematic,
+e.g. noon to noon on subsequent days can be 23 or 25 hours depending on
+daylight savings transitions, there is significant advantage in using
+strict 24-hour days over using only hours (e.g. `91 days` vs `2184 hours`).
+
+When nanoseconds are nonzero, they will be shown as fractional seconds,
+with only the minimum number of digits, e.g `0.5 seconds`.
+
+A duration of exactly zero is shown as `0 seconds`.
+
+Units will be given as singular (no trailing `s`) when the quantity is exactly one,
+and will be shown in plural otherwise.
+
+Negative durations will be indicated with a leading minus sign (`-`).
+
+Examples:
+
+* `1 day`
+* `30 days`
+* `-1 day, 12 hours`
+* `3 hours, 0 minutes, 53.025 seconds`
+
+### bytes
+
+* Bytes of length shorter or equal to 35 are rendered in hexadecimal, all capital letters, without the `0x` prefix.
+* Bytes of length greater than 35 are hashed using SHA256. The rendered text is `SHA-256=`, followed by the 32-byte hash, in hexadecimal, all capital letters, without the `0x` prefix.
+* The hexadecimal string is finally separated into groups of 4 digits, with a space `' '` as separator. If the bytes length is odd, the 2 remaining hexadecimal characters are at the end.
+
+The number 35 was chosen because it is the longest length where the hashed-and-prefixed representation is longer than the original data directly formatted, using the 3 rules above. More specifically:
+
+* a 35-byte array will have 70 hex characters, plus 17 space characters, resulting in 87 characters.
+* byte arrays starting from length 36 will be hashed to 32 bytes, which is 64 hex characters plus 15 spaces, and with the `SHA-256=` prefix, it takes 87 characters.
+ Also, secp256k1 public keys have length 33, so their Textual representation is not their hashed value, which we would like to avoid.
+
+Note: Data longer than 35 bytes are not rendered in a way that can be inverted. See ADR-050's [section about invertability](/sdk/v0.50/build/architecture/adr-050-sign-mode-textual#invertible-rendering) for a discussion.
+
+#### Examples
+
+Inputs are displayed as byte arrays.
+
+* `[0]`: `00`
+* `[0,1,2]`: `0001 02`
+* `[0,1,2,..,34]`: `0001 0203 0405 0607 0809 0A0B 0C0D 0E0F 1011 1213 1415 1617 1819 1A1B 1C1D 1E1F 2021 22`
+* `[0,1,2,..,35]`: `SHA-256=5D7E 2D9B 1DCB C85E 7C89 0036 A2CF 2F9F E7B6 6554 F2DF 08CE C6AA 9C0A 25C9 9C21`
+
+### address bytes
+
+We currently use `string` types in protobuf for addresses so this may not be needed, but if any address bytes are used in sign mode textual they should be rendered with bech32 formatting
+
+### strings
+
+Strings are rendered as-is.
+
+### Default Values
+
+* Default Protobuf values for each field are skipped.
+
+#### Example
+
+```protobuf
+message TestData {
+ string signer = 1;
+ string metadata = 2;
+}
+```
+
+```go
+myTestData := TestData{
+ Signer: "cosmos1abc"
+}
+```
+
+We get the following encoding for the `TestData` message:
+
+```
+TestData object
+> Signer: cosmos1abc
+```
+
+### bool
+
+Boolean values are rendered as `True` or `False`.
+
+### \[ABANDONED] Custom `msg_title` instead of Msg `type_url`
+
+*This paragraph is in the Annex for informational purposes only, and will be removed in a next update of the ADR.*
+
+
+
+* all protobuf messages to be used with `SIGN_MODE_TEXTUAL` CAN have a short title associated with them that can be used in format strings whenever the type URL is explicitly referenced via the `cosmos.msg.v1.textual.msg_title` Protobuf message option.
+* if this option is not specified for a Msg, then the Protobuf fully qualified name will be used.
+
+```protobuf
+message MsgSend {
+ option (cosmos.msg.v1.textual.msg_title) = "bank send coins";
+}
+```
+
+* they MUST be unique per message, per chain
+
+#### Examples
+
+* `cosmos.gov.v1.MsgVote` -> `governance v1 vote`
+
+#### Best Practices
+
+We recommend to use this option only for `Msg`s whose Protobuf fully qualified name can be hard to understand. As such, the two examples above (`MsgSend` and `MsgVote`) are not good examples to be used with `msg_title`. We still allow `msg_title` for chains who might have `Msg`s with complex or non-obvious names.
+
+In those cases, we recommend to drop the version (e.g. `v1`) in the string if there's only one version of the module on chain. This way, the bijective mapping can figure out which message each string corresponds to. If multiple Protobuf versions of the same module exist on the same chain, we recommend keeping the first `msg_title` with version, and the second `msg_title` with version (e.g. `v2`):
+
+* `mychain.mymodule.v1.MsgDo` -> `mymodule do something`
+* `mychain.mymodule.v2.MsgDo` -> `mymodule v2 do something`
+
+
diff --git a/sdk/next/build/architecture/adr-050-sign-mode-textual-annex2.mdx b/sdk/next/build/architecture/adr-050-sign-mode-textual-annex2.mdx
new file mode 100644
index 000000000..b7070587e
--- /dev/null
+++ b/sdk/next/build/architecture/adr-050-sign-mode-textual-annex2.mdx
@@ -0,0 +1,125 @@
+---
+title: 'ADR 050: SIGN_MODE_TEXTUAL: Annex 2'
+description: 'Oct 3, 2022: Initial Draft'
+---
+
+## Changelog
+
+* Oct 3, 2022: Initial Draft
+
+## Status
+
+DRAFT
+
+## Abstract
+
+This annex provides normative guidance on how devices should render a
+`SIGN_MODE_TEXTUAL` document.
+
+## Context
+
+`SIGN_MODE_TEXTUAL` allows a legible version of a transaction to be signed
+on a hardware security device, such as a Ledger. Early versions of the
+design rendered transactions directly to lines of ASCII text, but this
+proved awkward from its in-band signaling, and for the need to display
+Unicode text within the transaction.
+
+## Decision
+
+`SIGN_MODE_TEXTUAL` renders to an abstract representation, leaving it
+up to device-specific software how to present this representation given the
+capabilities, limitations, and conventions of the device.
+
+We offer the following normative guidance:
+
+1. The presentation should be as legible as possible to the user, given
+ the capabilities of the device. If legibility could be sacrificed for other
+ properties, we would recommend just using some other signing mode.
+ Legibility should focus on the common case - it is okay for unusual cases
+ to be less legible.
+
+2. The presentation should be invertible if possible without substantial
+ sacrifice of legibility. Any change to the rendered data should result
+ in a visible change to the presentation. This extends the integrity of the
+ signing to user-visible presentation.
+
+3. The presentation should follow normal conventions of the device,
+ without sacrificing legibility or invertibility.
+
+As an illustration of these principles, here is an example algorithm
+for presentation on a device which can display a single 80-character
+line of printable ASCII characters:
+
+* The presentation is broken into lines, and each line is presented in
+ sequence, with user controls for going forward or backward a line.
+
+* Expert mode screens are only presented if the device is in expert mode.
+
+* Each line of the screen starts with a number of `>` characters equal
+ to the screen's indentation level, followed by a `+` character if this
+ isn't the first line of the screen, followed by a space if either a
+ `>` or a `+` has been emitted,
+ or if this header is followed by a `>`, `+`, or space.
+
+* If the line ends with whitespace or an `@` character, an additional `@`
+ character is appended to the line.
+
+* The following ASCII control characters or backslash (`\`) are converted
+ to a backslash followed by a letter code, in the manner of string literals
+ in many languages:
+
+ * a: U+0007 alert or bell
+ * b: U+0008 backspace
+ * f: U+000C form feed
+ * n: U+000A line feed
+ * r: U+000D carriage return
+ * t: U+0009 horizontal tab
+ * v: U+000B vertical tab
+ * `\`: U+005C backslash
+
+* All other ASCII control characters, plus non-ASCII Unicode code points,
+ are shown as either:
+
+  * `\u` followed by 4 uppercase hex characters for code points
+    in the basic multilingual plane (BMP).
+
+ * `\U` followed by 8 uppercase hex characters for other code points.
+
+* The screen will be broken into multiple lines to fit the 80-character
+ limit, considering the above transformations in a way that attempts to
+ minimize the number of lines generated. Expanded control or Unicode characters
+ are never split across lines.
+
+Example output:
+
+```
+An introductory line.
+key1: 123456
+key2: a string that ends in whitespace @
+key3: a string that ends in a single at sign - @@
+ >tricky key4<: note the leading space in the presentation
+introducing an aggregate
+> key5: false
+> key6: a very long line of text, please co\u00F6perate and break into
+>+ multiple lines.
+> Can we do further nesting?
+>> You bet we can!
+```
+
+The inverse mapping gives us the only input which could have
+generated this output (JSON notation for string data):
+
+```
+Indent  Text
+------  ----
+0       "An introductory line."
+0       "key1: 123456"
+0       "key2: a string that ends in whitespace "
+0       "key3: a string that ends in a single at sign - @"
+0 ">tricky key4<: note the leading space in the presentation"
+0 "introducing an aggregate"
+1 "key5: false"
+1 "key6: a very long line of text, please coöperate and break into multiple lines."
+1 "Can we do further nesting?"
+2 "You bet we can!"
+```
diff --git a/sdk/next/build/architecture/adr-050-sign-mode-textual.mdx b/sdk/next/build/architecture/adr-050-sign-mode-textual.mdx
new file mode 100644
index 000000000..ffea66c3a
--- /dev/null
+++ b/sdk/next/build/architecture/adr-050-sign-mode-textual.mdx
@@ -0,0 +1,375 @@
+---
+title: 'ADR 050: SIGN_MODE_TEXTUAL'
+---
+
+## Changelog
+
+* Dec 06, 2021: Initial Draft.
+* Feb 07, 2022: Draft read and concept-ACKed by the Ledger team.
+* May 16, 2022: Change status to Accepted.
+* Aug 11, 2022: Require signing over tx raw bytes.
+* Sep 07, 2022: Add custom `Msg`-renderers.
+* Sep 18, 2022: Structured format instead of lines of text
+* Nov 23, 2022: Specify CBOR encoding.
+* Dec 01, 2022: Link to examples in separate JSON file.
+* Dec 06, 2022: Re-ordering of envelope screens.
+* Dec 14, 2022: Mention exceptions for invertibility.
+* Jan 23, 2023: Switch Screen.Text to Title+Content.
+* Mar 07, 2023: Change SignDoc from array to struct containing array.
+* Mar 20, 2023: Introduce a spec version initialized to 0.
+
+## Status
+
+Accepted. Implementation started. Small value renderers details still need to be polished.
+
+Spec version: 0.
+
+## Abstract
+
+This ADR specifies SIGN\_MODE\_TEXTUAL, a new string-based sign mode that is targeted at signing with hardware devices.
+
+## Context
+
+Protobuf-based SIGN\_MODE\_DIRECT was introduced in [ADR-020](/sdk/v0.50/build/architecture/adr-020-protobuf-transaction-encoding) and is intended to replace SIGN\_MODE\_LEGACY\_AMINO\_JSON in most situations, such as mobile wallets and CLI keyrings. However, the [Ledger](https://www.ledger.com/) hardware wallet is still using SIGN\_MODE\_LEGACY\_AMINO\_JSON for displaying the sign bytes to the user. Hardware wallets cannot transition to SIGN\_MODE\_DIRECT as:
+
+* SIGN\_MODE\_DIRECT is binary-based and thus not suitable for display to end-users. Technically, hardware wallets could simply display the sign bytes to the user. But this would be considered as blind signing, and is a security concern.
+* hardware cannot decode the protobuf sign bytes due to memory constraints, as the Protobuf definitions would need to be embedded on the hardware device.
+
+In an effort to remove Amino from the SDK, a new sign mode needs to be created for hardware devices. [Initial discussions](https://github.com/cosmos/cosmos-sdk/issues/6513) propose a text-based sign mode, which this ADR formally specifies.
+
+## Decision
+
+In SIGN\_MODE\_TEXTUAL, a transaction is rendered into a textual representation,
+which is then sent to a secure device or subsystem for the user to review and sign.
+Unlike `SIGN_MODE_DIRECT`, the transmitted data can be simply decoded into legible text
+even on devices with limited processing and display.
+
+The textual representation is a sequence of *screens*.
+Each screen is meant to be displayed in its entirety (if possible) even on a small device like a Ledger.
+A screen is roughly equivalent to a short line of text.
+Large screens can be displayed in several pieces,
+much as long lines of text are wrapped,
+so no hard guidance is given, though 40 characters is a good target.
+A screen is used to display a single key/value pair for scalar values
+(or composite values with a compact notation, such as `Coins`)
+or to introduce or conclude a larger grouping.
+
+The text can contain the full range of Unicode code points, including control characters and nul.
+The device is responsible for deciding how to display characters it cannot render natively.
+See [annex 2](/sdk/v0.50/build/architecture/adr-050-sign-mode-textual-annex2) for guidance.
+
+Screens have a non-negative indentation level to signal composite or nested structures.
+Indentation level zero is the top level.
+Indentation is displayed via some device-specific mechanism.
+Message quotation notation is an appropriate model, such as
+leading `>` characters or vertical bars on more capable displays.
+
+Some screens are marked as *expert* screens,
+meant to be displayed only if the viewer chooses to opt in for the extra detail.
+Expert screens are meant for information that is rarely useful,
+or needs to be present only for signature integrity (see below).
+
+### Invertible Rendering
+
+We require that the rendering of the transaction be invertible:
+there must be a parsing function such that for every transaction,
+when rendered to the textual representation,
+parsing that representation yields a proto message equivalent
+to the original under proto equality.
+
+Note that this inverse function does not need to perform correct
+parsing or error signaling for the whole domain of textual data.
+Merely that the range of valid transactions be invertible under
+the composition of rendering and parsing.
+
+Note that the existence of an inverse function ensures that the
+rendered text contains the full information of the original transaction,
+not a hash or subset.
+
+We make an exception for invertibility for data which are too large to
+meaningfully display, such as byte strings longer than 32 bytes. We may then
+selectively render them with a cryptographically-strong hash. In these cases,
+it is still computationally infeasible to find a different transaction which
+has the same rendering. However, we must ensure that the hash computation is
+simple enough to be reliably executed independently, so at least the hash is
+itself reasonably verifiable when the raw byte string is not.
+
+### Chain State
+
+The rendering function (and parsing function) may depend on the current chain state.
+This is useful for reading parameters, such as coin display metadata,
+or for reading user-specific preferences such as language or address aliases.
+Note that if the observed state changes between signature generation
+and the transaction's inclusion in a block, the delivery-time rendering
+might differ. If so, the signature will be invalid and the transaction
+will be rejected.
+
+### Signature and Security
+
+For security, transaction signatures should have three properties:
+
+1. Given the transaction, signatures, and chain state, it must be possible to validate that the signatures match the transaction,
+ to verify that the signers must have known their respective secret keys.
+
+2. It must be computationally infeasible to find a substantially different transaction for which the given signatures are valid, given the same chain state.
+
+3. The user should be able to give informed consent to the signed data via a simple, secure device with limited display capabilities.
+
+The correctness and security of `SIGN_MODE_TEXTUAL` is guaranteed by demonstrating an inverse function from the rendering to transaction protos.
+This means that it is impossible for a different protocol buffer message to render to the same text.
+
+### Transaction Hash Malleability
+
+When client software forms a transaction, the "raw" transaction (`TxRaw`) is serialized as a proto
+and a hash of the resulting byte sequence is computed.
+This is the `TxHash`, and is used by various services to track the submitted transaction through its lifecycle.
+Various misbehavior is possible if one can generate a modified transaction with a different TxHash
+but for which the signature still checks out.
+
+SIGN\_MODE\_TEXTUAL prevents this transaction malleability by including the TxHash as an expert screen
+in the rendering.
+
+### SignDoc
+
+The SignDoc for `SIGN_MODE_TEXTUAL` is formed from a data structure like:
+
+```go
+type Screen struct {
+ Title string // possibly size limited to, advised to 64 characters
+ Content string // possibly size limited to, advised to 255 characters
+ Indent uint8 // size limited to something small like 16 or 32
+ Expert bool
+}
+
+type SignDocTextual struct {
+ Screens []Screen
+}
+```
+
+We do not plan to use protobuf serialization to form the sequence of bytes
+that will be transmitted and signed, in order to keep the decoder simple.
+We will use [CBOR](https://cbor.io) ([RFC 8949](https://www.rfc-editor.org/rfc/rfc8949.html)) instead.
+The encoding is defined by the following CDDL ([RFC 8610](https://www.rfc-editor.org/rfc/rfc8610)):
+
+```
+;;; CDDL (RFC 8610) Specification of SignDoc for SIGN_MODE_TEXTUAL.
+;;; Must be encoded using CBOR deterministic encoding (RFC 8949, section 4.2.1).
+
+;; A Textual document is a struct containing one field: an array of screens.
+sign_doc = {
+ screens_key: [* screen],
+}
+
+;; The key is an integer to keep the encoding small.
+screens_key = 1
+
+;; A screen consists of a text string, an indentation, and the expert flag,
+;; represented as an integer-keyed map. All entries are optional
+;; and MUST be omitted from the encoding if empty, zero, or false.
+;; Text defaults to the empty string, indent defaults to zero,
+;; and expert defaults to false.
+screen = {
+ ? title_key: tstr,
+ ? content_key: tstr,
+ ? indent_key: uint,
+ ? expert_key: bool,
+}
+
+;; Keys are small integers to keep the encoding small.
+title_key = 1
+content_key = 2
+indent_key = 3
+expert_key = 4
+```
+
+Defining the sign\_doc as directly an array of screens has also been considered. However, given the possibility of future iterations of this specification, using a single-keyed struct has been chosen over the former proposal, as structs allow for easier backwards-compatibility.
+
+## Details
+
+In the examples that follow, screens will be shown as lines of text,
+indentation is indicated with a leading '>',
+and expert screens are marked with a leading `*`.
+
+### Encoding of the Transaction Envelope
+
+We define "transaction envelope" as all data in a transaction that is not in the `TxBody.Messages` field. Transaction envelope includes fee, signer infos and memo, but don't include `Msg`s. `//` denotes comments and are not shown on the Ledger device.
+
+```protobuf expandable
+Chain ID:
+Account number:
+Sequence:
+Address:
+*Public Key:
+This transaction has Message(s) // Pluralize "Message" only when int>1
+> Message (/): // See value renderers for Any rendering.
+End of Message
+Memo: // Skipped if no memo set.
+Fee: // See value renderers for coins rendering.
+*Fee payer: // Skipped if no fee_payer set.
+*Fee granter: // Skipped if no fee_granter set.
+Tip:                          // Skipped if no tip.
+Tipper:
+*Gas Limit:
+*Timeout Height: // Skipped if no timeout_height set.
+*Other signer: SignerInfo // Skipped if the transaction only has 1 signer.
+*> Other signer (/):
+*End of other signers
+*Extension options: Any: // Skipped if no body extension options
+*> Extension options (/):
+*End of extension options
+*Non critical extension options: Any: // Skipped if no body non critical extension options
+*> Non critical extension options (/):
+*End of Non critical extension options
+*Hash of raw bytes: // Hex encoding of bytes defined, to prevent tx hash malleability.
+```
+
+### Encoding of the Transaction Body
+
+Transaction Body is the `Tx.TxBody.Messages` field, which is an array of `Any`s, where each `Any` packs a `sdk.Msg`. Since `sdk.Msg`s are widely used, they have a slightly different encoding than usual array of `Any`s (Protobuf: `repeated google.protobuf.Any`) described in Annex 1.
+
+```
+This transaction has message: // Optional 's' for "message" if there is >1 sdk.Msg.
+// For each Msg, print the following 2 lines:
+Msg (/): // E.g. Msg (1/2): bank v1beta1 send coins
+
+End of transaction messages
+```
+
+#### Example
+
+Given the following Protobuf message:
+
+```protobuf expandable
+message Grant {
+ google.protobuf.Any authorization = 1 [(cosmos_proto.accepts_interface) = "cosmos.authz.v1beta1.Authorization"];
+ google.protobuf.Timestamp expiration = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+}
+
+message MsgGrant {
+ option (cosmos.msg.v1.signer) = "granter";
+
+ string granter = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+ string grantee = 2 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+}
+```
+
+and a transaction containing 1 such `sdk.Msg`, we get the following encoding:
+
+```
+This transaction has 1 message:
+Msg (1/1): authz v1beta1 grant
+Granter: cosmos1abc...def
+Grantee: cosmos1ghi...jkl
+End of transaction messages
+```
+
+### Custom `Msg` Renderers
+
+Application developers may choose to not follow default renderer value output for their own `Msg`s. In this case, they can implement their own custom `Msg` renderer. This is similar to [EIP4430](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-4430.md), where the smart contract developer chooses the description string to be shown to the end user.
+
+This is done by setting the `cosmos.msg.textual.v1.expert_custom_renderer` Protobuf option to a non-empty string. This option CAN ONLY be set on a Protobuf message representing transaction message object (implementing `sdk.Msg` interface).
+
+```protobuf
+message MsgFooBar {
+ // Optional comments to describe in human-readable language the formatting
+ // rules of the custom renderer.
+ option (cosmos.msg.textual.v1.expert_custom_renderer) = "";
+
+ // proto fields
+}
+```
+
+When this option is set on a `Msg`, a registered function will transform the `Msg` into an array of one or more strings, which MAY use the key/value format (described in point #3) with the expert field prefix (described in point #5) and arbitrary indentation (point #6). These strings MAY be rendered from a `Msg` field using a default value renderer, or they may be generated from several fields using custom logic.
+
+The `` is a string convention chosen by the application developer and is used to identify the custom `Msg` renderer. For example, the documentation or specification of this custom algorithm can reference this identifier. This identifier CAN have a versioned suffix (e.g. `_v1`) to adapt for future changes (which would be consensus-breaking). We also recommend adding Protobuf comments to describe in human language the custom logic used.
+
+Moreover, the renderer must provide 2 functions: one for formatting from Protobuf to string, and one for parsing string to Protobuf. These 2 functions are provided by the application developer. To satisfy point #1, the parse function MUST be the inverse of the formatting function. This property will not be checked by the SDK at runtime. However, we strongly recommend the application developer to include a comprehensive suite in their app repo to test invertibility, as to not introduce security bugs.
+
+### Require signing over the `TxBody` and `AuthInfo` raw bytes
+
+Recall that the transaction bytes merklelized on chain are the Protobuf binary serialization of [TxRaw](https://buf.build/cosmos/cosmos-sdk/sdk/v0.50/main:cosmos.tx.v1beta1#cosmos.tx.v1beta1.TxRaw), which contains the `body_bytes` and `auth_info_bytes`. Moreover, the transaction hash is defined as the SHA256 hash of the `TxRaw` bytes. We require that the user signs over these bytes in SIGN\_MODE\_TEXTUAL, more specifically over the following string:
+
+```
+*Hash of raw bytes:
+```
+
+where:
+
+* `++` denotes concatenation,
+* `HEX` is the hexadecimal representation of the bytes, all in capital letters, no `0x` prefix,
+* and `len()` is encoded as a Big-Endian uint64.
+
+This is to prevent transaction hash malleability. The point #1 about invertibility assures that transaction `body` and `auth_info` values are not malleable, but the transaction hash still might be malleable with point #1 only, because the SIGN\_MODE\_TEXTUAL strings don't follow the byte ordering defined in `body_bytes` and `auth_info_bytes`. Without this hash, a malicious validator or exchange could intercept a transaction, modify its transaction hash *after* the user signed it using SIGN\_MODE\_TEXTUAL (by tweaking the byte ordering inside `body_bytes` or `auth_info_bytes`), and then submit it to Tendermint.
+
+By including this hash in the SIGN\_MODE\_TEXTUAL signing payload, we keep the same level of guarantees as [SIGN\_MODE\_DIRECT](/sdk/v0.50/build/architecture/adr-020-protobuf-transaction-encoding).
+
+These bytes are only shown in expert mode, hence the leading `*`.
+
+## Updates to the current specification
+
+The current specification is not set in stone, and future iterations are to be expected. We distinguish two categories of updates to this specification:
+
+1. Updates that require changes of the hardware device embedded application.
+2. Updates that only modify the envelope and the value renderers.
+
+Updates in the 1st category include changes of the `Screen` struct or its corresponding CBOR encoding. This type of updates require a modification of the hardware signer application, to be able to decode and parse the new types. Backwards-compatibility must also be guaranteed, so that the new hardware application works with existing versions of the SDK. These updates require the coordination of multiple parties: SDK developers, hardware application developers (currently: Zondax), and client-side developers (e.g. CosmJS). Furthermore, a new submission of the hardware device application may be necessary, which, depending on the vendor, can take some time. As such, we recommend to avoid this type of updates as much as possible.
+
+Updates in the 2nd category include changes to any of the value renderers or to the transaction envelope. For example, the ordering of fields in the envelope can be swapped, or the timestamp formatting can be modified. Since SIGN\_MODE\_TEXTUAL sends `Screen`s to the hardware device, this type of change does not need a hardware wallet application update. They are however state-machine-breaking, and must be documented as such. They require the coordination of SDK developers with client-side developers (e.g. CosmJS), so that the updates are released on both sides close to each other in time.
+
+We define a spec version, which is an integer that must be incremented on each update of either category. This spec version will be exposed by the SDK's implementation, and can be communicated to clients. For example, SDK v0.50 might use the spec version 1, and SDK v0.51 might use 2; thanks to this versioning, clients can know how to craft SIGN\_MODE\_TEXTUAL transactions based on the target SDK version.
+
+The current spec version is defined in the "Status" section, on the top of this document. It is initialized to `0` to allow flexibility in choosing how to define future versions, as it would allow adding a field either in the SignDoc Go struct or in Protobuf in a backwards-compatible way.
+
+## Additional Formatting by the Hardware Device
+
+See [annex 2](/sdk/v0.50/build/architecture/adr-050-sign-mode-textual-annex2).
+
+## Examples
+
+1. A minimal MsgSend: [see transaction](https://github.com/cosmos/cosmos-sdk/blob/094abcd393379acbbd043996024d66cd65246fb1/tx/textual/internal/testdata/e2e.json#L2-L70).
+2. A transaction with a bit of everything: [see transaction](https://github.com/cosmos/cosmos-sdk/blob/094abcd393379acbbd043996024d66cd65246fb1/tx/textual/internal/testdata/e2e.json#L71-L270).
+
+The examples below are stored in a JSON file with the following fields:
+
+* `proto`: the representation of the transaction in ProtoJSON,
+* `screens`: the transaction rendered into SIGN\_MODE\_TEXTUAL screens,
+* `cbor`: the sign bytes of the transaction, which is the CBOR encoding of the screens.
+
+## Consequences
+
+### Backwards Compatibility
+
+SIGN\_MODE\_TEXTUAL is purely additive, and doesn't break any backwards compatibility with other sign modes.
+
+### Positive
+
+* Human-friendly way of signing in hardware devices.
+* Once SIGN\_MODE\_TEXTUAL is shipped, SIGN\_MODE\_LEGACY\_AMINO\_JSON can be deprecated and removed. On the longer term, once the ecosystem has totally migrated, Amino can be totally removed.
+
+### Negative
+
+* Some fields are still encoded in non-human-readable ways, such as public keys in hexadecimal.
+* New ledger app needs to be released, still unclear
+
+### Neutral
+
+* If the transaction is complex, the string array can be arbitrarily long, and some users might just skip some screens and blind sign.
+
+## Further Discussions
+
+* Some details on value renderers need to be polished, see [Annex 1](/sdk/v0.50/build/architecture/adr-050-sign-mode-textual-annex1).
+* Are ledger apps able to support both SIGN\_MODE\_LEGACY\_AMINO\_JSON and SIGN\_MODE\_TEXTUAL at the same time?
+* Open question: should we add a Protobuf field option to allow app developers to overwrite the textual representation of certain Protobuf fields and message? This would be similar to Ethereum's [EIP4430](https://github.com/ethereum/EIPs/pull/4430), where the contract developer decides on the textual representation.
+* Internationalization.
+
+## References
+
+* [Annex 1](/sdk/v0.50/build/architecture/adr-050-sign-mode-textual-annex1)
+
+* Initial discussion: [Link](https://github.com/cosmos/cosmos-sdk/issues/6513)
+
+* Living document used in the working group: [Link](https://hackmd.io/fsZAO-TfT0CKmLDtfMcKeA?both)
+
+* Working group meeting notes: [Link](https://hackmd.io/7RkGfv_rQAaZzEigUYhcXw)
+
+* Ethereum's "Described Transactions" [Link](https://github.com/ethereum/EIPs/pull/4430)
diff --git a/sdk/next/build/architecture/adr-053-go-module-refactoring.mdx b/sdk/next/build/architecture/adr-053-go-module-refactoring.mdx
new file mode 100644
index 000000000..c87ffa9dd
--- /dev/null
+++ b/sdk/next/build/architecture/adr-053-go-module-refactoring.mdx
@@ -0,0 +1,113 @@
+---
+title: 'ADR 053: Go Module Refactoring'
+description: '2022-04-27: First Draft'
+---
+
+## Changelog
+
+* 2022-04-27: First Draft
+
+## Status
+
+PROPOSED
+
+## Abstract
+
+The current SDK is built as a single monolithic go module. This ADR describes
+how we refactor the SDK into smaller independently versioned go modules
+for ease of maintenance.
+
+## Context
+
+Go modules impose certain requirements on software projects with respect to
+stable version numbers (anything above 0.x) in that [any API breaking changes
+necessitate a major version](https://go.dev/doc/modules/release-workflow#breaking)
+increase which technically creates a new go module
+(with a v2, v3, etc. suffix).
+
+[Keeping modules API compatible](https://go.dev/blog/module-compatibility) in
+this way requires a fair amount of thought and discipline.
+
+The Cosmos SDK is a fairly large project which originated before go modules
+came into existence and has always been under a v0.x release even though
+it has been used in production for years now, not because it isn't production
+quality software, but rather because the API compatibility guarantees required
+by go modules are fairly complex to adhere to with such a large project.
+Up to now, it has generally been deemed more important to be able to break the
+API if needed rather than require all users update all package import paths
+to accommodate breaking changes causing v2, v3, etc. releases. This is in
+addition to the other complexities related to protobuf generated code that will
+be addressed in a separate ADR.
+
+Nevertheless, the desire for semantic versioning has been [strong in the
+community](https://github.com/cosmos/cosmos-sdk/discussions/10162) and the
+single go module release process has made it very hard to
+release small changes to isolated features in a timely manner. Release cycles
+often exceed six months which means small improvements done in a day or
+two get bottle-necked by everything else in the monolithic release cycle.
+
+## Decision
+
+To improve the current situation, the SDK is being refactored into multiple
+go modules within the current repository. There has been a [fair amount of
+debate](https://github.com/cosmos/cosmos-sdk/discussions/10582#discussioncomment-1813377)
+as to how to do this, with some developers arguing for larger vs smaller
+module scopes. There are pros and cons to both approaches (which will be
+discussed below in the [Consequences](#consequences) section), but the
+approach being adopted is the following:
+
+* a go module should generally be scoped to a specific coherent set of
+ functionality (such as math, errors, store, etc.)
+* when code is removed from the core SDK and moved to a new module path, every
+ effort should be made to avoid API breaking changes in the existing code using
+ aliases and wrapper types (as done in [Link](https://github.com/cosmos/cosmos-sdk/pull/10779)
+ and [Link](https://github.com/cosmos/cosmos-sdk/pull/11788))
+* new go modules should be moved to a standalone domain (`cosmossdk.io`) before
+ being tagged as `v1.0.0` to accommodate the possibility that they may be
+ better served by a standalone repository in the future
+* all go modules should follow the guidelines in [Link](https://go.dev/blog/module-compatibility)
+ before `v1.0.0` is tagged and should make use of `internal` packages to limit
+ the exposed API surface
+* the new go module's API may deviate from the existing code where there are
+ clear improvements to be made or to remove legacy dependencies (for instance on
+ amino or gogo proto), as long the old package attempts
+ to avoid API breakage with aliases and wrappers
+* care should be taken when simply trying to turn an existing package into a
+ new go module: [Link](https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository).
+ In general, it seems safer to just create a new module path (appending v2, v3, etc.
+ if necessary), rather than trying to make an old package a new module.
+
+## Consequences
+
+### Backwards Compatibility
+
+If the above guidelines are followed, using aliases or wrapper types in
+existing APIs that point back to the new go modules, there should be no or
+very limited breaking changes to existing APIs.
+
+### Positive
+
+* standalone pieces of software will reach `v1.0.0` sooner
+* new features to specific functionality will be released sooner
+
+### Negative
+
+* there will be more go module versions to update in the SDK itself and
+ per-project, although most of these will hopefully be indirect
+
+### Neutral
+
+## Further Discussions
+
+Further discussions are occurring primarily in
+[Link](https://github.com/cosmos/cosmos-sdk/discussions/10582) and within
+the Cosmos SDK Framework Working Group.
+
+## References
+
+* [Link](https://go.dev/doc/modules/release-workflow)
+* [Link](https://go.dev/blog/module-compatibility)
+* [Link](https://github.com/cosmos/cosmos-sdk/discussions/10162)
+* [Link](https://github.com/cosmos/cosmos-sdk/discussions/10582)
+* [Link](https://github.com/cosmos/cosmos-sdk/pull/10779)
+* [Link](https://github.com/cosmos/cosmos-sdk/pull/11788)
diff --git a/sdk/next/build/architecture/adr-054-semver-compatible-modules.mdx b/sdk/next/build/architecture/adr-054-semver-compatible-modules.mdx
new file mode 100644
index 000000000..bc9d9f188
--- /dev/null
+++ b/sdk/next/build/architecture/adr-054-semver-compatible-modules.mdx
@@ -0,0 +1,799 @@
+---
+title: 'ADR 054: Semver Compatible SDK Modules'
+description: '2022-04-27: First draft'
+---
+
+## Changelog
+
+* 2022-04-27: First draft
+
+## Status
+
+DRAFT
+
+## Abstract
+
+In order to move the Cosmos SDK to a system of decoupled semantically versioned
+modules which can be composed in different combinations (ex. staking v3 with
+bank v1 and distribution v2), we need to reassess how we organize the API surface
+of modules to avoid problems with go semantic import versioning and
+circular dependencies. This ADR explores various approaches we can take to
+addressing these issues.
+
+## Context
+
+There has been [a fair amount of desire](https://github.com/cosmos/cosmos-sdk/discussions/10162)
+in the community for semantic versioning in the SDK and there has been significant
+movement to splitting SDK modules into [standalone go modules](https://github.com/cosmos/cosmos-sdk/issues/11899).
+Both of these will ideally allow the ecosystem to move faster because we won't
+be waiting for all dependencies to update synchronously. For instance, we could
+have 3 versions of the core SDK compatible with the latest 2 releases of
+CosmWasm as well as 4 different versions of staking. This sort of setup would
+allow early adopters to aggressively integrate new versions, while allowing
+more conservative users to be selective about which versions they're ready for.
+
+In order to achieve this, we need to solve the following problems:
+
+1. because of the way [go semantic import versioning](https://research.swtch.com/vgo-import) (SIV)
+ works, moving to SIV naively will actually make it harder to achieve these goals
+2. circular dependencies between modules need to be broken to actually release
+ many modules in the SDK independently
+3. pernicious minor version incompatibilities introduced through correctly
+ [evolving protobuf schemas](https://developers.google.com/protocol-buffers/docs/proto3#updating)
+ without correct [unknown field filtering](/sdk/v0.50/build/architecture/adr-020-protobuf-transaction-encoding#unknown-field-filtering)
+
+Note that all the following discussion assumes that the proto file versioning and state machine versioning of a module
+are distinct in that:
+
+* proto files are maintained in a non-breaking way (using something
+ like [buf breaking](https://docs.buf.build/breaking/overview)
+ to ensure all changes are backwards compatible)
+* proto file versions get bumped much less frequently, i.e. we might maintain `cosmos.bank.v1` through many versions
+ of the bank module state machine
+* state machine breaking changes are more common and ideally this is what we'd want to semantically version with
+ go modules, ex. `x/bank/v2`, `x/bank/v3`, etc.
+
+### Problem 1: Semantic Import Versioning Compatibility
+
+Consider we have a module `foo` which defines the following `MsgDoSomething` and that we've released its state
+machine in go module `example.com/foo`:
+
+```protobuf
+package foo.v1;
+
+message MsgDoSomething {
+ string sender = 1;
+ uint64 amount = 2;
+}
+
+service Msg {
+ DoSomething(MsgDoSomething) returns (MsgDoSomethingResponse);
+}
+```
+
+Now consider that we make a revision to this module and add a new `condition` field to `MsgDoSomething` and also
+add a new validation rule on `amount` requiring it to be non-zero, and that following go semantic versioning we
+release the next state machine version of `foo` as `example.com/foo/v2`.
+
+```protobuf expandable
+// Revision 1
+package foo.v1;
+
+message MsgDoSomething {
+ string sender = 1;
+
+ // amount must be a non-zero integer.
+ uint64 amount = 2;
+
+ // condition is an optional condition on doing the thing.
+ //
+ // Since: Revision 1
+ Condition condition = 3;
+}
+```
+
+Approaching this naively, we would generate the protobuf types for the initial
+version of `foo` in `example.com/foo/types` and we would generate the protobuf
+types for the second version in `example.com/foo/v2/types`.
+
+Now let's say we have a module `bar` which talks to `foo` using this keeper
+interface which `foo` provides:
+
+```go
+type FooKeeper interface {
+ DoSomething(MsgDoSomething)
+
+error
+}
+```
+
+#### Scenario A: Backward Compatibility: Newer Foo, Older Bar
+
+Imagine we have a chain which uses both `foo` and `bar` and wants to upgrade to
+`foo/v2`, but the `bar` module has not upgraded to `foo/v2`.
+
+In this case, the chain will not be able to upgrade to `foo/v2` until `bar`
+has upgraded its references to `example.com/foo/types.MsgDoSomething` to
+`example.com/foo/v2/types.MsgDoSomething`.
+
+Even if `bar`'s usage of `MsgDoSomething` has not changed at all, the upgrade
+will be impossible without this change because `example.com/foo/types.MsgDoSomething`
+and `example.com/foo/v2/types.MsgDoSomething` are fundamentally different
+incompatible structs in the go type system.
+
+#### Scenario B: Forward Compatibility: Older Foo, Newer Bar
+
+Now let's consider the reverse scenario, where `bar` upgrades to `foo/v2`
+by changing the `MsgDoSomething` reference to `example.com/foo/v2/types.MsgDoSomething`
+and releases that as `bar/v2` with some other changes that a chain wants.
+The chain, however, has decided that it thinks the changes in `foo/v2` are too
+risky and that it'd prefer to stay on the initial version of `foo`.
+
+In this scenario, it is impossible to upgrade to `bar/v2` without upgrading
+to `foo/v2` even if `bar/v2` would have worked 100% fine with `foo` other
+than changing the import path to `MsgDoSomething` (meaning that `bar/v2`
+doesn't actually use any new features of `foo/v2`).
+
+Now because of the way go semantic import versioning works, we are locked
+into either using `foo` and `bar` OR `foo/v2` and `bar/v2`. We cannot have
+`foo` + `bar/v2` OR `foo/v2` + `bar`. The go type system doesn't allow this
+even if both versions of these modules are otherwise compatible with each
+other.
+
+#### Naive Mitigation
+
+A naive approach to fixing this would be to not regenerate the protobuf types
+in `example.com/foo/v2/types` but instead just update `example.com/foo/types`
+to reflect the changes needed for `v2` (adding `condition` and requiring
+`amount` to be non-zero). Then we could release a patch of `example.com/foo/types`
+with this update and use that for `foo/v2`. But this change is state machine
+breaking for `v1`. It requires changing the `ValidateBasic` method to reject
+the case where `amount` is zero, and it adds the `condition` field which
+should be rejected based
+on [ADR 020 unknown field filtering](/sdk/v0.50/build/architecture/adr-020-protobuf-transaction-encoding#unknown-field-filtering).
+So adding these changes as a patch on `v1` is actually incorrect based on semantic
+versioning. Chains that want to stay on `v1` of `foo` should not
+be importing these changes because they are incorrect for `v1`.
+
+### Problem 2: Circular dependencies
+
+None of the above approaches allow `foo` and `bar` to be separate modules
+if for some reason `foo` and `bar` depend on each other in different ways.
+For instance, we can't have `foo` import `bar/types` while `bar` imports
+`foo/types`.
+
+We have several cases of circular module dependencies in the SDK
+(ex. staking, distribution and slashing) that are legitimate from a state machine
+perspective. Without separating the API types out somehow, there would be
+no way to independently semantically version these modules without some other
+mitigation.
+
+### Problem 3: Handling Minor Version Incompatibilities
+
+Imagine that we solve the first two problems but now have a scenario where
+`bar/v2` wants the option to use `MsgDoSomething.condition` which only `foo/v2`
+supports. If `bar/v2` works with `foo` `v1` and sets `condition` to some non-nil
+value, then `foo` will silently ignore this field resulting in a silent,
+possibly dangerous logic error. If `bar/v2` were able to check dynamically
+whether `foo` was on `v1` or `v2`, it could choose to only use `condition` when
+`foo/v2` is available. Even if `bar/v2` were able to perform this check, however,
+how do we know that it is always performing the check properly? Without
+some sort of
+framework-level [unknown field filtering](/sdk/v0.50/build/architecture/adr-020-protobuf-transaction-encoding#unknown-field-filtering),
+it is hard to know whether these pernicious hard to detect bugs are getting into
+our app and a client-server layer such as [ADR 033: Inter-Module Communication](/sdk/v0.50/build/architecture/adr-033-protobuf-inter-module-comm)
+may be needed to do this.
+
+## Solutions
+
+### Approach A) Separate API and State Machine Modules
+
+One solution (first proposed in [Link](https://github.com/cosmos/cosmos-sdk/discussions/10582)) is to isolate all protobuf
+generated code into a separate module
+from the state machine module. This would mean that we could have state machine
+go modules `foo` and `foo/v2` which could use a types or API go module say
+`foo/api`. This `foo/api` go module would be perpetually on `v1.x` and only
+accept non-breaking changes. This would then allow other modules to be
+compatible with either `foo` or `foo/v2` as long as the inter-module API only
+depends on the types in `foo/api`. It would also allow modules `foo` and `bar`
+to depend on each other in that both of them could depend on `foo/api` and
+`bar/api` without `foo` directly depending on `bar` and vice versa.
+
+This is similar to the naive mitigation described above except that it separates
+the types into separate go modules which in and of itself could be used to
+break circular module dependencies. It has the same problems as the naive solution,
+otherwise, which we could rectify by:
+
+1. removing all state machine breaking code from the API module (ex. `ValidateBasic` and any other interface methods)
+2. embedding the correct file descriptors for unknown field filtering in the binary
+
+#### Migrate all interface methods on API types to handlers
+
+To solve 1), we need to remove all interface implementations from generated
+types and instead use a handler approach which essentially means that given
+a type `X`, we have some sort of resolver which allows us to resolve interface
+implementations for that type (ex. `sdk.Msg` or `authz.Authorization`). For
+example:
+
+```go
+func (k Keeper)
+
+DoSomething(msg MsgDoSomething)
+
+error {
+ var validateBasicHandler ValidateBasicHandler
+ err := k.resolver.Resolve(&validateBasicHandler, msg)
+ if err != nil {
+ return err
+}
+
+err = validateBasicHandler.ValidateBasic()
+ ...
+}
+```
+
+In the case of some methods on `sdk.Msg`, we could replace them with declarative
+annotations. For instance, `GetSigners` can already be replaced by the protobuf
+annotation `cosmos.msg.v1.signer`. In the future, we may consider some sort
+of protobuf validation framework (like [Link](https://github.com/bufbuild/protoc-gen-validate)
+but more Cosmos-specific) to replace `ValidateBasic`.
+
+#### Pinned FileDescriptor's
+
+To solve 2), state machine modules must be able to specify what the version of
+the protobuf files was that they were built against. For instance if the API
+module for `foo` upgrades to `foo/v2`, the original `foo` module still needs
+a copy of the original protobuf files it was built with so that ADR 020
+unknown field filtering will reject `MsgDoSomething` when `condition` is
+set.
+
+The simplest way to do this may be to embed the protobuf `FileDescriptor`s into
+the module itself so that these `FileDescriptor`s are used at runtime rather
+than the ones that are built into the `foo/api` which may be different. Using
+[buf build](https://docs.buf.build/build/usage#output-format), [go embed](https://pkg.go.dev/embed),
+and a build script we can probably come up with a solution for embedding
+`FileDescriptor`s into modules that is fairly straightforward.
+
+#### Potential limitations to generated code
+
+One challenge with this approach is that it places heavy restrictions on what
+can go in API modules and requires that most of this is state machine breaking.
+All or most of the code in the API module would be generated from protobuf
+files, so we can probably control this with how code generation is done, but
+it is a risk to be aware of.
+
+For instance, we do code generation for the ORM that in the future could
+contain optimizations that are state machine breaking. We
+would either need to ensure very carefully that the optimizations aren't
+actually state machine breaking in generated code or separate this generated code
+out from the API module into the state machine module. Both of these mitigations
+are potentially viable but the API module approach does require an extra level
+of care to avoid these sorts of issues.
+
+#### Minor Version Incompatibilities
+
+This approach in and of itself does little to address any potential minor
+version incompatibilities and the
+requisite [unknown field filtering](/sdk/v0.50/build/architecture/adr-020-protobuf-transaction-encoding#unknown-field-filtering).
+Likely some sort of client-server routing layer which does this check such as
+[ADR 033: Inter-Module communication](/sdk/v0.50/build/architecture/adr-033-protobuf-inter-module-comm)
+is required to make sure that this is done properly. We could then allow
+modules to perform a runtime check given a `MsgClient`, ex:
+
+```go
+func (k Keeper)
+
+CallFoo()
+
+error {
+ if k.interModuleClient.MinorRevision(k.fooMsgClient) >= 2 {
+ k.fooMsgClient.DoSomething(&MsgDoSomething{
+ Condition: ...
+})
+}
+
+else {
+ ...
+}
+}
+```
+
+To do the unknown field filtering itself, the ADR 033 router would need to use
+the [protoreflect API](https://pkg.go.dev/google.golang.org/protobuf/reflect/protoreflect)
+to ensure that no fields unknown to the receiving module are set. This could
+result in an undesirable performance hit depending on how complex this logic is.
+
+### Approach B) Changes to Generated Code
+
+An alternate approach to solving the versioning problem is to change how protobuf code is generated and move modules
+mostly or completely in the direction of inter-module communication as described
+in [ADR 033](/sdk/v0.50/build/architecture/adr-033-protobuf-inter-module-comm).
+In this paradigm, a module could generate all the types it needs internally - including the API types of other modules -
+and talk to other modules via a client-server boundary. For instance, if `bar` needs to talk to `foo`, it could
+generate its own version of `MsgDoSomething` as `bar/internal/foo/v1.MsgDoSomething` and just pass this to the
+inter-module router which would somehow convert it to the version which foo needs (ex. `foo/internal.MsgDoSomething`).
+
+Currently, two generated structs for the same protobuf type cannot exist in the same go binary without special
+build flags (see [Link](https://developers.google.com/protocol-buffers/docs/reference/go/faq#fix-namespace-conflict)).
+A relatively simple mitigation to this issue would be to set up the protobuf code to not register protobuf types
+globally if they are generated in an `internal/` package. This will require modules to register their types manually
+with the app-level protobuf registry. This is similar to what modules already do with the `InterfaceRegistry`
+and amino codec.
+
+If modules *only* do ADR 033 message passing then a naive and non-performant solution for
+converting `bar/internal/foo/v1.MsgDoSomething`
+to `foo/internal.MsgDoSomething` would be marshaling and unmarshaling in the ADR 033 router. This would break down if
+we needed to expose protobuf types in `Keeper` interfaces because the whole point is to try to keep these types
+`internal/` so that we don't end up with all the import version incompatibilities we've described above. However,
+because of the issue with minor version incompatibilities and the need
+for [unknown field filtering](/sdk/v0.50/build/architecture/adr-020-protobuf-transaction-encoding#unknown-field-filtering),
+sticking with the `Keeper` paradigm instead of ADR 033 may be unviable to begin with.
+
+A more performant solution (that could maybe be adapted to work with `Keeper` interfaces) would be to only expose
+getters and setters for generated types and internally store data in memory buffers which could be passed from
+one implementation to another in a zero-copy way.
+
+For example, imagine this protobuf API with only getters and setters is exposed for `MsgSend`:
+
+```go expandable
+type MsgSend interface {
+ proto.Message
+ GetFromAddress()
+
+string
+ GetToAddress()
+
+string
+ GetAmount() []v1beta1.Coin
+ SetFromAddress(string)
+
+SetToAddress(string)
+
+SetAmount([]v1beta1.Coin)
+}
+
+func NewMsgSend()
+
+MsgSend {
+ return &msgSendImpl{
+ memoryBuffers: ...
+}
+}
+```
+
+Under the hood, `MsgSend` could be implemented based on some raw memory buffer in the same way
+that [Cap'n Proto](https://capnproto.org)
+and [FlatBuffers](https://google.github.io/flatbuffers/) so that we could convert between one version of `MsgSend`
+and another without serialization (i.e. zero-copy). This approach would have the added benefits of allowing zero-copy
+message passing to modules written in other languages such as Rust and accessed through a VM or FFI. It could also make
+unknown field filtering in inter-module communication simpler if we require that all new fields are added in sequential
+order, ex. just checking that no field `> 5` is set.
+
+Also, we wouldn't have any issues with state machine breaking code on generated types because all the generated
+code used in the state machine would actually live in the state machine module itself. Depending on how interface
+types and protobuf `Any`s are used in other languages, however, it may still be desirable to take the handler
+approach described in approach A. Either way, types implementing interfaces would still need to be registered
+with an `InterfaceRegistry` as they are now because there would be no way to retrieve them via the global registry.
+
+In order to simplify access to other modules using ADR 033, a public API module (maybe even one
+[remotely generated by Buf](https://docs.buf.build/bsr/remote-generation/go)) could be used by client modules instead
+of requiring to generate all client types internally.
+
+The big downsides of this approach are that it requires big changes to how people use protobuf types and would be a
+substantial rewrite of the protobuf code generator. This new generated code, however, could still be made compatible
+with
+the [`google.golang.org/protobuf/reflect/protoreflect`](https://pkg.go.dev/google.golang.org/protobuf/reflect/protoreflect)
+API in order to work with all standard golang protobuf tooling.
+
+It is possible that the naive approach of marshaling/unmarshaling in the ADR 033 router is an acceptable intermediate
+solution if the changes to the code generator are seen as too complex. However, since all modules would likely need
+to migrate to ADR 033 anyway with this approach, it might be better to do this all at once.
+
+### Approach C) Don't address these issues
+
+If the above solutions are seen as too complex, we can also decide not to do anything explicit to enable better module
+version compatibility, and break circular dependencies.
+
+In this case, when developers are confronted with the issues described above they can require dependencies to update in
+sync (what we do now) or attempt some ad-hoc potentially hacky solution.
+
+One approach is to ditch go semantic import versioning (SIV) altogether. Some people have commented that go's SIV
+(i.e. changing the import path to `foo/v2`, `foo/v3`, etc.) is too restrictive and that it should be optional. The
+golang maintainers disagree and only officially support semantic import versioning. We could, however, take the
+contrarian perspective and get more flexibility by using 0.x-based versioning basically forever.
+
+Module version compatibility could then be achieved using go.mod replace directives to pin dependencies to specific
+compatible 0.x versions. For instance if we knew `foo` 0.2 and 0.3 were both compatible with `bar` 0.3 and 0.4, we
+could use replace directives in our go.mod to stick to the versions of `foo` and `bar` we want. This would work as
+long as the authors of `foo` and `bar` avoid incompatible breaking changes between these modules.
+
+Or, if developers choose to use semantic import versioning, they can attempt the naive solution described above
+and would also need to use special tags and replace directives to make sure that modules are pinned to the correct
+versions.
+
+Note, however, that all of these ad-hoc approaches, would be vulnerable to the minor version compatibility issues
+described above unless [unknown field filtering](/sdk/v0.50/build/architecture/adr-020-protobuf-transaction-encoding#unknown-field-filtering)
+is properly addressed.
+
+### Approach D) Avoid protobuf generated code in public APIs
+
+An alternative approach would be to avoid protobuf generated code in public module APIs. This would help avoid the
+discrepancy between state machine versions and client API versions at the module to module boundaries. It would mean
+that we wouldn't do inter-module message passing based on ADR 033, but rather stick to the existing keeper approach
+and take it one step further by avoiding any protobuf generated code in the keeper interface methods.
+
+Using this approach, our `foo.Keeper.DoSomething` method wouldn't have the generated `MsgDoSomething` struct (which
+comes from the protobuf API), but instead positional parameters. Then in order for `foo/v2` to support the `foo/v1`
+keeper it would simply need to implement both the v1 and v2 keeper APIs. The `DoSomething` method in v2 could have the
+additional `condition` parameter, but this wouldn't be present in v1 at all so there would be no danger of a client
+accidentally setting this when it isn't available.
+
+So this approach would avoid the challenge around minor version incompatibilities because the existing module keeper
+API would not get new fields when they are added to protobuf files.
+
+Taking this approach, however, would likely require making all protobuf generated code internal in order to prevent
+it from leaking into the keeper API. This means we would still need to modify the protobuf code generator to not
+register `internal/` code with the global registry, and we would still need to manually register protobuf
+`FileDescriptor`s (this is probably true in all scenarios). It may, however, be possible to avoid needing to refactor
+interface methods on generated types to handlers.
+
+Also, this approach doesn't address what would be done in scenarios where modules still want to use the message router.
+Either way, we probably still want a way to pass messages from one module to another router safely even if it's just for
+use cases like `x/gov`, `x/authz`, CosmWasm, etc. That would still require most of the things outlined in approach (B),
+although we could advise modules to prefer keepers for communicating with other modules.
+
+The biggest downside of this approach is probably that it requires a strict refactoring of keeper interfaces to avoid
+generated code leaking into the API. This may result in cases where we need to duplicate types that are already defined
+in proto files and then write methods for converting between the golang and protobuf version. This may end up in a lot
+of unnecessary boilerplate and that may discourage modules from actually adopting it and achieving effective version
+compatibility. Approaches (A) and (B), although heavy handed initially, aim to provide a system which once adopted
+more or less gives the developer version compatibility for free with minimal boilerplate. Approach (D) may not be able
+to provide such a straightforward system since it requires a golang API to be defined alongside a protobuf API in a
+way that requires duplication and differing sets of design principles (protobuf APIs encourage additive changes
+while golang APIs would forbid it).
+
+Other downsides to this approach are:
+
+* no clear roadmap to supporting modules in other languages like Rust
+* doesn't get us any closer to proper object capability security (one of the goals of ADR 033)
+* ADR 033 needs to be done properly anyway for the set of use cases which do need it
+
+## Decision
+
+The latest **DRAFT** proposal is:
+
+1. we are aligned on adopting [ADR 033](/sdk/v0.50/build/architecture/adr-033-protobuf-inter-module-comm) not just as an addition to the
+ framework, but as a core replacement to the keeper paradigm entirely.
+2. the ADR 033 inter-module router will accommodate any variation of approach (A) or (B) given the following rules:
+ a. if the client type is the same as the server type then pass it directly through,
+ b. if both client and server use the zero-copy generated code wrappers (which still need to be defined), then pass
+ the memory buffers from one wrapper to the other, or
+ c. marshal/unmarshal types between client and server.
+
+This approach will allow for both maximal correctness and enable a clear path to enabling modules in other
+languages, possibly executed within a WASM VM.
+
+### Minor API Revisions
+
+To declare minor API revisions of proto files, we propose the following guidelines (which were already documented
+in [cosmos.app.v1alpha module options](https://github.com/cosmos/cosmos-sdk/blob/v0.50/proto/cosmos/app/v1alpha1/module.proto)):
+
+* proto packages which are revised from their initial version (considered revision `0`) should include a `package`
+ comment in some .proto file containing the text `Revision N` at the start of a comment line where `N` is the current
+ revision number.
+* all fields, messages, etc. added in a version beyond the initial revision should add a comment at the start of a
+ comment line of the form `Since: Revision N` where `N` is the non-zero revision it was added.
+
+It is advised that there is a 1:1 correspondence between a state machine module and versioned set of proto files
+which are versioned either as a buf module, a go API module, or both. If the buf schema registry is used, the version of
+this buf module should always be `1.N` where `N` corresponds to the package revision. Patch releases should be used when
+only documentation comments are updated. It is okay to include proto packages named `v2`, `v3`, etc. in this same
+`1.N` versioned buf module (ex. `cosmos.bank.v2`) as long as all these proto packages consist of a single API intended
+to be served by a single SDK module.
+
+### Introspecting Minor API Revisions
+
+In order for modules to introspect the minor API revision of peer modules, we propose adding the following method
+to `cosmossdk.io/core/intermodule.Client`:
+
+```go
+ServiceRevision(ctx context.Context, serviceName string)
+
+uint64
+```
+
+Modules could call this using the service name statically generated by the go grpc code generator:
+
+```go
+intermoduleClient.ServiceRevision(ctx, bankv1beta1.Msg_ServiceDesc.ServiceName)
+```
+
+In the future, we may decide to extend the code generator used for protobuf services to add a field
+to client types which does this check more concisely, ex:
+
+```go
+package bankv1beta1
+
+type MsgClient interface {
+ Send(context.Context, MsgSend) (MsgSendResponse, error)
+
+ServiceRevision(context.Context)
+
+uint64
+}
+```
+
+### Unknown Field Filtering
+
+To correctly perform [unknown field filtering](/sdk/v0.50/build/architecture/adr-020-protobuf-transaction-encoding#unknown-field-filtering),
+the inter-module router can do one of the following:
+
+* use the `protoreflect` API for messages which support that
+* for gogo proto messages, marshal and use the existing `codec/unknownproto` code
+* for zero-copy messages, do a simple check on the highest set field number (assuming we can require that fields are
+ added consecutively in increasing order)
+
+### `FileDescriptor` Registration
+
+Because a single go binary may contain different versions of the same generated protobuf code, we cannot rely on the
+global protobuf registry to contain the correct `FileDescriptor`s. Because `appconfig` module configuration is itself
+written in protobuf, we would like to load the `FileDescriptor`s for a module before loading a module itself. So we
+will provide ways to register `FileDescriptor`s at module registration time before instantiation. We propose the
+following `cosmossdk.io/core/appmodule.Option` constructors for the various cases of how `FileDescriptor`s may be
+packaged:
+
+```go expandable
+package appmodule
+
+// this can be used when we are using google.golang.org/protobuf compatible generated code
+// Ex:
+// ProtoFiles(bankv1beta1.File_cosmos_bank_v1beta1_module_proto)
+
+func ProtoFiles(file []protoreflect.FileDescriptor)
+
+Option {
+}
+
+// this can be used when we are using gogo proto generated code.
+func GzippedProtoFiles(file [][]byte)
+
+Option {
+}
+
+// this can be used when we are using buf build to generate a pinned file descriptor
+func ProtoImage(protoImage []byte)
+
+Option {
+}
+```
+
+This approach allows us to support several ways protobuf files might be generated:
+
+* proto files generated internally to a module (use `ProtoFiles`)
+* the API module approach with pinned file descriptors (use `ProtoImage`)
+* gogo proto (use `GzippedProtoFiles`)
+
+### Module Dependency Declaration
+
+One risk of ADR 033 is that dependencies are called at runtime which are not present in the loaded set of SDK modules.
+Also, we want modules to have a way to define a minimum dependency API revision that they require. Therefore, all
+modules should declare their set of dependencies upfront. These dependencies could be defined when a module is
+instantiated, but ideally we know what the dependencies are before instantiation and can statically look at an app
+config and determine whether the set of modules is compatible. For example, if `bar` requires `foo` revision `>= 1`, then we
+should be able to know this when creating an app config with two versions of `bar` and `foo`.
+
+We propose defining these dependencies in the proto options of the module config object itself.
+
+### Interface Registration
+
+We will also need to define how interface methods are defined on types that are serialized as `google.protobuf.Any`'s.
+In light of the desire to support modules in other languages, we may want to think of solutions that will accommodate
+other languages such as plugins described briefly in [ADR 033](/sdk/v0.50/build/architecture/adr-033-protobuf-inter-module-comm#internal-methods).
+
+### Testing
+
+In order to ensure that modules are indeed compatible with multiple versions of their dependencies, we plan to provide specialized
+unit and integration testing infrastructure that automatically tests multiple versions of dependencies.
+
+#### Unit Testing
+
+Unit tests should be conducted inside SDK modules by mocking their dependencies. In a full ADR 033 scenario,
+this means that all interaction with other modules is done via the inter-module router, so mocking of dependencies
+means mocking their msg and query server implementations. We will provide both a test runner and fixture to make this
+streamlined. The key thing that the test runner should do to test compatibility is to test all combinations of
+dependency API revisions. This can be done by taking the file descriptors for the dependencies, parsing their comments
+to determine the revisions in which various elements were added, and then creating synthetic file descriptors for each revision
+by subtracting elements that were added later.
+
+Here is a proposed API for the unit test runner and fixture:
+
+```go expandable
+package moduletesting
+
+import (
+
+ "context"
+ "testing"
+ "cosmossdk.io/core/intermodule"
+ "cosmossdk.io/depinject"
+ "google.golang.org/grpc"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protodesc"
+)
+
+type TestFixture interface {
+ context.Context
+ intermodule.Client // for making calls to the module we're testing
+ BeginBlock()
+
+EndBlock()
+}
+
+type UnitTestFixture interface {
+ TestFixture
+ grpc.ServiceRegistrar // for registering mock service implementations
+}
+
+type UnitTestConfig struct {
+ ModuleConfig proto.Message // the module's config object
+ DepinjectConfig depinject.Config // optional additional depinject config options
+ DependencyFileDescriptors []protodesc.FileDescriptorProto // optional dependency file descriptors to use instead of the global registry
+}
+
+// Run runs the test function for all combinations of dependency API revisions.
+func (cfg UnitTestConfig)
+
+Run(t *testing.T, f func(t *testing.T, f UnitTestFixture)) {
+ // ...
+}
+```
+
+Here is an example for testing bar calling foo which takes advantage of conditional service revisions in the expected
+mock arguments:
+
+```go expandable
+func TestBar(t *testing.T) {
+ UnitTestConfig{
+ ModuleConfig: &foomodulev1.Module{
+}}.Run(t, func (t *testing.T, f moduletesting.UnitTestFixture) {
+ ctrl := gomock.NewController(t)
+ mockFooMsgServer := footestutil.NewMockMsgServer()
+
+foov1.RegisterMsgServer(f, mockFooMsgServer)
+ barMsgClient := barv1.NewMsgClient(f)
+ if f.ServiceRevision(foov1.Msg_ServiceDesc.ServiceName) >= 1 {
+ mockFooMsgServer.EXPECT().DoSomething(gomock.Any(), &foov1.MsgDoSomething{
+ ...,
+ Condition: ..., // condition is expected in revision >= 1
+}).Return(&foov1.MsgDoSomethingResponse{
+}, nil)
+}
+
+else {
+ mockFooMsgServer.EXPECT().DoSomething(gomock.Any(), &foov1.MsgDoSomething{...
+}).Return(&foov1.MsgDoSomethingResponse{
+}, nil)
+}
+
+res, err := barMsgClient.CallFoo(f, &MsgCallFoo{
+})
+ ...
+})
+}
+```
+
+The unit test runner would make sure that no dependency mocks return arguments which are invalid for the service
+revision being tested to ensure that modules don't incorrectly depend on functionality not present in a given revision.
+
+#### Integration Testing
+
+An integration test runner and fixture would also be provided which instead of using mocks would test actual module
+dependencies in various combinations. Here is the proposed API:
+
+```go expandable
+type IntegrationTestFixture interface {
+ TestFixture
+}
+
+type IntegrationTestConfig struct {
+ ModuleConfig proto.Message // the module's config object
+ DependencyMatrix map[string][]proto.Message // all the dependent module configs
+}
+
+// Run runs the test function for all combinations of dependency modules.
+func (cfg IntegationTestConfig)
+
+Run(t *testing.T, f func (t *testing.T, f IntegrationTestFixture)) {
+ // ...
+}
+```
+
+And here is an example with foo and bar:
+
+```go expandable
+func TestBarIntegration(t *testing.T) {
+ IntegrationTestConfig{
+ ModuleConfig: &barmodulev1.Module{
+},
+ DependencyMatrix: map[string][]proto.Message{
+ "runtime": []proto.Message{ // test against two versions of runtime
+ &runtimev1.Module{
+},
+ &runtimev2.Module{
+},
+},
+ "foo": []proto.Message{ // test against three versions of foo
+ &foomodulev1.Module{
+},
+ &foomodulev2.Module{
+},
+ &foomodulev3.Module{
+},
+}
+
+}
+}.Run(t, func (t *testing.T, f moduletesting.IntegrationTestFixture) {
+ barMsgClient := barv1.NewMsgClient(f)
+
+res, err := barMsgClient.CallFoo(f, &MsgCallFoo{
+})
+ ...
+})
+}
+```
+
+Unlike unit tests, integration tests actually pull in other module dependencies. So that modules can be written
+without direct dependencies on other modules and because golang has no concept of development dependencies, integration
+tests should be written in separate go modules, ex. `example.com/bar/v2/test`. Because this paradigm uses go semantic
+versioning, it is possible to build a single go module which imports 3 versions of bar and 2 versions of runtime and
+can test these all together in the six various combinations of dependencies.
+
+## Consequences
+
+### Backwards Compatibility
+
+Modules which migrate fully to ADR 033 will not be compatible with existing modules which use the keeper paradigm.
+As a temporary workaround we may create some wrapper types that emulate the current keeper interface to minimize
+the migration overhead.
+
+### Positive
+
+* we will be able to deliver interoperable semantically versioned modules which should dramatically increase the
+ ability of the Cosmos SDK ecosystem to iterate on new features
+* it will be possible to write Cosmos SDK modules in other languages in the near future
+
+### Negative
+
+* all modules will need to be refactored somewhat dramatically
+
+### Neutral
+
+* the `cosmossdk.io/core/appconfig` framework will play a more central role in terms of how modules are defined, this
+ is likely generally a good thing but does mean additional changes for users wanting to stick to the pre-depinject way
+ of wiring up modules
+* `depinject` is somewhat less needed or maybe even obviated because of the full ADR 033 approach. If we adopt the
+ core API proposed in [Link](https://github.com/cosmos/cosmos-sdk/pull/12239), then a module would probably always instantiate
+ itself with a method `ProvideModule(appmodule.Service) (appmodule.AppModule, error)`. There is no complex wiring of
+ keeper dependencies in this scenario and dependency injection may not have as much of (or any) use case.
+
+## Further Discussions
+
+The decision described above is considered in draft mode and is pending final buy-in from the team and key stakeholders.
+Key outstanding discussions if we do adopt that direction are:
+
+* how do module clients introspect dependency module API revisions
+* how do modules determine a minor dependency module API revision requirement
+* how do modules appropriately test compatibility with different dependency versions
+* how to register and resolve interface implementations
+* how do modules register their protobuf file descriptors depending on the approach they take to generated code (the
+ API module approach may still be viable as a supported strategy and would need pinned file descriptors)
+
+## References
+
+* [Link](https://github.com/cosmos/cosmos-sdk/discussions/10162)
+* [Link](https://github.com/cosmos/cosmos-sdk/discussions/10582)
+* [Link](https://github.com/cosmos/cosmos-sdk/discussions/10368)
+* [Link](https://github.com/cosmos/cosmos-sdk/pull/11340)
+* [Link](https://github.com/cosmos/cosmos-sdk/issues/11899)
+* [ADR 020](/sdk/v0.50/build/architecture/adr-020-protobuf-transaction-encoding)
+* [ADR 033](/sdk/v0.50/build/architecture/adr-033-protobuf-inter-module-comm)
diff --git a/sdk/next/build/architecture/adr-055-orm.mdx b/sdk/next/build/architecture/adr-055-orm.mdx
new file mode 100644
index 000000000..3b6440498
--- /dev/null
+++ b/sdk/next/build/architecture/adr-055-orm.mdx
@@ -0,0 +1,116 @@
+---
+title: 'ADR 055: ORM'
+description: '2022-04-27: First draft'
+---
+
+## Changelog
+
+* 2022-04-27: First draft
+
+## Status
+
+ACCEPTED Implemented
+
+## Abstract
+
+In order to make it easier for developers to build Cosmos SDK modules and for clients to query, index and verify proofs
+against state data, we have implemented an ORM (object-relational mapping) layer for the Cosmos SDK.
+
+## Context
+
+Historically modules in the Cosmos SDK have always used the key-value store directly and created various handwritten
+functions for managing key format as well as constructing secondary indexes. This consumes a significant amount of
+time when building a module and is error-prone. Because key formats are non-standard, sometimes poorly documented,
+and subject to change, it is hard for clients to generically index, query and verify merkle proofs against state data.
+
+The known first instance of an "ORM" in the Cosmos ecosystem was in [weave](https://github.com/iov-one/weave/tree/master/orm).
+A later version was built for [regen-ledger](https://github.com/regen-network/regen-ledger/tree/157181f955823149e1825263a317ad8e16096da4/orm) for
+use in the group module and later [ported to the SDK](https://github.com/cosmos/cosmos-sdk/tree/35d3312c3be306591fcba39892223f1244c8d108/x/group/internal/orm)
+just for that purpose.
+
+While these earlier designs made it significantly easier to write state machines, they still required a lot of manual
+configuration, didn't expose state format directly to clients, and were limited in their support of different types
+of index keys, composite keys, and range queries.
+
+Discussions about the design continued in [Link](https://github.com/cosmos/cosmos-sdk/discussions/9156) and more
+sophisticated proofs of concept were created in [Link](https://github.com/allinbits/cosmos-sdk-poc/tree/master/runtime/orm)
+and [Link](https://github.com/cosmos/cosmos-sdk/pull/10454).
+
+## Decision
+
+These prior efforts culminated in the creation of the Cosmos SDK `orm` go module which uses protobuf annotations
+for specifying ORM table definitions. This ORM is based on the new `google.golang.org/protobuf/reflect/protoreflect`
+API and supports:
+
+* sorted indexes for all simple protobuf types (except `bytes`, `enum`, `float`, `double`) as well as `Timestamp` and `Duration`
+* unsorted `bytes` and `enum` indexes
+* composite primary and secondary keys
+* unique indexes
+* auto-incrementing `uint64` primary keys
+* complex prefix and range queries
+* paginated queries
+* complete logical decoding of KV-store data
+
+Almost all the information needed to decode state directly is specified in .proto files. Each table definition specifies
+an ID which is unique per .proto file and each index within a table is unique within that table. Clients then only need
+to know the name of a module and the prefix ORM data for a specific .proto file within that module in order to decode
+state data directly. This additional information will be exposed directly through app configs which will be explained
+in a future ADR related to app wiring.
+
+The ORM makes optimizations around storage space by not repeating values in the primary key in the key value
+when storing primary key records. For example, if the object `{"a":0,"b":1}` has the primary key `a`, it will
+be stored in the key value store as `Key: '0', Value: {"b":1}` (with more efficient protobuf binary encoding).
+Also, the generated code from [Link](https://github.com/cosmos/cosmos-proto) does optimizations around the
+`google.golang.org/protobuf/reflect/protoreflect` API to improve performance.
+
+A code generator is included with the ORM which creates type safe wrappers around the ORM's dynamic `Table`
+implementation and is the recommended way for modules to use the ORM.
+
+The ORM tests provide a simplified bank module demonstration which illustrates:
+
+* [ORM proto options](https://github.com/cosmos/cosmos-sdk/blob/0d846ae2f0424b2eb640f6679a703b52d407813d/orm/internal/testpb/bank.proto)
+* [Generated Code](https://github.com/cosmos/cosmos-sdk/blob/0d846ae2f0424b2eb640f6679a703b52d407813d/orm/internal/testpb/bank.cosmos_orm.go)
+* [Example Usage in a Module Keeper](https://github.com/cosmos/cosmos-sdk/blob/0d846ae2f0424b2eb640f6679a703b52d407813d/orm/model/ormdb/module_test.go)
+
+## Consequences
+
+### Backwards Compatibility
+
+State machine code that adopts the ORM will need migrations as the state layout is generally backwards incompatible.
+These state machines will also need to migrate to [Link](https://github.com/cosmos/cosmos-proto) at least for state data.
+
+### Positive
+
+* easier to build modules
+* easier to add secondary indexes to state
+* possible to write a generic indexer for ORM state
+* easier to write clients that do state proofs
+* possible to automatically write query layers rather than needing to manually implement gRPC queries
+
+### Negative
+
+* worse performance than handwritten keys (for now). See [Further Discussions](#further-discussions)
+ for potential improvements
+
+### Neutral
+
+## Further Discussions
+
+Further discussions will happen within the Cosmos SDK Framework Working Group. Current planned and ongoing work includes:
+
+* automatically generate client-facing query layer
+* client-side query libraries that transparently verify light client proofs
+* index ORM data to SQL databases
+* improve performance by:
+ * optimizing existing reflection based code to avoid unnecessary gets when doing deletes & updates of simple tables
+ * more sophisticated code generation such as making fast path reflection even faster (avoiding `switch` statements),
+ or even fully generating code that equals handwritten performance
+
+## References
+
+* [Link](https://github.com/iov-one/weave/tree/master/orm)
+* [Link](https://github.com/regen-network/regen-ledger/tree/157181f955823149e1825263a317ad8e16096da4/orm)
+* [Link](https://github.com/cosmos/cosmos-sdk/tree/35d3312c3be306591fcba39892223f1244c8d108/x/group/internal/orm)
+* [Link](https://github.com/cosmos/cosmos-sdk/discussions/9156)
+* [Link](https://github.com/allinbits/cosmos-sdk-poc/tree/master/runtime/orm)
+* [Link](https://github.com/cosmos/cosmos-sdk/pull/10454)
diff --git a/sdk/next/build/architecture/adr-057-app-wiring.mdx b/sdk/next/build/architecture/adr-057-app-wiring.mdx
new file mode 100644
index 000000000..815e9bdd5
--- /dev/null
+++ b/sdk/next/build/architecture/adr-057-app-wiring.mdx
@@ -0,0 +1,389 @@
+---
+title: 'ADR 057: App Wiring'
+description: '2022-05-04: Initial Draft 2022-08-19: Updates'
+---
+
+## Changelog
+
+* 2022-05-04: Initial Draft
+* 2022-08-19: Updates
+
+## Status
+
+PROPOSED Implemented
+
+## Abstract
+
+In order to make it easier to build Cosmos SDK modules and apps, we propose a new app wiring system based on
+dependency injection and declarative app configurations to replace the current `app.go` code.
+
+## Context
+
+A number of factors have made the SDK and SDK apps in their current state hard to maintain. A symptom of the current
+state of complexity is [`simapp/app.go`](https://github.com/cosmos/cosmos-sdk/blob/c3edbb22cab8678c35e21fe0253919996b780c01/simapp/app.go)
+which contains almost 100 lines of imports and is otherwise over 600 lines of mostly boilerplate code that is
+generally copied to each new project. (Not to mention the additional boilerplate which gets copied in `simapp/simd`.)
+
+The large amount of boilerplate needed to bootstrap an app has made it hard to release independently versioned go
+modules for Cosmos SDK modules as described in [ADR 053: Go Module Refactoring](/sdk/v0.53/build/architecture/adr-053-go-module-refactoring).
+
+In addition to being very verbose and repetitive, `app.go` also exposes a large surface area for breaking changes
+as most modules instantiate themselves with positional parameters which forces breaking changes anytime a new parameter
+(even an optional one) is needed.
+
+Several attempts were made to improve the current situation including [ADR 033: Internal-Module Communication](/sdk/v0.53/build/architecture/adr-033-protobuf-inter-module-comm)
+and [a proof-of-concept of a new SDK](https://github.com/allinbits/cosmos-sdk-poc). The discussions around these
+designs led to the current solution described here.
+
+## Decision
+
+In order to improve the current situation, a new "app wiring" paradigm has been designed to replace `app.go` which
+involves:
+
+* declarative configuration of the modules in an app which can be serialized to JSON or YAML
+* a dependency-injection (DI) framework for instantiating apps from that configuration
+
+### Dependency Injection
+
+When examining the code in `app.go` most of the code simply instantiates modules with dependencies provided either
+by the framework (such as store keys) or by other modules (such as keepers). It is generally pretty obvious given
+the context what the correct dependencies actually should be, so dependency-injection is an obvious solution. Rather
+than making developers manually resolve dependencies, a module will tell the DI container what dependency it needs
+and the container will figure out how to provide it.
+
+We explored several existing DI solutions in golang and felt that the reflection-based approach in [uber/dig](https://github.com/uber-go/dig)
+was closest to what we needed but not quite there. Assessing what we needed for the SDK, we designed and built
+the Cosmos SDK [depinject module](https://pkg.go.dev/github.com/cosmos/cosmos-sdk/depinject), which has the following
+features:
+
+* dependency resolution and provision through functional constructors, ex: `func(need SomeDep) (AnotherDep, error)`
+* dependency injection `In` and `Out` structs which support `optional` dependencies
+* grouped-dependencies (many-per-container) through the `ManyPerContainerType` tag interface
+* module-scoped dependencies via `ModuleKey`s (where each module gets a unique dependency)
+* one-per-module dependencies through the `OnePerModuleType` tag interface
+* sophisticated debugging information and container visualization via GraphViz
+
+Here are some examples of how these would be used in an SDK module:
+
+* `StoreKey` could be a module-scoped dependency which is unique per module
+* a module's `AppModule` instance (or the equivalent) could be a `OnePerModuleType`
+* CLI commands could be provided with `ManyPerContainerType`s
+
+Note that even though dependency resolution is dynamic and based on reflection, which could be considered a pitfall
+of this approach, the entire dependency graph should be resolved immediately on app startup and only gets resolved
+once (except in the case of dynamic config reloading which is a separate topic). This means that if there are any
+errors in the dependency graph, they will get reported immediately on startup so this approach is only slightly worse
+than fully static resolution in terms of error reporting and much better in terms of code complexity.
+
+### Declarative App Config
+
+In order to compose modules into an app, a declarative app configuration will be used. This configuration is based off
+of protobuf and its basic structure is very simple:
+
+```protobuf
+package cosmos.app.v1;
+
+message Config {
+ repeated ModuleConfig modules = 1;
+}
+
+message ModuleConfig {
+ string name = 1;
+ google.protobuf.Any config = 2;
+}
+```
+
+(See also [Link](https://github.com/cosmos/cosmos-sdk/blob/6e18f582bf69e3926a1e22a6de3c35ea327aadce/proto/cosmos/app/v1alpha1/config.proto))
+
+The configuration for every module is itself a protobuf message and modules will be identified and loaded based
+on the protobuf type URL of their config object (ex. `cosmos.bank.module.v1.Module`). Modules are given a unique short `name`
+to share resources across different versions of the same module which might have different protobuf package
+versions (ex. `cosmos.bank.module.v2.Module`). All module config objects should define the `cosmos.app.v1alpha1.module`
+descriptor option which will provide additional useful metadata for the framework and which can also be indexed
+in module registries.
+
+An example app config in YAML might look like this:
+
+```yaml expandable
+modules:
+ - name: baseapp
+ config:
+ "@type": cosmos.baseapp.module.v1.Module
+ begin_blockers: [staking, auth, bank]
+ end_blockers: [bank, auth, staking]
+ init_genesis: [bank, auth, staking]
+ - name: auth
+ config:
+ "@type": cosmos.auth.module.v1.Module
+ bech32_prefix: "foo"
+ - name: bank
+ config:
+ "@type": cosmos.bank.module.v1.Module
+ - name: staking
+ config:
+ "@type": cosmos.staking.module.v1.Module
+```
+
+In the above example, there is a hypothetical `baseapp` module which contains the information around ordering of
+begin blockers, end blockers, and init genesis. Rather than lifting these concerns up to the module config layer,
+they are themselves handled by a module which could allow a convenient way of swapping out different versions of
+baseapp (for instance to target different versions of tendermint), without needing to change the rest of the config.
+The `baseapp` module would then provide to the server framework (which sort of sits outside the ABCI app) an instance
+of `abci.Application`.
+
+In this model, an app is *modules all the way down* and the dependency injection/app config layer is very much
+protocol-agnostic and can adapt to even major breaking changes at the protocol layer.
+
+### Module & Protobuf Registration
+
+In order for the two components of dependency injection and declarative configuration to work together as described,
+we need a way for modules to actually register themselves and provide dependencies to the container.
+
+One additional complexity that needs to be handled at this layer is protobuf registry initialization. Recall that
+in both the current SDK `codec` and the proposed [ADR 054: Protobuf Semver Compatible Codegen](https://github.com/cosmos/cosmos-sdk/pull/11802),
+protobuf types need to be explicitly registered. Given that the app config itself is based on protobuf and
+uses protobuf `Any` types, protobuf registration needs to happen before the app config itself can be decoded. Because
+we don't know which protobuf `Any` types will be needed a priori and modules themselves define those types, we need
+to decode the app config in separate phases:
+
+1. parse app config JSON/YAML as raw JSON and collect required module type URLs (without doing proto JSON decoding)
+2. build a [protobuf type registry](https://pkg.go.dev/google.golang.org/protobuf@v1.28.0/reflect/protoregistry) based
+ on file descriptors and types provided by each required module
+3. decode the app config as proto JSON using the protobuf type registry
+
+Because in [ADR 054: Protobuf Semver Compatible Codegen](https://github.com/cosmos/cosmos-sdk/pull/11802), each module
+might use `internal` generated code which is not registered with the global protobuf registry, this code should provide
+an alternate way to register protobuf types with a type registry. In the same way that `.pb.go` files currently have a
+`var File_foo_proto protoreflect.FileDescriptor` for the file `foo.proto`, generated code should have a new member
+`var Types_foo_proto TypeInfo` where `TypeInfo` is an interface or struct with all the necessary info to register both
+the protobuf generated types and file descriptor.
+
+So a module must provide dependency injection providers and protobuf types, and take as input its module
+config object which uniquely identifies the module based on its type URL.
+
+With this in mind, we define a global module registry which allows module implementations to register themselves
+with the following API:
+
+```go expandable
+// Register registers a module with the provided type name (ex. cosmos.bank.module.v1.Module)
+// and the provided options.
+func Register(configTypeName protoreflect.FullName, option ...Option) { ...
+}
+
+type Option interface { /* private methods */
+}
+
+// Provide registers dependency injection provider functions which work with the
+// cosmos-sdk container module. These functions can also accept an additional
+// parameter for the module's config object.
+func Provide(providers ...interface{
+})
+
+Option { ...
+}
+
+// Types registers protobuf TypeInfo's with the protobuf registry.
+func Types(types ...TypeInfo)
+
+Option { ...
+}
+```
+
+Ex:
+
+```go expandable
+func init() {
+ appmodule.Register("cosmos.bank.module.v1.Module",
+ appmodule.Types(
+ types.Types_tx_proto,
+ types.Types_query_proto,
+ types.Types_types_proto,
+ ),
+ appmodule.Provide(
+ ProvideBankModule,
+ )
+ )
+}
+
+type Inputs struct {
+ container.In
+
+ AuthKeeper auth.Keeper
+ DB ormdb.ModuleDB
+}
+
+type Outputs struct {
+ Keeper bank.Keeper
+ AppModule appmodule.AppModule
+}
+
+func ProvideBankModule(config *bankmodulev1.Module, Inputs) (Outputs, error) { ...
+}
+```
+
+Note that in this module, a module configuration object *cannot* register different dependency providers at runtime
+based on the configuration. This is intentional because it allows us to know globally which modules provide which
+dependencies, and it will also allow us to do code generation of the whole app initialization. This
+can help us figure out issues with missing dependencies in an app config if the needed modules are loaded at runtime.
+In cases where required modules are not loaded at runtime, it may be possible to guide users to the correct module
+through a global Cosmos SDK module registry.
+
+The `*appmodule.Handler` type referenced above is a replacement for the legacy `AppModule` framework, and
+described in [ADR 063: Core Module API](/sdk/v0.53/build/architecture/adr-063-core-module-api).
+
+### New `app.go`
+
+With this setup, `app.go` might now look something like this:
+
+```go expandable
+package main
+
+import (
+
+ // Each go package which registers a module must be imported just for side-effects
+ // so that module implementations are registered.
+ _ "github.com/cosmos/cosmos-sdk/x/auth/module"
+ _ "github.com/cosmos/cosmos-sdk/x/bank/module"
+ _ "github.com/cosmos/cosmos-sdk/x/staking/module"
+ "github.com/cosmos/cosmos-sdk/core/app"
+)
+
+//go:embed app.yaml
+var appConfigYAML []byte
+
+func main() {
+ app.Run(app.LoadYAML(appConfigYAML))
+}
+```
+
+### Application to existing SDK modules
+
+So far we have described a system which is largely agnostic to the specifics of the SDK such as store keys, `AppModule`,
+`BaseApp`, etc. Improvements to these parts of the framework that integrate with the general app wiring framework
+defined here are described in [ADR 063: Core Module API](/sdk/v0.53/build/architecture/adr-063-core-module-api).
+
+### Registration of Inter-Module Hooks
+
+Some modules define a hooks interface (ex. `StakingHooks`) which allows one module to call back into another module
+when certain events happen.
+
+With the app wiring framework, these hooks interfaces can be defined as a `OnePerModuleType`s and then the module
+which consumes these hooks can collect these hooks as a map of module name to hook type (ex. `map[string]FooHooks`). Ex:
+
+```go expandable
+func init() {
+ appmodule.Register(
+ &foomodulev1.Module{
+},
+ appmodule.Invoke(InvokeSetFooHooks),
+ ...
+ )
+}
+
+func InvokeSetFooHooks(
+ keeper *keeper.Keeper,
+ fooHooks map[string]FooHooks,
+)
+
+error {
+	keys := maps.Keys(fooHooks)
+	sort.Strings(keys)
+	for _, k := range keys {
+		keeper.AddFooHooks(fooHooks[k])
+	}
+}
+```
+
+Optionally, the module consuming hooks can allow apps to define an order for calling these hooks based on module name
+in its config object.
+
+An alternative way for registering hooks via reflection was considered where all keeper types are inspected to see if
+they implement the hook interface by the modules exposing hooks. This has the downsides of:
+
+* needing to expose all the keepers of all modules to the module providing hooks,
+* not allowing for encapsulating hooks on a different type which doesn't expose all keeper methods,
+* harder to know statically which modules expose hooks or are checking for them.
+
+With the approach proposed here, hooks registration will be obviously observable in `app.go` if `depinject` codegen
+(described below) is used.
+
+### Code Generation
+
+The `depinject` framework will optionally allow the app configuration and dependency injection wiring to be code
+generated. This will allow:
+
+* dependency injection wiring to be inspected as regular go code just like the existing `app.go`,
+* dependency injection to be opt-in with manual wiring 100% still possible.
+
+Code generation requires that all providers and invokers and their parameters are exported and in non-internal packages.
+
+### Module Semantic Versioning
+
+When we start creating semantically versioned SDK modules that are in standalone go modules, a state machine breaking
+change to a module should be handled as follows:
+
+* the semantic major version should be incremented, and
+* a new semantically versioned module config protobuf type should be created.
+
+For instance, if we have the SDK module for bank in the go module `github.com/cosmos/cosmos-sdk/x/bank` with the module config type
+`cosmos.bank.module.v1.Module`, and we want to make a state machine breaking change to the module, we would:
+
+* create a new go module `github.com/cosmos/cosmos-sdk/x/bank/v2`,
+* with the module config protobuf type `cosmos.bank.module.v2.Module`.
+
+This *does not* mean that we need to increment the protobuf API version for bank. Both modules can support
+`cosmos.bank.v1`, but `github.com/cosmos/cosmos-sdk/x/bank/v2` will be a separate go module with a separate module config type.
+
+This practice will eventually allow us to use appconfig to load new versions of a module via a configuration change.
+
+Effectively, there should be a 1:1 correspondence between a semantically versioned go module and a
+versioned module config protobuf type, and major versioning bumps should occur whenever state machine breaking changes
+are made to a module.
+
+NOTE: SDK modules that are standalone go modules *should not* adopt semantic versioning until the concerns described in
+[ADR 054: Module Semantic Versioning](/sdk/v0.53/build/architecture/adr-054-semver-compatible-modules) are
+addressed. The short-term solution for this issue was left somewhat unresolved. However, the easiest tactic is
+likely to use a standalone API go module and follow the guidelines described in this comment: [Link](https://github.com/cosmos/cosmos-sdk/pull/11802#issuecomment-1406815181). For the time-being, it is recommended that
+Cosmos SDK modules continue to follow tried and true [0-based versioning](https://0ver.org) until an officially
+recommended solution is provided. This section of the ADR will be updated when that happens and for now, this section
+should be considered as a design recommendation for future adoption of semantic versioning.
+
+## Consequences
+
+### Backwards Compatibility
+
+Modules which work with the new app wiring system do not need to drop their existing `AppModule` and `NewKeeper`
+registration paradigms. These two methods can live side-by-side for as long as is needed.
+
+### Positive
+
+* wiring up new apps will be simpler, more succinct and less error-prone
+* it will be easier to develop and test standalone SDK modules without needing to replicate all of simapp
+* it may be possible to dynamically load modules and upgrade chains without needing to do a coordinated stop and binary
+ upgrade using this mechanism
+* easier plugin integration
+* dependency injection framework provides more automated reasoning about dependencies in the project, with a graph visualization.
+
+### Negative
+
+* it may be confusing when a dependency is missing although error messages, the GraphViz visualization, and global
+ module registration may help with that
+
+### Neutral
+
+* it will require work and education
+
+## Further Discussions
+
+The protobuf type registration system described in this ADR has not been implemented and may need to be reconsidered in
+light of code generation. It may be better to do this type registration with a DI provider.
+
+## References
+
+* [Link](https://github.com/cosmos/cosmos-sdk/blob/c3edbb22cab8678c35e21fe0253919996b780c01/simapp/app.go)
+* [Link](https://github.com/allinbits/cosmos-sdk-poc)
+* [Link](https://github.com/uber-go/dig)
+* [Link](https://github.com/google/wire)
+* [Link](https://pkg.go.dev/github.com/cosmos/cosmos-sdk/container)
+* [Link](https://github.com/cosmos/cosmos-sdk/pull/11802)
+* [ADR 063: Core Module API](/sdk/v0.53/build/architecture/adr-063-core-module-api)
diff --git a/sdk/next/build/architecture/adr-058-auto-generated-cli.mdx b/sdk/next/build/architecture/adr-058-auto-generated-cli.mdx
new file mode 100644
index 000000000..f88c13d20
--- /dev/null
+++ b/sdk/next/build/architecture/adr-058-auto-generated-cli.mdx
@@ -0,0 +1,101 @@
+---
+title: 'ADR 058: Auto-Generated CLI'
+description: '2022-05-04: Initial Draft'
+---
+
+## Changelog
+
+* 2022-05-04: Initial Draft
+
+## Status
+
+ACCEPTED Partially Implemented
+
+## Abstract
+
+In order to make it easier for developers to write Cosmos SDK modules, we provide infrastructure which automatically
+generates CLI commands based on protobuf definitions.
+
+## Context
+
+Current Cosmos SDK modules generally implement a CLI command for every transaction and every query supported by the
+module. These are handwritten for each command and essentially amount to providing some CLI flags or positional
+arguments for specific fields in protobuf messages.
+
+In order to make sure CLI commands are correctly implemented as well as to make sure that the application works
+in end-to-end scenarios, we do integration tests using CLI commands. While these tests are valuable on some level,
+they can be hard to write and maintain, and run slowly. [Some teams have contemplated](https://github.com/regen-network/regen-ledger/issues/1041)
+moving away from CLI-style integration tests (which are really end-to-end tests) towards narrower integration tests
+which exercise `MsgClient` and `QueryClient` directly. This might involve replacing the current end-to-end CLI
+tests with unit tests as there still needs to be some way to test these CLI commands for full quality assurance.
+
+## Decision
+
+To make module development simpler, we provide infrastructure - in the new [`client/v2`](https://github.com/cosmos/cosmos-sdk/tree/main/client/v2)
+go module - for automatically generating CLI commands based on protobuf definitions to either replace or complement
+handwritten CLI commands. This will mean that when developing a module, it will be possible to skip both writing and
+testing CLI commands as that can all be taken care of by the framework.
+
+The basic design for automatically generating CLI commands is to:
+
+* create one CLI command for each `rpc` method in a protobuf `Query` or `Msg` service
+* create a CLI flag for each field in the `rpc` request type
+* for `query` commands call gRPC and print the response as protobuf JSON or YAML (via the `-o`/`--output` flag)
+* for `tx` commands, create a transaction and apply common transaction flags
+
+In order to make the auto-generated CLI as easy to use (or easier) than handwritten CLI, we need to do custom handling
+of specific protobuf field types so that the input format is easy for humans:
+
+* `Coin`, `Coins`, `DecCoin`, and `DecCoins` should be input using the existing format (i.e. `1000uatom`)
+* it should be possible to specify an address using either the bech32 address string or a named key in the keyring
+* `Timestamp` and `Duration` should accept strings like `2001-01-01T00:00:00Z` and `1h3m` respectively
+* pagination should be handled with flags like `--page-limit`, `--page-offset`, etc.
+* it should be possible to customize any other protobuf type either via its message name or a `cosmos_proto.scalar` annotation
+
+At a basic level it should be possible to generate a command for a single `rpc` method as well as all the commands for
+a whole protobuf `service` definition. It should be possible to mix and match auto-generated and handwritten commands.
+
+## Consequences
+
+### Backwards Compatibility
+
+Existing modules can mix and match auto-generated and handwritten CLI commands so it is up to them as to whether they
+make breaking changes by replacing handwritten commands with slightly different auto-generated ones.
+
+For now the SDK will maintain the existing set of CLI commands for backwards compatibility but new commands will use
+this functionality.
+
+### Positive
+
+* module developers will not need to write CLI commands
+* module developers will not need to test CLI commands
+* [lens](https://github.com/strangelove-ventures/lens) may benefit from this
+
+### Negative
+
+### Neutral
+
+## Further Discussions
+
+We would like to be able to customize:
+
+* short and long usage strings for commands
+* aliases for flags (ex. `-a` for `--amount`)
+* which fields are positional parameters rather than flags
+
+It is an [open discussion](https://github.com/cosmos/cosmos-sdk/pull/11725#issuecomment-1108676129)
+as to whether these customization options should live in:
+
+* the .proto files themselves,
+* separate config files (ex. YAML), or
+* directly in code
+
+Providing the options in .proto files would allow a dynamic client to automatically generate
+CLI commands on the fly. However, that may pollute the .proto files themselves with information that is only relevant
+for a small subset of users.
+
+## References
+
+* [Link](https://github.com/regen-network/regen-ledger/issues/1041)
+* [Link](https://github.com/cosmos/cosmos-sdk/tree/main/client/v2)
+* [Link](https://github.com/cosmos/cosmos-sdk/pull/11725#issuecomment-1108676129)
diff --git a/sdk/next/build/architecture/adr-059-test-scopes.mdx b/sdk/next/build/architecture/adr-059-test-scopes.mdx
new file mode 100644
index 000000000..903c8c2ab
--- /dev/null
+++ b/sdk/next/build/architecture/adr-059-test-scopes.mdx
@@ -0,0 +1,259 @@
+---
+title: 'ADR 059: Test Scopes'
+description: >-
+ 2022-08-02: Initial Draft 2023-03-02: Add precision for integration tests
+ 2023-03-23: Add precision for E2E tests
+---
+
+## Changelog
+
+* 2022-08-02: Initial Draft
+* 2023-03-02: Add precision for integration tests
+* 2023-03-23: Add precision for E2E tests
+
+## Status
+
+PROPOSED Partially Implemented
+
+## Abstract
+
+Recent work in the SDK aimed at breaking apart the monolithic root go module has highlighted
+shortcomings and inconsistencies in our testing paradigm. This ADR clarifies a common
+language for talking about test scopes and proposes an ideal state of tests at each scope.
+
+## Context
+
+[ADR-053: Go Module Refactoring](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-053-go-module-refactoring.md) expresses our desire for an SDK composed of many
+independently versioned Go modules, and [ADR-057: App Wiring](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-057-app-wiring.md) offers a methodology
+for breaking apart inter-module dependencies through the use of dependency injection. As
+described in [EPIC: Separate all SDK modules into standalone go modules](https://github.com/cosmos/cosmos-sdk/issues/11899), module
+dependencies are particularly complected in the test phase, where simapp is used as
+the key test fixture in setting up and running tests. It is clear that the successful
+completion of Phases 3 and 4 in that EPIC require the resolution of this dependency problem.
+
+In [EPIC: Unit Testing of Modules via Mocks](https://github.com/cosmos/cosmos-sdk/issues/12398) it was thought this Gordian knot could be
+unwound by mocking all dependencies in the test phase for each module, but seeing how these
+refactors were complete rewrites of test suites discussions began around the fate of the
+existing integration tests. One perspective is that they ought to be thrown out, another is
+that integration tests have some utility of their own and a place in the SDK's testing story.
+
+Another point of confusion has been the current state of CLI test suites, [x/auth](https://github.com/cosmos/cosmos-sdk/blob/0f7e56c6f9102cda0ca9aba5b6f091dbca976b5a/x/auth/client/testutil/suite.go#L44-L49) for
+example. In code these are called integration tests, but in reality function as end to end
+tests by starting up a tendermint node and full application. [EPIC: Rewrite and simplify
+CLI tests](https://github.com/cosmos/cosmos-sdk/issues/12696) identifies the ideal state of CLI tests using mocks, but does not address the
+place end to end tests may have in the SDK.
+
+From here we identify three scopes of testing, **unit**, **integration**, **e2e** (end to
+end), seek to define the boundaries of each, their shortcomings (real and imposed), and their
+ideal state in the SDK.
+
+### Unit tests
+
+Unit tests exercise the code contained in a single module (e.g. `/x/bank`) or package
+(e.g. `/client`) in isolation from the rest of the code base. Within this we identify two
+levels of unit tests, *illustrative* and *journey*. The definitions below lean heavily on
+[The BDD Books - Formulation](https://leanpub.com/bddbooks-formulation) section 1.3.
+
+*Illustrative* tests exercise an atomic part of a module in isolation - in this case we
+might do fixture setup/mocking of other parts of the module.
+
+Tests which exercise a whole module's function with dependencies mocked, are *journeys*.
+These are almost like integration tests in that they exercise many things together but still
+use mocks.
+
+Example 1 journey vs illustrative tests - [depinject's BDD style tests](https://github.com/cosmos/cosmos-sdk/blob/main/depinject/features/bindings.feature), show how we can
+rapidly build up many illustrative cases demonstrating behavioral rules without [very much code](https://github.com/cosmos/cosmos-sdk/blob/main/depinject/binding_test.go) while maintaining high level readability.
+
+Example 2 [depinject table driven tests](https://github.com/cosmos/cosmos-sdk/blob/main/depinject/provider_desc_test.go)
+
+Example 3 [Bank keeper tests](https://github.com/cosmos/cosmos-sdk/blob/2bec9d2021918650d3938c3ab242f84289daef80/x/bank/keeper/keeper_test.go#L94-L105) - A mock implementation of `AccountKeeper` is supplied to the keeper constructor.
+
+#### Limitations
+
+Certain modules are tightly coupled beyond the test phase. A recent dependency report for
+`bank -> auth` found 274 total usages of `auth` in `bank`, 50 of which are in
+production code and 224 in test. This tight coupling may suggest that either the modules
+should be merged, or refactoring is required to abstract references to the core types tying
+the modules together. It could also indicate that these modules should be tested together
+in integration tests beyond mocked unit tests.
+
+In some cases setting up a test case for a module with many mocked dependencies can be quite
+cumbersome and the resulting test may only show that the mocking framework works as expected
+rather than working as a functional test of interdependent module behavior.
+
+### Integration tests
+
+Integration tests define and exercise relationships between an arbitrary number of modules
+and/or application subsystems.
+
+Wiring for integration tests is provided by `depinject` and some [helper code](https://github.com/cosmos/cosmos-sdk/blob/2bec9d2021918650d3938c3ab242f84289daef80/testutil/sims/app_helpers.go#L95) starts up
+a running application. A section of the running application may then be tested. Certain
+inputs during different phases of the application life cycle are expected to produce
+invariant outputs without too much concern for component internals. This type of black box
+testing has a larger scope than unit testing.
+
+Example 1 [client/grpc\_query\_test/TestGRPCQuery](https://github.com/cosmos/cosmos-sdk/blob/2bec9d2021918650d3938c3ab242f84289daef80/client/grpc_query_test.go#L111-L129) - This test is misplaced in `/client`,
+but tests the life cycle of (at least) `runtime` and `bank` as they progress through
+startup, genesis and query time. It also exercises the fitness of the client and query
+server without putting bytes on the wire through the use of [QueryServiceTestHelper](https://github.com/cosmos/cosmos-sdk/blob/2bec9d2021918650d3938c3ab242f84289daef80/baseapp/grpcrouter_helpers.go#L31).
+
+Example 2 `x/evidence` Keeper integration tests - Starts up an application composed of [8
+modules](https://github.com/cosmos/cosmos-sdk/blob/2bec9d2021918650d3938c3ab242f84289daef80/x/evidence/testutil/app.yaml#L1) with [5 keepers](https://github.com/cosmos/cosmos-sdk/blob/2bec9d2021918650d3938c3ab242f84289daef80/x/evidence/keeper/keeper_test.go#L101-L106) used in the integration test suite. One test in the suite
+exercises [HandleEquivocationEvidence](https://github.com/cosmos/cosmos-sdk/blob/2bec9d2021918650d3938c3ab242f84289daef80/x/evidence/keeper/infraction_test.go#L42) which contains many interactions with the staking
+keeper.
+
+Example 3 - Integration suite app configurations may also be specified via golang (not
+YAML as above) [statically](https://github.com/cosmos/cosmos-sdk/blob/main/x/nft/testutil/app_config.go) or [dynamically](https://github.com/cosmos/cosmos-sdk/blob/8c23f6f957d1c0bedd314806d1ac65bea59b084c/tests/integration/bank/keeper/keeper_test.go#L129-L134).
+
+#### Limitations
+
+Setting up a particular input state may be more challenging since the application is
+starting from a zero state. Some of this may be addressed by good test fixture
+abstractions with testing of their own. Tests may also be more brittle, and larger
+refactors could impact application initialization in unexpected ways with harder to
+understand errors. This could also be seen as a benefit, and indeed the SDK's current
+integration tests were helpful in tracking down logic errors during earlier stages
+of app-wiring refactors.
+
+### Simulations
+
+Simulations (also called generative testing) are a special case of integration tests where
+deterministically random module operations are executed against a running simapp, building
+blocks on the chain until a specified height is reached. No *specific* assertions are
+made for the state transitions resulting from module operations but any error will halt and
+fail the simulation. Since `crisis` is included in simapp and the simulation runs
+EndBlockers at the end of each block any module invariant violations will also fail
+the simulation.
+
+Modules must implement [AppModuleSimulation.WeightedOperations](https://github.com/cosmos/cosmos-sdk/blob/2bec9d2021918650d3938c3ab242f84289daef80/types/module/simulation.go#L31) to define their
+simulation operations. Note that not all modules implement this which may indicate a
+gap in current simulation test coverage.
+
+Modules not returning simulation operations:
+
+* `auth`
+* `evidence`
+* `mint`
+* `params`
+
+A separate binary, [runsim](https://github.com/cosmos/tools/tree/master/cmd/runsim), is responsible for kicking off some of these tests and
+managing their life cycle.
+
+#### Limitations
+
+* [A success](https://github.com/cosmos/cosmos-sdk/runs/7606931983?check_suite_focus=true) may take a long time to run, 7-10 minutes per simulation in CI.
+* [Timeouts](https://github.com/cosmos/cosmos-sdk/runs/7606932295?check_suite_focus=true) sometimes occur on apparent successes without any indication why.
+* Useful error messages not provided on [failure](https://github.com/cosmos/cosmos-sdk/runs/7606932548?check_suite_focus=true) from CI, requiring a developer to run
+ the simulation locally to reproduce.
+
+### E2E tests
+
+End to end tests exercise the entire system as we understand it in as close an approximation
+to a production environment as is practical. Presently these tests are located at
+[tests/e2e](https://github.com/cosmos/cosmos-sdk/tree/main/tests/e2e) and rely on [testutil/network](https://github.com/cosmos/cosmos-sdk/tree/main/testutil/network) to start up an in-process Tendermint node.
+
+An application should be built as minimally as possible to exercise the desired functionality.
+The SDK uses an application with only the required modules for the tests. The application developer is advised to use their own application for e2e tests.
+
+#### Limitations
+
+In general the limitations of end to end tests are orchestration and compute cost.
+Scaffolding is required to start up and run a prod-like environment and this
+process takes much longer to start and run than unit or integration tests.
+
+Global locks present in Tendermint code cause stateful starting/stopping to sometimes hang
+or fail intermittently when run in a CI environment.
+
+The scope of e2e tests has been complected with command line interface testing.
+
+## Decision
+
+We accept these test scopes and identify the following decisions points for each.
+
+| Scope | App Type | Mocks? |
+| ----------- | ------------------- | ------ |
+| Unit | None | Yes |
+| Integration | integration helpers | Some |
+| Simulation | minimal app | No |
+| E2E | minimal app | No |
+
+The decision above is valid for the SDK. An application developer should test their application with their full application instead of the minimal app.
+
+### Unit Tests
+
+All modules must have mocked unit test coverage.
+
+Illustrative tests should outnumber journeys in unit tests.
+
+Unit tests should outnumber integration tests.
+
+Unit tests must not introduce additional dependencies beyond those already present in
+production code.
+
+When module unit test introduction as per [EPIC: Unit testing of modules via mocks](https://github.com/cosmos/cosmos-sdk/issues/12398)
+results in a near complete rewrite of an integration test suite the test suite should be
+retained and moved to `/tests/integration`. We accept the resulting test logic
+duplication but recommend improving the unit test suite through the addition of
+illustrative tests.
+
+### Integration Tests
+
+All integration tests shall be located in `/tests/integration`, even those which do not
+introduce extra module dependencies.
+
+To help limit scope and complexity, it is recommended to use the smallest possible number of
+modules in application startup, i.e. don't depend on simapp.
+
+Integration tests should outnumber e2e tests.
+
+### Simulations
+
+Simulations shall use a minimal application (usually via app wiring). They are located under `/x/{moduleName}/simulation`.
+
+### E2E Tests
+
+Existing e2e tests shall be migrated to integration tests by removing the dependency on the
+test network and in-process Tendermint node to ensure we do not lose test coverage.
+
+The e2e test runner shall transition from in process Tendermint to a runner powered by
+Docker via [dockertest](https://github.com/ory/dockertest).
+
+E2E tests exercising a full network upgrade shall be written.
+
+The CLI testing aspect of existing e2e tests shall be rewritten using the network mocking
+demonstrated in [PR#12706](https://github.com/cosmos/cosmos-sdk/pull/12706).
+
+## Consequences
+
+### Positive
+
+* test coverage is increased
+* test organization is improved
+* reduced dependency graph size in modules
+* simapp removed as a dependency from modules
+* inter-module dependencies introduced in test code are removed
+* reduced CI run time after transitioning away from in process Tendermint
+
+### Negative
+
+* some test logic duplication between unit and integration tests during transition
+* test written using dockertest DX may be a bit worse
+
+### Neutral
+
+* some discovery required for e2e transition to dockertest
+
+## Further Discussions
+
+It may be useful if test suites could be run in integration mode (with mocked tendermint) or
+with e2e fixtures (with real tendermint and many nodes). Integration fixtures could be used
+for quicker runs, e2e fixtures could be used for more battle hardening.
+
+A PoC for `x/gov` unit tests demonstrating BDD was completed in PR [#12847](https://github.com/cosmos/cosmos-sdk/pull/12847)
+\[Rejected].
+Observing that a strength of BDD specifications is their readability, and a con is the
+cognitive load while writing and maintaining, current consensus is to reserve BDD use
+for places in the SDK where complex rules and module interactions are demonstrated.
+More straightforward or low level test cases will continue to rely on go table tests.
+
+Levels of network mocking in integration and e2e tests are still being worked on and formalized.
diff --git a/sdk/next/build/architecture/adr-060-abci-1.0.mdx b/sdk/next/build/architecture/adr-060-abci-1.0.mdx
new file mode 100644
index 000000000..870fe4c9c
--- /dev/null
+++ b/sdk/next/build/architecture/adr-060-abci-1.0.mdx
@@ -0,0 +1,258 @@
+---
+title: 'ADR 60: ABCI 1.0 Integration (Phase I)'
+description: >-
+ 2022-08-10: Initial Draft (@alexanderbez, @tac0turtle) Nov 12, 2022: Update
+ PrepareProposal and ProcessProposal semantics per the initial implementation
+ PR (@alexanderbez)
+---
+
+## Changelog
+
+* 2022-08-10: Initial Draft (@alexanderbez, @tac0turtle)
+* Nov 12, 2022: Update `PrepareProposal` and `ProcessProposal` semantics per the
+ initial implementation [PR](https://github.com/cosmos/cosmos-sdk/pull/13453) (@alexanderbez)
+
+## Status
+
+ACCEPTED
+
+## Abstract
+
+This ADR describes the initial adoption of [ABCI 1.0](https://github.com/tendermint/tendermint/blob/master/spec/abci%2B%2B/README.md),
+the next evolution of ABCI, within the Cosmos SDK. ABCI 1.0 aims to provide
+application developers with more flexibility and control over application and
+consensus semantics, e.g. in-application mempools, in-process oracles, and
+order-book style matching engines.
+
+## Context
+
+Tendermint will release ABCI 1.0. Notably, at the time of this writing,
+Tendermint is releasing v0.37.0 which will include `PrepareProposal` and `ProcessProposal`.
+
+The `PrepareProposal` ABCI method is concerned with a block proposer requesting
+the application to evaluate a series of transactions to be included in the next
+block, defined as a slice of `TxRecord` objects. The application can either
+accept, reject, or completely ignore some or all of these transactions. This is
+an important consideration to make as the application can essentially define and
+control its own mempool allowing it to define sophisticated transaction priority
+and filtering mechanisms, by completely ignoring the `TxRecords` Tendermint
+sends it, favoring its own transactions. This essentially means that the Tendermint
+mempool acts more like a gossip data structure.
+
+The second ABCI method, `ProcessProposal`, is used to process the block proposer's
+proposal as defined by `PrepareProposal`. It is important to note the following
+with respect to `ProcessProposal`:
+
+* Execution of `ProcessProposal` must be deterministic.
+* There must be coherence between `PrepareProposal` and `ProcessProposal`. In
+ other words, for any two correct processes *p* and *q*, if *q*'s Tendermint
+ calls `RequestProcessProposal` on *u<sub>p</sub>*, *q*'s Application returns
+ ACCEPT in `ResponseProcessProposal`.
+
+It is important to note that in ABCI 1.0 integration, the application
+is NOT responsible for locking semantics -- Tendermint will still be responsible
+for that. In the future, however, the application will be responsible for locking,
+which allows for parallel execution possibilities.
+
+## Decision
+
+We will integrate ABCI 1.0, which will be introduced in Tendermint
+v0.37.0, in the next major release of the Cosmos SDK. We will integrate ABCI 1.0
+methods on the `BaseApp` type. We describe the implementations of the two methods
+individually below.
+
+Prior to describing the implementation of the two new methods, it is important to
+note that the existing ABCI methods, `CheckTx`, `DeliverTx`, etc, still exist and
+serve the same functions as they do now.
+
+### `PrepareProposal`
+
+Prior to evaluating the decision for how to implement `PrepareProposal`, it is
+important to note that `CheckTx` will still be executed and will be responsible
+for evaluating transaction validity as it does now, with one very important
+*additive* distinction.
+
+When executing transactions in `CheckTx`, the application will now add valid
+transactions, i.e. passing the AnteHandler, to its own mempool data structure.
+In order to provide a flexible approach to meet the varying needs of application
+developers, we will define both a mempool interface and a data structure utilizing
+Golang generics, allowing developers to focus only on transaction
+ordering. Developers requiring absolute full control can implement their own
+custom mempool implementation.
+
+We define the general mempool interface as follows (subject to change):
+
+```go expandable
+type Mempool interface {
+ // Insert attempts to insert a Tx into the app-side mempool returning
+ // an error upon failure.
+ Insert(sdk.Context, sdk.Tx)
+
+error
+
+ // Select returns an Iterator over the app-side mempool. If txs are specified,
+ // then they shall be incorporated into the Iterator. The Iterator must
+ // be closed by the caller.
+ Select(sdk.Context, [][]byte)
+
+Iterator
+
+ // CountTx returns the number of transactions currently in the mempool.
+ CountTx()
+
+int
+
+ // Remove attempts to remove a transaction from the mempool, returning an error
+ // upon failure.
+ Remove(sdk.Tx)
+
+error
+}
+
+// Iterator defines an app-side mempool iterator interface that is as minimal as
+// possible. The order of iteration is determined by the app-side mempool
+// implementation.
+type Iterator interface {
+ // Next returns the next transaction from the mempool. If there are no more
+ // transactions, it returns nil.
+ Next()
+
+Iterator
+
+ // Tx returns the transaction at the current position of the iterator.
+ Tx()
+
+sdk.Tx
+}
+```
+
+We will define an implementation of `Mempool`, defined by `nonceMempool`, that
+will cover most basic application use-cases. Namely, it will prioritize transactions
+by transaction sender, allowing for multiple transactions from the same sender.
+
+The default app-side mempool implementation, `nonceMempool`, will operate on a
+single skip list data structure. Specifically, transactions with the lowest nonce
+globally are prioritized. Transactions with the same nonce are prioritized by
+sender address.
+
+```go
+type nonceMempool struct {
+ txQueue *huandu.SkipList
+}
+```
+
+Previous discussions \[1] have come to the agreement that Tendermint will
+perform a request to the application, via `RequestPrepareProposal`, with a certain
+amount of transactions reaped from Tendermint's local mempool. The exact amount
+of transactions reaped will be determined by a local operator configuration.
+This is referred to as the "one-shot approach" seen in discussions.
+
+When Tendermint reaps transactions from the local mempool and sends them to the
+application via `RequestPrepareProposal`, the application will have to evaluate
+the transactions. Specifically, it will need to inform Tendermint if it should
+reject and or include each transaction. Note, the application can even *replace*
+transactions entirely with other transactions.
+
+When evaluating transactions from `RequestPrepareProposal`, the application will
+ignore *ALL* transactions sent to it in the request and instead reap up to
+`RequestPrepareProposal.max_tx_bytes` from its own mempool.
+
+Since an application can technically insert or inject transactions on `Insert`
+during `CheckTx` execution, it is recommended that applications ensure transaction
+validity when reaping transactions during `PrepareProposal`. However, what validity
+exactly means is entirely determined by the application.
+
+The Cosmos SDK will provide a default `PrepareProposal` implementation that simply
+selects up to `MaxBytes` *valid* transactions.
+
+However, applications can override this default implementation with their own
+implementation and set that on `BaseApp` via `SetPrepareProposal`.
+
+### `ProcessProposal`
+
+The `ProcessProposal` ABCI method is relatively straightforward. It is responsible
+for ensuring validity of the proposed block containing transactions that were
+selected from the `PrepareProposal` step. However, how an application determines
+validity of a proposed block depends on the application and its varying use cases.
+For most applications, simply calling the `AnteHandler` chain would suffice, but
+there could easily be other applications that need more control over the validation
+process of the proposed block, such as ensuring txs are in a certain order or
+that certain transactions are included. While this theoretically could be achieved
+with a custom `AnteHandler` implementation, it's not the cleanest UX or the most
+efficient solution.
+
+Instead, we will define an additional ABCI interface method on the existing
+`Application` interface, similar to the existing ABCI methods such as `BeginBlock`
+or `EndBlock`. This new interface method will be defined as follows:
+
+```go
+ProcessProposal(sdk.Context, abci.RequestProcessProposal)
+
+error {
+}
+```
+
+Note, we must call `ProcessProposal` with a new internal branched state on the
+`Context` argument as we cannot simply just use the existing `checkState` because
+`BaseApp` already has a modified `checkState` at this point. So when executing
+`ProcessProposal`, we create a similar branched state, `processProposalState`,
+off of `deliverState`. Note, the `processProposalState` is never committed and
+is completely discarded after `ProcessProposal` finishes execution.
+
+The Cosmos SDK will provide a default implementation of `ProcessProposal` in which
+all transactions are validated using the CheckTx flow, i.e. the AnteHandler, and
+will always return ACCEPT unless any transaction cannot be decoded.
+
+### `DeliverTx`
+
+Since transactions are not truly removed from the app-side mempool during
+`PrepareProposal`, since `ProcessProposal` can fail or take multiple rounds and
+we do not want to lose transactions, we need to finally remove the transaction
+from the app-side mempool during `DeliverTx` since during this phase, the
+transactions are being included in the proposed block.
+
+Alternatively, we can keep the transactions as truly being removed during the
+reaping phase in `PrepareProposal` and add them back to the app-side mempool in
+case `ProcessProposal` fails.
+
+## Consequences
+
+### Backwards Compatibility
+
+ABCI 1.0 is naturally not backwards compatible with prior versions of the Cosmos SDK
+and Tendermint. For example, an application that requests `RequestPrepareProposal`
+to the same application that does not speak ABCI 1.0 will naturally fail.
+
+However, in the first phase of the integration, the existing ABCI methods as we
+know them today will still exist and function as they currently do.
+
+### Positive
+
+* Applications now have full control over transaction ordering and priority.
+* Lays the groundwork for the full integration of ABCI 1.0, which will unlock more
+ app-side use cases around block construction and integration with the Tendermint
+ consensus engine.
+
+### Negative
+
+* Requires that the "mempool", as a general data structure that collects and stores
+ uncommitted transactions will be duplicated between both Tendermint and the
+ Cosmos SDK.
+* Additional requests between Tendermint and the Cosmos SDK in the context of
+ block execution. Albeit, the overhead should be negligible.
+* Not backwards compatible with previous versions of Tendermint and the Cosmos SDK.
+
+## Further Discussions
+
+It is possible to design the app-side implementation of the `Mempool[T MempoolTx]`
+in many different ways using different data structures and implementations. All
+of which have different tradeoffs. The proposed solution keeps things simple
+and covers cases that would be required for most basic applications. There are
+tradeoffs that can be made to improve performance of reaping and inserting into
+the provided mempool implementation.
+
+## References
+
+* [Link](https://github.com/tendermint/tendermint/blob/master/spec/abci%2B%2B/README.md)
+* \[1] [Link](https://github.com/tendermint/tendermint/issues/7750#issuecomment-1076806155)
+* \[2] [Link](https://github.com/tendermint/tendermint/issues/7750#issuecomment-1075717151)
diff --git a/sdk/next/build/architecture/adr-061-liquid-staking.mdx b/sdk/next/build/architecture/adr-061-liquid-staking.mdx
new file mode 100644
index 000000000..d6747a305
--- /dev/null
+++ b/sdk/next/build/architecture/adr-061-liquid-staking.mdx
@@ -0,0 +1,82 @@
+---
+title: 'ADR 061: Liquid Staking'
+description: '2022-09-10: Initial Draft (@zmanian)'
+---
+
+## Changelog
+
+* 2022-09-10: Initial Draft (@zmanian)
+
+## Status
+
+ACCEPTED
+
+## Abstract
+
+Add a semi-fungible liquid staking primitive to the default Cosmos SDK staking module. This upgrades proof of stake to enable safe designs with lower overall monetary issuance and integration with numerous liquid staking protocols like Stride, Persistence, Quicksilver, Lido etc.
+
+## Context
+
+The original release of the Cosmos Hub featured the implementation of a ground breaking proof of stake mechanism featuring delegation, slashing, in protocol reward distribution and adaptive issuance. This design was state of the art for 2016 and has been deployed without major changes by many L1 blockchains.
+
+As both Proof of Stake and blockchain use cases have matured, this design has aged poorly and should no longer be considered a good baseline Proof of Stake issuance. In the world of application specific blockchains, there cannot be a one size fits all blockchain but the Cosmos SDK does endeavour to provide a good baseline implementation and one that is suitable for the Cosmos Hub.
+
+The most important deficiency of the legacy staking design is that it composes poorly with on chain protocols for trading, lending, derivatives that are referred to collectively as DeFi. The legacy staking implementation starves these applications of liquidity by increasing the risk free rate adaptively. It basically makes DeFi and staking security somewhat incompatible.
+
+The Osmosis team has adopted the idea of Superfluid and Interfluid staking where assets that are participating in DeFi applications can also be used in proof of stake. This requires tight integration with an enshrined set of DeFi applications and thus is unsuitable for the Cosmos SDK.
+
+It's also important to note that Interchain Accounts are available in the default IBC implementation and can be used to [rehypothecate](https://www.investopedia.com/terms/h/hypothecation.asp#toc-what-is-rehypothecation) delegations. Thus liquid staking is already possible and these changes merely improve the UX of liquid staking. Centralized exchanges also rehypothecate staked assets, posing challenges for decentralization. This ADR takes the position that adoption of in-protocol liquid staking is the preferable outcome and provides new levers to incentivize decentralization of stake.
+
+These changes to the staking module have been in development for more than a year and have seen substantial industry adoption from providers who plan to build staking UX. The internal economics team at Informal has also done a review of the impacts of these changes and this review led to the development of the exempt delegation system. This system provides governance with a tuneable parameter for modulating the risks of the principal agent problem called the exemption factor.
+
+## Decision
+
+We implement the semi-fungible liquid staking system and exemption factor system within the cosmos sdk. Though registered as fungible assets, these tokenized shares have extremely limited fungibility, only among the specific delegation record that was created when shares were tokenized. These assets can be used for OTC trades but composability with DeFi is limited. The primary expected use case is improving the user experience of liquid staking providers.
+
+A new governance parameter is introduced that defines the ratio of exempt to issued tokenized shares. This is called the exemption factor. A larger exemption factor allows more tokenized shares to be issued for a smaller amount of exempt delegations. If governance is comfortable with how the liquid staking market is evolving, it makes sense to increase this value.
+
+Min self delegation is removed from the staking system with the expectation that it will be replaced by the exempt delegations system. The exempt delegation system allows multiple accounts to demonstrate economic alignment with the validator operator as team members, partners etc. without co-mingling funds. Delegation exemption will likely be required to grow the validators' business under widespread adoption of liquid staking once governance has adjusted the exemption factor.
+
+When shares are tokenized, the underlying shares are transferred to a module account and rewards go to the module account for the TokenizedShareRecord.
+
+There is no longer a mechanism to override the validators vote for TokenizedShares.
+
+### `MsgTokenizeShares`
+
+The MsgTokenizeShares message is used to tokenize delegated tokens. This message can be executed by any delegator who has a positive amount of delegation and after execution the specific amount of delegation disappears from the account and share tokens are provided. Share tokens are denominated in the validator and record id of the underlying delegation.
+
+A user may tokenize some or all of their delegation.
+
+They will receive shares with the denom of `cosmosvaloper1xxxx/5` where 5 is the record id for the validator operator.
+
+MsgTokenizeShares fails if the account is a VestingAccount. Users will have to move vested tokens to a new account and endure the unbonding period. We view this as an acceptable tradeoff vs. the complex book keeping required to track vested tokens.
+
+The total amount of outstanding tokenized shares for the validator is checked against the sum of exempt delegations multiplied by the exemption factor. If the tokenized shares exceed this limit, execution fails.
+
+MsgTokenizeSharesResponse provides the number of tokens generated and their denom.
+
+### `MsgRedeemTokensforShares`
+
+The MsgRedeemTokensforShares message is used to redeem the delegation from share tokens. This message can be executed by any user who owns share tokens. After execution delegations will appear to the user.
+
+### `MsgTransferTokenizeShareRecord`
+
+The MsgTransferTokenizeShareRecord message is used to transfer the ownership of rewards generated from the tokenized amount of delegation. The tokenize share record is created when a user tokenizes their delegation and deleted when the full amount of share tokens are redeemed.
+
+This is designed to work with liquid staking designs that do not redeem the tokenized shares and may instead want to keep the shares tokenized.
+
+### `MsgExemptDelegation`
+
+The MsgExemptDelegation message is used to exempt a delegation to a validator. If the exemption factor is greater than 0, this will allow more delegation shares to be issued from the validator.
+
+This design allows the chain to force an amount of self-delegation by validators participating in liquid staking schemes.
+
+## Consequences
+
+### Backwards Compatibility
+
+By setting the exemption factor to zero, this module works like legacy staking. The only substantial change is the removal of min-self-bond and without any tokenized shares, there is no incentive to exempt delegation.
+
+### Positive
+
+This approach should enable integration with liquid staking providers and improved user experience. It provides a pathway to security under non-exponential issuance policies in the baseline staking module.
diff --git a/sdk/next/build/architecture/adr-062-collections-state-layer.mdx b/sdk/next/build/architecture/adr-062-collections-state-layer.mdx
new file mode 100644
index 000000000..37c565dd8
--- /dev/null
+++ b/sdk/next/build/architecture/adr-062-collections-state-layer.mdx
@@ -0,0 +1,119 @@
+---
+title: 'ADR 062: Collections, a simplified storage layer for cosmos-sdk modules.'
+description: '30/11/2022: PROPOSED'
+---
+
+## Changelog
+
+* 30/11/2022: PROPOSED
+
+## Status
+
+PROPOSED - Implemented
+
+## Abstract
+
+We propose a simplified module storage layer which leverages golang generics to allow module developers to handle module
+storage in a simple and straightforward manner, whilst offering safety, extensibility and standardisation.
+
+## Context
+
+Module developers are forced into manually implementing storage functionalities in their modules, those functionalities include
+but are not limited to:
+
+* Defining key to bytes formats.
+* Defining value to bytes formats.
+* Defining secondary indexes.
+* Defining query methods to expose outside to deal with storage.
+* Defining local methods to deal with storage writing.
+* Dealing with genesis imports and exports.
+* Writing tests for all the above.
+
+This brings in a lot of problems:
+
+* It blocks developers from focusing on the most important part: writing business logic.
+* Key to bytes formats are complex and their definition is error-prone, for example:
+ * how do I format time to bytes in such a way that bytes are sorted?
+ * how do I ensure I don't have namespace collisions when dealing with secondary indexes?
+* The lack of standardisation makes life hard for clients, and the problem is exacerbated when it comes to providing proofs for objects present in state. Clients are forced to maintain a list of object paths to gather proofs.
+
+### Current Solution: ORM
+
+The current SDK proposed solution to this problem is [ORM](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-055-orm.md).
+While ORM offers a lot of good functionality aimed at solving these specific problems, it has some downsides:
+
+* It requires migrations.
+* It uses the newest protobuf golang API, whilst the SDK still mainly uses gogoproto.
+* Integrating ORM into a module would require the developer to deal with two different golang frameworks (golang protobuf + gogoproto) representing the same API objects.
+* It has a high learning curve, even for simple storage layers as it requires developers to have knowledge around protobuf options, custom cosmos-sdk storage extensions, and tooling download. Then after this they still need to learn the code-generated API.
+
+### CosmWasm Solution: cw-storage-plus
+
+The collections API takes inspiration from [cw-storage-plus](https://docs.cosmwasm.com/docs/1.0/smart-contracts/state/cw-plus/),
+which has demonstrated to be a powerful tool for dealing with storage in CosmWasm contracts.
+It's simple, does not require extra tooling, it makes it easy to deal with complex storage structures (indexes, snapshot, etc).
+The API is straightforward and explicit.
+
+## Decision
+
+We propose to port the `collections` API, whose implementation lives in [NibiruChain/collections](https://github.com/NibiruChain/collections) to cosmos-sdk.
+
+Collections implements five different storage handler types:
+
+* `Map`: which deals with simple `key=>object` mappings.
+* `KeySet`: which acts as a `Set` and only retains keys and no object (usecase: allow-lists).
+* `Item`: which always contains only one object (usecase: Params)
+* `Sequence`: which implements a simple always increasing number (usecase: Nonces)
+* `IndexedMap`: builds on top of `Map` and `KeySet` and allows to create relationships with `Objects` and `Objects` secondary keys.
+
+All the collection APIs build on top of the simple `Map` type.
+
+Collections is fully generic, meaning that anything can be used as `Key` and `Value`. It can be a protobuf object or not.
+
+Collections types, in fact, delegate the duty of serialisation of keys and values to a secondary collections API component called `ValueEncoders` and `KeyEncoders`.
+
+`ValueEncoders` take care of converting a value to bytes (relevant only for `Map`). And offers a plug and play layer which allows us to change how we encode objects,
+which is relevant for swapping serialisation frameworks and enhancing performance.
+`Collections` already comes in with default `ValueEncoders`, specifically for: protobuf objects, special SDK types (sdk.Int, sdk.Dec).
+
+`KeyEncoders` take care of converting keys to bytes, `collections` already comes in with some default `KeyEncoders` for some primitive golang types
+(uint64, string, time.Time, ...) and some widely used sdk types (sdk.Acc/Val/ConsAddress, sdk.Int/Dec, ...).
+These default implementations also offer safety around proper lexicographic ordering and namespace-collision.
+
+Examples of the collections API can be found here:
+
+* introduction: [Link](https://github.com/NibiruChain/collections/tree/main/examples)
+* usage in nibiru: [x/oracle](https://github.com/NibiruChain/nibiru/blob/master/x/oracle/keeper/keeper.go#L32), [x/perp](https://github.com/NibiruChain/nibiru/blob/master/x/perp/keeper/keeper.go#L31)
+* cosmos-sdk's x/staking migrated: [Link](https://github.com/testinginprod/cosmos-sdk/pull/22)
+
+## Consequences
+
+### Backwards Compatibility
+
+The design of `ValueEncoders` and `KeyEncoders` allows modules to retain the same `byte(key)=>byte(value)` mappings, making
+the upgrade to the new storage layer non-state breaking.
+
+### Positive
+
+* ADR aimed at removing code from the SDK rather than adding it. Migrating just `x/staking` to collections would yield a net decrease in LOC (even considering the addition of collections itself).
+* Simplifies and standardises storage layers across modules in the SDK.
+* Does not require to have to deal with protobuf.
+* It's pure golang code.
+* Generalisation over `KeyEncoders` and `ValueEncoders` allows us to not tie ourselves to the data serialisation framework.
+* `KeyEncoders` and `ValueEncoders` can be extended to provide schema reflection.
+
+### Negative
+
+* Golang generics are not as battle-tested as other Golang features, despite being used in production right now.
+* Collection types instantiation needs to be improved.
+
+### Neutral
+
+`{neutral consequences}`
+
+## Further Discussions
+
+* Automatic genesis import/export (not implemented because of API breakage)
+* Schema reflection
+
+## References
diff --git a/sdk/next/build/architecture/adr-063-core-module-api.mdx b/sdk/next/build/architecture/adr-063-core-module-api.mdx
new file mode 100644
index 000000000..ffc259403
--- /dev/null
+++ b/sdk/next/build/architecture/adr-063-core-module-api.mdx
@@ -0,0 +1,615 @@
+---
+title: 'ADR 063: Core Module API'
+description: 2022-08-18 First Draft 2022-12-08 First Draft 2023-01-24 Updates
+---
+
+## Changelog
+
+* 2022-08-18 First Draft
+* 2022-12-08 First Draft
+* 2023-01-24 Updates
+
+## Status
+
+ACCEPTED Partially Implemented
+
+## Abstract
+
+A new core API is proposed as a way to develop cosmos-sdk applications that will eventually replace the existing
+`AppModule` and `sdk.Context` frameworks with a set of core services and extension interfaces. This core API aims to:
+
+* be simpler
+* more extensible
+* more stable than the current framework
+* enable deterministic events and queries,
+* support event listeners
+* [ADR 033: Protobuf-based Inter-Module Communication](/sdk/v0.53/build/architecture/adr-033-protobuf-inter-module-comm) clients.
+
+## Context
+
+Historically modules have exposed their functionality to the framework via the `AppModule` and `AppModuleBasic`
+interfaces which have the following shortcomings:
+
+* both `AppModule` and `AppModuleBasic` need to be defined and registered which is counter-intuitive
+* apps need to implement the full interfaces, even parts they don't need (although there are workarounds for this),
+* interface methods depend heavily on unstable third party dependencies, in particular Comet,
+* legacy required methods have littered these interfaces for far too long
+
+In order to interact with the state machine, modules have needed to do a combination of these things:
+
+* get store keys from the app
+* call methods on `sdk.Context` which contains more or less the full set of capabilities available to modules.
+
+By isolating all the state machine functionality into `sdk.Context`, the set of functionalities available to
+modules are tightly coupled to this type. If there are changes to upstream dependencies (such as Comet)
+or new functionalities are desired (such as alternate store types), the changes need to impact `sdk.Context` and all
+consumers of it (basically all modules). Also, all modules now receive `context.Context` and need to convert these
+to `sdk.Context`'s with a non-ergonomic unwrapping function.
+
+Any breaking changes to these interfaces, such as ones imposed by third-party dependencies like Comet, have the
+side effect of forcing all modules in the ecosystem to update in lock-step. This means it is almost impossible to have
+a version of the module which can be run with 2 or 3 different versions of the SDK or 2 or 3 different versions of
+another module. This lock-step coupling slows down overall development within the ecosystem and causes updates to
+components to be delayed longer than they would if things were more stable and loosely coupled.
+
+## Decision
+
+The `core` API proposes a set of core APIs that modules can rely on to interact with the state machine and expose their
+functionalities to it that are designed in a principled way such that:
+
+* tight coupling of dependencies and unrelated functionalities is minimized or eliminated
+* APIs can have long-term stability guarantees
+* the SDK framework is extensible in a safe and straightforward way
+
+The design principles of the core API are as follows:
+
+* everything that a module wants to interact with in the state machine is a service
+* all services coordinate state via `context.Context` and don't try to recreate the "bag of variables" approach of `sdk.Context`
+* all independent services are isolated in independent packages with minimal APIs and minimal dependencies
+* the core API should be minimalistic and designed for long-term support (LTS)
+* a "runtime" module will implement all the "core services" defined by the core API and can handle all module
+ functionalities exposed by core extension interfaces
+* other non-core and/or non-LTS services can be exposed by specific versions of runtime modules or other modules
+ following the same design principles, this includes functionality that interacts with specific non-stable versions of
+ third party dependencies such as Comet
+* the core API doesn't implement *any* functionality, it just defines types
+* go stable API compatibility guidelines are followed: [Link](https://go.dev/blog/module-compatibility)
+
+A "runtime" module is any module which implements the core functionality of composing an ABCI app, which is currently
+handled by `BaseApp` and the `ModuleManager`. Runtime modules which implement the core API are *intentionally* separate
+from the core API in order to enable more parallel versions and forks of the runtime module than is possible with the
+SDK's current tightly coupled `BaseApp` design while still allowing for a high degree of composability and
+compatibility.
+
+Modules which are built only against the core API don't need to know anything about which version of runtime,
+`BaseApp` or Comet in order to be compatible. Modules from the core mainline SDK could be easily composed
+with a forked version of runtime with this pattern.
+
+This design is intended to enable matrices of compatible dependency versions. Ideally a given version of any module
+is compatible with multiple versions of the runtime module and other compatible modules. This will allow dependencies
+to be selectively updated based on battle-testing. More conservative projects may want to update some dependencies
+slower than more fast moving projects.
+
+### Core Services
+
+The following "core services" are defined by the core API. All valid runtime module implementations should provide
+implementations of these services to modules via both [dependency injection](/sdk/v0.53/build/architecture/adr-057-app-wiring) and
+manual wiring. The individual services described below are all bundled in a convenient `appmodule.Service`
+"bundle service" so that for simplicity modules can declare a dependency on a single service.
+
+#### Store Services
+
+Store services will be defined in the `cosmossdk.io/core/store` package.
+
+The generic `store.KVStore` interface is the same as current SDK `KVStore` interface. Store keys have been refactored
+into store services which, instead of expecting the context to know about stores, invert the pattern and allow
+retrieving a store from a generic context. There are three store services for the three types of currently supported
+stores - regular kv-store, memory, and transient:
+
+```go
+type KVStoreService interface {
+ OpenKVStore(context.Context)
+
+KVStore
+}
+
+type MemoryStoreService interface {
+ OpenMemoryStore(context.Context)
+
+KVStore
+}
+
+type TransientStoreService interface {
+ OpenTransientStore(context.Context)
+
+KVStore
+}
+```
+
+Modules can use these services like this:
+
+```go
+func (k msgServer)
+
+Send(ctx context.Context, msg *types.MsgSend) (*types.MsgSendResponse, error) {
+ store := k.kvStoreSvc.OpenKVStore(ctx)
+}
+```
+
+Just as with the current runtime module implementation, modules will not need to explicitly name these store keys,
+but rather the runtime module will choose an appropriate name for them and modules just need to request the
+type of store they need in their dependency injection (or manual) constructors.
+
+#### Event Service
+
+The event `Service` will be defined in the `cosmossdk.io/core/event` package.
+
+The event `Service` allows modules to emit typed and legacy untyped events:
+
+```go expandable
+package event
+
+type Service interface {
+ // EmitProtoEvent emits events represented as a protobuf message (as described in ADR 032).
+ //
+ // Callers SHOULD assume that these events may be included in consensus. These events
+ // MUST be emitted deterministically and adding, removing or changing these events SHOULD
+ // be considered state-machine breaking.
+ EmitProtoEvent(ctx context.Context, event protoiface.MessageV1)
+
+error
+
+ // EmitKVEvent emits an event based on an event and kv-pair attributes.
+ //
+ // These events will not be part of consensus and adding, removing or changing these events is
+ // not a state-machine breaking change.
+ EmitKVEvent(ctx context.Context, eventType string, attrs ...KVEventAttribute)
+
+error
+
+ // EmitProtoEventNonConsensus emits events represented as a protobuf message (as described in ADR 032), without
+ // including it in blockchain consensus.
+ //
+ // These events will not be part of consensus and adding, removing or changing events is
+ // not a state-machine breaking change.
+ EmitProtoEventNonConsensus(ctx context.Context, event protoiface.MessageV1)
+
+error
+}
+```
+
+Typed events emitted with `EmitProto` should be assumed to be part of blockchain consensus (whether they are part of
+the block or app hash is left to the runtime to specify).
+
+Events emitted by `EmitKVEvent` and `EmitProtoEventNonConsensus` are not considered to be part of consensus and cannot be observed
+by other modules. If there is a client-side need to add events in patch releases, these methods can be used.
+
+#### Logger
+
+A logger (`cosmossdk.io/log`) must be supplied using `depinject`, and will
+be made available for modules to use via `depinject.In`.
+Modules using it should follow the current pattern in the SDK by adding the module name before using it.
+
+```go expandable
+type ModuleInputs struct {
+ depinject.In
+
+ Logger log.Logger
+}
+
+func ProvideModule(in ModuleInputs)
+
+ModuleOutputs {
+ keeper := keeper.NewKeeper(
+ in.logger,
+ )
+}
+
+func NewKeeper(logger log.Logger)
+
+Keeper {
+ return Keeper{
+ logger: logger.With(log.ModuleKey, "x/"+types.ModuleName),
+}
+}
+```
+
+### Core `AppModule` extension interfaces
+
+Modules will provide their core services to the runtime module via extension interfaces built on top of the
+`cosmossdk.io/core/appmodule.AppModule` tag interface. This tag interface requires only two empty methods which
+allow `depinject` to identify implementors as `depinject.OnePerModule` types and as app module implementations:
+
+```go
+type AppModule interface {
+ depinject.OnePerModuleType
+
+ // IsAppModule is a dummy method to tag a struct as implementing an AppModule.
+ IsAppModule()
+}
+```
+
+Other core extension interfaces will be defined in `cosmossdk.io/core` and should be supported by valid runtime
+implementations.
+
+#### `MsgServer` and `QueryServer` registration
+
+`MsgServer` and `QueryServer` registration is done by implementing the `HasServices` extension interface:
+
+```go
+type HasServices interface {
+ AppModule
+
+ RegisterServices(grpc.ServiceRegistrar)
+}
+```
+
+Because of the `cosmos.msg.v1.service` protobuf option, required for `Msg` services, the same `ServiceRegistrar` can be
+used to register both `Msg` and query services.
+
+#### Genesis
+
+The genesis `Handler` functions - `DefaultGenesis`, `ValidateGenesis`, `InitGenesis` and `ExportGenesis` - are specified
+against the `GenesisSource` and `GenesisTarget` interfaces which will abstract over genesis sources which may be a single
+JSON object or collections of JSON objects that can be efficiently streamed.
+
+```go expandable
+// GenesisSource is a source for genesis data in JSON format. It may abstract over a
+// single JSON object or separate files for each field in a JSON object that can
+// be streamed over. Modules should open a separate io.ReadCloser for each field that
+// is required. When fields represent arrays they can efficiently be streamed
+// over. If there is no data for a field, this function should return nil, nil. It is
+// important that the caller closes the reader when done with it.
+type GenesisSource = func(field string) (io.ReadCloser, error)
+
+// GenesisTarget is a target for writing genesis data in JSON format. It may
+// abstract over a single JSON object or JSON in separate files that can be
+// streamed over. Modules should open a separate io.WriteCloser for each field
+// and should prefer writing fields as arrays when possible to support efficient
+// iteration. It is important the caller closes the writer AND checks the error
+// when done with it. It is expected that a stream of JSON data is written
+// to the writer.
+type GenesisTarget = func(field string) (io.WriteCloser, error)
+```
+
+All genesis objects for a given module are expected to conform to the semantics of a JSON object.
+Each field in the JSON object should be read and written separately to support streaming genesis.
+The [ORM](/sdk/v0.53/build/architecture/adr-055-orm) and [collections](/sdk/v0.53/build/architecture/adr-062-collections-state-layer) both support
+streaming genesis and modules using these frameworks generally do not need to write any manual
+genesis code.
+
+To support genesis, modules should implement the `HasGenesis` extension interface:
+
+```go expandable
+type HasGenesis interface {
+ AppModule
+
+ // DefaultGenesis writes the default genesis for this module to the target.
+ DefaultGenesis(GenesisTarget)
+
+error
+
+ // ValidateGenesis validates the genesis data read from the source.
+ ValidateGenesis(GenesisSource)
+
+error
+
+ // InitGenesis initializes module state from the genesis source.
+ InitGenesis(context.Context, GenesisSource)
+
+error
+
+ // ExportGenesis exports module state to the genesis target.
+ ExportGenesis(context.Context, GenesisTarget)
+
+error
+}
+```
+
+#### Pre Blockers
+
+Modules that have functionality that runs before BeginBlock should implement the `HasPreBlocker` interface:
+
+```go
+type HasPreBlocker interface {
+ AppModule
+ PreBlock(context.Context)
+
+error
+}
+```
+
+#### Begin and End Blockers
+
+Modules that have functionality that runs before transactions (begin blockers) or after transactions
+(end blockers) should implement the `HasBeginBlocker` and/or `HasEndBlocker` interfaces:
+
+```go
+type HasBeginBlocker interface {
+ AppModule
+ BeginBlock(context.Context)
+
+error
+}
+
+type HasEndBlocker interface {
+ AppModule
+ EndBlock(context.Context)
+
+error
+}
+```
+
+The `BeginBlock` and `EndBlock` methods will take a `context.Context`, because:
+
+* most modules don't need Comet information other than `BlockInfo` so we can eliminate dependencies on specific
+ Comet versions
+* for the few modules that need Comet block headers and/or return validator updates, specific versions of the
+ runtime module will provide specific functionality for interacting with the specific version(s) of Comet
+ supported
+
+In order for `BeginBlock`, `EndBlock` and `InitGenesis` to send back validator updates and retrieve full Comet
+block headers, the runtime module for a specific version of Comet could provide services like this:
+
+```go
+type ValidatorUpdateService interface {
+ SetValidatorUpdates(context.Context, []abci.ValidatorUpdate)
+}
+```
+
+Header Service defines a way to get header information about a block. This information is generalized for all implementations:
+
+```go
+type Service interface {
+ GetHeaderInfo(context.Context)
+
+Info
+}
+
+type Info struct {
+ Height int64 // Height returns the height of the block
+ Hash []byte // Hash returns the hash of the block header
+ Time time.Time // Time returns the time of the block
+ ChainID string // ChainId returns the chain ID of the block
+}
+```
+
+Comet Service provides a way to get comet specific information:
+
+```go expandable
+type Service interface {
+ GetCometInfo(context.Context)
+
+Info
+}
+
+type CometInfo struct {
+ Evidence []abci.Misbehavior // Misbehavior returns the misbehavior of the block
+ // ValidatorsHash returns the hash of the validators
+ // For Comet, it is the hash of the next validators
+ ValidatorsHash []byte
+ ProposerAddress []byte // ProposerAddress returns the address of the block proposer
+ DecidedLastCommit abci.CommitInfo // DecidedLastCommit returns the last commit info
+}
+```
+
+If a user would like to provide a module other information they would need to implement another service like:
+
+```go
+type RollKit Interface {
+ ...
+}
+```
+
+We know these types will change at the Comet level and that also a very limited set of modules actually need this
+functionality, so they are intentionally kept out of core to keep core limited to the necessary, minimal set of stable
+APIs.
+
+#### Remaining Parts of AppModule
+
+The current `AppModule` framework handles a number of additional concerns which aren't addressed by this core API.
+These include:
+
+* gas
+* block headers
+* upgrades
+* registration of gogo proto and amino interface types
+* cobra query and tx commands
+* gRPC gateway
+* crisis module invariants
+* simulations
+
+Additional `AppModule` extension interfaces either inside or outside of core will need to be specified to handle
+these concerns.
+
+In the case of gogo proto and amino interfaces, the registration of these generally should happen as early
+as possible during initialization and in [ADR 057: App Wiring](/sdk/v0.53/build/architecture/adr-057-app-wiring), protobuf type registration\
+happens before dependency injection (although this could alternatively be done in dedicated DI providers).
+
+gRPC gateway registration should probably be handled by the runtime module, but the core API shouldn't depend on gRPC
+gateway types as 1) we are already using an older version and 2) it's possible the framework can do this registration
+automatically in the future. So for now, the runtime module should probably provide some sort of specific type for doing
+this registration ex:
+
+```go
+type GrpcGatewayInfo struct {
+ Handlers []GrpcGatewayHandler
+}
+
+type GrpcGatewayHandler func(ctx context.Context, mux *runtime.ServeMux, client QueryClient)
+
+error
+```
+
+which modules can return in a provider:
+
+```go
+func ProvideGrpcGateway()
+
+GrpcGatewayInfo {
+ return GrpcGatewayInfo {
+ Handlers: []Handler {
+ types.RegisterQueryHandlerClient
+}
+
+}
+}
+```
+
+Crisis module invariants and simulations are subject to potential redesign and should be managed with types
+defined in the crisis and simulation modules respectively.
+
+Extension interface for CLI commands will be provided via the `cosmossdk.io/client/v2` module and its
+[autocli](/sdk/v0.53/build/architecture/adr-058-auto-generated-cli) framework.
+
+#### Example Usage
+
+Here is an example of setting up a hypothetical `foo` v2 module which uses the [ORM](/sdk/v0.53/build/architecture/adr-055-orm) for its state
+management and genesis.
+
+```go expandable
+type Keeper struct {
+    db     orm.ModuleDB
+    evtSrv event.Service
+}
+
+func (k Keeper) RegisterServices(r grpc.ServiceRegistrar) {
+    foov1.RegisterMsgServer(r, k)
+    foov1.RegisterQueryServer(r, k)
+}
+
+func (k Keeper) BeginBlock(context.Context) error {
+    return nil
+}
+
+func ProvideApp(config *foomodulev2.Module, evtSvc event.EventService, db orm.ModuleDB) (Keeper, appmodule.AppModule) {
+    k := &Keeper{db: db, evtSrv: evtSvc}
+    return k, k
+}
+```
+
+### Runtime Compatibility Version
+
+The `core` module will define a static integer var, `cosmossdk.io/core.RuntimeCompatibilityVersion`, which is
+a minor version indicator of the core module that is accessible at runtime. Correct runtime module implementations
+should check this compatibility version and return an error if the current `RuntimeCompatibilityVersion` is higher
+than the version of the core API that this runtime version can support. When new features are added to the `core`
+module API that runtime modules are required to support, this version should be incremented.
+
+### Runtime Modules
+
+The initial `runtime` module will simply be created within the existing `github.com/cosmos/cosmos-sdk` go module
+under the `runtime` package. This module will be a small wrapper around the existing `BaseApp`, `sdk.Context` and
+module manager and follow the Cosmos SDK's existing [0-based versioning](https://0ver.org). To move to semantic
+versioning as well as runtime modularity, new officially supported runtime modules will be created under the
+`cosmossdk.io/runtime` prefix. For each supported consensus engine a semantically-versioned go module should be created
+with a runtime implementation for that consensus engine. For example:
+
+* `cosmossdk.io/runtime/comet`
+* `cosmossdk.io/runtime/comet/v2`
+* `cosmossdk.io/runtime/rollkit`
+* etc.
+
+These runtime modules should attempt to be semantically versioned even if the underlying consensus engine is not. Also,
+because a runtime module is also a first class Cosmos SDK module, it should have a protobuf module config type.
+A new semantically versioned module config type should be created for each of these runtime module such that there is a
+1:1 correspondence between the go module and module config type. The same practice should be followed for every
+semantically versioned Cosmos SDK module as described in [ADR 057: App Wiring](/sdk/v0.53/build/architecture/adr-057-app-wiring).
+
+Currently, `github.com/cosmos/cosmos-sdk/runtime` uses the protobuf config type `cosmos.app.runtime.v1alpha1.Module`.
+When we have a standalone v1 comet runtime, we should use a dedicated protobuf module config type such as
+`cosmos.runtime.comet.v1.Module`. When we release v2 of the comet runtime (`cosmossdk.io/runtime/comet/v2`) we should
+have a corresponding `cosmos.runtime.comet.v2.Module` protobuf type.
+
+In order to make it easier to support different consensus engines that support the same core module functionality as
+described in this ADR, a common go module should be created with shared runtime components. The easiest runtime components
+to share initially are probably the message/query router, inter-module client, service register, and event router.
+This common runtime module should be created initially as the `cosmossdk.io/runtime/common` go module.
+
+When this new architecture has been implemented, the main dependency for a Cosmos SDK module would be
+`cosmossdk.io/core` and that module should be able to be used with any supported consensus engine (to the extent
+that it does not explicitly depend on consensus engine specific functionality such as Comet's block headers). An
+app developer would then be able to choose which consensus engine they want to use by importing the corresponding
+runtime module. The current `BaseApp` would be refactored into the `cosmossdk.io/runtime/comet` module, the router
+infrastructure in `baseapp/` would be refactored into `cosmossdk.io/runtime/common` and support ADR 033, and eventually
+a dependency on `github.com/cosmos/cosmos-sdk` would no longer be required.
+
+In short, modules would depend primarily on `cosmossdk.io/core`, and each `cosmossdk.io/runtime/{consensus-engine}`
+would implement the `cosmossdk.io/core` functionality for that consensus engine.
+
+One additional piece that would need to be resolved as part of this architecture is how runtimes relate to the server.
+Likely it would make sense to modularize the current server architecture so that it can be used with any runtime even
+if that is based on a consensus engine besides Comet. This means that eventually the Comet runtime would need to
+encapsulate the logic for starting Comet and the ABCI app.
+
+### Testing
+
+A mock implementation of all services should be provided in core to allow for unit testing of modules
+without needing to depend on any particular version of runtime. Mock services should
+allow tests to observe service behavior or provide a non-production implementation - for instance memory
+stores can be used to mock stores.
+
+For integration testing, a mock runtime implementation should be provided that allows composing different app modules
+together for testing without a dependency on runtime or Comet.
+
+## Consequences
+
+### Backwards Compatibility
+
+Early versions of runtime modules should aim to support as much as possible modules built with the existing
+`AppModule`/`sdk.Context` framework. As the core API is more widely adopted, later runtime versions may choose to
+drop support and only support the core API plus any runtime module specific APIs (like specific versions of Comet).
+
+The core module itself should strive to remain at the go semantic version `v1` as long as possible and follow design
+principles that allow for strong long-term support (LTS).
+
+Older versions of the SDK can support modules built against core with adaptors that wrap core `AppModule`
+implementations in implementations of `AppModule` that conform to that version of the SDK's semantics as well
+as by providing service implementations by wrapping `sdk.Context`.
+
+### Positive
+
+* better API encapsulation and separation of concerns
+* more stable APIs
+* more framework extensibility
+* deterministic events and queries
+* event listeners
+* inter-module msg and query execution support
+* more explicit support for forking and merging of module versions (including runtime)
+
+### Negative
+
+### Neutral
+
+* modules will need to be refactored to use this API
+* some replacements for `AppModule` functionality still need to be defined in follow-ups
+ (type registration, commands, invariants, simulations) and this will take additional design work
+
+## Further Discussions
+
+* gas
+* block headers
+* upgrades
+* registration of gogo proto and amino interface types
+* cobra query and tx commands
+* gRPC gateway
+* crisis module invariants
+* simulations
+
+## References
+
+* [ADR 033: Protobuf-based Inter-Module Communication](/sdk/v0.53/build/architecture/adr-033-protobuf-inter-module-comm)
+* [ADR 057: App Wiring](/sdk/v0.53/build/architecture/adr-057-app-wiring)
+* [ADR 055: ORM](/sdk/v0.53/build/architecture/adr-055-orm)
+* [ADR 028: Public Key Addresses](/sdk/v0.53/build/architecture/adr-028-public-key-addresses)
+* [Keeping Your Modules Compatible](https://go.dev/blog/module-compatibility)
diff --git a/sdk/next/build/architecture/adr-064-abci-2.0.mdx b/sdk/next/build/architecture/adr-064-abci-2.0.mdx
new file mode 100644
index 000000000..077897bb4
--- /dev/null
+++ b/sdk/next/build/architecture/adr-064-abci-2.0.mdx
@@ -0,0 +1,505 @@
+---
+title: 'ADR 64: ABCI 2.0 Integration (Phase II)'
+---
+
+## Changelog
+
+* 2023-01-17: Initial Draft (@alexanderbez)
+* 2023-04-06: Add upgrading section (@alexanderbez)
+* 2023-04-10: Simplify vote extension state persistence (@alexanderbez)
+* 2023-07-07: Revise vote extension state persistence (@alexanderbez)
+* 2023-08-24: Revise vote extension power calculations and staking interface (@davidterpay)
+
+## Status
+
+ACCEPTED
+
+## Abstract
+
+This ADR outlines the continuation of the efforts to implement ABCI++ in the Cosmos
+SDK outlined in [ADR 060: ABCI 1.0 (Phase I)](/sdk/v0.53/build/architecture/adr-060-abci-1.0).
+
+Specifically, this ADR outlines the design and implementation of ABCI 2.0, which
+includes `ExtendVote`, `VerifyVoteExtension` and `FinalizeBlock`.
+
+## Context
+
+ABCI 2.0 continues the promised updates from ABCI++, specifically three additional
+ABCI methods that the application can implement in order to gain further control,
+insight and customization of the consensus process, unlocking many novel use-cases
+that were previously not possible. We describe these three new methods below:
+
+### `ExtendVote`
+
+This method allows each validator process to extend the pre-commit phase of the
+CometBFT consensus process. Specifically, it allows the application to perform
+custom business logic that extends the pre-commit vote and supply additional data
+as part of the vote, although they are signed separately by the same key.
+
+The data, called vote extension, will be broadcast and received together with the
+vote it is extending, and will be made available to the application in the next
+height. Specifically, the proposer of the next block will receive the vote extensions
+in `RequestPrepareProposal.local_last_commit.votes`.
+
+If the application does not have vote extension information to provide, it
+returns a 0-length byte array as its vote extension.
+
+**NOTE**:
+
+* Although each validator process submits its own vote extension, ONLY the *proposer*
+ of the *next* block will receive all the vote extensions included as part of the
+ pre-commit phase of the previous block. This means only the proposer will
+ implicitly have access to all the vote extensions, via `RequestPrepareProposal`,
+ and that not all vote extensions may be included, since a validator does not
+ have to wait for all pre-commits, only 2/3.
+* The pre-commit vote is signed independently from the vote extension.
+
+### `VerifyVoteExtension`
+
+This method allows validators to validate the vote extension data attached to
+each pre-commit message it receives. If the validation fails, the whole pre-commit
+message will be deemed invalid and ignored by CometBFT.
+
+CometBFT uses `VerifyVoteExtension` when validating a pre-commit vote. Specifically,
+for a pre-commit, CometBFT will:
+
+* Reject the message if it doesn't contain a signed vote AND a signed vote extension
+* Reject the message if the vote's signature OR the vote extension's signature fails to verify
+* Reject the message if `VerifyVoteExtension` was rejected by the app
+
+Otherwise, CometBFT will accept the pre-commit message.
+
+Note, this has important consequences on liveness, i.e., if vote extensions repeatedly
+cannot be verified by correct validators, CometBFT may not be able to finalize
+a block even if sufficiently many (+2/3) validators send pre-commit votes for
+that block. Thus, `VerifyVoteExtension` should be used with special care.
+
+CometBFT recommends that an application that detects an invalid vote extension
+SHOULD accept it in `ResponseVerifyVoteExtension` and ignore it in its own logic.
+
+### `FinalizeBlock`
+
+This method delivers a decided block to the application. The application must
+execute the transactions in the block deterministically and update its state
+accordingly. Cryptographic commitments to the block and transaction results,
+returned via the corresponding parameters in `ResponseFinalizeBlock`, are
+included in the header of the next block. CometBFT calls it when a new block
+is decided.
+
+In other words, `FinalizeBlock` encapsulates the current ABCI execution flow of
+`BeginBlock`, one or more `DeliverTx`, and `EndBlock` into a single ABCI method.
+CometBFT will no longer execute requests for these legacy methods and instead
+will just simply call `FinalizeBlock`.
+
+## Decision
+
+We will discuss changes to the Cosmos SDK to implement ABCI 2.0 in two distinct
+phases, `VoteExtensions` and `FinalizeBlock`.
+
+### `VoteExtensions`
+
+Similarly for `PrepareProposal` and `ProcessProposal`, we propose to introduce
+two new handlers that an application can implement in order to provide and verify
+vote extensions.
+
+We propose the following new handlers for applications to implement:
+
+```go
+type ExtendVoteHandler func(sdk.Context, abci.RequestExtendVote) abci.ResponseExtendVote
+
+type VerifyVoteExtensionHandler func(sdk.Context, abci.RequestVerifyVoteExtension) abci.ResponseVerifyVoteExtension
+```
+
+An ephemeral context and state will be supplied to both handlers. The
+context will contain relevant metadata such as the block height and block hash.
+The state will be a cached version of the committed state of the application and
+will be discarded after the execution of the handler, this means that both handlers
+get a fresh state view and no changes made to it will be written.
+
+If an application decides to implement `ExtendVoteHandler`, it must return a
+non-nil `ResponseExtendVote.VoteExtension`.
+
+Recall, an implementation of `ExtendVoteHandler` does NOT need to be deterministic,
+however, given a set of vote extensions, `VerifyVoteExtensionHandler` must be
+deterministic, otherwise the chain may suffer from liveness faults. In addition,
+recall CometBFT proceeds in rounds for each height, so if a decision cannot be
+made about a block proposal at a given height, CometBFT will proceed to the
+next round and thus will execute `ExtendVote` and `VerifyVoteExtension` again for
+the new round for each validator until 2/3 valid pre-commits can be obtained.
+
+Given the broad scope of potential implementations and use-cases of vote extensions,
+and how to verify them, most applications should choose to implement the handlers
+through a single handler type, which can have any number of dependencies injected
+such as keepers. In addition, this handler type could contain some notion of
+volatile vote extension state management which would assist in vote extension
+verification. This state management could be ephemeral or could be some form of
+on-disk persistence.
+
+Example:
+
+```go expandable
+// VoteExtensionHandler implements an Oracle vote extension handler.
+type VoteExtensionHandler struct {
+    cdc   Codec
+    mk    MyKeeper
+    state VoteExtState // This could be a map or a DB connection object
+}
+
+// ExtendVoteHandler can do something with h.mk and possibly h.state to create
+// a vote extension, such as fetching a series of prices for supported assets.
+func (h VoteExtensionHandler) ExtendVoteHandler(ctx sdk.Context, req abci.RequestExtendVote) abci.ResponseExtendVote {
+    prices := GetPrices(ctx, h.mk.Assets())
+
+    bz, err := EncodePrices(h.cdc, prices)
+    if err != nil {
+        panic(fmt.Errorf("failed to encode prices for vote extension: %w", err))
+    }
+
+    // store our vote extension at the given height
+    //
+    // NOTE: Vote extensions can be overridden since we can timeout in a round.
+    SetPrices(h.state, req, bz)
+
+    return abci.ResponseExtendVote{VoteExtension: bz}
+}
+
+// VerifyVoteExtensionHandler can do something with h.state and req to verify
+// the req.VoteExtension field, such as ensuring the provided oracle prices are
+// within some valid range of our prices.
+func (h VoteExtensionHandler) VerifyVoteExtensionHandler(ctx sdk.Context, req abci.RequestVerifyVoteExtension) abci.ResponseVerifyVoteExtension {
+    prices, err := DecodePrices(h.cdc, req.VoteExtension)
+    if err != nil {
+        log("failed to decode vote extension", "err", err)
+        return abci.ResponseVerifyVoteExtension{Status: REJECT}
+    }
+
+    if err := ValidatePrices(h.state, req, prices); err != nil {
+        log("failed to validate vote extension", "prices", prices, "err", err)
+        return abci.ResponseVerifyVoteExtension{Status: REJECT}
+    }
+
+    // store updated vote extensions at the given height
+    //
+    // NOTE: Vote extensions can be overridden since we can timeout in a round.
+    SetPrices(h.state, req, req.VoteExtension)
+
+    return abci.ResponseVerifyVoteExtension{Status: ACCEPT}
+}
+```
+
+#### Vote Extension Propagation & Verification
+
+As mentioned previously, vote extensions for height `H` are only made available
+to the proposer at height `H+1` during `PrepareProposal`. However, in order to
+make vote extensions useful, all validators should have access to the agreed upon
+vote extensions at height `H` during `H+1`.
+
+Since CometBFT includes all the vote extension signatures in `RequestPrepareProposal`,
+we propose that the proposing validator manually "inject" the vote extensions
+along with their respective signatures via a special transaction, `VoteExtsTx`,
+into the block proposal during `PrepareProposal`. The `VoteExtsTx` will be
+populated with a single `ExtendedCommitInfo` object which is received directly
+from `RequestPrepareProposal`.
+
+For convention, the `VoteExtsTx` transaction should be the first transaction in
+the block proposal, although chains can implement their own preferences. For
+safety purposes, we also propose that the proposer itself verify all the vote
+extension signatures it receives in `RequestPrepareProposal`.
+
+A validator, upon a `RequestProcessProposal`, will receive the injected `VoteExtsTx`
+which includes the vote extensions along with their signatures. If no such transaction
+exists, the validator MUST REJECT the proposal.
+
+When a validator inspects a `VoteExtsTx`, it will evaluate each `SignedVoteExtension`.
+For each signed vote extension, the validator will generate the signed bytes and
+verify the signature. At least 2/3 valid signatures, based on voting power, must
+be received in order for the block proposal to be valid, otherwise the validator
+MUST REJECT the proposal.
+
+In order to have the ability to validate signatures, `BaseApp` must have access
+to the `x/staking` module, since this module stores an index from consensus
+address to public key. However, we will avoid a direct dependency on `x/staking`
+and instead rely on an interface. In addition, the Cosmos SDK will expose
+a default signature verification method which applications can use:
+
+```go expandable
+type ValidatorStore interface {
+    GetPubKeyByConsAddr(context.Context, sdk.ConsAddress) (cmtprotocrypto.PublicKey, error)
+}
+
+// ValidateVoteExtensions is a function that an application can execute in
+// ProcessProposal to verify vote extension signatures.
+func (app *BaseApp) ValidateVoteExtensions(ctx sdk.Context, currentHeight int64, extCommit abci.ExtendedCommitInfo) error {
+    votingPower := 0
+    totalVotingPower := 0
+
+    for _, vote := range extCommit.Votes {
+        totalVotingPower += vote.Validator.Power
+
+        if !vote.SignedLastBlock || len(vote.VoteExtension) == 0 {
+            continue
+        }
+
+        valConsAddr := sdk.ConsAddress(vote.Validator.Address)
+
+        pubKeyProto, err := valStore.GetPubKeyByConsAddr(ctx, valConsAddr)
+        if err != nil {
+            return fmt.Errorf("failed to get public key for validator %s: %w", valConsAddr, err)
+        }
+
+        if len(vote.ExtensionSignature) == 0 {
+            return fmt.Errorf("received a non-empty vote extension with empty signature for validator %s", valConsAddr)
+        }
+
+        cmtPubKey, err := cryptoenc.PubKeyFromProto(pubKeyProto)
+        if err != nil {
+            return fmt.Errorf("failed to convert validator %X public key: %w", valConsAddr, err)
+        }
+
+        cve := cmtproto.CanonicalVoteExtension{
+            Extension: vote.VoteExtension,
+            Height:    currentHeight - 1, // the vote extension was signed in the previous height
+            Round:     int64(extCommit.Round),
+            ChainId:   app.GetChainID(),
+        }
+
+        extSignBytes, err := cosmosio.MarshalDelimited(&cve)
+        if err != nil {
+            return fmt.Errorf("failed to encode CanonicalVoteExtension: %w", err)
+        }
+
+        if !cmtPubKey.VerifySignature(extSignBytes, vote.ExtensionSignature) {
+            return errors.New("received vote with invalid signature")
+        }
+
+        votingPower += vote.Validator.Power
+    }
+
+    if (votingPower / totalVotingPower) < threshold {
+        return errors.New("not enough voting power for the vote extensions")
+    }
+
+    return nil
+}
+```
+
+Once at least 2/3 signatures, by voting power, are received and verified, the
+validator can use the vote extensions to derive additional data or come to some
+decision based on the vote extensions.
+
+> NOTE: It is very important to state, that neither the vote propagation technique
+> nor the vote extension verification mechanism described above is required for
+> applications to implement. In other words, a proposer is not required to verify
+> and propagate vote extensions along with their signatures nor are proposers
+> required to verify those signatures. An application can implement its own
+> PKI mechanism and use that to sign and verify vote extensions.
+
+#### Vote Extension Persistence
+
+In certain contexts, it may be useful or necessary for applications to persist
+data derived from vote extensions. In order to facilitate this use case, we propose
+to allow app developers to define a pre-Blocker hook which will be called
+at the very beginning of `FinalizeBlock`, i.e. before `BeginBlock` (see below).
+
+Note, we cannot allow applications to directly write to the application state
+during `ProcessProposal` because during replay, CometBFT will NOT call `ProcessProposal`,
+which would result in an incomplete state view.
+
+```go
+func (a MyApp) PreBlocker(ctx sdk.Context, req *abci.RequestFinalizeBlock) error {
+    voteExts := GetVoteExtensions(ctx, req.Txs)
+
+    // Process and perform some compute on vote extensions, storing any resulting
+    // state.
+    if err := a.processVoteExtensions(ctx, voteExts); err != nil {
+        return err
+    }
+
+    return nil
+}
+```
+
+### `FinalizeBlock`
+
+The existing ABCI methods `BeginBlock`, `DeliverTx`, and `EndBlock` have existed
+since the dawn of ABCI-based applications. Thus, applications, tooling, and developers
+have grown used to these methods and their use-cases. Specifically, `BeginBlock`
+and `EndBlock` have grown to be pretty integral and powerful within ABCI-based
+applications. E.g. an application might want to run distribution and inflation
+related operations prior to executing transactions and then have staking related
+changes to happen after executing all transactions.
+
+We propose to keep `BeginBlock` and `EndBlock` within the SDK's core module
+interfaces only so application developers can continue to build against existing
+execution flows. However, we will remove `BeginBlock`, `DeliverTx` and `EndBlock`
+from the SDK's `BaseApp` implementation and thus the ABCI surface area.
+
+What will then exist is a single `FinalizeBlock` execution flow. Specifically, in
+`FinalizeBlock` we will execute the application's `BeginBlock`, followed by
+execution of all the transactions, finally followed by execution of the application's
+`EndBlock`.
+
+Note, we will still keep the existing transaction execution mechanics within
+`BaseApp`, but all notions of `DeliverTx` will be removed, i.e. `deliverState`
+will be replaced with `finalizeState`, which will be committed on `Commit`.
+
+However, there are current parameters and fields that exist in the existing
+`BeginBlock` and `EndBlock` ABCI types, such as votes that are used in distribution
+and byzantine validators used in evidence handling. These parameters exist in the
+`FinalizeBlock` request type, and will need to be passed to the application's
+implementations of `BeginBlock` and `EndBlock`.
+
+This means the Cosmos SDK's core module interfaces will need to be updated to
+reflect these parameters. The easiest and most straightforward way to achieve
+this is to just pass `RequestFinalizeBlock` to `BeginBlock` and `EndBlock`.
+Alternatively, we can create dedicated proxy types in the SDK that reflect these
+legacy ABCI types, e.g. `LegacyBeginBlockRequest` and `LegacyEndBlockRequest`. Or,
+we can come up with new types and names altogether.
+
+```go expandable
+func (app *BaseApp) FinalizeBlock(req abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) {
+    ctx := ...
+
+    if app.preBlocker != nil {
+        ctx := app.finalizeBlockState.ctx
+        rsp, err := app.preBlocker(ctx, req)
+        if err != nil {
+            return nil, err
+        }
+        if rsp.ConsensusParamsChanged {
+            app.finalizeBlockState.ctx = ctx.WithConsensusParams(app.GetConsensusParams(ctx))
+        }
+    }
+
+    beginBlockResp, err := app.beginBlock(req)
+    appendBlockEventAttr(beginBlockResp.Events, "begin_block")
+
+    txExecResults := make([]abci.ExecTxResult, 0, len(req.Txs))
+    for _, tx := range req.Txs {
+        result := app.runTx(runTxModeFinalize, tx)
+        txExecResults = append(txExecResults, result)
+    }
+
+    endBlockResp, err := app.endBlock(app.finalizeBlockState.ctx)
+    appendBlockEventAttr(endBlockResp.Events, "end_block")
+
+    return abci.ResponseFinalizeBlock{
+        TxResults:             txExecResults,
+        Events:                joinEvents(beginBlockResp.Events, endBlockResp.Events),
+        ValidatorUpdates:      endBlockResp.ValidatorUpdates,
+        ConsensusParamUpdates: endBlockResp.ConsensusParamUpdates,
+        AppHash:               nil,
+    }
+}
+```
+
+#### Events
+
+Many tools, indexers and ecosystem libraries rely on the existence of `BeginBlock`
+and `EndBlock` events. Since CometBFT now only exposes `FinalizeBlockEvents`, we
+find that it will still be useful for these clients and tools to still query for
+and rely on existing events, especially since applications will still define
+`BeginBlock` and `EndBlock` implementations.
+
+In order to facilitate existing event functionality, we propose that all `BeginBlock`
+and `EndBlock` events have a dedicated `EventAttribute` with `key=block` and
+`value=begin_block|end_block`. The `EventAttribute` will be appended to each event
+in both `BeginBlock` and `EndBlock` events.
+
+### Upgrading
+
+CometBFT defines a consensus parameter, [`VoteExtensionsEnableHeight`](https://github.com/cometbft/cometbft/blob/v0.38.0-alpha.1/spec/abci/abci%2B%2B_app_requirements.md#abciparamsvoteextensionsenableheight),
+which specifies the height at which vote extensions are enabled and **required**.
+If the value is set to zero, which is the default, then vote extensions are
+disabled and an application is not required to implement and use vote extensions.
+
+However, if the value `H` is positive, at all heights greater than the configured
+height `H` vote extensions must be present (even if empty). When the configured
+height `H` is reached, `PrepareProposal` will not include vote extensions yet,
+but `ExtendVote` and `VerifyVoteExtension` will be called. Then, when reaching
+height `H+1`, `PrepareProposal` will include the vote extensions from height `H`.
+
+It is very important to note, for all heights after H:
+
+* Vote extensions CANNOT be disabled
+* They are mandatory, i.e. all pre-commit messages sent MUST have an extension
+ attached (even if empty)
+
+When an application updates to the Cosmos SDK version with CometBFT v0.38 support,
+in the upgrade handler it must ensure to set the consensus parameter
+`VoteExtensionsEnableHeight` to the correct value. E.g. if an application is set
+to perform an upgrade at height `H`, then the value of `VoteExtensionsEnableHeight`
+should be set to any value `>=H+1`. This means that at the upgrade height, `H`,
+vote extensions will not be enabled yet, but at height `H+1` they will be enabled.
+
+## Consequences
+
+### Backwards Compatibility
+
+ABCI 2.0 is naturally not backwards compatible with prior versions of the Cosmos SDK
+and CometBFT. For example, a node that sends `RequestFinalizeBlock`
+to an application that does not speak ABCI 2.0 will naturally fail.
+
+In addition, `BeginBlock`, `DeliverTx` and `EndBlock` will be removed from the
+application ABCI interfaces and along with the inputs and outputs being modified
+in the module interfaces.
+
+### Positive
+
+* `BeginBlock` and `EndBlock` semantics remain, so burden on application developers
+ should be limited.
+* Less communication overhead as multiple ABCI requests are condensed into a single
+ request.
+* Sets the groundwork for optimistic execution.
+* Vote extensions allow for an entirely new set of application primitives to be
+ developed, such as in-process price oracles and encrypted mempools.
+
+### Negative
+
+* Some existing Cosmos SDK core APIs may need to be modified and thus broken.
+* Signature verification in `ProcessProposal` of 100+ vote extension signatures
+ will add significant performance overhead to `ProcessProposal`. Granted, the
+ signature verification process can happen concurrently using an error group
+ with `GOMAXPROCS` goroutines.
+
+### Neutral
+
+* Having to manually "inject" vote extensions into the block proposal during
+ `PrepareProposal` is an awkward approach and takes up block space unnecessarily.
+* The requirement of `ResetProcessProposalState` can create a footgun for
+ application developers if they're not careful, but this is necessary in order
+ for applications to be able to commit state from vote extension computation.
+
+## Further Discussions
+
+Future discussions include design and implementation of ABCI 3.0, which is a
+continuation of ABCI++ and the general discussion of optimistic execution.
+
+## References
+
+* [ADR 060: ABCI 1.0 (Phase I)](/sdk/v0.53/build/architecture/adr-060-abci-1.0)
diff --git a/sdk/next/build/architecture/adr-065-store-v2.mdx b/sdk/next/build/architecture/adr-065-store-v2.mdx
new file mode 100644
index 000000000..72e48449f
--- /dev/null
+++ b/sdk/next/build/architecture/adr-065-store-v2.mdx
@@ -0,0 +1,293 @@
+---
+title: 'ADR-065: Store V2'
+description: 'Feb 14, 2023: Initial Draft (@alexanderbez)'
+---
+
+## Changelog
+
+* Feb 14, 2023: Initial Draft (@alexanderbez)
+
+## Status
+
+DRAFT
+
+## Abstract
+
+The storage and state primitives that Cosmos SDK based applications have used have
+by and large not changed since the launch of the inaugural Cosmos Hub. The demands
+and needs of Cosmos SDK based applications, from both developer and client UX
+perspectives, have evolved and outgrown the ecosystem since these primitives
+were first introduced.
+
+Over time as these applications have gained significant adoption, many critical
+shortcomings and flaws have been exposed in the state and storage primitives of
+the Cosmos SDK.
+
+In order to keep up with the evolving demands and needs of both clients and developers,
+a major overhaul to these primitives is necessary.
+
+## Context
+
+The Cosmos SDK provides application developers with various storage primitives
+for dealing with application state. Specifically, each module contains its own
+merkle commitment data structure -- an IAVL tree. In this data structure, a module
+can store and retrieve key-value pairs along with Merkle commitments, i.e. proofs,
+to those key-value pairs indicating that they do or do not exist in the global
+application state. This data structure is the base layer `KVStore`.
+
+In addition, the SDK provides abstractions on top of this Merkle data structure.
+Namely, a root multi-store (RMS) is a collection of each module's `KVStore`.
+Through the RMS, the application can serve queries and provide proofs to clients,
+in addition to providing a module access to its own unique `KVStore` through the
+use of `StoreKey`, which is an OCAP primitive.
+
+There are further layers of abstraction that sit between the RMS and the underlying
+IAVL `KVStore`. A `GasKVStore` is responsible for tracking gas IO consumption for
+state machine reads and writes. A `CacheKVStore` is responsible for providing a
+way to cache reads and buffer writes to make state transitions atomic, e.g.
+transaction execution or governance proposal execution.
+
+There are a few critical drawbacks to these layers of abstraction and the overall
+design of storage in the Cosmos SDK:
+
+* Since each module has its own IAVL `KVStore`, commitments are not [atomic](https://github.com/cosmos/cosmos-sdk/issues/14625)
+ * Note, we can still allow modules to have their own IAVL `KVStore`, but the
+ IAVL library will need to support the ability to pass a DB instance as an
+ argument to various IAVL APIs.
+* Since IAVL is responsible for both state storage and commitment, running an
+ archive node becomes increasingly expensive as disk space grows exponentially.
+* As the size of a network increases, various performance bottlenecks start to
+ emerge in many areas such as query performance, network upgrades, state
+ migrations, and general application performance.
+* Developer UX is poor as it does not allow application developers to experiment
+ with different types of approaches to storage and commitments, along with the
+ complications of many layers of abstractions referenced above.
+
+See the [Storage Discussion](https://github.com/cosmos/cosmos-sdk/discussions/13545) for more information.
+
+## Alternatives
+
+There was a previous attempt to refactor the storage layer described in [ADR-040](/sdk/v0.50/build/architecture/adr-040-storage-and-smt-state-commitments).
+However, this approach mainly stems from the shortcomings of IAVL and various
+performance issues around it. While there was a (partial) implementation of [ADR-040](/sdk/v0.50/build/architecture/adr-040-storage-and-smt-state-commitments),
+it was never adopted for a variety of reasons, such as the reliance on using an
+SMT, which was more in a research phase, and some design choices that couldn't
+be fully agreed upon, such as the snap-shotting mechanism that would result in
+massive state bloat.
+
+## Decision
+
+We propose to build upon some of the great ideas introduced in [ADR-040](/sdk/v0.50/build/architecture/adr-040-storage-and-smt-state-commitments),
+while being a bit more flexible with the underlying implementations and overall
+less intrusive. Specifically, we propose to:
+
+* Separate the concerns of state commitment (**SC**), needed for consensus, and
+ state storage (**SS**), needed for state machine and clients.
+* Reduce layers of abstractions necessary between the RMS and underlying stores.
+* Provide atomic module store commitments by providing a batch database object
+ to core IAVL APIs.
+* Reduce complexities in the `CacheKVStore` implementation while also improving
+ performance\[3].
+
+Furthermore, we will keep IAVL as the backing [commitment](https://cryptography.fandom.com/wiki/Commitment_scheme)
+store for the time being. While we might not fully settle on the use of IAVL in
+the long term, we do not have strong empirical evidence to suggest a better
+alternative. Given that the SDK provides interfaces for stores, it should be sufficient
+to change the backing commitment store in the future should evidence arise to
+warrant a better alternative. However, there is promising work being done to IAVL
+that should result in significant performance improvements \[1,2].
+
+### Separating SS and SC
+
+By separating SS and SC, it will allow us to optimize against primary use cases
+and access patterns to state. Specifically, the SS layer will be responsible for
+direct access to data in the form of (key, value) pairs, whereas the SC layer (IAVL)
+will be responsible for committing to data and providing Merkle proofs.
+
+Note, the underlying physical storage database will be the same between both the
+SS and SC layers. So to avoid collisions between (key, value) pairs, both layers
+will be namespaced.
+
+#### State Commitment (SC)
+
+Given that the existing solution today acts as both SS and SC, we can simply
+repurpose it to act solely as the SC layer without any significant changes to
+access patterns or behavior. In other words, the entire collection of existing
+IAVL-backed module `KVStore`s will act as the SC layer.
+
+However, in order for the SC layer to remain lightweight and not duplicate a
+majority of the data held in the SS layer, we encourage node operators to keep
+tight pruning strategies.
+
+#### State Storage (SS)
+
+In the RMS, we will expose a *single* `KVStore` backed by the same physical
+database that backs the SC layer. This `KVStore` will be explicitly namespaced
+to avoid collisions and will act as the primary storage for (key, value) pairs.
+
+We will most likely continue the use of `cosmos-db`, or some local interface,
+to allow for flexibility and iteration over preferred physical storage backends
+as research and benchmarking continues. However, we propose to hardcode the use
+of RocksDB as the primary physical storage backend.
+
+Since the SS layer will be implemented as a `KVStore`, it will support the
+following functionality:
+
+* Range queries
+* CRUD operations
+* Historical queries and versioning
+* Pruning
+
+The RMS will keep track of all buffered writes using a dedicated and internal
+`MemoryListener` for each `StoreKey`. For each block height, upon `Commit`, the
+SS layer will write all buffered (key, value) pairs under a [RocksDB user-defined timestamp](https://github.com/facebook/rocksdb/wiki/User-defined-Timestamp-%28Experimental%29) column
+family using the block height as the timestamp, which is an unsigned integer.
+This will allow a client to fetch (key, value) pairs at historical and current
+heights along with making iteration and range queries relatively performant as
+the timestamp is the key suffix.
+
+Note, we choose not to use a more general approach of allowing any embedded key/value
+database, such as LevelDB or PebbleDB, using height key-prefixed keys to
+effectively version state because most of these databases use variable length
+keys, which would make actions like iteration and range queries less
+performant.
+
+Since operators might want pruning strategies to differ in SS compared to SC,
+e.g. having a very tight pruning strategy in SC while having a looser pruning
+strategy for SS, we propose to introduce an additional pruning configuration,
+with parameters that are identical to what exists in the SDK today, and allow
+operators to control the pruning strategy of the SS layer independently of the
+SC layer.
+
+Note, the SC pruning strategy must be congruent with the operator's state sync
+configuration. This is so as to allow state sync snapshots to execute successfully,
+otherwise, a snapshot could be triggered on a height that is not available in SC.
+
+#### State Sync
+
+The state sync process should be largely unaffected by the separation of the SC
+and SS layers. However, if a node syncs via state sync, the SS layer of the node
+will not have the state synced height available, since the IAVL import process is
+not set up in a way that easily allows direct key/value insertion. A modification of
+the IAVL import process would be necessary to facilitate having the state sync
+height available.
+
+Note, this is not problematic for the state machine itself because when a query
+is made, the RMS will automatically direct the query correctly (see [Queries](#queries)).
+
+#### Queries
+
+To consolidate the query routing between both the SC and SS layers, we propose to
+have a notion of a "query router" that is constructed in the RMS. This query router
+will be supplied to each `KVStore` implementation. The query router will route
+queries to either the SC layer or the SS layer based on a few parameters. If
+`prove: true`, then the query must be routed to the SC layer. Otherwise, if the
+query height is available in the SS layer, the query will be served from the SS
+layer. Otherwise, we fall back on the SC layer.
+
+If no height is provided, the SS layer will assume the latest height. The SS
+layer will store a reverse index to lookup `LatestVersion -> timestamp(version)`
+which is set on `Commit`.
+
+#### Proofs
+
+Since the SS layer is naturally a storage layer only, without any commitments
+to (key, value) pairs, it cannot provide Merkle proofs to clients during queries.
+
+Since the pruning strategy against the SC layer is configured by the operator,
+we can therefore have the RMS route the query to the SC layer if the version exists
+and `prove: true`. Otherwise, the query will fall back to the SS layer without a proof.
+
+We could explore the idea of using state snapshots to rebuild an in-memory IAVL
+tree in real time against a version closest to the one provided in the query.
+However, it is not clear what the performance implications will be of this approach.
+
+### Atomic Commitment
+
+We propose to modify the existing IAVL APIs to accept a batch DB object instead
+of relying on an internal batch object in `nodeDB`. Since each underlying IAVL
+`KVStore` shares the same DB in the SC layer, this will allow commits to be
+atomic.
+
+Specifically, we propose to:
+
+* Remove the `dbm.Batch` field from `nodeDB`
+* Update the `SaveVersion` method of the `MutableTree` IAVL type to accept a batch object
+* Update the `Commit` method of the `CommitKVStore` interface to accept a batch object
+* Create a batch object in the RMS during `Commit` and pass this object to each
+ `KVStore`
+* Write the database batch after all stores have committed successfully
+
+Note, this will require IAVL to be updated to not rely on or assume any batch
+being present during `SaveVersion`.
+
+## Consequences
+
+As a result of a new store V2 package, we should expect to see improved performance
+for queries and transactions due to the separation of concerns. We should also
+expect to see improved developer UX around experimentation of commitment schemes
+and storage backends for further performance, in addition to a reduced amount of
+abstraction around KVStores making operations such as caching and state branching
+more intuitive.
+
+However, due to the proposed design, there are drawbacks around providing state
+proofs for historical queries.
+
+### Backwards Compatibility
+
+This ADR proposes changes to the storage implementation in the Cosmos SDK through
+an entirely new package. Interfaces may be borrowed and extended from existing
+types that exist in `store`, but no existing implementations or interfaces will
+be broken or modified.
+
+### Positive
+
+* Improved performance of independent SS and SC layers
+* Reduced layers of abstraction making storage primitives easier to understand
+* Atomic commitments for SC
+* Redesign of storage types and interfaces will allow for greater experimentation
+ such as different physical storage backends and different commitment schemes
+ for different application modules
+
+### Negative
+
+* Providing proofs for historical state is challenging
+
+### Neutral
+
+* Keeping IAVL as the primary commitment data structure, although drastic
+ performance improvements are being made
+
+## Further Discussions
+
+### Module Storage Control
+
+Many modules store secondary indexes that are typically solely used to support
+client queries, but are actually not needed for the state machine's state
+transitions. What this means is that these indexes technically have no reason to
+exist in the SC layer at all, as they take up unnecessary space. It is worth
+exploring what an API would look like to allow modules to indicate what (key, value)
+pairs they want to be persisted in the SC layer, implicitly indicating the SS
+layer as well, as opposed to just persisting the (key, value) pair only in the
+SS layer.
+
+### Historical State Proofs
+
+It is not clear what the importance or demand is within the community of providing
+commitment proofs for historical state. While solutions can be devised such as
+rebuilding trees on the fly based on state snapshots, it is not clear what the
+performance implications are for such solutions.
+
+### Physical DB Backends
+
+This ADR proposes usage of RocksDB to utilize user-defined timestamps as a
+versioning mechanism. However, other physical DB backends are available that may
+offer alternative ways to implement versioning while also providing performance
+improvements over RocksDB. E.g. PebbleDB supports MVCC timestamps as well, but
+we'll need to explore how PebbleDB handles compaction and state growth over time.
+
+## References
+
+* \[1] [Link](https://github.com/cosmos/iavl/pull/676)
+* \[2] [Link](https://github.com/cosmos/iavl/pull/664)
+* \[3] [Link](https://github.com/cosmos/cosmos-sdk/issues/14990)
diff --git a/sdk/next/build/architecture/adr-068-preblock.mdx b/sdk/next/build/architecture/adr-068-preblock.mdx
new file mode 100644
index 000000000..8c76407b7
--- /dev/null
+++ b/sdk/next/build/architecture/adr-068-preblock.mdx
@@ -0,0 +1,65 @@
+---
+title: 'ADR 068: Preblock'
+description: 'Sept 13, 2023: Initial Draft'
+---
+
+## Changelog
+
+* Sept 13, 2023: Initial Draft
+
+## Status
+
+DRAFT
+
+## Abstract
+
+Introduce `PreBlock`, which runs before the `BeginBlocker` of all modules, and allows modifying consensus parameters so that the changes are visible to the following state machine logic.
+
+## Context
+
+When upgrading to SDK 0.47, the storage format for consensus parameters changed, but in the migration block, `ctx.ConsensusParams()` is always `nil` because the new code fails to load the old format. The parameters are supposed to be migrated by the `x/upgrade` module first, but unfortunately, the migration happens in the `BeginBlocker` handler, which runs after the `ctx` is initialized.
+When we try to solve this, we find the `x/upgrade` module can't modify the context to make the consensus parameters visible to the other modules: the context is passed by value, and the SDK team wants to keep it that way, which is good for isolation between modules.
+
+## Alternatives
+
+The first alternative solution introduced a `MigrateModuleManager`, which only includes the `x/upgrade` module right now, and baseapp will run their `BeginBlocker`s before the other modules, and reload context's consensus parameters in between.
+
+## Decision
+
+Suggested this new lifecycle method.
+
+### `PreBlocker`
+
+There are two semantics around the new lifecycle method:
+
+* It runs before the `BeginBlocker` of all modules
+* It can modify consensus parameters in storage, and signal the caller through the return value.
+
+When it returns `ConsensusParamsChanged=true`, the caller must refresh the consensus parameter in the finalize context:
+
+```
+app.finalizeBlockState.ctx = app.finalizeBlockState.ctx.WithConsensusParams(app.GetConsensusParams())
+```
+
+The new ctx must be passed to all the other lifecycle methods.
+
+## Consequences
+
+### Backwards Compatibility
+
+### Positive
+
+### Negative
+
+### Neutral
+
+## Further Discussions
+
+## Test Cases
+
+## References
+
+* \[1] [Link](https://github.com/cosmos/cosmos-sdk/issues/16494)
+* \[2] [Link](https://github.com/cosmos/cosmos-sdk/pull/16583)
+* \[3] [Link](https://github.com/cosmos/cosmos-sdk/pull/17421)
+* \[4] [Link](https://github.com/cosmos/cosmos-sdk/pull/17713)
diff --git a/sdk/next/build/architecture/adr-070-unordered-account.mdx b/sdk/next/build/architecture/adr-070-unordered-account.mdx
new file mode 100644
index 000000000..dfb05cf76
--- /dev/null
+++ b/sdk/next/build/architecture/adr-070-unordered-account.mdx
@@ -0,0 +1,351 @@
+---
+title: 'ADR 070: Unordered Transactions'
+---
+
+## Changelog
+
+* Dec 4, 2023: Initial Draft (@yihuang, @tac0turtle, @alexanderbez)
+* Jan 30, 2024: Include section on deterministic transaction encoding
+* Mar 18, 2025: Revise implementation to use Cosmos SDK KV Store and require unique timeouts per-address (@technicallyty)
+* Apr 25, 2025: Add note about rejecting unordered txs with sequence values.
+
+## Status
+
+ACCEPTED Not Implemented
+
+## Abstract
+
+We propose a way to do replay-attack protection without enforcing the order of
+transactions and without requiring the use of monotonically increasing sequences. Instead, we propose
+the use of a time-based, ephemeral sequence.
+
+## Context
+
+Account sequence values serve to prevent replay attacks and ensure transactions from the same sender are included into blocks and executed
+in sequential order. Unfortunately, this makes it difficult to reliably send many concurrent transactions from the
+same sender. Victims of such limitations include IBC relayers and crypto exchanges.
+
+## Decision
+
+We propose adding a boolean field `unordered` and a google.protobuf.Timestamp field `timeout_timestamp` to the transaction body.
+
+Unordered transactions will bypass the traditional account sequence rules and follow the rules described
+below, without impacting traditional ordered transactions which will follow the same sequence rules as before.
+
+We will introduce new storage of time-based, ephemeral unordered sequences using the SDK's existing KV Store library.
+Specifically, we will leverage the existing x/auth KV store to store the unordered sequences.
+
+When an unordered transaction is included in a block, a concatenation of the `timeout_timestamp` and sender’s address bytes
+will be recorded to state (i.e. `542939323/`). In cases of multi-party signing, one entry per signer
+will be recorded to state.
+
+New transactions will be checked against the state to prevent duplicate submissions. To prevent the state from growing indefinitely, we propose the following:
+
+* Define an upper bound for the value of `timeout_timestamp` (i.e. 10 minutes).
+* Add a `PreBlocker` method to x/auth that removes state entries with a `timeout_timestamp` earlier than the current block time.
+
+### Transaction Format
+
+```protobuf
+message TxBody {
+ ...
+
+ bool unordered = 4;
+  google.protobuf.Timestamp timeout_timestamp = 5;
+}
+```
+
+### Replay Protection
+
+We facilitate replay protection by storing the unordered sequence in the Cosmos SDK KV store. Upon transaction ingress, we check if the transaction's unordered
+sequence exists in state, or if the TTL value is stale, i.e. before the current block time. If so, we reject it. Otherwise,
+we add the unordered sequence to the state. This section of the state will belong to the `x/auth` module.
+
+The state is evaluated during x/auth's `PreBlocker`. All transactions with an unordered sequence earlier than the current block time
+will be deleted.
+
+```go
+func (am AppModule)
+
+PreBlock(ctx context.Context) (appmodule.ResponsePreBlock, error) {
+ err := am.accountKeeper.RemoveExpired(sdk.UnwrapSDKContext(ctx))
+ if err != nil {
+ return nil, err
+}
+
+return &sdk.ResponsePreBlock{
+ ConsensusParamsChanged: false
+}, nil
+}
+```
+
+```golang expandable
+package keeper
+
+import (
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "cosmossdk.io/collections"
+ "cosmossdk.io/core/store"
+)
+
+var (
+ // just arbitrarily picking some upper bound number.
+ unorderedSequencePrefix = collections.NewPrefix(90)
+)
+
+type AccountKeeper struct {
+ // ...
+ unorderedSequences collections.KeySet[collections.Pair[uint64, []byte]]
+}
+
+func (m *AccountKeeper)
+
+Contains(ctx sdk.Context, sender []byte, timestamp uint64) (bool, error) {
+ return m.unorderedSequences.Has(ctx, collections.Join(timestamp, sender))
+}
+
+func (m *AccountKeeper)
+
+Add(ctx sdk.Context, sender []byte, timestamp uint64)
+
+error {
+ return m.unorderedSequences.Set(ctx, collections.Join(timestamp, sender))
+}
+
+func (m *AccountKeeper)
+
+RemoveExpired(ctx sdk.Context)
+
+error {
+ blkTime := ctx.BlockTime().UnixNano()
+
+it, err := m.unorderedSequences.Iterate(ctx, collections.NewPrefixUntilPairRange[uint64, []byte](uint64(blkTime)))
+ if err != nil {
+ return err
+}
+
+defer it.Close()
+
+keys, err := it.Keys()
+ if err != nil {
+ return err
+}
+ for _, key := range keys {
+ if err := m.unorderedSequences.Remove(ctx, key); err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+```
+
+### AnteHandler Decorator
+
+To facilitate bypassing nonce verification, we must modify the existing
+`IncrementSequenceDecorator` AnteHandler decorator to skip the nonce verification
+when the transaction is marked as unordered.
+
+```golang
+func (isd IncrementSequenceDecorator)
+
+AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bool, next sdk.AnteHandler) (sdk.Context, error) {
+ if tx.UnOrdered() {
+ return next(ctx, tx, simulate)
+}
+
+ // ...
+}
+```
+
+We also introduce a new decorator to perform the unordered transaction verification.
+
+```golang expandable
+package ante
+
+import (
+
+ "slices"
+ "strings"
+ "time"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
+ authsigning "github.com/cosmos/cosmos-sdk/x/auth/signing"
+
+ errorsmod "cosmossdk.io/errors"
+)
+
+var _ sdk.AnteDecorator = (*UnorderedTxDecorator)(nil)
+
+// UnorderedTxDecorator defines an AnteHandler decorator that is responsible for
+// checking if a transaction is intended to be unordered and, if so, evaluates
+// the transaction accordingly. An unordered transaction will bypass having its
+// nonce incremented, which allows fire-and-forget transaction broadcasting,
+// removing the necessity of ordering on the sender-side.
+//
+// The transaction sender must ensure that unordered=true and a timeout_height
+// is appropriately set. The AnteHandler will check that the transaction is not
+// a duplicate and will evict it from state when the timeout is reached.
+//
+// The UnorderedTxDecorator should be placed as early as possible in the AnteHandler
+// chain to ensure that during DeliverTx, the transaction is added to the unordered sequence state.
+type UnorderedTxDecorator struct {
+ // maxUnOrderedTTL defines the maximum TTL a transaction can define.
+ maxTimeoutDuration time.Duration
+ txManager authkeeper.UnorderedTxManager
+}
+
+func NewUnorderedTxDecorator(
+ utxm authkeeper.UnorderedTxManager,
+) *UnorderedTxDecorator {
+ return &UnorderedTxDecorator{
+ maxTimeoutDuration: 10 * time.Minute,
+ txManager: utxm,
+}
+}
+
+func (d *UnorderedTxDecorator)
+
+AnteHandle(
+ ctx sdk.Context,
+ tx sdk.Tx,
+ _ bool,
+ next sdk.AnteHandler,
+) (sdk.Context, error) {
+ if err := d.ValidateTx(ctx, tx); err != nil {
+ return ctx, err
+}
+
+return next(ctx, tx, false)
+}
+
+func (d *UnorderedTxDecorator)
+
+ValidateTx(ctx sdk.Context, tx sdk.Tx)
+
+error {
+ unorderedTx, ok := tx.(sdk.TxWithUnordered)
+ if !ok || !unorderedTx.GetUnordered() {
+ // If the transaction does not implement unordered capabilities or has the
+ // unordered value as false, we bypass.
+ return nil
+}
+ blockTime := ctx.BlockTime()
+ timeoutTimestamp := unorderedTx.GetTimeoutTimeStamp()
+ if timeoutTimestamp.IsZero() || timeoutTimestamp.Unix() == 0 {
+ return errorsmod.Wrap(
+ sdkerrors.ErrInvalidRequest,
+ "unordered transaction must have timeout_timestamp set",
+ )
+}
+ if timeoutTimestamp.Before(blockTime) {
+ return errorsmod.Wrap(
+ sdkerrors.ErrInvalidRequest,
+ "unordered transaction has a timeout_timestamp that has already passed",
+ )
+}
+ if timeoutTimestamp.After(blockTime.Add(d.maxTimeoutDuration)) {
+ return errorsmod.Wrapf(
+ sdkerrors.ErrInvalidRequest,
+ "unordered tx ttl exceeds %s",
+ d.maxTimeoutDuration.String(),
+ )
+}
+ execMode := ctx.ExecMode()
+ if execMode == sdk.ExecModeSimulate {
+ return nil
+}
+
+signerAddrs, err := getSigners(tx)
+ if err != nil {
+ return err
+}
+ for _, signer := range signerAddrs {
+ contains, err := d.txManager.Contains(ctx, signer, uint64(unorderedTx.GetTimeoutTimeStamp().Unix()))
+ if err != nil {
+ return errorsmod.Wrap(
+ sdkerrors.ErrIO,
+ "failed to check contains",
+ )
+}
+ if contains {
+ return errorsmod.Wrapf(
+ sdkerrors.ErrInvalidRequest,
+ "tx is duplicated for signer %x", signer,
+ )
+}
+ if err := d.txManager.Add(ctx, signer, uint64(unorderedTx.GetTimeoutTimeStamp().Unix())); err != nil {
+ return errorsmod.Wrap(
+ sdkerrors.ErrIO,
+ "failed to add unordered sequence to state",
+ )
+}
+
+}
+
+return nil
+}
+
+func getSigners(tx sdk.Tx) ([][]byte, error) {
+ sigTx, ok := tx.(authsigning.SigVerifiableTx)
+ if !ok {
+ return nil, errorsmod.Wrap(sdkerrors.ErrTxDecode, "invalid tx type")
+}
+
+return sigTx.GetSigners()
+}
+```
+
+### Unordered Sequences
+
+Unordered sequences provide a simple, straightforward mechanism to protect against both transaction malleability and
+transaction duplication. It is important to note that the unordered sequence must still be unique. However,
+the value is not required to be strictly increasing as with regular sequences, and the order in which the node receives
+the transactions no longer matters. Clients can handle building unordered transactions similarly to the code below:
+
+```go
+for i, tx := range txs {
+    tx.SetUnordered(true)
+
+    // Each transaction must carry a unique timeout timestamp per signer;
+    // a nanosecond offset per transaction suffices.
+    tx.SetTimeoutTimestamp(time.Now().Add(time.Duration(i+1) * time.Nanosecond))
+}
+```
+
+We will reject transactions that have both sequence and unordered timeouts set. We do this to avoid assuming the intent of the user.
+
+### State Management
+
+The storage of unordered sequences will be facilitated using the Cosmos SDK's KV Store service.
+
+## Note On Previous Design Iteration
+
+The previous iteration of unordered transactions worked by using an ad-hoc state-management system that posed severe
+risks and a vector for duplicated tx processing. It relied on graceful app closure which would flush the current state
+of the unordered sequence mapping. If the 2/3's of the network crashed, and the graceful closure did not trigger,
+the system would lose track of all sequences in the mapping, allowing those transactions to be replayed. The
+implementation proposed in the updated version of this ADR solves this by writing directly to the Cosmos KV Store.
+While this is less performant, for the initial implementation, we opted to choose a safer path and postpone performance optimizations until we have more data on real-world impacts and a more battle-tested approach to optimization.
+
+Additionally, the previous iteration relied on using hashes to create what we call an "unordered sequence." There are known
+issues with transaction malleability in Cosmos SDK signing modes. This ADR gets away from this problem by enforcing
+single-use unordered nonces, instead of deriving nonces from bytes in the transaction.
+
+## Consequences
+
+### Positive
+
+* Support unordered transaction inclusion, enabling the ability to "fire and forget" many transactions at once.
+
+### Negative
+
+* Requires additional storage overhead.
+* Requirement of unique timestamps per transaction causes a small amount of additional overhead for clients. Clients must ensure each transaction's timeout timestamp is different. However, nanosecond differentials suffice.
+* Usage of Cosmos SDK KV store is slower in comparison to using a non-merklized store or ad-hoc methods, and block times may slow down as a result.
+
+## References
+
+* [Link](https://github.com/cosmos/cosmos-sdk/issues/13009)
diff --git a/sdk/next/build/architecture/adr-076-tx-malleability.mdx b/sdk/next/build/architecture/adr-076-tx-malleability.mdx
new file mode 100644
index 000000000..02ab032a9
--- /dev/null
+++ b/sdk/next/build/architecture/adr-076-tx-malleability.mdx
@@ -0,0 +1,173 @@
+---
+title: Cosmos SDK Transaction Malleability Risk Review and Recommendations
+description: '2025-03-10: Initial draft (@aaronc)'
+---
+
+## Changelog
+
+* 2025-03-10: Initial draft (@aaronc)
+
+## Status
+
+PROPOSED: Not Implemented
+
+## Abstract
+
+Several encoding and sign mode related issues have historically resulted in the possibility
+that Cosmos SDK transactions may be re-encoded in such a way as to change their hash
+(and in rare cases, their meaning) without invalidating the signature.
+This document details these cases, their potential risks, the extent to which they have been
+addressed, and provides recommendations for future improvements.
+
+## Review
+
+One naive assumption about Cosmos SDK transactions is that hashing the raw bytes of a submitted transaction creates a safe unique identifier for the transaction. In reality, there are multiple ways in which transactions could be manipulated to create different transaction bytes (and as a result different hashes) that still pass signature verification.
+
+This document attempts to enumerate the various potential transaction "malleability" risks that we have identified and the extent to which they have or have not been addressed in various sign modes. We also identify vulnerabilities that could be introduced if developers make changes in the future without careful consideration of the complexities involved with transaction encoding, sign modes and signatures.
+
+### Risks Associated with Malleability
+
+The malleability of transactions poses the following potential risks to end users:
+
+* unsigned data could get added to transactions and be processed by state machines
+* clients often rely on transaction hashes for checking transaction status, but whether or not submitted transaction hashes match processed transaction hashes depends primarily on good network actors rather than fundamental protocol guarantees
+* transactions could potentially get executed more than once (faulty replay protection)
+
+If a client generates a transaction, keeps a record of its hash and then attempts to query nodes to check the transaction's status, this process may falsely conclude that the transaction had not been processed if an intermediary
+processor decoded and re-encoded the transaction with different encoding rules (either maliciously or unintentionally).
+As long as no malleability is present in the signature bytes themselves, clients *should* query transactions by signature instead of hash.
+
+Not being cognizant of this risk may lead clients to submit the same transaction multiple times if they believe that
+earlier transactions had failed or gotten lost in processing.
+This could be an attack vector against users if wallets primarily query transactions by hash.
+
+If the state machine were to rely on transaction hashes as a replay mechanism itself, this would be faulty and not
+provide the intended replay protection. Instead, the state machine should rely on deterministic representations of
+transactions rather than the raw encoding, or other nonces,
+if they want to provide some replay protection that doesn't rely on a monotonically
+increasing account sequence number.
+
+### Sources of Malleability
+
+#### Non-deterministic Protobuf Encoding
+
+Cosmos SDK transactions are encoded using protobuf binary encoding when they are submitted to the network. Protobuf binary is not inherently a deterministic encoding meaning that the same logical payload could have several valid bytes representations. In a basic sense, this means that protobuf in general can be decoded and re-encoded to produce a different byte stream (and thus different hash) without changing the logical meaning of the bytes. [ADR 027: Deterministic Protobuf Serialization](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-027-deterministic-protobuf-serialization.md) describes in detail what needs to be done to produce what we consider to be a "canonical", deterministic protobuf serialization. Briefly, the following sources of malleability at the encoding level have been identified and are addressed by this specification:
+
+* fields can be emitted in any order
+* default field values can be included or omitted, and this doesn't change meaning unless `optional` is used
+* `repeated` fields of scalars may use packed or "regular" encoding
+* `varint`s can include extra ignored bits
+* extra fields may be added and are usually simply ignored by decoders ([ADR 020](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-020-protobuf-transaction-encoding.md#unknown-field-filtering) specifies that in general such extra fields should cause messages and transactions to be rejected)
+
+When using `SIGN_MODE_DIRECT` none of the above malleabilities will be tolerated because:
+
+* signatures of messages and extensions must be done over the raw encoded bytes of those fields
+* the outer tx envelope (`TxRaw`) must follow ADR 027 rules or be rejected
+
+Transactions signed with `SIGN_MODE_LEGACY_AMINO_JSON`, however, have no way of protecting against the above malleabilities because what is signed is a JSON representation of the logical contents of the transaction. These logical contents could have any number of valid protobuf binary encodings, so in general there are no guarantees regarding transaction hash with Amino JSON signing.
+
+In addition to being aware of the general non-determinism of protobuf binary, developers need to pay special attention to make sure that unknown protobuf fields get rejected when developing new capabilities related to protobuf transactions. The protobuf serialization format was designed with the assumption that unknown data known to encoders could safely be ignored by decoders. This assumption may have been fairly safe within the walled garden of Google's centralized infrastructure. However, in distributed blockchain systems, this assumption is generally unsafe. If a newer client encodes a protobuf message with data intended for a newer server, it is not safe for an older server to simply ignore and discard instructions that it does not understand. These instructions could include critical information that the transaction signer is relying upon and just assuming that it is unimportant is not safe.
+
+[ADR 020](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-020-protobuf-transaction-encoding.md#unknown-field-filtering) specifies some provisions for "non-critical" fields which can safely be ignored by older servers. In practice, no valid usages of this have been observed. It is something in the design that maintainers should be aware of, but it may not be necessary or even 100% safe.
+
+#### Non-deterministic Value Encoding
+
+In addition to the non-determinism present in protobuf binary itself, some protobuf field data is encoded using a micro-format which itself may not be deterministic. Consider for instance integer or decimal encoding. Some decoders may allow for the presence of leading or trailing zeros without changing the logical meaning, ex. `00100` vs `100` or `100.00` vs `100`. So if a sign mode encodes numbers deterministically, but decoders accept multiple representations,
+a user may sign over the value `100` while `0100` gets encoded. This would be possible with Amino JSON to the extent that the integer decoder accepts leading zeros. The current `Int` implementation should reject this; however, it is
+probably possible to encode an octal or hexadecimal representation in the transaction whereas the user signs over a decimal integer.
+
+#### Signature Encoding
+
+Signatures themselves are encoded using a micro-format specific to the signature algorithm being used and sometimes these
+micro-formats can allow for non-determinism (multiple valid bytes for the same signature).
+Most of the signature algorithms supported by the SDK should reject non-canonical bytes in their current implementation.
+However, the `Multisignature` protobuf type uses normal protobuf encoding and there is no check as to whether the
+decoded bytes followed canonical ADR 027 rules or not. Therefore, multisig transactions can have malleability in
+their signatures.
+Any new or custom signature algorithms must make sure that they reject any non-canonical bytes, otherwise even
+with `SIGN_MODE_DIRECT` there can be transaction hash malleability by re-encoding signatures with a non-canonical
+representation.
+
+#### Fields not covered by Amino JSON
+
+Another area that needs to be addressed carefully is the discrepancy between `AminoSignDoc`(see [`aminojson.proto`](https://github.com/cosmos/cosmos-sdk/blob/v0.50/x/tx/signing/aminojson/internal/aminojsonpb/aminojson.proto)) used for `SIGN_MODE_LEGACY_AMINO_JSON` and the actual contents of `TxBody` and `AuthInfo` (see [`tx.proto`](https://github.com/cosmos/cosmos-sdk/blob/v0.50/proto/cosmos/tx/v1beta1/tx.proto)).
+If fields get added to `TxBody` or `AuthInfo`, they must either have a corresponding representation in `AminoSignDoc` or Amino JSON signatures must be rejected when those new fields are set. Making sure that this is done is a
+highly manual process, and developers could easily make the mistake of updating `TxBody` or `AuthInfo`
+without paying any attention to the implementation of `GetSignBytes` for Amino JSON. This is a critical
+vulnerability in which unsigned content can now get into the transaction and signature verification will
+pass.
+
+## Sign Mode Summary and Recommendations
+
+The sign modes officially supported by the SDK are `SIGN_MODE_DIRECT`, `SIGN_MODE_TEXTUAL`, `SIGN_MODE_DIRECT_AUX`,
+and `SIGN_MODE_LEGACY_AMINO_JSON`.
+`SIGN_MODE_LEGACY_AMINO_JSON` is used commonly by wallets and is currently the only sign mode supported on Ledger Nano hardware devices
+(although `SIGN_MODE_TEXTUAL` was designed to also support hardware devices).
+`SIGN_MODE_DIRECT` is the simplest sign mode and its usage is also fairly common.
+`SIGN_MODE_DIRECT_AUX` is a variant of `SIGN_MODE_DIRECT` that can be used by auxiliary signers in a multi-signer
+transaction by those signers who are not paying gas.
+`SIGN_MODE_TEXTUAL` was intended as a replacement for `SIGN_MODE_LEGACY_AMINO_JSON`, but as far as we know it
+has not been adopted by any clients yet and thus is not in active use.
+
+All known malleability concerns have been addressed in the current implementation of `SIGN_MODE_DIRECT`.
+The only known malleability that could occur with a transaction signed with `SIGN_MODE_DIRECT` would
+need to be in the signature bytes themselves.
+Since signatures are not signed over, it is impossible for any sign mode to address this directly
+and instead signature algorithms need to take care to reject any non-canonically encoded signature bytes
+to prevent malleability.
+For the known malleability of the `Multisignature` type, we should make sure that any valid signatures
+were encoded following canonical ADR 027 rules when doing signature verification.
+
+`SIGN_MODE_DIRECT_AUX` provides the same level of safety as `SIGN_MODE_DIRECT` because
+
+* the raw encoded `TxBody` bytes are signed over in `SignDocDirectAux`, and
+* a transaction using `SIGN_MODE_DIRECT_AUX` still requires the primary signer to sign the transaction with `SIGN_MODE_DIRECT`
+
+`SIGN_MODE_TEXTUAL` also provides the same level of safety as `SIGN_MODE_DIRECT` because the hash of the raw encoded
+`TxBody` and `AuthInfo` bytes are signed over.
+
+Unfortunately, the vast majority of unaddressed malleability risks affect `SIGN_MODE_LEGACY_AMINO_JSON` and this
+sign mode is still commonly used.
+It is recommended that the following improvements be made to Amino JSON signing:
+
+* hashes of `TxBody` and `AuthInfo` should be added to `AminoSignDoc` so that encoding-level malleability is addressed
+* when constructing `AminoSignDoc`, the [protoreflect](https://pkg.go.dev/google.golang.org/protobuf/reflect/protoreflect) API should be used to ensure that no fields in `TxBody` or `AuthInfo` which do not have a mapping in `AminoSignDoc` have been set
+* fields present in `TxBody` or `AuthInfo` that are not present in `AminoSignDoc` (such as extension options) should
+ be added to `AminoSignDoc` if possible
+
+## Testing
+
+To test that transactions are resistant to malleability,
+we can develop a test suite to run against all sign modes that
+attempts to manipulate transaction bytes in the following ways:
+
+* changing protobuf encoding by
+ * reordering fields
+ * setting default values
+ * adding extra bits to varints, or
+ * setting new unknown fields
+* modifying integer and decimal values encoded as strings with leading or trailing zeros
+
+Whenever any of these manipulations is done, we should observe that the sign doc bytes for the sign mode being
+tested also change, meaning that the corresponding signatures will also have to change.
+
+In the case of Amino JSON, we should also develop tests which ensure that if any `TxBody` or `AuthInfo`
+field not supported by Amino's `AminoSignDoc` is set that signing fails.
+
+In the general case of transaction decoding, we should have unit tests to ensure that
+
+* any `TxRaw` bytes which do not follow ADR 027 canonical encoding cause decoding to fail, and
+* any top-level transaction elements including `TxBody`, `AuthInfo`, public keys, and messages which
+ have unknown fields set cause the transaction to be rejected
+ (this ensures that ADR 020 unknown field filtering is properly applied)
+
+For each supported signature algorithm,
+there should also be unit tests to ensure that signatures must be encoded canonically
+or get rejected.
+
+## References
+
+* [ADR 027: Deterministic Protobuf Serialization](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-027-deterministic-protobuf-serialization.md)
+* [ADR 020](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-020-protobuf-transaction-encoding.md#unknown-field-filtering)
+* [`aminojson.proto`](https://github.com/cosmos/cosmos-sdk/blob/v0.50/x/tx/signing/aminojson/internal/aminojsonpb/aminojson.proto)
+* [`tx.proto`](https://github.com/cosmos/cosmos-sdk/blob/v0.50/proto/cosmos/tx/v1beta1/tx.proto)
diff --git a/sdk/next/build/architecture/adr-template.mdx b/sdk/next/build/architecture/adr-template.mdx
new file mode 100644
index 000000000..1ce9549f1
--- /dev/null
+++ b/sdk/next/build/architecture/adr-template.mdx
@@ -0,0 +1,82 @@
+## Changelog
+
+* `{date}`: `{changelog}`
+
+## Status
+
+{DRAFT | PROPOSED} Not Implemented
+
+> Please have a look at the [PROCESS](/sdk/v0.50/build/rfc/PROCESS#adr-status) page.
+> Use DRAFT if the ADR is in a draft stage (draft PR) or PROPOSED if it's in review.
+
+## Abstract
+
+> "If you can't explain it simply, you don't understand it well enough." Provide
+> a simplified and layman-accessible explanation of the ADR.
+> A short (\~200 word) description of the issue being addressed.
+
+## Context
+
+> This section describes the forces at play, including technological, political,
+> social, and project local. These forces are probably in tension, and should be
+> called out as such. The language in this section is value-neutral. It is simply
+> describing facts. It should clearly explain the problem and motivation that the
+> proposal aims to resolve.
+
+`{context body}`
+
+## Alternatives
+
+> This section describes alternative designs to the chosen design. This section
+> is important and if an ADR does not have any alternatives then it should be
+> considered that the ADR was not thought through.
+
+## Decision
+
+> This section describes our response to these forces. It is stated in full
+> sentences, with active voice. "We will ..."
+> `{decision body}`
+
+## Consequences
+
+> This section describes the resulting context, after applying the decision. All
+> consequences should be listed here, not just the "positive" ones. A particular
+> decision may have positive, negative, and neutral consequences, but all of them
+> affect the team and project in the future.
+
+### Backwards Compatibility
+
+> All ADRs that introduce backwards incompatibilities must include a section
+> describing these incompatibilities and their severity. The ADR must explain
+> how the author proposes to deal with these incompatibilities. ADR submissions
+> without a sufficient backwards compatibility treatise may be rejected outright.
+
+### Positive
+
+> `{positive consequences}`
+
+### Negative
+
+> `{negative consequences}`
+
+### Neutral
+
+> `{neutral consequences}`
+
+## Further Discussions
+
+> While an ADR is in the DRAFT or PROPOSED stage, this section should contain a
+> summary of issues to be solved in future iterations (usually referencing comments
+> from a pull-request discussion).
+>
+> Later, this section can optionally list ideas or improvements the author or
+> reviewers found during the analysis of this ADR.
+
+## Test Cases \[optional]
+
+Test cases for an implementation are mandatory for ADRs that are affecting consensus
+changes. Other ADRs can choose to include links to test cases if applicable.
+
+## References
+
+* `{reference link}`
diff --git a/sdk/next/build/building-apps/app-go-di.mdx b/sdk/next/build/building-apps/app-go-di.mdx
new file mode 100644
index 000000000..ecc0a33da
--- /dev/null
+++ b/sdk/next/build/building-apps/app-go-di.mdx
@@ -0,0 +1,3321 @@
+---
+title: Overview of app_di.go
+---
+
+
+**Synopsis**
+
+The Cosmos SDK allows much easier wiring of an `app.go` thanks to [runtime](/sdk/v0.53/build/building-apps/runtime) and app wiring.
+Learn more about the rationale of App Wiring in [ADR-057](/sdk/v0.53/build/architecture/adr-057-app-wiring).
+
+
+
+
+**Prerequisite Readings**
+
+* [What is `runtime`?](/sdk/v0.53/build/building-apps/runtime)
+* [Depinject documentation](/sdk/v0.53/build/packages/depinject)
+* [Modules depinject-ready](/sdk/v0.53/build/building-modules/depinject)
+* [ADR 057: App Wiring](/sdk/v0.53/build/architecture/adr-057-app-wiring)
+
+
+
+This section is intended to provide an overview of the `SimApp` `app_di.go` file with App Wiring.
+
+## `app_config.go`
+
+The `app_config.go` file is the single place to configure all modules parameters.
+
+1. Create the `AppConfig` variable:
+
+ ```go expandable
+ package simapp
+
+ import (
+
+ "time"
+ "google.golang.org/protobuf/types/known/durationpb"
+
+ runtimev1alpha1 "cosmossdk.io/api/cosmos/app/runtime/v1alpha1"
+ appv1alpha1 "cosmossdk.io/api/cosmos/app/v1alpha1"
+ authmodulev1 "cosmossdk.io/api/cosmos/auth/module/v1"
+ authzmodulev1 "cosmossdk.io/api/cosmos/authz/module/v1"
+ bankmodulev1 "cosmossdk.io/api/cosmos/bank/module/v1"
+ circuitmodulev1 "cosmossdk.io/api/cosmos/circuit/module/v1"
+ consensusmodulev1 "cosmossdk.io/api/cosmos/consensus/module/v1"
+ distrmodulev1 "cosmossdk.io/api/cosmos/distribution/module/v1"
+ epochsmodulev1 "cosmossdk.io/api/cosmos/epochs/module/v1"
+ evidencemodulev1 "cosmossdk.io/api/cosmos/evidence/module/v1"
+ feegrantmodulev1 "cosmossdk.io/api/cosmos/feegrant/module/v1"
+ genutilmodulev1 "cosmossdk.io/api/cosmos/genutil/module/v1"
+ govmodulev1 "cosmossdk.io/api/cosmos/gov/module/v1"
+ groupmodulev1 "cosmossdk.io/api/cosmos/group/module/v1"
+ mintmodulev1 "cosmossdk.io/api/cosmos/mint/module/v1"
+ nftmodulev1 "cosmossdk.io/api/cosmos/nft/module/v1"
+ protocolpoolmodulev1 "cosmossdk.io/api/cosmos/protocolpool/module/v1"
+ slashingmodulev1 "cosmossdk.io/api/cosmos/slashing/module/v1"
+ stakingmodulev1 "cosmossdk.io/api/cosmos/staking/module/v1"
+ txconfigv1 "cosmossdk.io/api/cosmos/tx/config/v1"
+ upgrademodulev1 "cosmossdk.io/api/cosmos/upgrade/module/v1"
+ vestingmodulev1 "cosmossdk.io/api/cosmos/vesting/module/v1"
+ "cosmossdk.io/core/appconfig"
+ "cosmossdk.io/depinject"
+ _ "cosmossdk.io/x/circuit" // import for side-effects
+ circuittypes "cosmossdk.io/x/circuit/types"
+ _ "cosmossdk.io/x/evidence" // import for side-effects
+ evidencetypes "cosmossdk.io/x/evidence/types"
+ "cosmossdk.io/x/feegrant"
+ _ "cosmossdk.io/x/feegrant/module" // import for side-effects
+ "cosmossdk.io/x/nft"
+ _ "cosmossdk.io/x/nft/module" // import for side-effects
+ _ "cosmossdk.io/x/upgrade" // import for side-effects
+ upgradetypes "cosmossdk.io/x/upgrade/types"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ _ "github.com/cosmos/cosmos-sdk/x/auth/tx/config" // import for side-effects
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ _ "github.com/cosmos/cosmos-sdk/x/auth/vesting" // import for side-effects
+ vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
+ "github.com/cosmos/cosmos-sdk/x/authz"
+ _ "github.com/cosmos/cosmos-sdk/x/authz/module" // import for side-effects
+ _ "github.com/cosmos/cosmos-sdk/x/bank" // import for side-effects
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ _ "github.com/cosmos/cosmos-sdk/x/consensus" // import for side-effects
+ consensustypes "github.com/cosmos/cosmos-sdk/x/consensus/types"
+ _ "github.com/cosmos/cosmos-sdk/x/distribution" // import for side-effects
+ distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
+ _ "github.com/cosmos/cosmos-sdk/x/epochs" // import for side-effects
+ epochstypes "github.com/cosmos/cosmos-sdk/x/epochs/types"
+ "github.com/cosmos/cosmos-sdk/x/genutil"
+ genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
+ "github.com/cosmos/cosmos-sdk/x/gov"
+ govclient "github.com/cosmos/cosmos-sdk/x/gov/client"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ "github.com/cosmos/cosmos-sdk/x/group"
+ _ "github.com/cosmos/cosmos-sdk/x/group/module" // import for side-effects
+ _ "github.com/cosmos/cosmos-sdk/x/mint" // import for side-effects
+ minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
+ _ "github.com/cosmos/cosmos-sdk/x/protocolpool" // import for side-effects
+ protocolpooltypes "github.com/cosmos/cosmos-sdk/x/protocolpool/types"
+ _ "github.com/cosmos/cosmos-sdk/x/slashing" // import for side-effects
+ slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types"
+ _ "github.com/cosmos/cosmos-sdk/x/staking" // import for side-effects
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+ )
+
+ var (
+ // module account permissions
+ moduleAccPerms = []*authmodulev1.ModuleAccountPermission{
+ {
+ Account: authtypes.FeeCollectorName
+ },
+ {
+ Account: distrtypes.ModuleName
+ },
+ {
+ Account: minttypes.ModuleName,
+ Permissions: []string{
+ authtypes.Minter
+ }},
+ {
+ Account: stakingtypes.BondedPoolName,
+ Permissions: []string{
+ authtypes.Burner, stakingtypes.ModuleName
+ }},
+ {
+ Account: stakingtypes.NotBondedPoolName,
+ Permissions: []string{
+ authtypes.Burner, stakingtypes.ModuleName
+ }},
+ {
+ Account: govtypes.ModuleName,
+ Permissions: []string{
+ authtypes.Burner
+ }},
+ {
+ Account: nft.ModuleName
+ },
+ {
+ Account: protocolpooltypes.ModuleName
+ },
+ {
+ Account: protocolpooltypes.ProtocolPoolEscrowAccount
+ },
+ }
+
+ // blocked account addresses
+ blockAccAddrs = []string{
+ authtypes.FeeCollectorName,
+ distrtypes.ModuleName,
+ minttypes.ModuleName,
+ stakingtypes.BondedPoolName,
+ stakingtypes.NotBondedPoolName,
+ nft.ModuleName,
+ // We allow the following module accounts to receive funds:
+ // govtypes.ModuleName
+ }
+
+ ModuleConfig = []*appv1alpha1.ModuleConfig{
+ {
+ Name: runtime.ModuleName,
+ Config: appconfig.WrapAny(&runtimev1alpha1.Module{
+ AppName: "SimApp",
+ // NOTE: upgrade module is required to be prioritized
+ PreBlockers: []string{
+ upgradetypes.ModuleName,
+ authtypes.ModuleName,
+ },
+ // During begin block slashing happens after distr.BeginBlocker so that
+ // there is nothing left over in the validator fee pool, so as to keep the
+ // CanWithdrawInvariant invariant.
+ // NOTE: staking module is required if HistoricalEntries param > 0
+ BeginBlockers: []string{
+ minttypes.ModuleName,
+ distrtypes.ModuleName,
+ protocolpooltypes.ModuleName,
+ slashingtypes.ModuleName,
+ evidencetypes.ModuleName,
+ stakingtypes.ModuleName,
+ authz.ModuleName,
+ epochstypes.ModuleName,
+ },
+ EndBlockers: []string{
+ govtypes.ModuleName,
+ stakingtypes.ModuleName,
+ feegrant.ModuleName,
+ group.ModuleName,
+ protocolpooltypes.ModuleName,
+ },
+ OverrideStoreKeys: []*runtimev1alpha1.StoreKeyConfig{
+ {
+ ModuleName: authtypes.ModuleName,
+ KvStoreKey: "acc",
+ },
+ },
+ // NOTE: The genutils module must occur after staking so that pools are
+ // properly initialized with tokens from genesis accounts.
+ // NOTE: The genutils module must also occur after auth so that it can access the params from auth.
+ InitGenesis: []string{
+ authtypes.ModuleName,
+ banktypes.ModuleName,
+ distrtypes.ModuleName,
+ stakingtypes.ModuleName,
+ slashingtypes.ModuleName,
+ govtypes.ModuleName,
+ minttypes.ModuleName,
+ genutiltypes.ModuleName,
+ evidencetypes.ModuleName,
+ authz.ModuleName,
+ feegrant.ModuleName,
+ nft.ModuleName,
+ group.ModuleName,
+ upgradetypes.ModuleName,
+ vestingtypes.ModuleName,
+ circuittypes.ModuleName,
+ epochstypes.ModuleName,
+ protocolpooltypes.ModuleName,
+ },
+ // When ExportGenesis is not specified, the export genesis module order
+ // is equal to the init genesis order
+ ExportGenesis: []string{
+ consensustypes.ModuleName,
+ authtypes.ModuleName,
+ protocolpooltypes.ModuleName, // Must be exported before bank
+ banktypes.ModuleName,
+ distrtypes.ModuleName,
+ stakingtypes.ModuleName,
+ slashingtypes.ModuleName,
+ govtypes.ModuleName,
+ minttypes.ModuleName,
+ genutiltypes.ModuleName,
+ evidencetypes.ModuleName,
+ authz.ModuleName,
+ feegrant.ModuleName,
+ nft.ModuleName,
+ group.ModuleName,
+ upgradetypes.ModuleName,
+ vestingtypes.ModuleName,
+ circuittypes.ModuleName,
+ epochstypes.ModuleName,
+ },
+ // Uncomment if you want to set a custom migration order here.
+ // OrderMigrations: []string{
+ },
+ }),
+ },
+ {
+ Name: authtypes.ModuleName,
+ Config: appconfig.WrapAny(&authmodulev1.Module{
+ Bech32Prefix: "cosmos",
+ ModuleAccountPermissions: moduleAccPerms,
+ // By default modules authority is the governance module. This is configurable with the following:
+ // Authority: "group", // A custom module authority can be set using a module name
+ // Authority: "cosmos1cwwv22j5ca08ggdv9c2uky355k908694z577tv", // or a specific address
+ }),
+ },
+ {
+ Name: vestingtypes.ModuleName,
+ Config: appconfig.WrapAny(&vestingmodulev1.Module{
+ }),
+ },
+ {
+ Name: banktypes.ModuleName,
+ Config: appconfig.WrapAny(&bankmodulev1.Module{
+ BlockedModuleAccountsOverride: blockAccAddrs,
+ }),
+ },
+ {
+ Name: stakingtypes.ModuleName,
+ Config: appconfig.WrapAny(&stakingmodulev1.Module{
+ // NOTE: specifying a prefix is only necessary when using bech32 addresses
+            // If not specified, the auth Bech32Prefix appended with "valoper" and "valcons" is used by default
+ Bech32PrefixValidator: "cosmosvaloper",
+ Bech32PrefixConsensus: "cosmosvalcons",
+ }),
+ },
+ {
+ Name: slashingtypes.ModuleName,
+ Config: appconfig.WrapAny(&slashingmodulev1.Module{
+ }),
+ },
+ {
+ Name: "tx",
+ Config: appconfig.WrapAny(&txconfigv1.Config{
+ SkipAnteHandler: true, // Enable this to skip the default antehandlers and set custom ante handlers.
+ }),
+ },
+ {
+ Name: genutiltypes.ModuleName,
+ Config: appconfig.WrapAny(&genutilmodulev1.Module{
+ }),
+ },
+ {
+ Name: authz.ModuleName,
+ Config: appconfig.WrapAny(&authzmodulev1.Module{
+ }),
+ },
+ {
+ Name: upgradetypes.ModuleName,
+ Config: appconfig.WrapAny(&upgrademodulev1.Module{
+ }),
+ },
+ {
+ Name: distrtypes.ModuleName,
+ Config: appconfig.WrapAny(&distrmodulev1.Module{
+ }),
+ },
+ {
+ Name: evidencetypes.ModuleName,
+ Config: appconfig.WrapAny(&evidencemodulev1.Module{
+ }),
+ },
+ {
+ Name: minttypes.ModuleName,
+ Config: appconfig.WrapAny(&mintmodulev1.Module{
+ }),
+ },
+ {
+ Name: group.ModuleName,
+ Config: appconfig.WrapAny(&groupmodulev1.Module{
+ MaxExecutionPeriod: durationpb.New(time.Second * 1209600),
+ MaxMetadataLen: 255,
+ }),
+ },
+ {
+ Name: nft.ModuleName,
+ Config: appconfig.WrapAny(&nftmodulev1.Module{
+ }),
+ },
+ {
+ Name: feegrant.ModuleName,
+ Config: appconfig.WrapAny(&feegrantmodulev1.Module{
+ }),
+ },
+ {
+ Name: govtypes.ModuleName,
+ Config: appconfig.WrapAny(&govmodulev1.Module{
+ }),
+ },
+ {
+ Name: consensustypes.ModuleName,
+ Config: appconfig.WrapAny(&consensusmodulev1.Module{
+ }),
+ },
+ {
+ Name: circuittypes.ModuleName,
+ Config: appconfig.WrapAny(&circuitmodulev1.Module{
+ }),
+ },
+ {
+ Name: epochstypes.ModuleName,
+ Config: appconfig.WrapAny(&epochsmodulev1.Module{
+ }),
+ },
+ {
+ Name: protocolpooltypes.ModuleName,
+ Config: appconfig.WrapAny(&protocolpoolmodulev1.Module{
+ }),
+ },
+ }
+
+ // AppConfig is application configuration (used by depinject)
+
+ AppConfig = depinject.Configs(appconfig.Compose(&appv1alpha1.Config{
+ Modules: ModuleConfig,
+ }),
+ depinject.Supply(
+ // supply custom module basics
+ map[string]module.AppModuleBasic{
+ genutiltypes.ModuleName: genutil.NewAppModuleBasic(genutiltypes.DefaultMessageValidator),
+ govtypes.ModuleName: gov.NewAppModuleBasic(
+ []govclient.ProposalHandler{
+ },
+ ),
+ },
+ ),
+ )
+ )
+ ```
+
+ Where the `appConfig` combines the [runtime](/sdk/v0.53/build/building-apps/runtime) configuration and the (extra) modules configuration.
+
+ ```go expandable
+ //go:build !app_v1
+
+ package simapp
+
+ import (
+
+ "io"
+
+ dbm "github.com/cosmos/cosmos-db"
+
+ clienthelpers "cosmossdk.io/client/v2/helpers"
+ "cosmossdk.io/depinject"
+ "cosmossdk.io/log"
+ storetypes "cosmossdk.io/store/types"
+ circuitkeeper "cosmossdk.io/x/circuit/keeper"
+ evidencekeeper "cosmossdk.io/x/evidence/keeper"
+ feegrantkeeper "cosmossdk.io/x/feegrant/keeper"
+ nftkeeper "cosmossdk.io/x/nft/keeper"
+ upgradekeeper "cosmossdk.io/x/upgrade/keeper"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ "github.com/cosmos/cosmos-sdk/server"
+ "github.com/cosmos/cosmos-sdk/server/api"
+ "github.com/cosmos/cosmos-sdk/server/config"
+ servertypes "github.com/cosmos/cosmos-sdk/server/types"
+ testdata_pulsar "github.com/cosmos/cosmos-sdk/testutil/testdata/testpb"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ "github.com/cosmos/cosmos-sdk/x/auth"
+ "github.com/cosmos/cosmos-sdk/x/auth/ante"
+ authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
+ authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ authzkeeper "github.com/cosmos/cosmos-sdk/x/authz/keeper"
+ bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
+ consensuskeeper "github.com/cosmos/cosmos-sdk/x/consensus/keeper"
+ distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper"
+ epochskeeper "github.com/cosmos/cosmos-sdk/x/epochs/keeper"
+ govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper"
+ groupkeeper "github.com/cosmos/cosmos-sdk/x/group/keeper"
+ mintkeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper"
+ protocolpoolkeeper "github.com/cosmos/cosmos-sdk/x/protocolpool/keeper"
+ slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper"
+ stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper"
+ )
+
+ // DefaultNodeHome default home directories for the application daemon
+ var DefaultNodeHome string
+
+ var (
+ _ runtime.AppI = (*SimApp)(nil)
+ _ servertypes.Application = (*SimApp)(nil)
+ )
+
+ // SimApp extends an ABCI application, but with most of its parameters exported.
+ // They are exported for convenience in creating helper functions, as object
+ // capabilities aren't needed for testing.
+ type SimApp struct {
+ *runtime.App
+ legacyAmino *codec.LegacyAmino
+ appCodec codec.Codec
+ txConfig client.TxConfig
+ interfaceRegistry codectypes.InterfaceRegistry
+
+ // essential keepers
+ AccountKeeper authkeeper.AccountKeeper
+ BankKeeper bankkeeper.BaseKeeper
+ StakingKeeper *stakingkeeper.Keeper
+ SlashingKeeper slashingkeeper.Keeper
+ MintKeeper mintkeeper.Keeper
+ DistrKeeper distrkeeper.Keeper
+ GovKeeper *govkeeper.Keeper
+ UpgradeKeeper *upgradekeeper.Keeper
+ EvidenceKeeper evidencekeeper.Keeper
+ ConsensusParamsKeeper consensuskeeper.Keeper
+ CircuitKeeper circuitkeeper.Keeper
+
+ // supplementary keepers
+ FeeGrantKeeper feegrantkeeper.Keeper
+ GroupKeeper groupkeeper.Keeper
+ AuthzKeeper authzkeeper.Keeper
+ NFTKeeper nftkeeper.Keeper
+ EpochsKeeper epochskeeper.Keeper
+ ProtocolPoolKeeper protocolpoolkeeper.Keeper
+
+ // simulation manager
+ sm *module.SimulationManager
+ }
+
+ func init() {
+ var err error
+ DefaultNodeHome, err = clienthelpers.GetNodeHomeDirectory(".simapp")
+ if err != nil {
+ panic(err)
+ }
+ }
+
+ // NewSimApp returns a reference to an initialized SimApp.
+ func NewSimApp(
+ logger log.Logger,
+ db dbm.DB,
+ traceStore io.Writer,
+ loadLatest bool,
+ appOpts servertypes.AppOptions,
+ baseAppOptions ...func(*baseapp.BaseApp),
+ ) *SimApp {
+ var (
+ app = &SimApp{
+ }
+
+ appBuilder *runtime.AppBuilder
+
+ // merge the AppConfig and other configuration in one config
+ appConfig = depinject.Configs(
+ AppConfig,
+ depinject.Supply(
+ // supply the application options
+ appOpts,
+ // supply the logger
+ logger,
+
+ // ADVANCED CONFIGURATION
+
+ //
+ // AUTH
+ //
+ // For providing a custom function required in auth to generate custom account types
+ // add it below. By default the auth module uses simulation.RandomGenesisAccounts.
+ //
+ // authtypes.RandomGenesisAccountsFn(simulation.RandomGenesisAccounts),
+ //
+ // For providing a custom a base account type add it below.
+ // By default the auth module uses authtypes.ProtoBaseAccount().
+ //
+ // func() sdk.AccountI {
+ //     return authtypes.ProtoBaseAccount()
+ // },
+ //
+ // For providing a different address codec, add it below.
+ // By default the auth module uses a Bech32 address codec,
+ // with the prefix defined in the auth module configuration.
+ //
+ // func() address.Codec {
+ //     return <- custom address codec type ->
+ // }
+
+ //
+ // STAKING
+ //
+ // For providing a different validator and consensus address codec, add it below.
+ // By default the staking module uses the bech32 prefix provided in the auth config,
+ // and appends "valoper" and "valcons" for validator and consensus addresses respectively.
+ // When providing a custom address codec in auth, custom address codecs must be provided here as well.
+ //
+ // func() runtime.ValidatorAddressCodec {
+ //     return <- custom validator address codec type ->
+ // }
+ // func() runtime.ConsensusAddressCodec {
+ //     return <- custom consensus address codec type ->
+ // }
+
+ //
+ // MINT
+ //
+
+ // For providing a custom inflation function for x/mint add here your
+ // custom function that implements the minttypes.InflationCalculationFn
+ // interface.
+ ),
+ )
+ )
+ if err := depinject.Inject(appConfig,
+ &appBuilder,
+ &app.appCodec,
+ &app.legacyAmino,
+ &app.txConfig,
+ &app.interfaceRegistry,
+ &app.AccountKeeper,
+ &app.BankKeeper,
+ &app.StakingKeeper,
+ &app.SlashingKeeper,
+ &app.MintKeeper,
+ &app.DistrKeeper,
+ &app.GovKeeper,
+ &app.UpgradeKeeper,
+ &app.AuthzKeeper,
+ &app.EvidenceKeeper,
+ &app.FeeGrantKeeper,
+ &app.GroupKeeper,
+ &app.NFTKeeper,
+ &app.ConsensusParamsKeeper,
+ &app.CircuitKeeper,
+ &app.EpochsKeeper,
+ &app.ProtocolPoolKeeper,
+ ); err != nil {
+ panic(err)
+ }
+
+ // Below we could construct and set an application specific mempool and
+ // ABCI 1.0 PrepareProposal and ProcessProposal handlers. These defaults are
+ // already set in the SDK's BaseApp, this shows an example of how to override
+ // them.
+ //
+ // Example:
+ //
+ // app.App = appBuilder.Build(...)
+ // nonceMempool := mempool.NewSenderNonceMempool()
+ // abciPropHandler := NewDefaultProposalHandler(nonceMempool, app.App.BaseApp)
+ //
+ // app.App.BaseApp.SetMempool(nonceMempool)
+ // app.App.BaseApp.SetPrepareProposal(abciPropHandler.PrepareProposalHandler())
+ // app.App.BaseApp.SetProcessProposal(abciPropHandler.ProcessProposalHandler())
+ //
+ // Alternatively, you can construct BaseApp options, append those to
+ // baseAppOptions and pass them to the appBuilder.
+ //
+ // Example:
+ //
+ // prepareOpt = func(app *baseapp.BaseApp) {
+ // abciPropHandler := baseapp.NewDefaultProposalHandler(nonceMempool, app)
+ // app.SetPrepareProposal(abciPropHandler.PrepareProposalHandler())
+ //
+ // }
+ // baseAppOptions = append(baseAppOptions, prepareOpt)
+
+ // create and set dummy vote extension handler
+ voteExtOp := func(bApp *baseapp.BaseApp) {
+ voteExtHandler := NewVoteExtensionHandler()
+
+ voteExtHandler.SetHandlers(bApp)
+ }
+
+ baseAppOptions = append(baseAppOptions, voteExtOp, baseapp.SetOptimisticExecution())
+
+ app.App = appBuilder.Build(db, traceStore, baseAppOptions...)
+
+ // register streaming services
+ if err := app.RegisterStreamingServices(appOpts, app.kvStoreKeys()); err != nil {
+ panic(err)
+ }
+
+ /**** Module Options ****/
+
+ // RegisterUpgradeHandlers is used for registering any on-chain upgrades.
+ app.RegisterUpgradeHandlers()
+
+ // add test gRPC service for testing gRPC queries in isolation
+ testdata_pulsar.RegisterQueryServer(app.GRPCQueryRouter(), testdata_pulsar.QueryImpl{
+ })
+
+ // create the simulation manager and define the order of the modules for deterministic simulations
+ //
+ // NOTE: this is not required for apps that don't use the simulator for fuzz testing
+ // transactions
+ overrideModules := map[string]module.AppModuleSimulation{
+ authtypes.ModuleName: auth.NewAppModule(app.appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, nil),
+ }
+
+ app.sm = module.NewSimulationManagerFromAppModules(app.ModuleManager.Modules, overrideModules)
+
+ app.sm.RegisterStoreDecoders()
+
+ // A custom InitChainer can be set if extra pre-init-genesis logic is required.
+ // By default, when using app wiring enabled module, this is not required.
+ // For instance, the upgrade module will set automatically the module version map in its init genesis thanks to app wiring.
+ // However, when registering a module manually (i.e. that does not support app wiring), the module version map
+ // must be set manually as follow. The upgrade module will de-duplicate the module version map.
+ //
+ // app.SetInitChainer(func(ctx sdk.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) {
+ //     app.UpgradeKeeper.SetModuleVersionMap(ctx, app.ModuleManager.GetVersionMap())
+ //     return app.App.InitChainer(ctx, req)
+ // })
+
+ // set custom ante handler
+ app.setAnteHandler(app.txConfig)
+ if err := app.Load(loadLatest); err != nil {
+ panic(err)
+ }
+
+ return app
+ }
+
+ // setAnteHandler sets custom ante handlers.
+ // "x/auth/tx" pre-defined ante handlers have been disabled in app_config.
+ func (app *SimApp) setAnteHandler(txConfig client.TxConfig) {
+ anteHandler, err := NewAnteHandler(
+ HandlerOptions{
+ ante.HandlerOptions{
+ UnorderedNonceManager: app.AccountKeeper,
+ AccountKeeper: app.AccountKeeper,
+ BankKeeper: app.BankKeeper,
+ SignModeHandler: txConfig.SignModeHandler(),
+ FeegrantKeeper: app.FeeGrantKeeper,
+ SigGasConsumer: ante.DefaultSigVerificationGasConsumer,
+ },
+ &app.CircuitKeeper,
+ },
+ )
+ if err != nil {
+ panic(err)
+ }
+
+ // Set the AnteHandler for the app
+ app.SetAnteHandler(anteHandler)
+ }
+
+ // LegacyAmino returns SimApp's amino codec.
+ //
+ // NOTE: This is solely to be used for testing purposes as it may be desirable
+ // for modules to register their own custom testing types.
+ func (app *SimApp) LegacyAmino() *codec.LegacyAmino {
+ return app.legacyAmino
+ }
+
+ // AppCodec returns SimApp's app codec.
+ //
+ // NOTE: This is solely to be used for testing purposes as it may be desirable
+ // for modules to register their own custom testing types.
+ func (app *SimApp) AppCodec() codec.Codec {
+ return app.appCodec
+ }
+
+ // InterfaceRegistry returns SimApp's InterfaceRegistry.
+ func (app *SimApp) InterfaceRegistry() codectypes.InterfaceRegistry {
+ return app.interfaceRegistry
+ }
+
+ // TxConfig returns SimApp's TxConfig
+ func (app *SimApp) TxConfig() client.TxConfig {
+ return app.txConfig
+ }
+
+ // GetKey returns the KVStoreKey for the provided store key.
+ //
+ // NOTE: This is solely to be used for testing purposes.
+ func (app *SimApp) GetKey(storeKey string) *storetypes.KVStoreKey {
+ sk := app.UnsafeFindStoreKey(storeKey)
+
+ kvStoreKey, ok := sk.(*storetypes.KVStoreKey)
+ if !ok {
+ return nil
+ }
+
+ return kvStoreKey
+ }
+
+ func (app *SimApp) kvStoreKeys() map[string]*storetypes.KVStoreKey {
+ keys := make(map[string]*storetypes.KVStoreKey)
+ for _, k := range app.GetStoreKeys() {
+ if kv, ok := k.(*storetypes.KVStoreKey); ok {
+ keys[kv.Name()] = kv
+ }
+
+ }
+
+ return keys
+ }
+
+ // SimulationManager implements the SimulationApp interface
+ func (app *SimApp) SimulationManager() *module.SimulationManager {
+ return app.sm
+ }
+
+ // RegisterAPIRoutes registers all application module routes with the provided
+ // API server.
+ func (app *SimApp) RegisterAPIRoutes(apiSvr *api.Server, apiConfig config.APIConfig) {
+ app.App.RegisterAPIRoutes(apiSvr, apiConfig)
+ // register swagger API in app.go so that other applications can override easily
+ if err := server.RegisterSwaggerAPI(apiSvr.ClientCtx, apiSvr.Router, apiConfig.Swagger); err != nil {
+ panic(err)
+ }
+ }
+
+ // GetMaccPerms returns a copy of the module account permissions
+ //
+ // NOTE: This is solely to be used for testing purposes.
+ func GetMaccPerms() map[string][]string {
+ dup := make(map[string][]string)
+ for _, perms := range moduleAccPerms {
+ dup[perms.Account] = perms.Permissions
+ }
+
+ return dup
+ }
+
+ // BlockedAddresses returns all the app's blocked account addresses.
+ func BlockedAddresses() map[string]bool {
+ result := make(map[string]bool)
+ if len(blockAccAddrs) > 0 {
+ for _, addr := range blockAccAddrs {
+ result[addr] = true
+ }
+
+ } else {
+ for addr := range GetMaccPerms() {
+ result[addr] = true
+ }
+
+ }
+
+ return result
+ }
+ ```
+
+2. Configure the `runtime` module:
+
+ In this configuration, the order in which the modules are defined in PreBlockers, BeginBlockers, and EndBlockers is important.
+ They are named in the order they should be executed by the module manager.
+
+ ```go expandable
+ package simapp
+
+ import (
+
+ "time"
+ "google.golang.org/protobuf/types/known/durationpb"
+
+ runtimev1alpha1 "cosmossdk.io/api/cosmos/app/runtime/v1alpha1"
+ appv1alpha1 "cosmossdk.io/api/cosmos/app/v1alpha1"
+ authmodulev1 "cosmossdk.io/api/cosmos/auth/module/v1"
+ authzmodulev1 "cosmossdk.io/api/cosmos/authz/module/v1"
+ bankmodulev1 "cosmossdk.io/api/cosmos/bank/module/v1"
+ circuitmodulev1 "cosmossdk.io/api/cosmos/circuit/module/v1"
+ consensusmodulev1 "cosmossdk.io/api/cosmos/consensus/module/v1"
+ distrmodulev1 "cosmossdk.io/api/cosmos/distribution/module/v1"
+ epochsmodulev1 "cosmossdk.io/api/cosmos/epochs/module/v1"
+ evidencemodulev1 "cosmossdk.io/api/cosmos/evidence/module/v1"
+ feegrantmodulev1 "cosmossdk.io/api/cosmos/feegrant/module/v1"
+ genutilmodulev1 "cosmossdk.io/api/cosmos/genutil/module/v1"
+ govmodulev1 "cosmossdk.io/api/cosmos/gov/module/v1"
+ groupmodulev1 "cosmossdk.io/api/cosmos/group/module/v1"
+ mintmodulev1 "cosmossdk.io/api/cosmos/mint/module/v1"
+ nftmodulev1 "cosmossdk.io/api/cosmos/nft/module/v1"
+ protocolpoolmodulev1 "cosmossdk.io/api/cosmos/protocolpool/module/v1"
+ slashingmodulev1 "cosmossdk.io/api/cosmos/slashing/module/v1"
+ stakingmodulev1 "cosmossdk.io/api/cosmos/staking/module/v1"
+ txconfigv1 "cosmossdk.io/api/cosmos/tx/config/v1"
+ upgrademodulev1 "cosmossdk.io/api/cosmos/upgrade/module/v1"
+ vestingmodulev1 "cosmossdk.io/api/cosmos/vesting/module/v1"
+ "cosmossdk.io/core/appconfig"
+ "cosmossdk.io/depinject"
+ _ "cosmossdk.io/x/circuit" // import for side-effects
+ circuittypes "cosmossdk.io/x/circuit/types"
+ _ "cosmossdk.io/x/evidence" // import for side-effects
+ evidencetypes "cosmossdk.io/x/evidence/types"
+ "cosmossdk.io/x/feegrant"
+ _ "cosmossdk.io/x/feegrant/module" // import for side-effects
+ "cosmossdk.io/x/nft"
+ _ "cosmossdk.io/x/nft/module" // import for side-effects
+ _ "cosmossdk.io/x/upgrade" // import for side-effects
+ upgradetypes "cosmossdk.io/x/upgrade/types"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ _ "github.com/cosmos/cosmos-sdk/x/auth/tx/config" // import for side-effects
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ _ "github.com/cosmos/cosmos-sdk/x/auth/vesting" // import for side-effects
+ vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
+ "github.com/cosmos/cosmos-sdk/x/authz"
+ _ "github.com/cosmos/cosmos-sdk/x/authz/module" // import for side-effects
+ _ "github.com/cosmos/cosmos-sdk/x/bank" // import for side-effects
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ _ "github.com/cosmos/cosmos-sdk/x/consensus" // import for side-effects
+ consensustypes "github.com/cosmos/cosmos-sdk/x/consensus/types"
+ _ "github.com/cosmos/cosmos-sdk/x/distribution" // import for side-effects
+ distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
+ _ "github.com/cosmos/cosmos-sdk/x/epochs" // import for side-effects
+ epochstypes "github.com/cosmos/cosmos-sdk/x/epochs/types"
+ "github.com/cosmos/cosmos-sdk/x/genutil"
+ genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
+ "github.com/cosmos/cosmos-sdk/x/gov"
+ govclient "github.com/cosmos/cosmos-sdk/x/gov/client"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ "github.com/cosmos/cosmos-sdk/x/group"
+ _ "github.com/cosmos/cosmos-sdk/x/group/module" // import for side-effects
+ _ "github.com/cosmos/cosmos-sdk/x/mint" // import for side-effects
+ minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
+ _ "github.com/cosmos/cosmos-sdk/x/protocolpool" // import for side-effects
+ protocolpooltypes "github.com/cosmos/cosmos-sdk/x/protocolpool/types"
+ _ "github.com/cosmos/cosmos-sdk/x/slashing" // import for side-effects
+ slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types"
+ _ "github.com/cosmos/cosmos-sdk/x/staking" // import for side-effects
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+ )
+
+ var (
+ // module account permissions
+ moduleAccPerms = []*authmodulev1.ModuleAccountPermission{
+     {Account: authtypes.FeeCollectorName},
+     {Account: distrtypes.ModuleName},
+     {Account: minttypes.ModuleName, Permissions: []string{authtypes.Minter}},
+     {Account: stakingtypes.BondedPoolName, Permissions: []string{authtypes.Burner, stakingtypes.ModuleName}},
+     {Account: stakingtypes.NotBondedPoolName, Permissions: []string{authtypes.Burner, stakingtypes.ModuleName}},
+     {Account: govtypes.ModuleName, Permissions: []string{authtypes.Burner}},
+     {Account: nft.ModuleName},
+     {Account: protocolpooltypes.ModuleName},
+     {Account: protocolpooltypes.ProtocolPoolEscrowAccount},
+ }
+
+ // blocked account addresses
+ blockAccAddrs = []string{
+ authtypes.FeeCollectorName,
+ distrtypes.ModuleName,
+ minttypes.ModuleName,
+ stakingtypes.BondedPoolName,
+ stakingtypes.NotBondedPoolName,
+ nft.ModuleName,
+ // We allow the following module accounts to receive funds:
+ // govtypes.ModuleName
+ }
+
+ ModuleConfig = []*appv1alpha1.ModuleConfig{
+ {
+ Name: runtime.ModuleName,
+ Config: appconfig.WrapAny(&runtimev1alpha1.Module{
+ AppName: "SimApp",
+ // NOTE: upgrade module is required to be prioritized
+ PreBlockers: []string{
+ upgradetypes.ModuleName,
+ authtypes.ModuleName,
+ },
+ // During begin block slashing happens after distr.BeginBlocker so that
+ // there is nothing left over in the validator fee pool, so as to keep the
+ // CanWithdrawInvariant invariant.
+ // NOTE: staking module is required if HistoricalEntries param > 0
+ BeginBlockers: []string{
+ minttypes.ModuleName,
+ distrtypes.ModuleName,
+ protocolpooltypes.ModuleName,
+ slashingtypes.ModuleName,
+ evidencetypes.ModuleName,
+ stakingtypes.ModuleName,
+ authz.ModuleName,
+ epochstypes.ModuleName,
+ },
+ EndBlockers: []string{
+ govtypes.ModuleName,
+ stakingtypes.ModuleName,
+ feegrant.ModuleName,
+ group.ModuleName,
+ protocolpooltypes.ModuleName,
+ },
+ OverrideStoreKeys: []*runtimev1alpha1.StoreKeyConfig{
+ {
+ ModuleName: authtypes.ModuleName,
+ KvStoreKey: "acc",
+ },
+ },
+ // NOTE: The genutils module must occur after staking so that pools are
+ // properly initialized with tokens from genesis accounts.
+ // NOTE: The genutils module must also occur after auth so that it can access the params from auth.
+ InitGenesis: []string{
+ authtypes.ModuleName,
+ banktypes.ModuleName,
+ distrtypes.ModuleName,
+ stakingtypes.ModuleName,
+ slashingtypes.ModuleName,
+ govtypes.ModuleName,
+ minttypes.ModuleName,
+ genutiltypes.ModuleName,
+ evidencetypes.ModuleName,
+ authz.ModuleName,
+ feegrant.ModuleName,
+ nft.ModuleName,
+ group.ModuleName,
+ upgradetypes.ModuleName,
+ vestingtypes.ModuleName,
+ circuittypes.ModuleName,
+ epochstypes.ModuleName,
+ protocolpooltypes.ModuleName,
+ },
+ // When ExportGenesis is not specified, the export genesis module order
+ // is equal to the init genesis order
+ ExportGenesis: []string{
+ consensustypes.ModuleName,
+ authtypes.ModuleName,
+ protocolpooltypes.ModuleName, // Must be exported before bank
+ banktypes.ModuleName,
+ distrtypes.ModuleName,
+ stakingtypes.ModuleName,
+ slashingtypes.ModuleName,
+ govtypes.ModuleName,
+ minttypes.ModuleName,
+ genutiltypes.ModuleName,
+ evidencetypes.ModuleName,
+ authz.ModuleName,
+ feegrant.ModuleName,
+ nft.ModuleName,
+ group.ModuleName,
+ upgradetypes.ModuleName,
+ vestingtypes.ModuleName,
+ circuittypes.ModuleName,
+ epochstypes.ModuleName,
+ },
+ // Uncomment if you want to set a custom migration order here.
+ // OrderMigrations: []string{
+ },
+ }),
+ },
+ {
+ Name: authtypes.ModuleName,
+ Config: appconfig.WrapAny(&authmodulev1.Module{
+ Bech32Prefix: "cosmos",
+ ModuleAccountPermissions: moduleAccPerms,
+ // By default modules authority is the governance module. This is configurable with the following:
+ // Authority: "group", // A custom module authority can be set using a module name
+ // Authority: "cosmos1cwwv22j5ca08ggdv9c2uky355k908694z577tv", // or a specific address
+ }),
+ },
+ {
+ Name: vestingtypes.ModuleName,
+ Config: appconfig.WrapAny(&vestingmodulev1.Module{
+ }),
+ },
+ {
+ Name: banktypes.ModuleName,
+ Config: appconfig.WrapAny(&bankmodulev1.Module{
+ BlockedModuleAccountsOverride: blockAccAddrs,
+ }),
+ },
+ {
+ Name: stakingtypes.ModuleName,
+ Config: appconfig.WrapAny(&stakingmodulev1.Module{
+ // NOTE: specifying a prefix is only necessary when using bech32 addresses
+ // If not specified, the auth Bech32Prefix appended with "valoper" and "valcons" is used by default
+ Bech32PrefixValidator: "cosmosvaloper",
+ Bech32PrefixConsensus: "cosmosvalcons",
+ }),
+ },
+ {
+ Name: slashingtypes.ModuleName,
+ Config: appconfig.WrapAny(&slashingmodulev1.Module{
+ }),
+ },
+ {
+ Name: "tx",
+ Config: appconfig.WrapAny(&txconfigv1.Config{
+ SkipAnteHandler: true, // Enable this to skip the default antehandlers and set custom ante handlers.
+ }),
+ },
+ {
+ Name: genutiltypes.ModuleName,
+ Config: appconfig.WrapAny(&genutilmodulev1.Module{
+ }),
+ },
+ {
+ Name: authz.ModuleName,
+ Config: appconfig.WrapAny(&authzmodulev1.Module{
+ }),
+ },
+ {
+ Name: upgradetypes.ModuleName,
+ Config: appconfig.WrapAny(&upgrademodulev1.Module{
+ }),
+ },
+ {
+ Name: distrtypes.ModuleName,
+ Config: appconfig.WrapAny(&distrmodulev1.Module{
+ }),
+ },
+ {
+ Name: evidencetypes.ModuleName,
+ Config: appconfig.WrapAny(&evidencemodulev1.Module{
+ }),
+ },
+ {
+ Name: minttypes.ModuleName,
+ Config: appconfig.WrapAny(&mintmodulev1.Module{
+ }),
+ },
+ {
+ Name: group.ModuleName,
+ Config: appconfig.WrapAny(&groupmodulev1.Module{
+ MaxExecutionPeriod: durationpb.New(time.Second * 1209600),
+ MaxMetadataLen: 255,
+ }),
+ },
+ {
+ Name: nft.ModuleName,
+ Config: appconfig.WrapAny(&nftmodulev1.Module{
+ }),
+ },
+ {
+ Name: feegrant.ModuleName,
+ Config: appconfig.WrapAny(&feegrantmodulev1.Module{
+ }),
+ },
+ {
+ Name: govtypes.ModuleName,
+ Config: appconfig.WrapAny(&govmodulev1.Module{
+ }),
+ },
+ {
+ Name: consensustypes.ModuleName,
+ Config: appconfig.WrapAny(&consensusmodulev1.Module{
+ }),
+ },
+ {
+ Name: circuittypes.ModuleName,
+ Config: appconfig.WrapAny(&circuitmodulev1.Module{
+ }),
+ },
+ {
+ Name: epochstypes.ModuleName,
+ Config: appconfig.WrapAny(&epochsmodulev1.Module{
+ }),
+ },
+ {
+ Name: protocolpooltypes.ModuleName,
+ Config: appconfig.WrapAny(&protocolpoolmodulev1.Module{
+ }),
+ },
+ }
+
+ // AppConfig is application configuration (used by depinject)
+
+ AppConfig = depinject.Configs(appconfig.Compose(&appv1alpha1.Config{
+ Modules: ModuleConfig,
+ }),
+ depinject.Supply(
+ // supply custom module basics
+ map[string]module.AppModuleBasic{
+ genutiltypes.ModuleName: genutil.NewAppModuleBasic(genutiltypes.DefaultMessageValidator),
+ govtypes.ModuleName: gov.NewAppModuleBasic(
+ []govclient.ProposalHandler{
+ },
+ ),
+ },
+ ),
+ )
+ )
+ ```
+
+3. Wire the other modules:
+
+ Next to runtime, the other (depinject-enabled) modules are wired in the `AppConfig`:
+
+ ```go expandable
+ package simapp
+
+ import (
+
+ "time"
+ "google.golang.org/protobuf/types/known/durationpb"
+
+ runtimev1alpha1 "cosmossdk.io/api/cosmos/app/runtime/v1alpha1"
+ appv1alpha1 "cosmossdk.io/api/cosmos/app/v1alpha1"
+ authmodulev1 "cosmossdk.io/api/cosmos/auth/module/v1"
+ authzmodulev1 "cosmossdk.io/api/cosmos/authz/module/v1"
+ bankmodulev1 "cosmossdk.io/api/cosmos/bank/module/v1"
+ circuitmodulev1 "cosmossdk.io/api/cosmos/circuit/module/v1"
+ consensusmodulev1 "cosmossdk.io/api/cosmos/consensus/module/v1"
+ distrmodulev1 "cosmossdk.io/api/cosmos/distribution/module/v1"
+ epochsmodulev1 "cosmossdk.io/api/cosmos/epochs/module/v1"
+ evidencemodulev1 "cosmossdk.io/api/cosmos/evidence/module/v1"
+ feegrantmodulev1 "cosmossdk.io/api/cosmos/feegrant/module/v1"
+ genutilmodulev1 "cosmossdk.io/api/cosmos/genutil/module/v1"
+ govmodulev1 "cosmossdk.io/api/cosmos/gov/module/v1"
+ groupmodulev1 "cosmossdk.io/api/cosmos/group/module/v1"
+ mintmodulev1 "cosmossdk.io/api/cosmos/mint/module/v1"
+ nftmodulev1 "cosmossdk.io/api/cosmos/nft/module/v1"
+ protocolpoolmodulev1 "cosmossdk.io/api/cosmos/protocolpool/module/v1"
+ slashingmodulev1 "cosmossdk.io/api/cosmos/slashing/module/v1"
+ stakingmodulev1 "cosmossdk.io/api/cosmos/staking/module/v1"
+ txconfigv1 "cosmossdk.io/api/cosmos/tx/config/v1"
+ upgrademodulev1 "cosmossdk.io/api/cosmos/upgrade/module/v1"
+ vestingmodulev1 "cosmossdk.io/api/cosmos/vesting/module/v1"
+ "cosmossdk.io/core/appconfig"
+ "cosmossdk.io/depinject"
+ _ "cosmossdk.io/x/circuit" // import for side-effects
+ circuittypes "cosmossdk.io/x/circuit/types"
+ _ "cosmossdk.io/x/evidence" // import for side-effects
+ evidencetypes "cosmossdk.io/x/evidence/types"
+ "cosmossdk.io/x/feegrant"
+ _ "cosmossdk.io/x/feegrant/module" // import for side-effects
+ "cosmossdk.io/x/nft"
+ _ "cosmossdk.io/x/nft/module" // import for side-effects
+ _ "cosmossdk.io/x/upgrade" // import for side-effects
+ upgradetypes "cosmossdk.io/x/upgrade/types"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ _ "github.com/cosmos/cosmos-sdk/x/auth/tx/config" // import for side-effects
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ _ "github.com/cosmos/cosmos-sdk/x/auth/vesting" // import for side-effects
+ vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
+ "github.com/cosmos/cosmos-sdk/x/authz"
+ _ "github.com/cosmos/cosmos-sdk/x/authz/module" // import for side-effects
+ _ "github.com/cosmos/cosmos-sdk/x/bank" // import for side-effects
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ _ "github.com/cosmos/cosmos-sdk/x/consensus" // import for side-effects
+ consensustypes "github.com/cosmos/cosmos-sdk/x/consensus/types"
+ _ "github.com/cosmos/cosmos-sdk/x/distribution" // import for side-effects
+ distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
+ _ "github.com/cosmos/cosmos-sdk/x/epochs" // import for side-effects
+ epochstypes "github.com/cosmos/cosmos-sdk/x/epochs/types"
+ "github.com/cosmos/cosmos-sdk/x/genutil"
+ genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
+ "github.com/cosmos/cosmos-sdk/x/gov"
+ govclient "github.com/cosmos/cosmos-sdk/x/gov/client"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ "github.com/cosmos/cosmos-sdk/x/group"
+ _ "github.com/cosmos/cosmos-sdk/x/group/module" // import for side-effects
+ _ "github.com/cosmos/cosmos-sdk/x/mint" // import for side-effects
+ minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
+ _ "github.com/cosmos/cosmos-sdk/x/protocolpool" // import for side-effects
+ protocolpooltypes "github.com/cosmos/cosmos-sdk/x/protocolpool/types"
+ _ "github.com/cosmos/cosmos-sdk/x/slashing" // import for side-effects
+ slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types"
+ _ "github.com/cosmos/cosmos-sdk/x/staking" // import for side-effects
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+ )
+
+ var (
+ // module account permissions
+ moduleAccPerms = []*authmodulev1.ModuleAccountPermission{
+     {Account: authtypes.FeeCollectorName},
+     {Account: distrtypes.ModuleName},
+     {Account: minttypes.ModuleName, Permissions: []string{authtypes.Minter}},
+     {Account: stakingtypes.BondedPoolName, Permissions: []string{authtypes.Burner, stakingtypes.ModuleName}},
+     {Account: stakingtypes.NotBondedPoolName, Permissions: []string{authtypes.Burner, stakingtypes.ModuleName}},
+     {Account: govtypes.ModuleName, Permissions: []string{authtypes.Burner}},
+     {Account: nft.ModuleName},
+     {Account: protocolpooltypes.ModuleName},
+     {Account: protocolpooltypes.ProtocolPoolEscrowAccount},
+ }
+
+ // blocked account addresses
+ blockAccAddrs = []string{
+ authtypes.FeeCollectorName,
+ distrtypes.ModuleName,
+ minttypes.ModuleName,
+ stakingtypes.BondedPoolName,
+ stakingtypes.NotBondedPoolName,
+ nft.ModuleName,
+ // We allow the following module accounts to receive funds:
+ // govtypes.ModuleName
+ }
+
+ ModuleConfig = []*appv1alpha1.ModuleConfig{
+ {
+ Name: runtime.ModuleName,
+ Config: appconfig.WrapAny(&runtimev1alpha1.Module{
+ AppName: "SimApp",
+ // NOTE: upgrade module is required to be prioritized
+ PreBlockers: []string{
+ upgradetypes.ModuleName,
+ authtypes.ModuleName,
+ },
+ // During begin block slashing happens after distr.BeginBlocker so that
+ // there is nothing left over in the validator fee pool, so as to keep the
+ // CanWithdrawInvariant invariant.
+ // NOTE: staking module is required if HistoricalEntries param > 0
+ BeginBlockers: []string{
+ minttypes.ModuleName,
+ distrtypes.ModuleName,
+ protocolpooltypes.ModuleName,
+ slashingtypes.ModuleName,
+ evidencetypes.ModuleName,
+ stakingtypes.ModuleName,
+ authz.ModuleName,
+ epochstypes.ModuleName,
+ },
+ EndBlockers: []string{
+ govtypes.ModuleName,
+ stakingtypes.ModuleName,
+ feegrant.ModuleName,
+ group.ModuleName,
+ protocolpooltypes.ModuleName,
+ },
+ OverrideStoreKeys: []*runtimev1alpha1.StoreKeyConfig{
+ {
+ ModuleName: authtypes.ModuleName,
+ KvStoreKey: "acc",
+ },
+ },
+ // NOTE: The genutils module must occur after staking so that pools are
+ // properly initialized with tokens from genesis accounts.
+ // NOTE: The genutils module must also occur after auth so that it can access the params from auth.
+ InitGenesis: []string{
+ authtypes.ModuleName,
+ banktypes.ModuleName,
+ distrtypes.ModuleName,
+ stakingtypes.ModuleName,
+ slashingtypes.ModuleName,
+ govtypes.ModuleName,
+ minttypes.ModuleName,
+ genutiltypes.ModuleName,
+ evidencetypes.ModuleName,
+ authz.ModuleName,
+ feegrant.ModuleName,
+ nft.ModuleName,
+ group.ModuleName,
+ upgradetypes.ModuleName,
+ vestingtypes.ModuleName,
+ circuittypes.ModuleName,
+ epochstypes.ModuleName,
+ protocolpooltypes.ModuleName,
+ },
+ // When ExportGenesis is not specified, the export genesis module order
+ // is equal to the init genesis order
+ ExportGenesis: []string{
+ consensustypes.ModuleName,
+ authtypes.ModuleName,
+ protocolpooltypes.ModuleName, // Must be exported before bank
+ banktypes.ModuleName,
+ distrtypes.ModuleName,
+ stakingtypes.ModuleName,
+ slashingtypes.ModuleName,
+ govtypes.ModuleName,
+ minttypes.ModuleName,
+ genutiltypes.ModuleName,
+ evidencetypes.ModuleName,
+ authz.ModuleName,
+ feegrant.ModuleName,
+ nft.ModuleName,
+ group.ModuleName,
+ upgradetypes.ModuleName,
+ vestingtypes.ModuleName,
+ circuittypes.ModuleName,
+ epochstypes.ModuleName,
+ },
+ // Uncomment if you want to set a custom migration order here.
+ // OrderMigrations: []string{
+ },
+ }),
+ },
+ {
+ Name: authtypes.ModuleName,
+ Config: appconfig.WrapAny(&authmodulev1.Module{
+ Bech32Prefix: "cosmos",
+ ModuleAccountPermissions: moduleAccPerms,
+ // By default modules authority is the governance module. This is configurable with the following:
+ // Authority: "group", // A custom module authority can be set using a module name
+ // Authority: "cosmos1cwwv22j5ca08ggdv9c2uky355k908694z577tv", // or a specific address
+ }),
+ },
+ {
+ Name: vestingtypes.ModuleName,
+ Config: appconfig.WrapAny(&vestingmodulev1.Module{
+ }),
+ },
+ {
+ Name: banktypes.ModuleName,
+ Config: appconfig.WrapAny(&bankmodulev1.Module{
+ BlockedModuleAccountsOverride: blockAccAddrs,
+ }),
+ },
+ {
+ Name: stakingtypes.ModuleName,
+ Config: appconfig.WrapAny(&stakingmodulev1.Module{
+ // NOTE: specifying a prefix is only necessary when using bech32 addresses
+ // If not specified, the auth Bech32Prefix appended with "valoper" and "valcons" is used by default
+ Bech32PrefixValidator: "cosmosvaloper",
+ Bech32PrefixConsensus: "cosmosvalcons",
+ }),
+ },
+ {
+ Name: slashingtypes.ModuleName,
+ Config: appconfig.WrapAny(&slashingmodulev1.Module{
+ }),
+ },
+ {
+ Name: "tx",
+ Config: appconfig.WrapAny(&txconfigv1.Config{
+ SkipAnteHandler: true, // Enable this to skip the default antehandlers and set custom ante handlers.
+ }),
+ },
+ {
+ Name: genutiltypes.ModuleName,
+ Config: appconfig.WrapAny(&genutilmodulev1.Module{
+ }),
+ },
+ {
+ Name: authz.ModuleName,
+ Config: appconfig.WrapAny(&authzmodulev1.Module{
+ }),
+ },
+ {
+ Name: upgradetypes.ModuleName,
+ Config: appconfig.WrapAny(&upgrademodulev1.Module{
+ }),
+ },
+ {
+ Name: distrtypes.ModuleName,
+ Config: appconfig.WrapAny(&distrmodulev1.Module{
+ }),
+ },
+ {
+ Name: evidencetypes.ModuleName,
+ Config: appconfig.WrapAny(&evidencemodulev1.Module{
+ }),
+ },
+ {
+ Name: minttypes.ModuleName,
+ Config: appconfig.WrapAny(&mintmodulev1.Module{
+ }),
+ },
+ {
+ Name: group.ModuleName,
+ Config: appconfig.WrapAny(&groupmodulev1.Module{
+ MaxExecutionPeriod: durationpb.New(time.Second * 1209600),
+ MaxMetadataLen: 255,
+ }),
+ },
+ {
+ Name: nft.ModuleName,
+ Config: appconfig.WrapAny(&nftmodulev1.Module{
+ }),
+ },
+ {
+ Name: feegrant.ModuleName,
+ Config: appconfig.WrapAny(&feegrantmodulev1.Module{
+ }),
+ },
+ {
+ Name: govtypes.ModuleName,
+ Config: appconfig.WrapAny(&govmodulev1.Module{
+ }),
+ },
+ {
+ Name: consensustypes.ModuleName,
+ Config: appconfig.WrapAny(&consensusmodulev1.Module{
+ }),
+ },
+ {
+ Name: circuittypes.ModuleName,
+ Config: appconfig.WrapAny(&circuitmodulev1.Module{
+ }),
+ },
+ {
+ Name: epochstypes.ModuleName,
+ Config: appconfig.WrapAny(&epochsmodulev1.Module{
+ }),
+ },
+ {
+ Name: protocolpooltypes.ModuleName,
+ Config: appconfig.WrapAny(&protocolpoolmodulev1.Module{
+ }),
+ },
+ }
+
+ // AppConfig is application configuration (used by depinject)
+
+ AppConfig = depinject.Configs(appconfig.Compose(&appv1alpha1.Config{
+ Modules: ModuleConfig,
+ }),
+ depinject.Supply(
+ // supply custom module basics
+ map[string]module.AppModuleBasic{
+ genutiltypes.ModuleName: genutil.NewAppModuleBasic(genutiltypes.DefaultMessageValidator),
+ govtypes.ModuleName: gov.NewAppModuleBasic(
+ []govclient.ProposalHandler{
+ },
+ ),
+ },
+ ),
+ )
+ )
+ ```
+
+ Note: the `tx` isn't a module, but a configuration. It should be wired in the `AppConfig` as well.
+
+ ```go expandable
+ package simapp
+
+ import (
+
+ "time"
+ "google.golang.org/protobuf/types/known/durationpb"
+
+ runtimev1alpha1 "cosmossdk.io/api/cosmos/app/runtime/v1alpha1"
+ appv1alpha1 "cosmossdk.io/api/cosmos/app/v1alpha1"
+ authmodulev1 "cosmossdk.io/api/cosmos/auth/module/v1"
+ authzmodulev1 "cosmossdk.io/api/cosmos/authz/module/v1"
+ bankmodulev1 "cosmossdk.io/api/cosmos/bank/module/v1"
+ circuitmodulev1 "cosmossdk.io/api/cosmos/circuit/module/v1"
+ consensusmodulev1 "cosmossdk.io/api/cosmos/consensus/module/v1"
+ distrmodulev1 "cosmossdk.io/api/cosmos/distribution/module/v1"
+ epochsmodulev1 "cosmossdk.io/api/cosmos/epochs/module/v1"
+ evidencemodulev1 "cosmossdk.io/api/cosmos/evidence/module/v1"
+ feegrantmodulev1 "cosmossdk.io/api/cosmos/feegrant/module/v1"
+ genutilmodulev1 "cosmossdk.io/api/cosmos/genutil/module/v1"
+ govmodulev1 "cosmossdk.io/api/cosmos/gov/module/v1"
+ groupmodulev1 "cosmossdk.io/api/cosmos/group/module/v1"
+ mintmodulev1 "cosmossdk.io/api/cosmos/mint/module/v1"
+ nftmodulev1 "cosmossdk.io/api/cosmos/nft/module/v1"
+ protocolpoolmodulev1 "cosmossdk.io/api/cosmos/protocolpool/module/v1"
+ slashingmodulev1 "cosmossdk.io/api/cosmos/slashing/module/v1"
+ stakingmodulev1 "cosmossdk.io/api/cosmos/staking/module/v1"
+ txconfigv1 "cosmossdk.io/api/cosmos/tx/config/v1"
+ upgrademodulev1 "cosmossdk.io/api/cosmos/upgrade/module/v1"
+ vestingmodulev1 "cosmossdk.io/api/cosmos/vesting/module/v1"
+ "cosmossdk.io/core/appconfig"
+ "cosmossdk.io/depinject"
+ _ "cosmossdk.io/x/circuit" // import for side-effects
+ circuittypes "cosmossdk.io/x/circuit/types"
+ _ "cosmossdk.io/x/evidence" // import for side-effects
+ evidencetypes "cosmossdk.io/x/evidence/types"
+ "cosmossdk.io/x/feegrant"
+ _ "cosmossdk.io/x/feegrant/module" // import for side-effects
+ "cosmossdk.io/x/nft"
+ _ "cosmossdk.io/x/nft/module" // import for side-effects
+ _ "cosmossdk.io/x/upgrade" // import for side-effects
+ upgradetypes "cosmossdk.io/x/upgrade/types"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ _ "github.com/cosmos/cosmos-sdk/x/auth/tx/config" // import for side-effects
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ _ "github.com/cosmos/cosmos-sdk/x/auth/vesting" // import for side-effects
+ vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
+ "github.com/cosmos/cosmos-sdk/x/authz"
+ _ "github.com/cosmos/cosmos-sdk/x/authz/module" // import for side-effects
+ _ "github.com/cosmos/cosmos-sdk/x/bank" // import for side-effects
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ _ "github.com/cosmos/cosmos-sdk/x/consensus" // import for side-effects
+ consensustypes "github.com/cosmos/cosmos-sdk/x/consensus/types"
+ _ "github.com/cosmos/cosmos-sdk/x/distribution" // import for side-effects
+ distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
+ _ "github.com/cosmos/cosmos-sdk/x/epochs" // import for side-effects
+ epochstypes "github.com/cosmos/cosmos-sdk/x/epochs/types"
+ "github.com/cosmos/cosmos-sdk/x/genutil"
+ genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
+ "github.com/cosmos/cosmos-sdk/x/gov"
+ govclient "github.com/cosmos/cosmos-sdk/x/gov/client"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ "github.com/cosmos/cosmos-sdk/x/group"
+ _ "github.com/cosmos/cosmos-sdk/x/group/module" // import for side-effects
+ _ "github.com/cosmos/cosmos-sdk/x/mint" // import for side-effects
+ minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
+ _ "github.com/cosmos/cosmos-sdk/x/protocolpool" // import for side-effects
+ protocolpooltypes "github.com/cosmos/cosmos-sdk/x/protocolpool/types"
+ _ "github.com/cosmos/cosmos-sdk/x/slashing" // import for side-effects
+ slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types"
+ _ "github.com/cosmos/cosmos-sdk/x/staking" // import for side-effects
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+ )
+
+ var (
+ // module account permissions
+ moduleAccPerms = []*authmodulev1.ModuleAccountPermission{
+ {
+ Account: authtypes.FeeCollectorName
+ },
+ {
+ Account: distrtypes.ModuleName
+ },
+ {
+ Account: minttypes.ModuleName,
+ Permissions: []string{
+ authtypes.Minter
+ }},
+ {
+ Account: stakingtypes.BondedPoolName,
+ Permissions: []string{
+ authtypes.Burner, stakingtypes.ModuleName
+ }},
+ {
+ Account: stakingtypes.NotBondedPoolName,
+ Permissions: []string{
+ authtypes.Burner, stakingtypes.ModuleName
+ }},
+ {
+ Account: govtypes.ModuleName,
+ Permissions: []string{
+ authtypes.Burner
+ }},
+ {
+ Account: nft.ModuleName
+ },
+ {
+ Account: protocolpooltypes.ModuleName
+ },
+ {
+ Account: protocolpooltypes.ProtocolPoolEscrowAccount
+ },
+ }
+
+ // blocked account addresses
+ blockAccAddrs = []string{
+ authtypes.FeeCollectorName,
+ distrtypes.ModuleName,
+ minttypes.ModuleName,
+ stakingtypes.BondedPoolName,
+ stakingtypes.NotBondedPoolName,
+ nft.ModuleName,
+ // We allow the following module accounts to receive funds:
+ // govtypes.ModuleName
+ }
+
+ ModuleConfig = []*appv1alpha1.ModuleConfig{
+ {
+ Name: runtime.ModuleName,
+ Config: appconfig.WrapAny(&runtimev1alpha1.Module{
+ AppName: "SimApp",
+ // NOTE: upgrade module is required to be prioritized
+ PreBlockers: []string{
+ upgradetypes.ModuleName,
+ authtypes.ModuleName,
+ },
+ // During begin block slashing happens after distr.BeginBlocker so that
+ // there is nothing left over in the validator fee pool, so as to keep the
+ // CanWithdrawInvariant invariant.
+ // NOTE: staking module is required if HistoricalEntries param > 0
+ BeginBlockers: []string{
+ minttypes.ModuleName,
+ distrtypes.ModuleName,
+ protocolpooltypes.ModuleName,
+ slashingtypes.ModuleName,
+ evidencetypes.ModuleName,
+ stakingtypes.ModuleName,
+ authz.ModuleName,
+ epochstypes.ModuleName,
+ },
+ EndBlockers: []string{
+ govtypes.ModuleName,
+ stakingtypes.ModuleName,
+ feegrant.ModuleName,
+ group.ModuleName,
+ protocolpooltypes.ModuleName,
+ },
+ OverrideStoreKeys: []*runtimev1alpha1.StoreKeyConfig{
+ {
+ ModuleName: authtypes.ModuleName,
+ KvStoreKey: "acc",
+ },
+ },
+ // NOTE: The genutils module must occur after staking so that pools are
+ // properly initialized with tokens from genesis accounts.
+ // NOTE: The genutils module must also occur after auth so that it can access the params from auth.
+ InitGenesis: []string{
+ authtypes.ModuleName,
+ banktypes.ModuleName,
+ distrtypes.ModuleName,
+ stakingtypes.ModuleName,
+ slashingtypes.ModuleName,
+ govtypes.ModuleName,
+ minttypes.ModuleName,
+ genutiltypes.ModuleName,
+ evidencetypes.ModuleName,
+ authz.ModuleName,
+ feegrant.ModuleName,
+ nft.ModuleName,
+ group.ModuleName,
+ upgradetypes.ModuleName,
+ vestingtypes.ModuleName,
+ circuittypes.ModuleName,
+ epochstypes.ModuleName,
+ protocolpooltypes.ModuleName,
+ },
+ // When ExportGenesis is not specified, the export genesis module order
+ // is equal to the init genesis order
+ ExportGenesis: []string{
+ consensustypes.ModuleName,
+ authtypes.ModuleName,
+ protocolpooltypes.ModuleName, // Must be exported before bank
+ banktypes.ModuleName,
+ distrtypes.ModuleName,
+ stakingtypes.ModuleName,
+ slashingtypes.ModuleName,
+ govtypes.ModuleName,
+ minttypes.ModuleName,
+ genutiltypes.ModuleName,
+ evidencetypes.ModuleName,
+ authz.ModuleName,
+ feegrant.ModuleName,
+ nft.ModuleName,
+ group.ModuleName,
+ upgradetypes.ModuleName,
+ vestingtypes.ModuleName,
+ circuittypes.ModuleName,
+ epochstypes.ModuleName,
+ },
+ // Uncomment if you want to set a custom migration order here.
+ // OrderMigrations: []string{
+ },
+ }),
+ },
+ {
+ Name: authtypes.ModuleName,
+ Config: appconfig.WrapAny(&authmodulev1.Module{
+ Bech32Prefix: "cosmos",
+ ModuleAccountPermissions: moduleAccPerms,
+ // By default, a module's authority is the governance module. This is configurable with the following:
+ // Authority: "group", // A custom module authority can be set using a module name
+ // Authority: "cosmos1cwwv22j5ca08ggdv9c2uky355k908694z577tv", // or a specific address
+ }),
+ },
+ {
+ Name: vestingtypes.ModuleName,
+ Config: appconfig.WrapAny(&vestingmodulev1.Module{
+ }),
+ },
+ {
+ Name: banktypes.ModuleName,
+ Config: appconfig.WrapAny(&bankmodulev1.Module{
+ BlockedModuleAccountsOverride: blockAccAddrs,
+ }),
+ },
+ {
+ Name: stakingtypes.ModuleName,
+ Config: appconfig.WrapAny(&stakingmodulev1.Module{
+ // NOTE: specifying a prefix is only necessary when using bech32 addresses
+ // If not specified, the auth Bech32Prefix appended with "valoper" and "valcons" is used by default
+ Bech32PrefixValidator: "cosmosvaloper",
+ Bech32PrefixConsensus: "cosmosvalcons",
+ }),
+ },
+ {
+ Name: slashingtypes.ModuleName,
+ Config: appconfig.WrapAny(&slashingmodulev1.Module{
+ }),
+ },
+ {
+ Name: "tx",
+ Config: appconfig.WrapAny(&txconfigv1.Config{
+ SkipAnteHandler: true, // Enable this to skip the default antehandlers and set custom ante handlers.
+ }),
+ },
+ {
+ Name: genutiltypes.ModuleName,
+ Config: appconfig.WrapAny(&genutilmodulev1.Module{
+ }),
+ },
+ {
+ Name: authz.ModuleName,
+ Config: appconfig.WrapAny(&authzmodulev1.Module{
+ }),
+ },
+ {
+ Name: upgradetypes.ModuleName,
+ Config: appconfig.WrapAny(&upgrademodulev1.Module{
+ }),
+ },
+ {
+ Name: distrtypes.ModuleName,
+ Config: appconfig.WrapAny(&distrmodulev1.Module{
+ }),
+ },
+ {
+ Name: evidencetypes.ModuleName,
+ Config: appconfig.WrapAny(&evidencemodulev1.Module{
+ }),
+ },
+ {
+ Name: minttypes.ModuleName,
+ Config: appconfig.WrapAny(&mintmodulev1.Module{
+ }),
+ },
+ {
+ Name: group.ModuleName,
+ Config: appconfig.WrapAny(&groupmodulev1.Module{
+ MaxExecutionPeriod: durationpb.New(time.Second * 1209600),
+ MaxMetadataLen: 255,
+ }),
+ },
+ {
+ Name: nft.ModuleName,
+ Config: appconfig.WrapAny(&nftmodulev1.Module{
+ }),
+ },
+ {
+ Name: feegrant.ModuleName,
+ Config: appconfig.WrapAny(&feegrantmodulev1.Module{
+ }),
+ },
+ {
+ Name: govtypes.ModuleName,
+ Config: appconfig.WrapAny(&govmodulev1.Module{
+ }),
+ },
+ {
+ Name: consensustypes.ModuleName,
+ Config: appconfig.WrapAny(&consensusmodulev1.Module{
+ }),
+ },
+ {
+ Name: circuittypes.ModuleName,
+ Config: appconfig.WrapAny(&circuitmodulev1.Module{
+ }),
+ },
+ {
+ Name: epochstypes.ModuleName,
+ Config: appconfig.WrapAny(&epochsmodulev1.Module{
+ }),
+ },
+ {
+ Name: protocolpooltypes.ModuleName,
+ Config: appconfig.WrapAny(&protocolpoolmodulev1.Module{
+ }),
+ },
+ }
+
+ // AppConfig is application configuration (used by depinject)
+
+ AppConfig = depinject.Configs(appconfig.Compose(&appv1alpha1.Config{
+ Modules: ModuleConfig,
+ }),
+ depinject.Supply(
+ // supply custom module basics
+ map[string]module.AppModuleBasic{
+ genutiltypes.ModuleName: genutil.NewAppModuleBasic(genutiltypes.DefaultMessageValidator),
+ govtypes.ModuleName: gov.NewAppModuleBasic(
+ []govclient.ProposalHandler{
+ },
+ ),
+ },
+ ),
+ )
+ )
+ ```
+
+See the complete `app_config.go` file for `SimApp` [here](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0-rc.2/simapp/app_config.go).
+
+### Alternative formats
+
+
+The example above shows how to create an `AppConfig` using Go. However, it is also possible to create an `AppConfig` using YAML or JSON.\
+The configuration can then be embedded with `go:embed` and read with [`appconfig.LoadYAML`](https://pkg.go.dev/cosmossdk.io/core/appconfig#LoadYAML) or [`appconfig.LoadJSON`](https://pkg.go.dev/cosmossdk.io/core/appconfig#LoadJSON) in `app_di.go`.
+
+```go
+//go:embed app_config.yaml
+var (
+ appConfigYaml []byte
+ appConfig = appconfig.LoadYAML(appConfigYaml)
+)
+```
+
+
+
+```yaml expandable
+modules:
+ - name: runtime
+ config:
+ "@type": cosmos.app.runtime.v1alpha1.Module
+ app_name: SimApp
+ begin_blockers: [staking, auth, bank]
+ end_blockers: [bank, auth, staking]
+ init_genesis: [bank, auth, staking]
+ - name: auth
+ config:
+ "@type": cosmos.auth.module.v1.Module
+ bech32_prefix: cosmos
+ - name: bank
+ config:
+ "@type": cosmos.bank.module.v1.Module
+ - name: staking
+ config:
+ "@type": cosmos.staking.module.v1.Module
+ - name: tx
+ config:
+ "@type": cosmos.tx.module.v1.Module
+```
+
+A more complete example of `app.yaml` can be found [here](https://github.com/cosmos/cosmos-sdk/blob/release/v0.53.x/simapp/example_app.yaml).
+
+## `app_di.go`
+
+`app_di.go` is the place where `SimApp` is constructed. `depinject.Inject` automatically wires the app modules and keepers when provided with an application configuration (`AppConfig`). `SimApp` is constructed upon calling the injected `*runtime.AppBuilder` with `appBuilder.Build(...)`.\
+In short, `depinject` and the [`runtime` package](/sdk/v0.53/build/building-apps/runtime) abstract the wiring of the app, and the `AppBuilder` is the place where the app is constructed. [`runtime`](/sdk/v0.53/build/building-apps/runtime) takes care of registering the codecs, KV store, subspaces and instantiating `baseapp`.
+
+```go expandable
+//go:build !app_v1
+
+package simapp
+
+import (
+
+ "io"
+
+ dbm "github.com/cosmos/cosmos-db"
+
+ clienthelpers "cosmossdk.io/client/v2/helpers"
+ "cosmossdk.io/depinject"
+ "cosmossdk.io/log"
+ storetypes "cosmossdk.io/store/types"
+ circuitkeeper "cosmossdk.io/x/circuit/keeper"
+ evidencekeeper "cosmossdk.io/x/evidence/keeper"
+ feegrantkeeper "cosmossdk.io/x/feegrant/keeper"
+ nftkeeper "cosmossdk.io/x/nft/keeper"
+ upgradekeeper "cosmossdk.io/x/upgrade/keeper"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ "github.com/cosmos/cosmos-sdk/server"
+ "github.com/cosmos/cosmos-sdk/server/api"
+ "github.com/cosmos/cosmos-sdk/server/config"
+ servertypes "github.com/cosmos/cosmos-sdk/server/types"
+ testdata_pulsar "github.com/cosmos/cosmos-sdk/testutil/testdata/testpb"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ "github.com/cosmos/cosmos-sdk/x/auth"
+ "github.com/cosmos/cosmos-sdk/x/auth/ante"
+ authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
+ authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ authzkeeper "github.com/cosmos/cosmos-sdk/x/authz/keeper"
+ bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
+ consensuskeeper "github.com/cosmos/cosmos-sdk/x/consensus/keeper"
+ distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper"
+ epochskeeper "github.com/cosmos/cosmos-sdk/x/epochs/keeper"
+ govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper"
+ groupkeeper "github.com/cosmos/cosmos-sdk/x/group/keeper"
+ mintkeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper"
+ protocolpoolkeeper "github.com/cosmos/cosmos-sdk/x/protocolpool/keeper"
+ slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper"
+ stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper"
+)
+
+// DefaultNodeHome default home directories for the application daemon
+var DefaultNodeHome string
+
+var (
+ _ runtime.AppI = (*SimApp)(nil)
+ _ servertypes.Application = (*SimApp)(nil)
+)
+
+// SimApp extends an ABCI application, but with most of its parameters exported.
+// They are exported for convenience in creating helper functions, as object
+// capabilities aren't needed for testing.
+type SimApp struct {
+ *runtime.App
+ legacyAmino *codec.LegacyAmino
+ appCodec codec.Codec
+ txConfig client.TxConfig
+ interfaceRegistry codectypes.InterfaceRegistry
+
+ // essential keepers
+ AccountKeeper authkeeper.AccountKeeper
+ BankKeeper bankkeeper.BaseKeeper
+ StakingKeeper *stakingkeeper.Keeper
+ SlashingKeeper slashingkeeper.Keeper
+ MintKeeper mintkeeper.Keeper
+ DistrKeeper distrkeeper.Keeper
+ GovKeeper *govkeeper.Keeper
+ UpgradeKeeper *upgradekeeper.Keeper
+ EvidenceKeeper evidencekeeper.Keeper
+ ConsensusParamsKeeper consensuskeeper.Keeper
+ CircuitKeeper circuitkeeper.Keeper
+
+ // supplementary keepers
+ FeeGrantKeeper feegrantkeeper.Keeper
+ GroupKeeper groupkeeper.Keeper
+ AuthzKeeper authzkeeper.Keeper
+ NFTKeeper nftkeeper.Keeper
+ EpochsKeeper epochskeeper.Keeper
+ ProtocolPoolKeeper protocolpoolkeeper.Keeper
+
+ // simulation manager
+ sm *module.SimulationManager
+}
+
+func init() {
+ var err error
+ DefaultNodeHome, err = clienthelpers.GetNodeHomeDirectory(".simapp")
+ if err != nil {
+ panic(err)
+}
+}
+
+// NewSimApp returns a reference to an initialized SimApp.
+func NewSimApp(
+ logger log.Logger,
+ db dbm.DB,
+ traceStore io.Writer,
+ loadLatest bool,
+ appOpts servertypes.AppOptions,
+ baseAppOptions ...func(*baseapp.BaseApp),
+) *SimApp {
+ var (
+ app = &SimApp{
+}
+
+appBuilder *runtime.AppBuilder
+
+ // merge the AppConfig and other configuration in one config
+ appConfig = depinject.Configs(
+ AppConfig,
+ depinject.Supply(
+ // supply the application options
+ appOpts,
+ // supply the logger
+ logger,
+
+ // ADVANCED CONFIGURATION
+
+ //
+ // AUTH
+ //
+ // For providing a custom function required in auth to generate custom account types
+ // add it below. By default the auth module uses simulation.RandomGenesisAccounts.
+ //
+ // authtypes.RandomGenesisAccountsFn(simulation.RandomGenesisAccounts),
+ //
+ // For providing a custom base account type add it below.
+ // By default the auth module uses authtypes.ProtoBaseAccount().
+ //
+ // func()
+
+sdk.AccountI {
+ return authtypes.ProtoBaseAccount()
+},
+ //
+ // For providing a different address codec, add it below.
+ // By default the auth module uses a Bech32 address codec,
+ // with the prefix defined in the auth module configuration.
+ //
+ // func()
+
+address.Codec {
+ return <- custom address codec type ->
+}
+
+ //
+ // STAKING
+ //
+ // For providing a different validator and consensus address codec, add it below.
+ // By default the staking module uses the bech32 prefix provided in the auth config,
+ // and appends "valoper" and "valcons" for validator and consensus addresses respectively.
+ // When providing a custom address codec in auth, custom address codecs must be provided here as well.
+ //
+ // func()
+
+runtime.ValidatorAddressCodec {
+ return <- custom validator address codec type ->
+}
+ // func()
+
+runtime.ConsensusAddressCodec {
+ return <- custom consensus address codec type ->
+}
+
+ //
+ // MINT
+ //
+
+ // For providing a custom inflation function for x/mint add here your
+ // custom function that implements the minttypes.InflationCalculationFn
+ // interface.
+ ),
+ )
+ )
+ if err := depinject.Inject(appConfig,
+ &appBuilder,
+ &app.appCodec,
+ &app.legacyAmino,
+ &app.txConfig,
+ &app.interfaceRegistry,
+ &app.AccountKeeper,
+ &app.BankKeeper,
+ &app.StakingKeeper,
+ &app.SlashingKeeper,
+ &app.MintKeeper,
+ &app.DistrKeeper,
+ &app.GovKeeper,
+ &app.UpgradeKeeper,
+ &app.AuthzKeeper,
+ &app.EvidenceKeeper,
+ &app.FeeGrantKeeper,
+ &app.GroupKeeper,
+ &app.NFTKeeper,
+ &app.ConsensusParamsKeeper,
+ &app.CircuitKeeper,
+ &app.EpochsKeeper,
+ &app.ProtocolPoolKeeper,
+ ); err != nil {
+ panic(err)
+}
+
+ // Below we could construct and set an application specific mempool and
+ // ABCI 1.0 PrepareProposal and ProcessProposal handlers. These defaults are
+ // already set in the SDK's BaseApp, this shows an example of how to override
+ // them.
+ //
+ // Example:
+ //
+ // app.App = appBuilder.Build(...)
+ // nonceMempool := mempool.NewSenderNonceMempool()
+ // abciPropHandler := NewDefaultProposalHandler(nonceMempool, app.App.BaseApp)
+ //
+ // app.App.BaseApp.SetMempool(nonceMempool)
+ // app.App.BaseApp.SetPrepareProposal(abciPropHandler.PrepareProposalHandler())
+ // app.App.BaseApp.SetProcessProposal(abciPropHandler.ProcessProposalHandler())
+ //
+ // Alternatively, you can construct BaseApp options, append those to
+ // baseAppOptions and pass them to the appBuilder.
+ //
+ // Example:
+ //
+ // prepareOpt = func(app *baseapp.BaseApp) {
+ // abciPropHandler := baseapp.NewDefaultProposalHandler(nonceMempool, app)
+ // app.SetPrepareProposal(abciPropHandler.PrepareProposalHandler())
+ //
+}
+ // baseAppOptions = append(baseAppOptions, prepareOpt)
+
+ // create and set dummy vote extension handler
+ voteExtOp := func(bApp *baseapp.BaseApp) {
+ voteExtHandler := NewVoteExtensionHandler()
+
+voteExtHandler.SetHandlers(bApp)
+}
+
+baseAppOptions = append(baseAppOptions, voteExtOp, baseapp.SetOptimisticExecution())
+
+app.App = appBuilder.Build(db, traceStore, baseAppOptions...)
+
+ // register streaming services
+ if err := app.RegisterStreamingServices(appOpts, app.kvStoreKeys()); err != nil {
+ panic(err)
+}
+
+ /**** Module Options ****/
+
+ // RegisterUpgradeHandlers is used for registering any on-chain upgrades.
+ app.RegisterUpgradeHandlers()
+
+ // add test gRPC service for testing gRPC queries in isolation
+ testdata_pulsar.RegisterQueryServer(app.GRPCQueryRouter(), testdata_pulsar.QueryImpl{
+})
+
+ // create the simulation manager and define the order of the modules for deterministic simulations
+ //
+ // NOTE: this is not required for apps that don't use the simulator for fuzz testing
+ // transactions
+ overrideModules := map[string]module.AppModuleSimulation{
+ authtypes.ModuleName: auth.NewAppModule(app.appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, nil),
+}
+
+app.sm = module.NewSimulationManagerFromAppModules(app.ModuleManager.Modules, overrideModules)
+
+app.sm.RegisterStoreDecoders()
+
+ // A custom InitChainer can be set if extra pre-init-genesis logic is required.
+ // By default, when using app wiring enabled module, this is not required.
+ // For instance, the upgrade module will set automatically the module version map in its init genesis thanks to app wiring.
+ // However, when registering a module manually (i.e. that does not support app wiring), the module version map
+ // must be set manually as follow. The upgrade module will de-duplicate the module version map.
+ //
+ // app.SetInitChainer(func(ctx sdk.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) {
+ // app.UpgradeKeeper.SetModuleVersionMap(ctx, app.ModuleManager.GetVersionMap())
+ // return app.App.InitChainer(ctx, req)
+ //
+})
+
+ // set custom ante handler
+ app.setAnteHandler(app.txConfig)
+ if err := app.Load(loadLatest); err != nil {
+ panic(err)
+}
+
+return app
+}
+
+// setAnteHandler sets custom ante handlers.
+// "x/auth/tx" pre-defined ante handlers have been disabled in app_config.
+func (app *SimApp)
+
+setAnteHandler(txConfig client.TxConfig) {
+ anteHandler, err := NewAnteHandler(
+ HandlerOptions{
+ ante.HandlerOptions{
+ UnorderedNonceManager: app.AccountKeeper,
+ AccountKeeper: app.AccountKeeper,
+ BankKeeper: app.BankKeeper,
+ SignModeHandler: txConfig.SignModeHandler(),
+ FeegrantKeeper: app.FeeGrantKeeper,
+ SigGasConsumer: ante.DefaultSigVerificationGasConsumer,
+},
+ &app.CircuitKeeper,
+},
+ )
+ if err != nil {
+ panic(err)
+}
+
+ // Set the AnteHandler for the app
+ app.SetAnteHandler(anteHandler)
+}
+
+// LegacyAmino returns SimApp's amino codec.
+//
+// NOTE: This is solely to be used for testing purposes as it may be desirable
+// for modules to register their own custom testing types.
+func (app *SimApp)
+
+LegacyAmino() *codec.LegacyAmino {
+ return app.legacyAmino
+}
+
+// AppCodec returns SimApp's app codec.
+//
+// NOTE: This is solely to be used for testing purposes as it may be desirable
+// for modules to register their own custom testing types.
+func (app *SimApp)
+
+AppCodec()
+
+codec.Codec {
+ return app.appCodec
+}
+
+// InterfaceRegistry returns SimApp's InterfaceRegistry.
+func (app *SimApp)
+
+InterfaceRegistry()
+
+codectypes.InterfaceRegistry {
+ return app.interfaceRegistry
+}
+
+// TxConfig returns SimApp's TxConfig
+func (app *SimApp)
+
+TxConfig()
+
+client.TxConfig {
+ return app.txConfig
+}
+
+// GetKey returns the KVStoreKey for the provided store key.
+//
+// NOTE: This is solely to be used for testing purposes.
+func (app *SimApp)
+
+GetKey(storeKey string) *storetypes.KVStoreKey {
+ sk := app.UnsafeFindStoreKey(storeKey)
+
+kvStoreKey, ok := sk.(*storetypes.KVStoreKey)
+ if !ok {
+ return nil
+}
+
+return kvStoreKey
+}
+
+func (app *SimApp)
+
+kvStoreKeys()
+
+map[string]*storetypes.KVStoreKey {
+ keys := make(map[string]*storetypes.KVStoreKey)
+ for _, k := range app.GetStoreKeys() {
+ if kv, ok := k.(*storetypes.KVStoreKey); ok {
+ keys[kv.Name()] = kv
+}
+
+}
+
+return keys
+}
+
+// SimulationManager implements the SimulationApp interface
+func (app *SimApp)
+
+SimulationManager() *module.SimulationManager {
+ return app.sm
+}
+
+// RegisterAPIRoutes registers all application module routes with the provided
+// API server.
+func (app *SimApp)
+
+RegisterAPIRoutes(apiSvr *api.Server, apiConfig config.APIConfig) {
+ app.App.RegisterAPIRoutes(apiSvr, apiConfig)
+ // register swagger API in app.go so that other applications can override easily
+ if err := server.RegisterSwaggerAPI(apiSvr.ClientCtx, apiSvr.Router, apiConfig.Swagger); err != nil {
+ panic(err)
+}
+}
+
+// GetMaccPerms returns a copy of the module account permissions
+//
+// NOTE: This is solely to be used for testing purposes.
+func GetMaccPerms()
+
+map[string][]string {
+ dup := make(map[string][]string)
+ for _, perms := range moduleAccPerms {
+ dup[perms.Account] = perms.Permissions
+}
+
+return dup
+}
+
+// BlockedAddresses returns all the app's blocked account addresses.
+func BlockedAddresses()
+
+map[string]bool {
+ result := make(map[string]bool)
+ if len(blockAccAddrs) > 0 {
+ for _, addr := range blockAccAddrs {
+ result[addr] = true
+}
+
+}
+
+else {
+ for addr := range GetMaccPerms() {
+ result[addr] = true
+}
+
+}
+
+return result
+}
+```
+
+
+When using `depinject.Inject`, the injected types must be pointers.
+
+
+### Advanced Configuration
+
+In advanced cases, it is possible to inject extra (module) configuration in a way that is not (yet) supported by `AppConfig`.\
+In this case, use `depinject.Configs` to combine the extra configuration with `AppConfig`, and `depinject.Supply` to provide the extra configuration.
+More information on how `depinject.Configs` and `depinject.Supply` function can be found in the [`depinject` documentation](https://pkg.go.dev/cosmossdk.io/depinject).
+
+```go expandable
+//go:build !app_v1
+
+package simapp
+
+import (
+
+ "io"
+
+ dbm "github.com/cosmos/cosmos-db"
+
+ clienthelpers "cosmossdk.io/client/v2/helpers"
+ "cosmossdk.io/depinject"
+ "cosmossdk.io/log"
+ storetypes "cosmossdk.io/store/types"
+ circuitkeeper "cosmossdk.io/x/circuit/keeper"
+ evidencekeeper "cosmossdk.io/x/evidence/keeper"
+ feegrantkeeper "cosmossdk.io/x/feegrant/keeper"
+ nftkeeper "cosmossdk.io/x/nft/keeper"
+ upgradekeeper "cosmossdk.io/x/upgrade/keeper"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ "github.com/cosmos/cosmos-sdk/server"
+ "github.com/cosmos/cosmos-sdk/server/api"
+ "github.com/cosmos/cosmos-sdk/server/config"
+ servertypes "github.com/cosmos/cosmos-sdk/server/types"
+ testdata_pulsar "github.com/cosmos/cosmos-sdk/testutil/testdata/testpb"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ "github.com/cosmos/cosmos-sdk/x/auth"
+ "github.com/cosmos/cosmos-sdk/x/auth/ante"
+ authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
+ authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ authzkeeper "github.com/cosmos/cosmos-sdk/x/authz/keeper"
+ bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
+ consensuskeeper "github.com/cosmos/cosmos-sdk/x/consensus/keeper"
+ distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper"
+ epochskeeper "github.com/cosmos/cosmos-sdk/x/epochs/keeper"
+ govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper"
+ groupkeeper "github.com/cosmos/cosmos-sdk/x/group/keeper"
+ mintkeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper"
+ protocolpoolkeeper "github.com/cosmos/cosmos-sdk/x/protocolpool/keeper"
+ slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper"
+ stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper"
+)
+
+// DefaultNodeHome default home directories for the application daemon
+var DefaultNodeHome string
+
+var (
+ _ runtime.AppI = (*SimApp)(nil)
+ _ servertypes.Application = (*SimApp)(nil)
+)
+
+// SimApp extends an ABCI application, but with most of its parameters exported.
+// They are exported for convenience in creating helper functions, as object
+// capabilities aren't needed for testing.
+type SimApp struct {
+ *runtime.App
+ legacyAmino *codec.LegacyAmino
+ appCodec codec.Codec
+ txConfig client.TxConfig
+ interfaceRegistry codectypes.InterfaceRegistry
+
+ // essential keepers
+ AccountKeeper authkeeper.AccountKeeper
+ BankKeeper bankkeeper.BaseKeeper
+ StakingKeeper *stakingkeeper.Keeper
+ SlashingKeeper slashingkeeper.Keeper
+ MintKeeper mintkeeper.Keeper
+ DistrKeeper distrkeeper.Keeper
+ GovKeeper *govkeeper.Keeper
+ UpgradeKeeper *upgradekeeper.Keeper
+ EvidenceKeeper evidencekeeper.Keeper
+ ConsensusParamsKeeper consensuskeeper.Keeper
+ CircuitKeeper circuitkeeper.Keeper
+
+ // supplementary keepers
+ FeeGrantKeeper feegrantkeeper.Keeper
+ GroupKeeper groupkeeper.Keeper
+ AuthzKeeper authzkeeper.Keeper
+ NFTKeeper nftkeeper.Keeper
+ EpochsKeeper epochskeeper.Keeper
+ ProtocolPoolKeeper protocolpoolkeeper.Keeper
+
+ // simulation manager
+ sm *module.SimulationManager
+}
+
+func init() {
+ var err error
+ DefaultNodeHome, err = clienthelpers.GetNodeHomeDirectory(".simapp")
+ if err != nil {
+ panic(err)
+}
+}
+
+// NewSimApp returns a reference to an initialized SimApp.
+func NewSimApp(
+ logger log.Logger,
+ db dbm.DB,
+ traceStore io.Writer,
+ loadLatest bool,
+ appOpts servertypes.AppOptions,
+ baseAppOptions ...func(*baseapp.BaseApp),
+) *SimApp {
+ var (
+ app = &SimApp{
+}
+
+appBuilder *runtime.AppBuilder
+
+ // merge the AppConfig and other configuration in one config
+ appConfig = depinject.Configs(
+ AppConfig,
+ depinject.Supply(
+ // supply the application options
+ appOpts,
+ // supply the logger
+ logger,
+
+ // ADVANCED CONFIGURATION
+
+ //
+ // AUTH
+ //
+ // For providing a custom function required in auth to generate custom account types
+ // add it below. By default the auth module uses simulation.RandomGenesisAccounts.
+ //
+ // authtypes.RandomGenesisAccountsFn(simulation.RandomGenesisAccounts),
+ //
+ // For providing a custom base account type add it below.
+ // By default the auth module uses authtypes.ProtoBaseAccount().
+ //
+ // func()
+
+sdk.AccountI {
+ return authtypes.ProtoBaseAccount()
+},
+ //
+ // For providing a different address codec, add it below.
+ // By default the auth module uses a Bech32 address codec,
+ // with the prefix defined in the auth module configuration.
+ //
+ // func()
+
+address.Codec {
+ return <- custom address codec type ->
+}
+
+ //
+ // STAKING
+ //
+ // For providing a different validator and consensus address codec, add it below.
+ // By default the staking module uses the bech32 prefix provided in the auth config,
+ // and appends "valoper" and "valcons" for validator and consensus addresses respectively.
+ // When providing a custom address codec in auth, custom address codecs must be provided here as well.
+ //
+ // func()
+
+runtime.ValidatorAddressCodec {
+ return <- custom validator address codec type ->
+}
+ // func()
+
+runtime.ConsensusAddressCodec {
+ return <- custom consensus address codec type ->
+}
+
+ //
+ // MINT
+ //
+
+ // For providing a custom inflation function for x/mint add here your
+ // custom function that implements the minttypes.InflationCalculationFn
+ // interface.
+ ),
+ )
+ )
+ if err := depinject.Inject(appConfig,
+ &appBuilder,
+ &app.appCodec,
+ &app.legacyAmino,
+ &app.txConfig,
+ &app.interfaceRegistry,
+ &app.AccountKeeper,
+ &app.BankKeeper,
+ &app.StakingKeeper,
+ &app.SlashingKeeper,
+ &app.MintKeeper,
+ &app.DistrKeeper,
+ &app.GovKeeper,
+ &app.UpgradeKeeper,
+ &app.AuthzKeeper,
+ &app.EvidenceKeeper,
+ &app.FeeGrantKeeper,
+ &app.GroupKeeper,
+ &app.NFTKeeper,
+ &app.ConsensusParamsKeeper,
+ &app.CircuitKeeper,
+ &app.EpochsKeeper,
+ &app.ProtocolPoolKeeper,
+ ); err != nil {
+ panic(err)
+}
+
+ // Below we could construct and set an application specific mempool and
+ // ABCI 1.0 PrepareProposal and ProcessProposal handlers. These defaults are
+ // already set in the SDK's BaseApp, this shows an example of how to override
+ // them.
+ //
+ // Example:
+ //
+ // app.App = appBuilder.Build(...)
+ // nonceMempool := mempool.NewSenderNonceMempool()
+ // abciPropHandler := NewDefaultProposalHandler(nonceMempool, app.App.BaseApp)
+ //
+ // app.App.BaseApp.SetMempool(nonceMempool)
+ // app.App.BaseApp.SetPrepareProposal(abciPropHandler.PrepareProposalHandler())
+ // app.App.BaseApp.SetProcessProposal(abciPropHandler.ProcessProposalHandler())
+ //
+ // Alternatively, you can construct BaseApp options, append those to
+ // baseAppOptions and pass them to the appBuilder.
+ //
+ // Example:
+ //
+ // prepareOpt = func(app *baseapp.BaseApp) {
+ // abciPropHandler := baseapp.NewDefaultProposalHandler(nonceMempool, app)
+ // app.SetPrepareProposal(abciPropHandler.PrepareProposalHandler())
+ //
+}
+ // baseAppOptions = append(baseAppOptions, prepareOpt)
+
+ // create and set dummy vote extension handler
+ voteExtOp := func(bApp *baseapp.BaseApp) {
+ voteExtHandler := NewVoteExtensionHandler()
+
+voteExtHandler.SetHandlers(bApp)
+}
+
+baseAppOptions = append(baseAppOptions, voteExtOp, baseapp.SetOptimisticExecution())
+
+app.App = appBuilder.Build(db, traceStore, baseAppOptions...)
+
+ // register streaming services
+ if err := app.RegisterStreamingServices(appOpts, app.kvStoreKeys()); err != nil {
+ panic(err)
+}
+
+ /**** Module Options ****/
+
+ // RegisterUpgradeHandlers is used for registering any on-chain upgrades.
+ app.RegisterUpgradeHandlers()
+
+ // add test gRPC service for testing gRPC queries in isolation
+ testdata_pulsar.RegisterQueryServer(app.GRPCQueryRouter(), testdata_pulsar.QueryImpl{
+})
+
+ // create the simulation manager and define the order of the modules for deterministic simulations
+ //
+ // NOTE: this is not required for apps that don't use the simulator for fuzz testing
+ // transactions
+ overrideModules := map[string]module.AppModuleSimulation{
+ authtypes.ModuleName: auth.NewAppModule(app.appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, nil),
+}
+
+app.sm = module.NewSimulationManagerFromAppModules(app.ModuleManager.Modules, overrideModules)
+
+app.sm.RegisterStoreDecoders()
+
+ // A custom InitChainer can be set if extra pre-init-genesis logic is required.
+ // By default, when using app wiring enabled module, this is not required.
+ // For instance, the upgrade module will set automatically the module version map in its init genesis thanks to app wiring.
+ // However, when registering a module manually (i.e. that does not support app wiring), the module version map
+ // must be set manually as follows. The upgrade module will de-duplicate the module version map.
+ //
+ // app.SetInitChainer(func(ctx sdk.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) {
+ // app.UpgradeKeeper.SetModuleVersionMap(ctx, app.ModuleManager.GetVersionMap())
+ // return app.App.InitChainer(ctx, req)
+ //
+})
+
+ // set custom ante handler
+ app.setAnteHandler(app.txConfig)
+ if err := app.Load(loadLatest); err != nil {
+ panic(err)
+}
+
+return app
+}
+
+// setAnteHandler sets custom ante handlers.
+// "x/auth/tx" pre-defined ante handlers have been disabled in app_config.
+func (app *SimApp)
+
+setAnteHandler(txConfig client.TxConfig) {
+ anteHandler, err := NewAnteHandler(
+ HandlerOptions{
+ ante.HandlerOptions{
+ UnorderedNonceManager: app.AccountKeeper,
+ AccountKeeper: app.AccountKeeper,
+ BankKeeper: app.BankKeeper,
+ SignModeHandler: txConfig.SignModeHandler(),
+ FeegrantKeeper: app.FeeGrantKeeper,
+ SigGasConsumer: ante.DefaultSigVerificationGasConsumer,
+},
+ &app.CircuitKeeper,
+},
+ )
+ if err != nil {
+ panic(err)
+}
+
+ // Set the AnteHandler for the app
+ app.SetAnteHandler(anteHandler)
+}
+
+// LegacyAmino returns SimApp's amino codec.
+//
+// NOTE: This is solely to be used for testing purposes as it may be desirable
+// for modules to register their own custom testing types.
+func (app *SimApp)
+
+LegacyAmino() *codec.LegacyAmino {
+ return app.legacyAmino
+}
+
+// AppCodec returns SimApp's app codec.
+//
+// NOTE: This is solely to be used for testing purposes as it may be desirable
+// for modules to register their own custom testing types.
+func (app *SimApp)
+
+AppCodec()
+
+codec.Codec {
+ return app.appCodec
+}
+
+// InterfaceRegistry returns SimApp's InterfaceRegistry.
+func (app *SimApp)
+
+InterfaceRegistry()
+
+codectypes.InterfaceRegistry {
+ return app.interfaceRegistry
+}
+
+// TxConfig returns SimApp's TxConfig
+func (app *SimApp)
+
+TxConfig()
+
+client.TxConfig {
+ return app.txConfig
+}
+
+// GetKey returns the KVStoreKey for the provided store key.
+//
+// NOTE: This is solely to be used for testing purposes.
+func (app *SimApp)
+
+GetKey(storeKey string) *storetypes.KVStoreKey {
+ sk := app.UnsafeFindStoreKey(storeKey)
+
+kvStoreKey, ok := sk.(*storetypes.KVStoreKey)
+ if !ok {
+ return nil
+}
+
+return kvStoreKey
+}
+
+func (app *SimApp)
+
+kvStoreKeys()
+
+map[string]*storetypes.KVStoreKey {
+ keys := make(map[string]*storetypes.KVStoreKey)
+ for _, k := range app.GetStoreKeys() {
+ if kv, ok := k.(*storetypes.KVStoreKey); ok {
+ keys[kv.Name()] = kv
+}
+
+}
+
+return keys
+}
+
+// SimulationManager implements the SimulationApp interface
+func (app *SimApp)
+
+SimulationManager() *module.SimulationManager {
+ return app.sm
+}
+
+// RegisterAPIRoutes registers all application module routes with the provided
+// API server.
+func (app *SimApp)
+
+RegisterAPIRoutes(apiSvr *api.Server, apiConfig config.APIConfig) {
+ app.App.RegisterAPIRoutes(apiSvr, apiConfig)
+ // register swagger API in app.go so that other applications can override easily
+ if err := server.RegisterSwaggerAPI(apiSvr.ClientCtx, apiSvr.Router, apiConfig.Swagger); err != nil {
+ panic(err)
+}
+}
+
+// GetMaccPerms returns a copy of the module account permissions
+//
+// NOTE: This is solely to be used for testing purposes.
+func GetMaccPerms()
+
+map[string][]string {
+ dup := make(map[string][]string)
+ for _, perms := range moduleAccPerms {
+ dup[perms.Account] = perms.Permissions
+}
+
+return dup
+}
+
+// BlockedAddresses returns all the app's blocked account addresses.
+func BlockedAddresses()
+
+map[string]bool {
+ result := make(map[string]bool)
+ if len(blockAccAddrs) > 0 {
+ for _, addr := range blockAccAddrs {
+ result[addr] = true
+}
+
+}
+
+else {
+ for addr := range GetMaccPerms() {
+ result[addr] = true
+}
+
+}
+
+return result
+}
+```
+
+### Registering non-app wiring modules
+
+It is possible to combine app wiring / depinject enabled modules with non-app wiring modules.
+To do so, use the `app.RegisterModules` method to register the modules on your app, as well as `app.RegisterStores` for registering the extra stores needed.
+
+```go expandable
+// ....
+app.App = appBuilder.Build(db, traceStore, baseAppOptions...)
+
+// register module manually
+app.RegisterStores(storetypes.NewKVStoreKey(example.ModuleName))
+
+app.ExampleKeeper = examplekeeper.NewKeeper(app.appCodec, app.AccountKeeper.AddressCodec(), runtime.NewKVStoreService(app.GetKey(example.ModuleName)), authtypes.NewModuleAddress(govtypes.ModuleName).String())
+ exampleAppModule := examplemodule.NewAppModule(app.ExampleKeeper)
+ if err := app.RegisterModules(&exampleAppModule); err != nil {
+ panic(err)
+}
+
+// ....
+```
+
+
+When using AutoCLI and combining app wiring and non-app wiring modules, the AutoCLI options should be manually constructed instead of injected.
+Otherwise AutoCLI will miss the non-depinject modules and not register their CLI.
+
+
+### Complete `app_di.go`
+
+
+Note that in the complete `SimApp` `app_di.go` file, testing utilities are also defined, but they could just as well be defined in a separate file.
+
+
+```go expandable
+//go:build !app_v1
+
+package simapp
+
+import (
+
+ "io"
+
+ dbm "github.com/cosmos/cosmos-db"
+
+ clienthelpers "cosmossdk.io/client/v2/helpers"
+ "cosmossdk.io/depinject"
+ "cosmossdk.io/log"
+ storetypes "cosmossdk.io/store/types"
+ circuitkeeper "cosmossdk.io/x/circuit/keeper"
+ evidencekeeper "cosmossdk.io/x/evidence/keeper"
+ feegrantkeeper "cosmossdk.io/x/feegrant/keeper"
+ nftkeeper "cosmossdk.io/x/nft/keeper"
+ upgradekeeper "cosmossdk.io/x/upgrade/keeper"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ "github.com/cosmos/cosmos-sdk/server"
+ "github.com/cosmos/cosmos-sdk/server/api"
+ "github.com/cosmos/cosmos-sdk/server/config"
+ servertypes "github.com/cosmos/cosmos-sdk/server/types"
+ testdata_pulsar "github.com/cosmos/cosmos-sdk/testutil/testdata/testpb"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ "github.com/cosmos/cosmos-sdk/x/auth"
+ "github.com/cosmos/cosmos-sdk/x/auth/ante"
+ authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
+ authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ authzkeeper "github.com/cosmos/cosmos-sdk/x/authz/keeper"
+ bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
+ consensuskeeper "github.com/cosmos/cosmos-sdk/x/consensus/keeper"
+ distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper"
+ epochskeeper "github.com/cosmos/cosmos-sdk/x/epochs/keeper"
+ govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper"
+ groupkeeper "github.com/cosmos/cosmos-sdk/x/group/keeper"
+ mintkeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper"
+ protocolpoolkeeper "github.com/cosmos/cosmos-sdk/x/protocolpool/keeper"
+ slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper"
+ stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper"
+)
+
+// DefaultNodeHome default home directories for the application daemon
+var DefaultNodeHome string
+
+var (
+ _ runtime.AppI = (*SimApp)(nil)
+ _ servertypes.Application = (*SimApp)(nil)
+)
+
+// SimApp extends an ABCI application, but with most of its parameters exported.
+// They are exported for convenience in creating helper functions, as object
+// capabilities aren't needed for testing.
+type SimApp struct {
+ *runtime.App
+ legacyAmino *codec.LegacyAmino
+ appCodec codec.Codec
+ txConfig client.TxConfig
+ interfaceRegistry codectypes.InterfaceRegistry
+
+ // essential keepers
+ AccountKeeper authkeeper.AccountKeeper
+ BankKeeper bankkeeper.BaseKeeper
+ StakingKeeper *stakingkeeper.Keeper
+ SlashingKeeper slashingkeeper.Keeper
+ MintKeeper mintkeeper.Keeper
+ DistrKeeper distrkeeper.Keeper
+ GovKeeper *govkeeper.Keeper
+ UpgradeKeeper *upgradekeeper.Keeper
+ EvidenceKeeper evidencekeeper.Keeper
+ ConsensusParamsKeeper consensuskeeper.Keeper
+ CircuitKeeper circuitkeeper.Keeper
+
+ // supplementary keepers
+ FeeGrantKeeper feegrantkeeper.Keeper
+ GroupKeeper groupkeeper.Keeper
+ AuthzKeeper authzkeeper.Keeper
+ NFTKeeper nftkeeper.Keeper
+ EpochsKeeper epochskeeper.Keeper
+ ProtocolPoolKeeper protocolpoolkeeper.Keeper
+
+ // simulation manager
+ sm *module.SimulationManager
+}
+
+func init() {
+ var err error
+ DefaultNodeHome, err = clienthelpers.GetNodeHomeDirectory(".simapp")
+ if err != nil {
+ panic(err)
+}
+}
+
+// NewSimApp returns a reference to an initialized SimApp.
+func NewSimApp(
+ logger log.Logger,
+ db dbm.DB,
+ traceStore io.Writer,
+ loadLatest bool,
+ appOpts servertypes.AppOptions,
+ baseAppOptions ...func(*baseapp.BaseApp),
+) *SimApp {
+ var (
+ app = &SimApp{
+}
+
+appBuilder *runtime.AppBuilder
+
+ // merge the AppConfig and other configuration in one config
+ appConfig = depinject.Configs(
+ AppConfig,
+ depinject.Supply(
+ // supply the application options
+ appOpts,
+ // supply the logger
+ logger,
+
+ // ADVANCED CONFIGURATION
+
+ //
+ // AUTH
+ //
+ // For providing a custom function required in auth to generate custom account types
+ // add it below. By default the auth module uses simulation.RandomGenesisAccounts.
+ //
+ // authtypes.RandomGenesisAccountsFn(simulation.RandomGenesisAccounts),
+ //
+ // For providing a custom base account type add it below.
+ // By default the auth module uses authtypes.ProtoBaseAccount().
+ //
+ // func()
+
+sdk.AccountI {
+ return authtypes.ProtoBaseAccount()
+},
+ //
+ // For providing a different address codec, add it below.
+ // By default the auth module uses a Bech32 address codec,
+ // with the prefix defined in the auth module configuration.
+ //
+ // func()
+
+address.Codec {
+ return <- custom address codec type ->
+}
+
+ //
+ // STAKING
+ //
+ // For providing a different validator and consensus address codec, add it below.
+ // By default the staking module uses the bech32 prefix provided in the auth config,
+ // and appends "valoper" and "valcons" for validator and consensus addresses respectively.
+ // When providing a custom address codec in auth, custom address codecs must be provided here as well.
+ //
+ // func()
+
+runtime.ValidatorAddressCodec {
+ return <- custom validator address codec type ->
+}
+ // func()
+
+runtime.ConsensusAddressCodec {
+ return <- custom consensus address codec type ->
+}
+
+ //
+ // MINT
+ //
+
+ // For providing a custom inflation function for x/mint add here your
+ // custom function that implements the minttypes.InflationCalculationFn
+ // interface.
+ ),
+ )
+ )
+ if err := depinject.Inject(appConfig,
+ &appBuilder,
+ &app.appCodec,
+ &app.legacyAmino,
+ &app.txConfig,
+ &app.interfaceRegistry,
+ &app.AccountKeeper,
+ &app.BankKeeper,
+ &app.StakingKeeper,
+ &app.SlashingKeeper,
+ &app.MintKeeper,
+ &app.DistrKeeper,
+ &app.GovKeeper,
+ &app.UpgradeKeeper,
+ &app.AuthzKeeper,
+ &app.EvidenceKeeper,
+ &app.FeeGrantKeeper,
+ &app.GroupKeeper,
+ &app.NFTKeeper,
+ &app.ConsensusParamsKeeper,
+ &app.CircuitKeeper,
+ &app.EpochsKeeper,
+ &app.ProtocolPoolKeeper,
+ ); err != nil {
+ panic(err)
+}
+
+ // Below we could construct and set an application specific mempool and
+ // ABCI 1.0 PrepareProposal and ProcessProposal handlers. These defaults are
+ // already set in the SDK's BaseApp, this shows an example of how to override
+ // them.
+ //
+ // Example:
+ //
+ // app.App = appBuilder.Build(...)
+ // nonceMempool := mempool.NewSenderNonceMempool()
+ // abciPropHandler := NewDefaultProposalHandler(nonceMempool, app.App.BaseApp)
+ //
+ // app.App.BaseApp.SetMempool(nonceMempool)
+ // app.App.BaseApp.SetPrepareProposal(abciPropHandler.PrepareProposalHandler())
+ // app.App.BaseApp.SetProcessProposal(abciPropHandler.ProcessProposalHandler())
+ //
+ // Alternatively, you can construct BaseApp options, append those to
+ // baseAppOptions and pass them to the appBuilder.
+ //
+ // Example:
+ //
+ // prepareOpt = func(app *baseapp.BaseApp) {
+ // abciPropHandler := baseapp.NewDefaultProposalHandler(nonceMempool, app)
+ // app.SetPrepareProposal(abciPropHandler.PrepareProposalHandler())
+ //
+}
+ // baseAppOptions = append(baseAppOptions, prepareOpt)
+
+ // create and set dummy vote extension handler
+ voteExtOp := func(bApp *baseapp.BaseApp) {
+ voteExtHandler := NewVoteExtensionHandler()
+
+voteExtHandler.SetHandlers(bApp)
+}
+
+baseAppOptions = append(baseAppOptions, voteExtOp, baseapp.SetOptimisticExecution())
+
+app.App = appBuilder.Build(db, traceStore, baseAppOptions...)
+
+ // register streaming services
+ if err := app.RegisterStreamingServices(appOpts, app.kvStoreKeys()); err != nil {
+ panic(err)
+}
+
+ /**** Module Options ****/
+
+ // RegisterUpgradeHandlers is used for registering any on-chain upgrades.
+ app.RegisterUpgradeHandlers()
+
+ // add test gRPC service for testing gRPC queries in isolation
+ testdata_pulsar.RegisterQueryServer(app.GRPCQueryRouter(), testdata_pulsar.QueryImpl{
+})
+
+ // create the simulation manager and define the order of the modules for deterministic simulations
+ //
+ // NOTE: this is not required for apps that don't use the simulator for fuzz testing
+ // transactions
+ overrideModules := map[string]module.AppModuleSimulation{
+ authtypes.ModuleName: auth.NewAppModule(app.appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, nil),
+}
+
+app.sm = module.NewSimulationManagerFromAppModules(app.ModuleManager.Modules, overrideModules)
+
+app.sm.RegisterStoreDecoders()
+
+ // A custom InitChainer can be set if extra pre-init-genesis logic is required.
+ // By default, when using app wiring enabled module, this is not required.
+ // For instance, the upgrade module will set automatically the module version map in its init genesis thanks to app wiring.
+ // However, when registering a module manually (i.e. that does not support app wiring), the module version map
+ // must be set manually as follows. The upgrade module will de-duplicate the module version map.
+ //
+ // app.SetInitChainer(func(ctx sdk.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) {
+ // app.UpgradeKeeper.SetModuleVersionMap(ctx, app.ModuleManager.GetVersionMap())
+ // return app.App.InitChainer(ctx, req)
+ //
+})
+
+ // set custom ante handler
+ app.setAnteHandler(app.txConfig)
+ if err := app.Load(loadLatest); err != nil {
+ panic(err)
+}
+
+return app
+}
+
+// setAnteHandler sets custom ante handlers.
+// "x/auth/tx" pre-defined ante handlers have been disabled in app_config.
+func (app *SimApp)
+
+setAnteHandler(txConfig client.TxConfig) {
+ anteHandler, err := NewAnteHandler(
+ HandlerOptions{
+ ante.HandlerOptions{
+ UnorderedNonceManager: app.AccountKeeper,
+ AccountKeeper: app.AccountKeeper,
+ BankKeeper: app.BankKeeper,
+ SignModeHandler: txConfig.SignModeHandler(),
+ FeegrantKeeper: app.FeeGrantKeeper,
+ SigGasConsumer: ante.DefaultSigVerificationGasConsumer,
+},
+ &app.CircuitKeeper,
+},
+ )
+ if err != nil {
+ panic(err)
+}
+
+ // Set the AnteHandler for the app
+ app.SetAnteHandler(anteHandler)
+}
+
+// LegacyAmino returns SimApp's amino codec.
+//
+// NOTE: This is solely to be used for testing purposes as it may be desirable
+// for modules to register their own custom testing types.
+func (app *SimApp)
+
+LegacyAmino() *codec.LegacyAmino {
+ return app.legacyAmino
+}
+
+// AppCodec returns SimApp's app codec.
+//
+// NOTE: This is solely to be used for testing purposes as it may be desirable
+// for modules to register their own custom testing types.
+func (app *SimApp)
+
+AppCodec()
+
+codec.Codec {
+ return app.appCodec
+}
+
+// InterfaceRegistry returns SimApp's InterfaceRegistry.
+func (app *SimApp)
+
+InterfaceRegistry()
+
+codectypes.InterfaceRegistry {
+ return app.interfaceRegistry
+}
+
+// TxConfig returns SimApp's TxConfig
+func (app *SimApp)
+
+TxConfig()
+
+client.TxConfig {
+ return app.txConfig
+}
+
+// GetKey returns the KVStoreKey for the provided store key.
+//
+// NOTE: This is solely to be used for testing purposes.
+func (app *SimApp)
+
+GetKey(storeKey string) *storetypes.KVStoreKey {
+ sk := app.UnsafeFindStoreKey(storeKey)
+
+kvStoreKey, ok := sk.(*storetypes.KVStoreKey)
+ if !ok {
+ return nil
+}
+
+return kvStoreKey
+}
+
+func (app *SimApp)
+
+kvStoreKeys()
+
+map[string]*storetypes.KVStoreKey {
+ keys := make(map[string]*storetypes.KVStoreKey)
+ for _, k := range app.GetStoreKeys() {
+ if kv, ok := k.(*storetypes.KVStoreKey); ok {
+ keys[kv.Name()] = kv
+}
+
+}
+
+return keys
+}
+
+// SimulationManager implements the SimulationApp interface
+func (app *SimApp)
+
+SimulationManager() *module.SimulationManager {
+ return app.sm
+}
+
+// RegisterAPIRoutes registers all application module routes with the provided
+// API server.
+func (app *SimApp)
+
+RegisterAPIRoutes(apiSvr *api.Server, apiConfig config.APIConfig) {
+ app.App.RegisterAPIRoutes(apiSvr, apiConfig)
+ // register swagger API in app.go so that other applications can override easily
+ if err := server.RegisterSwaggerAPI(apiSvr.ClientCtx, apiSvr.Router, apiConfig.Swagger); err != nil {
+ panic(err)
+}
+}
+
+// GetMaccPerms returns a copy of the module account permissions
+//
+// NOTE: This is solely to be used for testing purposes.
+func GetMaccPerms()
+
+map[string][]string {
+ dup := make(map[string][]string)
+ for _, perms := range moduleAccPerms {
+ dup[perms.Account] = perms.Permissions
+}
+
+return dup
+}
+
+// BlockedAddresses returns all the app's blocked account addresses.
+func BlockedAddresses()
+
+map[string]bool {
+ result := make(map[string]bool)
+ if len(blockAccAddrs) > 0 {
+ for _, addr := range blockAccAddrs {
+ result[addr] = true
+}
+
+}
+
+else {
+ for addr := range GetMaccPerms() {
+ result[addr] = true
+}
+
+}
+
+return result
+}
+```
diff --git a/sdk/next/build/building-apps/app-go.mdx b/sdk/next/build/building-apps/app-go.mdx
new file mode 100644
index 000000000..811fa2ffe
--- /dev/null
+++ b/sdk/next/build/building-apps/app-go.mdx
@@ -0,0 +1,1203 @@
+---
+title: Overview of app.go
+description: Understanding the structure and purpose of the app.go file in Cosmos SDK applications
+---
+
+## What is app.go?
+
+The `app.go` file is the core of your Cosmos SDK application. It defines your blockchain application by composing together modules, keepers, and handlers that implement your chain's functionality. This file is where you wire together all the components that make your blockchain work.
+
+The `SimApp` implementation serves as a reference example and testing application for the Cosmos SDK, demonstrating how to properly structure and initialize a blockchain application.
+
+## Core Components
+
+### SimApp Struct
+
+The `SimApp` struct is the main application type that extends the ABCI application. It contains all the necessary components:
+
+```go
+type SimApp struct {
+ *baseapp.BaseApp // Core ABCI application
+ legacyAmino *codec.LegacyAmino
+ appCodec codec.Codec
+ txConfig client.TxConfig
+ interfaceRegistry types.InterfaceRegistry
+
+ // Store keys for accessing state
+ keys map[string]*storetypes.KVStoreKey
+ tkeys map[string]*storetypes.TransientStoreKey
+
+ // Module keepers (business logic)
+ AccountKeeper authkeeper.AccountKeeper
+ BankKeeper bankkeeper.Keeper
+ StakingKeeper *stakingkeeper.Keeper
+ // ... other keepers
+
+ // Module manager
+ ModuleManager *module.Manager
+ BasicModuleManager module.BasicManager
+}
+```
+
+### Key Initialization Steps
+
+The `NewSimApp` constructor follows a specific initialization order:
+
+#### 1. Codec Setup
+
+```go
+interfaceRegistry, _ := types.NewInterfaceRegistryWithOptions(...)
+appCodec := codec.NewProtoCodec(interfaceRegistry)
+legacyAmino := codec.NewLegacyAmino()
+txConfig := tx.NewTxConfig(appCodec, tx.DefaultSignModes)
+```
+
+Codecs handle serialization and deserialization of messages and state. The interface registry enables protobuf Any type support.
+
+#### 2. BaseApp Creation
+
+```go
+bApp := baseapp.NewBaseApp(appName, logger, db, txConfig.TxDecoder(), baseAppOptions...)
+```
+
+BaseApp provides the core ABCI interface implementation and transaction processing pipeline.
+
+#### 3. Store Keys Registration
+
+```go
+keys := storetypes.NewKVStoreKeys(
+ authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey,
+ // ... other module store keys
+)
+```
+
+Each module gets its own isolated key-value store for persisting state.
+
+#### 4. Keeper Initialization
+
+Keepers are initialized in dependency order. For example, the BankKeeper depends on AccountKeeper:
+
+```go
+app.AccountKeeper = authkeeper.NewAccountKeeper(...)
+app.BankKeeper = bankkeeper.NewBaseKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[banktypes.StoreKey]),
+ app.AccountKeeper, // dependency
+ BlockedAddresses(),
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ logger,
+)
+```
+
+#### 5. Module Manager Setup
+
+```go
+app.ModuleManager = module.NewManager(
+ genutil.NewAppModule(...),
+ auth.NewAppModule(...),
+ bank.NewAppModule(...),
+ // ... other modules
+)
+```
+
+The Module Manager orchestrates module lifecycle operations.
+
+### Module Account Permissions
+
+Module accounts are special accounts owned by modules rather than users:
+
+```go
+maccPerms = map[string][]string{
+ authtypes.FeeCollectorName: nil, // can receive fees
+ minttypes.ModuleName: {authtypes.Minter}, // can mint tokens
+ stakingtypes.BondedPoolName: {authtypes.Burner, authtypes.Staking},
+ govtypes.ModuleName: {authtypes.Burner},
+}
+```
+
+Permissions control what operations each module account can perform (minting, burning, staking).
+
+### Execution Order
+
+The Module Manager controls the order of operations during blockchain lifecycle events:
+
+#### Begin Block Order
+
+```go
+app.ModuleManager.SetOrderBeginBlockers(
+ upgradetypes.ModuleName, // Check for upgrades first
+ minttypes.ModuleName, // Mint new tokens
+ distrtypes.ModuleName, // Distribute rewards
+ slashingtypes.ModuleName, // Process slashing
+ // ...
+)
+```
+
+This order ensures operations happen in the correct sequence (e.g., slashing happens after reward distribution).
+
+#### End Block Order
+
+```go
+app.ModuleManager.SetOrderEndBlockers(
+ crisistypes.ModuleName, // Check invariants
+ govtypes.ModuleName, // Process governance
+ stakingtypes.ModuleName, // Update validator set
+ // ...
+)
+```
+
+#### Genesis Order
+
+```go
+genesisModuleOrder := []string{
+ authtypes.ModuleName, // Accounts must exist first
+ banktypes.ModuleName, // Then balances
+ stakingtypes.ModuleName, // Then staking
+ // ...
+}
+```
+
+Genesis initialization must happen in dependency order.
+
+### ABCI Lifecycle Methods
+
+The application implements key ABCI methods that CometBFT calls:
+
+#### InitChainer
+
+```go
+func (app *SimApp) InitChainer(ctx sdk.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) {
+ var genesisState GenesisState
+ json.Unmarshal(req.AppStateBytes, &genesisState)
+ return app.ModuleManager.InitGenesis(ctx, app.appCodec, genesisState)
+}
+```
+
+Called once when the chain starts to initialize state from genesis.
+
+#### BeginBlocker & EndBlocker
+
+```go
+func (app *SimApp) BeginBlocker(ctx sdk.Context) (sdk.BeginBlock, error) {
+ return app.ModuleManager.BeginBlock(ctx)
+}
+
+func (app *SimApp) EndBlocker(ctx sdk.Context) (sdk.EndBlock, error) {
+ return app.ModuleManager.EndBlock(ctx)
+}
+```
+
+Called at the beginning and end of each block to perform module-specific logic.
+
+### Ante and Post Handlers
+
+#### AnteHandler
+
+The AnteHandler processes transactions before message execution:
+
+```go
+func (app *SimApp) setAnteHandler(txConfig client.TxConfig) {
+ anteHandler, err := NewAnteHandler(
+ HandlerOptions{
+ ante.HandlerOptions{
+ AccountKeeper: app.AccountKeeper,
+ BankKeeper: app.BankKeeper,
+ SignModeHandler: txConfig.SignModeHandler(),
+ FeegrantKeeper: app.FeeGrantKeeper,
+ SigGasConsumer: ante.DefaultSigVerificationGasConsumer,
+ },
+ &app.CircuitKeeper,
+ },
+ )
+ app.SetAnteHandler(anteHandler)
+}
+```
+
+AnteHandlers typically:
+- Verify signatures
+- Deduct fees
+- Check sequence numbers
+- Validate transaction format
+
+#### PostHandler
+
+PostHandlers run after message execution:
+
+```go
+func (app *SimApp) setPostHandler() {
+ postHandler, err := posthandler.NewPostHandler(
+ posthandler.HandlerOptions{},
+ )
+ app.SetPostHandler(postHandler)
+}
+```
+
+PostHandlers can implement features like transaction tips.
+
+### API and Service Registration
+
+The application exposes gRPC and REST APIs:
+
+```go
+func (app *SimApp) RegisterAPIRoutes(apiSvr *api.Server, apiConfig config.APIConfig) {
+ // Register gRPC gateway routes
+ authtx.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter)
+ cmtservice.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter)
+
+ // Register module routes
+ app.BasicModuleManager.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter)
+}
+```
+
+These routes allow clients to query state and submit transactions via HTTP.
+
+## Building Your Own Application
+
+When creating your own blockchain application, follow the patterns demonstrated in SimApp:
+
+1. **Set up codecs and registries** for serialization and type handling
+2. **Initialize BaseApp** with your application name and configuration
+3. **Register store keys** for each module that needs persistent storage
+4. **Initialize module keepers** in dependency order (e.g., AccountKeeper before BankKeeper)
+5. **Configure the Module Manager** with your selected modules
+6. **Set execution orders** for BeginBlock, EndBlock, and Genesis operations
+7. **Configure module account permissions** based on your requirements
+8. **Set up ante/post handlers** for transaction processing
+9. **Register services and APIs** for client interaction
+
+The key is understanding how components interact and ensuring proper initialization order to avoid dependency issues.
+
+
+## Additional Resources
+
+For hands-on tutorials on running a node, see the [Cosmos SDK Node Tutorial](/sdk/v0.53/tutorials).
+
+## Complete `app.go`
+
+```go expandable
+//go:build app_v1
+
+package simapp
+
+import (
+
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "cosmossdk.io/log"
+ "cosmossdk.io/x/tx/signing"
+
+ autocliv1 "cosmossdk.io/api/cosmos/autocli/v1"
+ reflectionv1 "cosmossdk.io/api/cosmos/reflection/v1"
+ "cosmossdk.io/client/v2/autocli"
+ "cosmossdk.io/core/appmodule"
+ "github.com/cosmos/cosmos-sdk/codec/address"
+
+ authcodec "github.com/cosmos/cosmos-sdk/x/auth/codec"
+ "github.com/cosmos/cosmos-sdk/x/auth/tx"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ dbm "github.com/cosmos/cosmos-db"
+ "github.com/cosmos/gogoproto/proto"
+ "github.com/spf13/cast"
+
+ storetypes "cosmossdk.io/store/types"
+ "cosmossdk.io/x/evidence"
+ evidencekeeper "cosmossdk.io/x/evidence/keeper"
+ evidencetypes "cosmossdk.io/x/evidence/types"
+ "cosmossdk.io/x/feegrant"
+ feegrantkeeper "cosmossdk.io/x/feegrant/keeper"
+ feegrantmodule "cosmossdk.io/x/feegrant/module"
+ "cosmossdk.io/x/nft"
+ nftkeeper "cosmossdk.io/x/nft/keeper"
+ nftmodule "cosmossdk.io/x/nft/module"
+ "cosmossdk.io/x/upgrade"
+ upgradekeeper "cosmossdk.io/x/upgrade/keeper"
+ upgradetypes "cosmossdk.io/x/upgrade/types"
+ "cosmossdk.io/x/circuit"
+ circuitkeeper "cosmossdk.io/x/circuit/keeper"
+ circuittypes "cosmossdk.io/x/circuit/types"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/client/grpc/cmtservice"
+ nodeservice "github.com/cosmos/cosmos-sdk/client/grpc/node"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ runtimeservices "github.com/cosmos/cosmos-sdk/runtime/services"
+ "github.com/cosmos/cosmos-sdk/server"
+ "github.com/cosmos/cosmos-sdk/server/api"
+ "github.com/cosmos/cosmos-sdk/server/config"
+ servertypes "github.com/cosmos/cosmos-sdk/server/types"
+ "github.com/cosmos/cosmos-sdk/std"
+ testdata_pulsar "github.com/cosmos/cosmos-sdk/testutil/testdata/testpb"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ "github.com/cosmos/cosmos-sdk/types/msgservice"
+ "github.com/cosmos/cosmos-sdk/version"
+ "github.com/cosmos/cosmos-sdk/x/auth"
+ "github.com/cosmos/cosmos-sdk/x/auth/ante"
+ authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
+ "github.com/cosmos/cosmos-sdk/x/auth/posthandler"
+ authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation"
+ authtx "github.com/cosmos/cosmos-sdk/x/auth/tx"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ "github.com/cosmos/cosmos-sdk/x/auth/vesting"
+ vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
+ "github.com/cosmos/cosmos-sdk/x/authz"
+ authzkeeper "github.com/cosmos/cosmos-sdk/x/authz/keeper"
+ authzmodule "github.com/cosmos/cosmos-sdk/x/authz/module"
+ "github.com/cosmos/cosmos-sdk/x/bank"
+ bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ consensus "github.com/cosmos/cosmos-sdk/x/consensus"
+ consensusparamkeeper "github.com/cosmos/cosmos-sdk/x/consensus/keeper"
+ consensusparamtypes "github.com/cosmos/cosmos-sdk/x/consensus/types"
+ "github.com/cosmos/cosmos-sdk/x/crisis"
+ crisiskeeper "github.com/cosmos/cosmos-sdk/x/crisis/keeper"
+ crisistypes "github.com/cosmos/cosmos-sdk/x/crisis/types"
+ distr "github.com/cosmos/cosmos-sdk/x/distribution"
+ distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper"
+ distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
+ "github.com/cosmos/cosmos-sdk/x/genutil"
+ genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
+ "github.com/cosmos/cosmos-sdk/x/gov"
+ govclient "github.com/cosmos/cosmos-sdk/x/gov/client"
+ govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ govv1beta1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1"
+ "github.com/cosmos/cosmos-sdk/x/group"
+ groupkeeper "github.com/cosmos/cosmos-sdk/x/group/keeper"
+ groupmodule "github.com/cosmos/cosmos-sdk/x/group/module"
+ "github.com/cosmos/cosmos-sdk/x/mint"
+ mintkeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper"
+ minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
+ "github.com/cosmos/cosmos-sdk/x/params"
+ paramsclient "github.com/cosmos/cosmos-sdk/x/params/client"
+ paramskeeper "github.com/cosmos/cosmos-sdk/x/params/keeper"
+ paramstypes "github.com/cosmos/cosmos-sdk/x/params/types"
+ paramproposal "github.com/cosmos/cosmos-sdk/x/params/types/proposal"
+ "github.com/cosmos/cosmos-sdk/x/slashing"
+ slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper"
+ slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types"
+ "github.com/cosmos/cosmos-sdk/x/staking"
+ stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper"
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+)
+
+const appName = "SimApp"
+
+var (
+ // DefaultNodeHome default home directories for the application daemon
+ DefaultNodeHome string
+
+ // module account permissions
+ maccPerms = map[string][]string{
+ authtypes.FeeCollectorName: nil,
+ distrtypes.ModuleName: nil,
+ minttypes.ModuleName: {
+ authtypes.Minter
+},
+ stakingtypes.BondedPoolName: {
+ authtypes.Burner, authtypes.Staking
+},
+ stakingtypes.NotBondedPoolName: {
+ authtypes.Burner, authtypes.Staking
+},
+ govtypes.ModuleName: {
+ authtypes.Burner
+},
+ nft.ModuleName: nil,
+}
+)
+
+var (
+ _ runtime.AppI = (*SimApp)(nil)
+ _ servertypes.Application = (*SimApp)(nil)
+)
+
+// stdAccAddressCodec is a temporary address codec that we will use until we
+// can populate it with the correct bech32 prefixes without depending on the global.
+type stdAccAddressCodec struct{
+}
+
+func (g stdAccAddressCodec)
+
+StringToBytes(text string) ([]byte, error) {
+ if text == "" {
+ return nil, nil
+}
+
+return sdk.AccAddressFromBech32(text)
+}
+
+func (g stdAccAddressCodec)
+
+BytesToString(bz []byte) (string, error) {
+ if bz == nil {
+ return "", nil
+}
+
+return sdk.AccAddress(bz).String(), nil
+}
+
+// stdValAddressCodec is a temporary address codec that we will use until we
+// can populate it with the correct bech32 prefixes without depending on the global.
+type stdValAddressCodec struct{
+}
+
+func (g stdValAddressCodec)
+
+StringToBytes(text string) ([]byte, error) {
+ return sdk.ValAddressFromBech32(text)
+}
+
+func (g stdValAddressCodec)
+
+BytesToString(bz []byte) (string, error) {
+ return sdk.ValAddress(bz).String(), nil
+}
+
+// SimApp extends an ABCI application, but with most of its parameters exported.
+// They are exported for convenience in creating helper functions, as object
+// capabilities aren't needed for testing.
+type SimApp struct {
+ *baseapp.BaseApp
+ legacyAmino *codec.LegacyAmino
+ appCodec codec.Codec
+ txConfig client.TxConfig
+ interfaceRegistry types.InterfaceRegistry
+
+ // keys to access the substores
+ keys map[string]*storetypes.KVStoreKey
+ tkeys map[string]*storetypes.TransientStoreKey
+
+ // keepers
+ AccountKeeper authkeeper.AccountKeeper
+ BankKeeper bankkeeper.Keeper
+ StakingKeeper *stakingkeeper.Keeper
+ SlashingKeeper slashingkeeper.Keeper
+ MintKeeper mintkeeper.Keeper
+ DistrKeeper distrkeeper.Keeper
+ GovKeeper govkeeper.Keeper
+ CrisisKeeper *crisiskeeper.Keeper
+ UpgradeKeeper *upgradekeeper.Keeper
+ ParamsKeeper paramskeeper.Keeper
+ AuthzKeeper authzkeeper.Keeper
+ EvidenceKeeper evidencekeeper.Keeper
+ FeeGrantKeeper feegrantkeeper.Keeper
+ GroupKeeper groupkeeper.Keeper
+ NFTKeeper nftkeeper.Keeper
+ ConsensusParamsKeeper consensusparamkeeper.Keeper
+ CircuitKeeper circuitkeeper.Keeper
+
+ // the module manager
+ ModuleManager *module.Manager
+ BasicModuleManager module.BasicManager
+
+ // simulation manager
+ sm *module.SimulationManager
+
+ // module configurator
+ configurator module.Configurator
+}
+
+func init() {
+ userHomeDir, err := os.UserHomeDir()
+ if err != nil {
+ panic(err)
+}
+
+DefaultNodeHome = filepath.Join(userHomeDir, ".simapp")
+}
+
+// NewSimApp returns a reference to an initialized SimApp.
+func NewSimApp(
+ logger log.Logger,
+ db dbm.DB,
+ traceStore io.Writer,
+ loadLatest bool,
+ appOpts servertypes.AppOptions,
+ baseAppOptions ...func(*baseapp.BaseApp),
+) *SimApp {
+ interfaceRegistry, _ := types.NewInterfaceRegistryWithOptions(types.InterfaceRegistryOptions{
+ ProtoFiles: proto.HybridResolver,
+ SigningOptions: signing.Options{
+ AddressCodec: address.Bech32Codec{
+ Bech32Prefix: sdk.GetConfig().GetBech32AccountAddrPrefix(),
+},
+ ValidatorAddressCodec: address.Bech32Codec{
+ Bech32Prefix: sdk.GetConfig().GetBech32ValidatorAddrPrefix(),
+},
+},
+})
+ appCodec := codec.NewProtoCodec(interfaceRegistry)
+ legacyAmino := codec.NewLegacyAmino()
+ txConfig := tx.NewTxConfig(appCodec, tx.DefaultSignModes)
+
+std.RegisterLegacyAminoCodec(legacyAmino)
+
+std.RegisterInterfaces(interfaceRegistry)
+
+ // Below we could construct and set an application specific mempool and
+ // ABCI 1.0 PrepareProposal and ProcessProposal handlers. These defaults are
+ // already set in the SDK's BaseApp, this shows an example of how to override
+ // them.
+ //
+ // Example:
+ //
+ // bApp := baseapp.NewBaseApp(...)
+ // nonceMempool := mempool.NewSenderNonceMempool()
+ // abciPropHandler := NewDefaultProposalHandler(nonceMempool, bApp)
+ //
+ // bApp.SetMempool(nonceMempool)
+ // bApp.SetPrepareProposal(abciPropHandler.PrepareProposalHandler())
+ // bApp.SetProcessProposal(abciPropHandler.ProcessProposalHandler())
+ //
+ // Alternatively, you can construct BaseApp options, append those to
+ // baseAppOptions and pass them to NewBaseApp.
+ //
+ // Example:
+ //
+ // prepareOpt = func(app *baseapp.BaseApp) {
+ // abciPropHandler := baseapp.NewDefaultProposalHandler(nonceMempool, app)
+ // app.SetPrepareProposal(abciPropHandler.PrepareProposalHandler())
+ //
+}
+ // baseAppOptions = append(baseAppOptions, prepareOpt)
+ bApp := baseapp.NewBaseApp(appName, logger, db, txConfig.TxDecoder(), baseAppOptions...)
+
+bApp.SetCommitMultiStoreTracer(traceStore)
+
+bApp.SetVersion(version.Version)
+
+bApp.SetInterfaceRegistry(interfaceRegistry)
+
+bApp.SetTxEncoder(txConfig.TxEncoder())
+ keys := storetypes.NewKVStoreKeys(
+ authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey, crisistypes.StoreKey,
+ minttypes.StoreKey, distrtypes.StoreKey, slashingtypes.StoreKey,
+ govtypes.StoreKey, paramstypes.StoreKey, consensusparamtypes.StoreKey, upgradetypes.StoreKey, feegrant.StoreKey,
+ evidencetypes.StoreKey, circuittypes.StoreKey,
+ authzkeeper.StoreKey, nftkeeper.StoreKey, group.StoreKey,
+ )
+
+ // register streaming services
+ if err := bApp.RegisterStreamingServices(appOpts, keys); err != nil {
+ panic(err)
+}
+ tkeys := storetypes.NewTransientStoreKeys(paramstypes.TStoreKey)
+ app := &SimApp{
+ BaseApp: bApp,
+ legacyAmino: legacyAmino,
+ appCodec: appCodec,
+ txConfig: txConfig,
+ interfaceRegistry: interfaceRegistry,
+ keys: keys,
+ tkeys: tkeys,
+}
+
+app.ParamsKeeper = initParamsKeeper(appCodec, legacyAmino, keys[paramstypes.StoreKey], tkeys[paramstypes.TStoreKey])
+
+ // set the BaseApp's parameter store
+ app.ConsensusParamsKeeper = consensusparamkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[consensusparamtypes.StoreKey]), authtypes.NewModuleAddress(govtypes.ModuleName).String(), runtime.EventService{
+})
+
+bApp.SetParamStore(app.ConsensusParamsKeeper.ParamsStore)
+
+ // add keepers
+ app.AccountKeeper = authkeeper.NewAccountKeeper(appCodec, runtime.NewKVStoreService(keys[authtypes.StoreKey]), authtypes.ProtoBaseAccount, maccPerms, sdk.Bech32MainPrefix, authtypes.NewModuleAddress(govtypes.ModuleName).String())
+
+app.BankKeeper = bankkeeper.NewBaseKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[banktypes.StoreKey]),
+ app.AccountKeeper,
+ BlockedAddresses(),
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ logger,
+ )
+
+app.StakingKeeper = stakingkeeper.NewKeeper(
+ appCodec, keys[stakingtypes.StoreKey], app.AccountKeeper, app.BankKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ )
+
+app.MintKeeper = mintkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[minttypes.StoreKey]), app.StakingKeeper, app.AccountKeeper, app.BankKeeper, authtypes.FeeCollectorName, authtypes.NewModuleAddress(govtypes.ModuleName).String())
+
+app.DistrKeeper = distrkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[distrtypes.StoreKey]), app.AccountKeeper, app.BankKeeper, app.StakingKeeper, authtypes.FeeCollectorName, authtypes.NewModuleAddress(govtypes.ModuleName).String())
+
+app.SlashingKeeper = slashingkeeper.NewKeeper(
+ appCodec, legacyAmino, runtime.NewKVStoreService(keys[slashingtypes.StoreKey]), app.StakingKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ )
+ invCheckPeriod := cast.ToUint(appOpts.Get(server.FlagInvCheckPeriod))
+
+app.CrisisKeeper = crisiskeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[crisistypes.StoreKey]), invCheckPeriod,
+ app.BankKeeper, authtypes.FeeCollectorName, authtypes.NewModuleAddress(govtypes.ModuleName).String(), app.AccountKeeper.AddressCodec())
+
+app.FeeGrantKeeper = feegrantkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[feegrant.StoreKey]), app.AccountKeeper)
+
+ // register the staking hooks
+ // NOTE: stakingKeeper above is passed by reference, so that it will contain these hooks
+ app.StakingKeeper.SetHooks(
+ stakingtypes.NewMultiStakingHooks(app.DistrKeeper.Hooks(), app.SlashingKeeper.Hooks()),
+ )
+
+app.CircuitKeeper = circuitkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[circuittypes.StoreKey]), authtypes.NewModuleAddress(govtypes.ModuleName).String(), app.AccountKeeper.AddressCodec())
+
+app.BaseApp.SetCircuitBreaker(&app.CircuitKeeper)
+
+app.AuthzKeeper = authzkeeper.NewKeeper(runtime.NewKVStoreService(keys[authzkeeper.StoreKey]), appCodec, app.MsgServiceRouter(), app.AccountKeeper)
+ groupConfig := group.DefaultConfig()
+ /*
+ Example of setting group params:
+ groupConfig.MaxMetadataLen = 1000
+ */
+ app.GroupKeeper = groupkeeper.NewKeeper(keys[group.StoreKey], appCodec, app.MsgServiceRouter(), app.AccountKeeper, groupConfig)
+
+ // get skipUpgradeHeights from the app options
+ skipUpgradeHeights := map[int64]bool{
+}
+ for _, h := range cast.ToIntSlice(appOpts.Get(server.FlagUnsafeSkipUpgrades)) {
+ skipUpgradeHeights[int64(h)] = true
+}
+ homePath := cast.ToString(appOpts.Get(flags.FlagHome))
+ // set the governance module account as the authority for conducting upgrades
+ app.UpgradeKeeper = upgradekeeper.NewKeeper(skipUpgradeHeights, runtime.NewKVStoreService(keys[upgradetypes.StoreKey]), appCodec, homePath, app.BaseApp, authtypes.NewModuleAddress(govtypes.ModuleName).String())
+
+ // Register the proposal types
+ // Deprecated: Avoid adding new handlers, instead use the new proposal flow
+ // by granting the governance module the right to execute the message.
+ // See: /sdk/v0.53/build/modules/gov#proposal-messages
+ govRouter := govv1beta1.NewRouter()
+
+govRouter.AddRoute(govtypes.RouterKey, govv1beta1.ProposalHandler).
+ AddRoute(paramproposal.RouterKey, params.NewParamChangeProposalHandler(app.ParamsKeeper)).
+ AddRoute(upgradetypes.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(app.UpgradeKeeper))
+ govConfig := govtypes.DefaultConfig()
+ /*
+ Example of setting gov params:
+ govConfig.MaxMetadataLen = 10000
+ */
+ govKeeper := govkeeper.NewKeeper(
+ appCodec, runtime.NewKVStoreService(keys[govtypes.StoreKey]), app.AccountKeeper, app.BankKeeper,
+ app.StakingKeeper, app.DistrKeeper, app.MsgServiceRouter(), govConfig, authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ )
+
+ // Set legacy router for backwards compatibility with gov v1beta1
+ govKeeper.SetLegacyRouter(govRouter)
+
+app.GovKeeper = *govKeeper.SetHooks(
+ govtypes.NewMultiGovHooks(
+ // register the governance hooks
+ ),
+ )
+
+app.NFTKeeper = nftkeeper.NewKeeper(runtime.NewKVStoreService(keys[nftkeeper.StoreKey]), appCodec, app.AccountKeeper, app.BankKeeper)
+
+ // create evidence keeper with router
+ evidenceKeeper := evidencekeeper.NewKeeper(
+ appCodec, runtime.NewKVStoreService(keys[evidencetypes.StoreKey]), app.StakingKeeper, app.SlashingKeeper, app.AccountKeeper.AddressCodec(), runtime.ProvideCometInfoService(),
+ )
+ // If evidence needs to be handled for the app, set routes in router here and seal
+ app.EvidenceKeeper = *evidenceKeeper
+
+ /**** Module Options ****/
+
+ // NOTE: we may consider parsing `appOpts` inside module constructors. For the moment
+ // we prefer to be more strict in what arguments the modules expect.
+ skipGenesisInvariants := cast.ToBool(appOpts.Get(crisis.FlagSkipGenesisInvariants))
+
+ // NOTE: Any module instantiated in the module manager that is later modified
+ // must be passed by reference here.
+ app.ModuleManager = module.NewManager(
+ genutil.NewAppModule(
+ app.AccountKeeper, app.StakingKeeper, app,
+ txConfig,
+ ),
+ auth.NewAppModule(appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, app.GetSubspace(authtypes.ModuleName)),
+ vesting.NewAppModule(app.AccountKeeper, app.BankKeeper),
+ bank.NewAppModule(appCodec, app.BankKeeper, app.AccountKeeper, app.GetSubspace(banktypes.ModuleName)),
+ crisis.NewAppModule(app.CrisisKeeper, skipGenesisInvariants, app.GetSubspace(crisistypes.ModuleName)),
+ feegrantmodule.NewAppModule(appCodec, app.AccountKeeper, app.BankKeeper, app.FeeGrantKeeper, app.interfaceRegistry),
+ gov.NewAppModule(appCodec, &app.GovKeeper, app.AccountKeeper, app.BankKeeper, app.GetSubspace(govtypes.ModuleName)),
+ mint.NewAppModule(appCodec, app.MintKeeper, app.AccountKeeper, nil, app.GetSubspace(minttypes.ModuleName)),
+ slashing.NewAppModule(appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, app.GetSubspace(slashingtypes.ModuleName), app.interfaceRegistry),
+ distr.NewAppModule(appCodec, app.DistrKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, app.GetSubspace(distrtypes.ModuleName)),
+ staking.NewAppModule(appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper, app.GetSubspace(stakingtypes.ModuleName)),
+ upgrade.NewAppModule(app.UpgradeKeeper, app.AccountKeeper.AddressCodec()),
+ evidence.NewAppModule(app.EvidenceKeeper),
+ params.NewAppModule(app.ParamsKeeper),
+ authzmodule.NewAppModule(appCodec, app.AuthzKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry),
+ groupmodule.NewAppModule(appCodec, app.GroupKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry),
+ nftmodule.NewAppModule(appCodec, app.NFTKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry),
+ consensus.NewAppModule(appCodec, app.ConsensusParamsKeeper),
+ circuit.NewAppModule(appCodec, app.CircuitKeeper),
+ )
+
+ // BasicModuleManager defines the module BasicManager is in charge of setting up basic,
+ // non-dependant module elements, such as codec registration and genesis verification.
+ // By default it is composed of all the module from the module manager.
+ // Additionally, app module basics can be overwritten by passing them as argument.
+ app.BasicModuleManager = module.NewBasicManagerFromManager(
+ app.ModuleManager,
+ map[string]module.AppModuleBasic{
+ genutiltypes.ModuleName: genutil.NewAppModuleBasic(genutiltypes.DefaultMessageValidator),
+ govtypes.ModuleName: gov.NewAppModuleBasic(
+ []govclient.ProposalHandler{
+ paramsclient.ProposalHandler,
+},
+ ),
+})
+
+app.BasicModuleManager.RegisterLegacyAminoCodec(legacyAmino)
+
+app.BasicModuleManager.RegisterInterfaces(interfaceRegistry)
+
+ // During begin block slashing happens after distr.BeginBlocker so that
+ // there is nothing left over in the validator fee pool, so as to keep the
+ // CanWithdrawInvariant invariant.
+ // NOTE: staking module is required if HistoricalEntries param > 0
+ app.ModuleManager.SetOrderBeginBlockers(
+ upgradetypes.ModuleName,
+ minttypes.ModuleName,
+ distrtypes.ModuleName,
+ slashingtypes.ModuleName,
+ evidencetypes.ModuleName,
+ stakingtypes.ModuleName,
+ genutiltypes.ModuleName,
+ authz.ModuleName,
+ )
+
+app.ModuleManager.SetOrderEndBlockers(
+ crisistypes.ModuleName,
+ govtypes.ModuleName,
+ stakingtypes.ModuleName,
+ genutiltypes.ModuleName,
+ feegrant.ModuleName,
+ group.ModuleName,
+ )
+
+ // NOTE: The genutils module must occur after staking so that pools are
+ // properly initialized with tokens from genesis accounts.
+ // NOTE: The genutils module must also occur after auth so that it can access the params from auth.
+ genesisModuleOrder := []string{
+ authtypes.ModuleName, banktypes.ModuleName,
+ distrtypes.ModuleName, stakingtypes.ModuleName, slashingtypes.ModuleName, govtypes.ModuleName,
+ minttypes.ModuleName, crisistypes.ModuleName, genutiltypes.ModuleName, evidencetypes.ModuleName, authz.ModuleName,
+ feegrant.ModuleName, nft.ModuleName, group.ModuleName, paramstypes.ModuleName, upgradetypes.ModuleName,
+ vestingtypes.ModuleName, consensusparamtypes.ModuleName, circuittypes.ModuleName,
+}
+
+app.ModuleManager.SetOrderInitGenesis(genesisModuleOrder...)
+
+app.ModuleManager.SetOrderExportGenesis(genesisModuleOrder...)
+
+ // Uncomment if you want to set a custom migration order here.
+ // app.ModuleManager.SetOrderMigrations(custom order)
+
+app.ModuleManager.RegisterInvariants(app.CrisisKeeper)
+
+app.configurator = module.NewConfigurator(app.appCodec, app.MsgServiceRouter(), app.GRPCQueryRouter())
+ err := app.ModuleManager.RegisterServices(app.configurator)
+ if err != nil {
+ panic(err)
+}
+
+ // RegisterUpgradeHandlers is used for registering any on-chain upgrades.
+ // Make sure it's called after `app.ModuleManager` and `app.configurator` are set.
+ app.RegisterUpgradeHandlers()
+
+autocliv1.RegisterQueryServer(app.GRPCQueryRouter(), runtimeservices.NewAutoCLIQueryService(app.ModuleManager.Modules))
+
+reflectionSvc, err := runtimeservices.NewReflectionService()
+ if err != nil {
+ panic(err)
+}
+
+reflectionv1.RegisterReflectionServiceServer(app.GRPCQueryRouter(), reflectionSvc)
+
+ // add test gRPC service for testing gRPC queries in isolation
+ testdata_pulsar.RegisterQueryServer(app.GRPCQueryRouter(), testdata_pulsar.QueryImpl{
+})
+
+ // create the simulation manager and define the order of the modules for deterministic simulations
+ //
+ // NOTE: this is not required for apps that don't use the simulator for fuzz testing
+ // transactions
+ overrideModules := map[string]module.AppModuleSimulation{
+ authtypes.ModuleName: auth.NewAppModule(app.appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, app.GetSubspace(authtypes.ModuleName)),
+}
+
+app.sm = module.NewSimulationManagerFromAppModules(app.ModuleManager.Modules, overrideModules)
+
+app.sm.RegisterStoreDecoders()
+
+ // initialize stores
+ app.MountKVStores(keys)
+
+app.MountTransientStores(tkeys)
+
+ // initialize BaseApp
+ app.SetInitChainer(app.InitChainer)
+
+app.SetBeginBlocker(app.BeginBlocker)
+
+app.SetEndBlocker(app.EndBlocker)
+
+app.setAnteHandler(txConfig)
+
+ // In v0.46, the SDK introduces _postHandlers_. PostHandlers are like
+ // antehandlers, but are run _after_ the `runMsgs` execution. They are also
+ // defined as a chain, and have the same signature as antehandlers.
+ //
+ // In baseapp, postHandlers are run in the same store branch as `runMsgs`,
+ // meaning that both `runMsgs` and `postHandler` state will be committed if
+ // both are successful, and both will be reverted if any of the two fails.
+ //
+ // The SDK exposes a default postHandlers chain, which comprises of only
+ // one decorator: the Transaction Tips decorator. However, some chains do
+ // not need it by default, so feel free to comment the next line if you do
+ // not need tips.
+ // To read more about tips:
+ // /sdk/v0.53/build/modules/auth/auth
+ //
+ // Please note that changing any of the anteHandler or postHandler chain is
+ // likely to be a state-machine breaking change, which needs a coordinated
+ // upgrade.
+ app.setPostHandler()
+
+ // At startup, after all modules have been registered, check that all proto
+ // annotations are correct.
+ protoFiles, err := proto.MergedRegistry()
+ if err != nil {
+ panic(err)
+}
+
+err = msgservice.ValidateProtoAnnotations(protoFiles)
+ if err != nil {
+ // Once we switch to using protoreflect-based antehandlers, we might
+ // want to panic here instead of logging a warning.
+ fmt.Fprintln(os.Stderr, err.Error())
+}
+ if loadLatest {
+ if err := app.LoadLatestVersion(); err != nil {
+ panic(fmt.Errorf("error loading last version: %w", err))
+}
+
+}
+
+return app
+}
+
+func (app *SimApp)
+
+setAnteHandler(txConfig client.TxConfig) {
+ anteHandler, err := NewAnteHandler(
+ HandlerOptions{
+ ante.HandlerOptions{
+ AccountKeeper: app.AccountKeeper,
+ BankKeeper: app.BankKeeper,
+ SignModeHandler: txConfig.SignModeHandler(),
+ FeegrantKeeper: app.FeeGrantKeeper,
+ SigGasConsumer: ante.DefaultSigVerificationGasConsumer,
+},
+ &app.CircuitKeeper,
+},
+ )
+ if err != nil {
+ panic(err)
+}
+
+ // Set the AnteHandler for the app
+ app.SetAnteHandler(anteHandler)
+}
+
+func (app *SimApp)
+
+setPostHandler() {
+ postHandler, err := posthandler.NewPostHandler(
+ posthandler.HandlerOptions{
+},
+ )
+ if err != nil {
+ panic(err)
+}
+
+app.SetPostHandler(postHandler)
+}
+
+// Name returns the name of the App
+func (app *SimApp)
+
+Name()
+
+string {
+ return app.BaseApp.Name()
+}
+
+// BeginBlocker application updates every begin block
+func (app *SimApp)
+
+BeginBlocker(ctx sdk.Context) (sdk.BeginBlock, error) {
+ return app.ModuleManager.BeginBlock(ctx)
+}
+
+// EndBlocker application updates every end block
+func (app *SimApp)
+
+EndBlocker(ctx sdk.Context) (sdk.EndBlock, error) {
+ return app.ModuleManager.EndBlock(ctx)
+}
+
+func (a *SimApp)
+
+Configurator()
+
+module.Configurator {
+ return a.configurator
+}
+
+// InitChainer application update at chain initialization
+func (app *SimApp)
+
+InitChainer(ctx sdk.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) {
+ var genesisState GenesisState
+ if err := json.Unmarshal(req.AppStateBytes, &genesisState); err != nil {
+ panic(err)
+}
+
+app.UpgradeKeeper.SetModuleVersionMap(ctx, app.ModuleManager.GetVersionMap())
+
+return app.ModuleManager.InitGenesis(ctx, app.appCodec, genesisState)
+}
+
+// LoadHeight loads a particular height
+func (app *SimApp)
+
+LoadHeight(height int64)
+
+error {
+ return app.LoadVersion(height)
+}
+
+// LegacyAmino returns SimApp's amino codec.
+//
+// NOTE: This is solely to be used for testing purposes as it may be desirable
+// for modules to register their own custom testing types.
+func (app *SimApp)
+
+LegacyAmino() *codec.LegacyAmino {
+ return app.legacyAmino
+}
+
+// AppCodec returns SimApp's app codec.
+//
+// NOTE: This is solely to be used for testing purposes as it may be desirable
+// for modules to register their own custom testing types.
+func (app *SimApp)
+
+AppCodec()
+
+codec.Codec {
+ return app.appCodec
+}
+
+// InterfaceRegistry returns SimApp's InterfaceRegistry
+func (app *SimApp)
+
+InterfaceRegistry()
+
+types.InterfaceRegistry {
+ return app.interfaceRegistry
+}
+
+// TxConfig returns SimApp's TxConfig
+func (app *SimApp)
+
+TxConfig()
+
+client.TxConfig {
+ return app.txConfig
+}
+
+// AutoCliOpts returns the autocli options for the app.
+func (app *SimApp)
+
+AutoCliOpts()
+
+autocli.AppOptions {
+ modules := make(map[string]appmodule.AppModule, 0)
+ for _, m := range app.ModuleManager.Modules {
+ if moduleWithName, ok := m.(module.HasName); ok {
+ moduleName := moduleWithName.Name()
+ if appModule, ok := moduleWithName.(appmodule.AppModule); ok {
+ modules[moduleName] = appModule
+}
+
+}
+
+}
+
+return autocli.AppOptions{
+ Modules: modules,
+ AddressCodec: authcodec.NewBech32Codec(sdk.GetConfig().GetBech32AccountAddrPrefix()),
+}
+}
+
+// DefaultGenesis returns a default genesis from the registered AppModuleBasic's.
+func (a *SimApp)
+
+DefaultGenesis()
+
+map[string]json.RawMessage {
+ return a.BasicModuleManager.DefaultGenesis(a.appCodec)
+}
+
+// GetKey returns the KVStoreKey for the provided store key.
+//
+// NOTE: This is solely to be used for testing purposes.
+func (app *SimApp)
+
+GetKey(storeKey string) *storetypes.KVStoreKey {
+ return app.keys[storeKey]
+}
+
+// GetStoreKeys returns all the stored store keys.
+func (app *SimApp)
+
+GetStoreKeys() []storetypes.StoreKey {
+ keys := make([]storetypes.StoreKey, len(app.keys))
+ for _, key := range app.keys {
+ keys = append(keys, key)
+}
+
+return keys
+}
+
+// GetSubspace returns a param subspace for a given module name.
+//
+// NOTE: This is solely to be used for testing purposes.
+func (app *SimApp)
+
+GetSubspace(moduleName string)
+
+paramstypes.Subspace {
+ subspace, _ := app.ParamsKeeper.GetSubspace(moduleName)
+
+return subspace
+}
+
+// SimulationManager implements the SimulationApp interface
+func (app *SimApp)
+
+SimulationManager() *module.SimulationManager {
+ return app.sm
+}
+
+// RegisterAPIRoutes registers all application module routes with the provided
+// API server.
+func (app *SimApp)
+
+RegisterAPIRoutes(apiSvr *api.Server, apiConfig config.APIConfig) {
+ clientCtx := apiSvr.ClientCtx
+ // Register new tx routes from grpc-gateway.
+ authtx.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter)
+
+ // Register new CometBFT queries routes from grpc-gateway.
+ cmtservice.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter)
+
+ // Register node gRPC service for grpc-gateway.
+ nodeservice.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter)
+
+ // Register grpc-gateway routes for all modules.
+ app.BasicModuleManager.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter)
+
+ // register swagger API from root so that other applications can override easily
+ if err := server.RegisterSwaggerAPI(apiSvr.ClientCtx, apiSvr.Router, apiConfig.Swagger); err != nil {
+ panic(err)
+}
+}
+
+// RegisterTxService implements the Application.RegisterTxService method.
+func (app *SimApp)
+
+RegisterTxService(clientCtx client.Context) {
+ authtx.RegisterTxService(app.BaseApp.GRPCQueryRouter(), clientCtx, app.BaseApp.Simulate, app.interfaceRegistry)
+}
+
+// RegisterTendermintService implements the Application.RegisterTendermintService method.
+func (app *SimApp)
+
+RegisterTendermintService(clientCtx client.Context) {
+ cmtApp := server.NewCometABCIWrapper(app)
+
+cmtservice.RegisterTendermintService(
+ clientCtx,
+ app.BaseApp.GRPCQueryRouter(),
+ app.interfaceRegistry,
+ cmtApp.Query,
+ )
+}
+
+func (app *SimApp)
+
+RegisterNodeService(clientCtx client.Context, cfg config.Config) {
+ nodeservice.RegisterNodeService(clientCtx, app.GRPCQueryRouter(), cfg)
+}
+
+// GetMaccPerms returns a copy of the module account permissions
+//
+// NOTE: This is solely to be used for testing purposes.
+func GetMaccPerms()
+
+map[string][]string {
+ dupMaccPerms := make(map[string][]string)
+ for k, v := range maccPerms {
+ dupMaccPerms[k] = v
+}
+
+return dupMaccPerms
+}
+
+// BlockedAddresses returns all the app's blocked account addresses.
+func BlockedAddresses()
+
+map[string]bool {
+ modAccAddrs := make(map[string]bool)
+ for acc := range GetMaccPerms() {
+ modAccAddrs[authtypes.NewModuleAddress(acc).String()] = true
+}
+
+ // allow the following addresses to receive funds
+ delete(modAccAddrs, authtypes.NewModuleAddress(govtypes.ModuleName).String())
+
+return modAccAddrs
+}
+
+// initParamsKeeper init params keeper and its subspaces
+func initParamsKeeper(appCodec codec.BinaryCodec, legacyAmino *codec.LegacyAmino, key, tkey storetypes.StoreKey)
+
+paramskeeper.Keeper {
+ paramsKeeper := paramskeeper.NewKeeper(appCodec, legacyAmino, key, tkey)
+
+paramsKeeper.Subspace(authtypes.ModuleName)
+
+paramsKeeper.Subspace(banktypes.ModuleName)
+
+paramsKeeper.Subspace(stakingtypes.ModuleName)
+
+paramsKeeper.Subspace(minttypes.ModuleName)
+
+paramsKeeper.Subspace(distrtypes.ModuleName)
+
+paramsKeeper.Subspace(slashingtypes.ModuleName)
+
+paramsKeeper.Subspace(govtypes.ModuleName)
+
+paramsKeeper.Subspace(crisistypes.ModuleName)
+
+return paramsKeeper
+}
+```
diff --git a/sdk/next/build/building-apps/app-mempool.mdx b/sdk/next/build/building-apps/app-mempool.mdx
new file mode 100644
index 000000000..a3d1bdebb
--- /dev/null
+++ b/sdk/next/build/building-apps/app-mempool.mdx
@@ -0,0 +1,95 @@
+---
+title: Application Mempool
+---
+
+
+**Synopsis**
+This section describes how the app-side mempool can be used and replaced.
+
+
+Since `v0.47` the application has its own mempool to allow much more granular
+block building than previous versions. This change was enabled by
+[ABCI 1.0](https://github.com/cometbft/cometbft/blob/v0.37.0/spec/abci).
+Notably it introduces the `PrepareProposal` and `ProcessProposal` steps of ABCI++.
+
+
+**Prerequisite Readings**
+
+* [BaseApp](/sdk/v0.53/learn/advanced/baseapp)
+* [ABCI](/sdk/v0.53/build/abci/introduction)
+
+
+
+## Mempool
+
+There are countless designs that an application developer can write for a mempool, the SDK opted to provide only simple mempool implementations.
+Namely, the SDK provides the following mempools:
+
+* [No-op Mempool](#no-op-mempool)
+* [Sender Nonce Mempool](#sender-nonce-mempool)
+* [Priority Nonce Mempool](#priority-nonce-mempool)
+
+By default, the SDK uses the [No-op Mempool](#no-op-mempool), but it can be replaced by the application developer in [`app.go`](/sdk/v0.53/build/building-apps/app-go-di):
+
+```go
+nonceMempool := mempool.NewSenderNonceMempool()
+ mempoolOpt := baseapp.SetMempool(nonceMempool)
+
+baseAppOptions = append(baseAppOptions, mempoolOpt)
+```
+
+### No-op Mempool
+
+A no-op mempool is a mempool where transactions are completely discarded and ignored when BaseApp interacts with the mempool.
+When this mempool is used, it is assumed that an application will rely on CometBFT's transaction ordering defined in `RequestPrepareProposal`,
+which is FIFO-ordered by default.
+
+> Note: If a NoOp mempool is used, PrepareProposal and ProcessProposal both should be aware of this as
+> PrepareProposal could include transactions that could fail verification in ProcessProposal.
+
+### Sender Nonce Mempool
+
+The sender nonce mempool is a mempool that keeps transactions from each sender sorted by nonce in order to avoid nonce-ordering issues.
+It works by storing the transaction in a list sorted by the transaction nonce. When the proposer asks for transactions to be included in a block it randomly selects a sender and gets the first transaction in the list. It repeats this until the mempool is empty or the block is full.
+
+It is configurable with the following parameters:
+
+#### MaxTxs
+
+It is an integer value that sets the mempool in one of three modes, *bounded*, *unbounded*, or *disabled*.
+
+* **negative**: Disabled, the mempool does not insert new transactions and returns early.
+* **zero**: Unbounded, the mempool has no transaction limit and will never fail with `ErrMempoolTxMaxCapacity`.
+* **positive**: Bounded, it fails with `ErrMempoolTxMaxCapacity` when `CountTx()` reaches the `maxTx` value.
+
+#### Seed
+
+Set the seed for the random number generator used to select transactions from the mempool.
+
+### Priority Nonce Mempool
+
+The [priority nonce mempool](https://github.com/cosmos/cosmos-sdk/blob/main/types/mempool/priority_nonce_spec.md) is a mempool implementation that stores txs in a partially ordered set by 2 dimensions:
+
+* priority
+* sender-nonce (sequence number)
+
+Internally it uses one priority ordered [skip list](https://pkg.go.dev/github.com/huandu/skiplist) and one skip list per sender ordered by sender-nonce (sequence number). When there are multiple txs from the same sender, they are not always comparable by priority to other sender txs and must be partially ordered by both sender-nonce and priority.
+
+It is configurable with the following parameters:
+
+#### MaxTxs
+
+It is an integer value that sets the mempool in one of three modes, *bounded*, *unbounded*, or *disabled*.
+
+* **negative**: Disabled, the mempool does not insert new transactions and returns early.
+* **zero**: Unbounded, the mempool has no transaction limit and will never fail with `ErrMempoolTxMaxCapacity`.
+* **positive**: Bounded, it fails with `ErrMempoolTxMaxCapacity` when `CountTx()` reaches the `maxTx` value.
+
+#### Callback
+
+The priority nonce mempool provides options that allow the application to set callbacks.
+
+* **OnRead**: Sets a callback to be called when a transaction is read from the mempool.
+* **TxReplacement**: Sets a callback to be called when a duplicate transaction nonce is detected during mempool insertion. The application can define a transaction replacement rule based on tx priority or certain transaction fields.
+
+More information on the SDK mempool implementation can be found in the [godocs](https://pkg.go.dev/github.com/cosmos/cosmos-sdk/types/mempool).
diff --git a/sdk/next/build/building-apps/app-testnet.mdx b/sdk/next/build/building-apps/app-testnet.mdx
new file mode 100644
index 000000000..df6ed761c
--- /dev/null
+++ b/sdk/next/build/building-apps/app-testnet.mdx
@@ -0,0 +1,257 @@
+---
+title: Application Testnets
+description: >-
+ Building an application is complicated and requires a lot of testing. The
+ Cosmos SDK provides a way to test your application in a real-world
+ environment: a testnet.
+---
+
+Building an application is complicated and requires a lot of testing. The Cosmos SDK provides a way to test your application in a real-world environment: a testnet.
+
+We allow developers to take the state from their mainnet and run tests against the state. This is useful for testing upgrade migrations, or for testing the application in a real-world environment.
+
+## Testnet Setup
+
+We will be breaking down the steps to create a testnet from mainnet state.
+
+```go
+// InitSimAppForTestnet is broken down into two sections:
+ // Required Changes: Changes that, if not made, will cause the testnet to halt or panic
+ // Optional Changes: Changes to customize the testnet to one's liking (lower vote times, fund accounts, etc)
+
+func InitSimAppForTestnet(app *SimApp, newValAddr bytes.HexBytes, newValPubKey crypto.PubKey, newOperatorAddress, upgradeToTrigger string) *SimApp {
+ ...
+}
+```
+
+### Required Changes
+
+#### Staking
+
+When creating a testnet the important part is migrating the validator set from many validators to one or a few. This allows developers to spin up the chain without needing to replace validator keys.
+
+```go expandable
+ctx := app.BaseApp.NewUncachedContext(true, tmproto.Header{
+})
+ pubkey := &ed25519.PubKey{
+ Key: newValPubKey.Bytes()
+}
+
+pubkeyAny, err := types.NewAnyWithValue(pubkey)
+ if err != nil {
+ tmos.Exit(err.Error())
+}
+
+ // STAKING
+ //
+
+ // Create Validator struct for our new validator.
+ _, bz, err := bech32.DecodeAndConvert(newOperatorAddress)
+ if err != nil {
+ tmos.Exit(err.Error())
+}
+
+bech32Addr, err := bech32.ConvertAndEncode("simvaloper", bz)
+ if err != nil {
+ tmos.Exit(err.Error())
+}
+ newVal := stakingtypes.Validator{
+ OperatorAddress: bech32Addr,
+ ConsensusPubkey: pubkeyAny,
+ Jailed: false,
+ Status: stakingtypes.Bonded,
+ Tokens: sdk.NewInt(900000000000000),
+ DelegatorShares: sdk.MustNewDecFromStr("10000000"),
+ Description: stakingtypes.Description{
+ Moniker: "Testnet Validator",
+},
+ Commission: stakingtypes.Commission{
+ CommissionRates: stakingtypes.CommissionRates{
+ Rate: sdk.MustNewDecFromStr("0.05"),
+ MaxRate: sdk.MustNewDecFromStr("0.1"),
+ MaxChangeRate: sdk.MustNewDecFromStr("0.05"),
+},
+},
+ MinSelfDelegation: sdk.OneInt(),
+}
+
+ // Remove all validators from power store
+ stakingKey := app.GetKey(stakingtypes.ModuleName)
+ stakingStore := ctx.KVStore(stakingKey)
+ iterator := app.StakingKeeper.ValidatorsPowerStoreIterator(ctx)
+ for ; iterator.Valid(); iterator.Next() {
+ stakingStore.Delete(iterator.Key())
+}
+
+iterator.Close()
+
+ // Remove all validators from last validators store
+ iterator = app.StakingKeeper.LastValidatorsIterator(ctx)
+ for ; iterator.Valid(); iterator.Next() {
+ app.StakingKeeper.LastValidatorPower.Delete(iterator.Key())
+}
+
+iterator.Close()
+
+ // Add our validator to power and last validators store
+ app.StakingKeeper.SetValidator(ctx, newVal)
+
+err = app.StakingKeeper.SetValidatorByConsAddr(ctx, newVal)
+ if err != nil {
+ panic(err)
+}
+
+app.StakingKeeper.SetValidatorByPowerIndex(ctx, newVal)
+
+app.StakingKeeper.SetLastValidatorPower(ctx, newVal.GetOperator(), 0)
+ if err := app.StakingKeeper.Hooks().AfterValidatorCreated(ctx, newVal.GetOperator()); err != nil {
+ panic(err)
+}
+```
+
+#### Distribution
+
+Since the validator set has changed, we need to update the distribution records for the new validator.
+
+```go
+// Initialize records for this validator across all distribution stores
+ app.DistrKeeper.ValidatorHistoricalRewards.Set(ctx, newVal.GetOperator(), 0, distrtypes.NewValidatorHistoricalRewards(sdk.DecCoins{
+}, 1))
+
+app.DistrKeeper.ValidatorCurrentRewards.Set(ctx, newVal.GetOperator(), distrtypes.NewValidatorCurrentRewards(sdk.DecCoins{
+}, 1))
+
+app.DistrKeeper.ValidatorAccumulatedCommission.Set(ctx, newVal.GetOperator(), distrtypes.InitialValidatorAccumulatedCommission())
+
+app.DistrKeeper.ValidatorOutstandingRewards.Set(ctx, newVal.GetOperator(), distrtypes.ValidatorOutstandingRewards{
+ Rewards: sdk.DecCoins{
+}})
+```
+
+#### Slashing
+
+We also need to set the validator signing info for the new validator.
+
+```go expandable
+// SLASHING
+ //
+
+ // Set validator signing info for our new validator.
+ newConsAddr := sdk.ConsAddress(newValAddr.Bytes())
+ newValidatorSigningInfo := slashingtypes.ValidatorSigningInfo{
+ Address: newConsAddr.String(),
+ StartHeight: app.LastBlockHeight() - 1,
+ Tombstoned: false,
+}
+
+app.SlashingKeeper.ValidatorSigningInfo.Set(ctx, newConsAddr, newValidatorSigningInfo)
+```
+
+#### Bank
+
+It is useful to create new accounts for your testing purposes. This avoids the need to have the same key as you may have on mainnet.
+
+```go expandable
+// BANK
+ //
+ defaultCoins := sdk.NewCoins(sdk.NewInt64Coin("ustake", 1000000000000))
+ localSimAppAccounts := []sdk.AccAddress{
+ sdk.MustAccAddressFromBech32("cosmos12smx2wdlyttvyzvzg54y2vnqwq2qjateuf7thj"),
+ sdk.MustAccAddressFromBech32("cosmos1cyyzpxplxdzkeea7kwsydadg87357qnahakaks"),
+ sdk.MustAccAddressFromBech32("cosmos18s5lynnmx37hq4wlrw9gdn68sg2uxp5rgk26vv"),
+ sdk.MustAccAddressFromBech32("cosmos1qwexv7c6sm95lwhzn9027vyu2ccneaqad4w8ka"),
+ sdk.MustAccAddressFromBech32("cosmos14hcxlnwlqtq75ttaxf674vk6mafspg8xwgnn53"),
+ sdk.MustAccAddressFromBech32("cosmos12rr534cer5c0vj53eq4y32lcwguyy7nndt0u2t"),
+ sdk.MustAccAddressFromBech32("cosmos1nt33cjd5auzh36syym6azgc8tve0jlvklnq7jq"),
+ sdk.MustAccAddressFromBech32("cosmos10qfrpash5g2vk3hppvu45x0g860czur8ff5yx0"),
+ sdk.MustAccAddressFromBech32("cosmos1f4tvsdukfwh6s9swrc24gkuz23tp8pd3e9r5fa"),
+ sdk.MustAccAddressFromBech32("cosmos1myv43sqgnj5sm4zl98ftl45af9cfzk7nhjxjqh"),
+ sdk.MustAccAddressFromBech32("cosmos14gs9zqh8m49yy9kscjqu9h72exyf295afg6kgk"),
+ sdk.MustAccAddressFromBech32("cosmos1jllfytsz4dryxhz5tl7u73v29exsf80vz52ucc")
+}
+
+ // Fund localSimApp accounts
+ for _, account := range localSimAppAccounts {
+ err := app.BankKeeper.MintCoins(ctx, minttypes.ModuleName, defaultCoins)
+ if err != nil {
+ tmos.Exit(err.Error())
+}
+
+err = app.BankKeeper.SendCoinsFromModuleToAccount(ctx, minttypes.ModuleName, account, defaultCoins)
+ if err != nil {
+ tmos.Exit(err.Error())
+}
+
+}
+```
+
+#### Upgrade
+
+If you would like to schedule an upgrade, the below can be used.
+
+```go expandable
+// UPGRADE
+ //
+ if upgradeToTrigger != "" {
+ upgradePlan := upgradetypes.Plan{
+ Name: upgradeToTrigger,
+ Height: app.LastBlockHeight(),
+}
+
+err = app.UpgradeKeeper.ScheduleUpgrade(ctx, upgradePlan)
+ if err != nil {
+ panic(err)
+}
+
+}
+```
+
+### Optional Changes
+
+If you have custom modules that rely on specific state from the above modules and/or you would like to test your custom module, you will need to update the state of your custom module to reflect your needs.
+
+## Running the Testnet
+
+Before we can run the testnet we must plug everything together.
+
+In `root.go`, in the `initRootCmd` function, add:
+
+```diff
+ server.AddCommands(rootCmd, simapp.DefaultNodeHome, newApp, createSimAppAndExport, addModuleInitFlags)
+ ++ server.AddTestnetCreatorCommand(rootCmd, simapp.DefaultNodeHome, newTestnetApp, addModuleInitFlags)
+```
+
+Next we will add a newTestnetApp helper function:
+
+```diff expandable
+// newTestnetApp starts by running the normal newApp method. From there, the app interface returned is modified in order
+// for a testnet to be created from the provided app.
+func newTestnetApp(logger log.Logger, db cometbftdb.DB, traceStore io.Writer, appOpts servertypes.AppOptions) servertypes.Application {
+ // Create an app and type cast to an SimApp
+ app := newApp(logger, db, traceStore, appOpts)
+ simApp, ok := app.(*simapp.SimApp)
+ if !ok {
+ panic("app created from newApp is not of type simApp")
+ }
+
+ newValAddr, ok := appOpts.Get(server.KeyNewValAddr).(bytes.HexBytes)
+ if !ok {
+ panic("newValAddr is not of type bytes.HexBytes")
+ }
+ newValPubKey, ok := appOpts.Get(server.KeyUserPubKey).(crypto.PubKey)
+ if !ok {
+ panic("newValPubKey is not of type crypto.PubKey")
+ }
+ newOperatorAddress, ok := appOpts.Get(server.KeyNewOpAddr).(string)
+ if !ok {
+ panic("newOperatorAddress is not of type string")
+ }
+ upgradeToTrigger, ok := appOpts.Get(server.KeyTriggerTestnetUpgrade).(string)
+ if !ok {
+ panic("upgradeToTrigger is not of type string")
+ }
+
+ // Make modifications to the normal SimApp required to run the network locally
+ return simapp.InitSimAppForTestnet(simApp, newValAddr, newValPubKey, newOperatorAddress, upgradeToTrigger)
+}
+```
diff --git a/sdk/next/build/building-apps/app-upgrade.mdx b/sdk/next/build/building-apps/app-upgrade.mdx
new file mode 100644
index 000000000..6059956dc
--- /dev/null
+++ b/sdk/next/build/building-apps/app-upgrade.mdx
@@ -0,0 +1,221 @@
+---
+title: Chain Upgrades
+---
+
+
+This document describes how to upgrade your blockchain by upgrading the application binary. In Cosmos SDK, the application binary (the ABCI app) defines the blockchain's behavior, so upgrading the app effectively upgrades the chain. If you are looking specifically for the changes to perform between SDK versions, see the [SDK migrations documentation](/sdk/v0.53/build/migrations/intro).
+
+
+{/*
+
+This section is currently incomplete. Track the progress of this document [here](https://github.com/cosmos/cosmos-sdk/issues/11504).
+
+*/}
+
+
+**Prerequisite Readings**
+
+* [`x/upgrade` Documentation](/sdk/v0.53/build/migrations/intro)
+
+
+
+## General Workflow
+
+Let's assume we are running v0.38.0 of our application binary (and thus the chain) in our testnet and want to upgrade to v0.40.0.
+How would this look in practice? First, we want to finalize the v0.40.0 release candidate
+and then install a specially named upgrade handler (eg. "testnet-v2" or even "v0.40.0"). An upgrade
+handler should be defined in a new version of the software to define what migrations
+to run to migrate from the older version of the software. Naturally, this is app-specific rather
+than module specific, and must be defined in `app.go`, even if it imports logic from various
+modules to perform the actions. You can register them with `upgradeKeeper.SetUpgradeHandler`
+during the app initialization (before starting the abci server), and they serve not only to
+perform a migration, but also to identify if this is the old or new version (eg. presence of
+a handler registered for the named upgrade).
+
+Once the release candidate along with an appropriate upgrade handler is frozen,
+we can have a governance vote to approve this chain upgrade at some future block height (e.g. 200000).
+This is known as an upgrade.Plan. The v0.38.0 code will not know of this handler, but will
+continue to run until block 200000, when the plan kicks in at `BeginBlock`. It will check
+for the existence of the handler, and finding it missing, know that it is running the obsolete software,
+and gracefully exit.
+
+Generally the application binary will restart on exit, but then will execute this BeginBlocker
+again and exit, causing a restart loop. Either the operator can manually install the new software,
+or you can make use of an external watcher daemon to possibly download and then switch binaries,
+also potentially doing a backup. The SDK tool for doing such, is called [Cosmovisor](/sdk/v0.53/build/tooling/cosmovisor).
+
+When the binary restarts with the upgraded version (here v0.40.0), it will detect we have registered the
+"testnet-v2" upgrade handler in the code, and realize it is the new version. It then will run the upgrade handler
+and *migrate the database in-place*. Once finished, it marks the upgrade as done, and continues processing
+the rest of the block as normal. Once 2/3 of the voting power has upgraded, the blockchain will immediately
+resume the consensus mechanism. If the majority of operators add a custom `do-upgrade` script, this should
+be a matter of minutes and not even require them to be awake at that time.
+
+## Integrating With An App
+
+
+The following is not required for users using `depinject`; this is abstracted for them.
+
+
+In addition to basic module wiring, setup the upgrade Keeper for the app and then define a `PreBlocker` that calls the upgrade
+keeper's PreBlocker method:
+
+```go
+func (app *myApp)
+
+PreBlocker(ctx sdk.Context, req *abci.RequestFinalizeBlock) (*sdk.ResponsePreBlock, error) {
+ // For demonstration sake, the app PreBlocker only returns the upgrade module pre-blocker.
+ // In a real app, the module manager should call all pre-blockers
+ // return app.ModuleManager.PreBlock(ctx, req)
+
+return app.upgradeKeeper.PreBlocker(ctx, req)
+}
+```
+
+The app must then integrate the upgrade keeper with its governance module as appropriate. The governance module
+should call ScheduleUpgrade to schedule an upgrade and ClearUpgradePlan to cancel a pending upgrade.
+
+## Performing Upgrades
+
+Chain upgrades can be scheduled at a predefined block height. Once this block height is reached, the
+existing application binary will cease to process ABCI messages and a new version with code that handles the upgrade must be deployed.
+All upgrades are coordinated by a unique upgrade name that cannot be reused on the same blockchain. In order for the upgrade
+module to know that the upgrade has been safely applied, a handler with the name of the upgrade must be installed.
+Here is an example handler for an upgrade named "my-fancy-upgrade":
+
+```go
+app.upgradeKeeper.SetUpgradeHandler("my-fancy-upgrade", func(ctx context.Context, plan upgrade.Plan) {
+ // Perform any migrations of the state store needed for this upgrade
+})
+```
+
+This upgrade handler performs the dual function of alerting the upgrade module that the named upgrade has been applied,
+as well as providing the opportunity for the upgraded software to perform any necessary state migrations. Both the halt
+(with the old binary) and applying the migration (with the new binary) are enforced in the state machine. Actually
+switching the binaries is an ops task and not handled inside the SDK / ABCI app.
+
+Here is a sample code to set store migrations with an upgrade:
+
+```go expandable
+// this configures a no-op upgrade handler for the "my-fancy-upgrade" upgrade
+app.UpgradeKeeper.SetUpgradeHandler("my-fancy-upgrade", func(ctx context.Context, plan upgrade.Plan) {
+ // upgrade changes here
+})
+
+upgradeInfo, err := app.UpgradeKeeper.ReadUpgradeInfoFromDisk()
+ if err != nil {
+ // handle error
+}
+ if upgradeInfo.Name == "my-fancy-upgrade" && !app.UpgradeKeeper.IsSkipHeight(upgradeInfo.Height) {
+ storeUpgrades := store.StoreUpgrades{
+ Renamed: []store.StoreRename{{
+ OldKey: "foo",
+ NewKey: "bar",
+}},
+ Deleted: []string{
+},
+}
+ // configure store loader that checks if version == upgradeHeight and applies store upgrades
+ app.SetStoreLoader(upgrade.UpgradeStoreLoader(upgradeInfo.Height, &storeUpgrades))
+}
+```
+
+## Halt Behavior
+
+Before halting the ABCI state machine in the BeginBlocker method, the upgrade module will log an error
+that looks like:
+
+```text
+ UPGRADE "<Name>" NEEDED at height: <Height>: <Info>
+```
+
+where `Name` and `Info` are the values of the respective fields on the upgrade Plan.
+
+To perform the actual halt of the blockchain, the upgrade keeper simply panics which prevents the ABCI state machine
+from proceeding but doesn't actually exit the process. Exiting the process can cause issues for other nodes that start
+to lose connectivity with the exiting nodes, thus this module prefers to just halt but not exit.
+
+## Automation
+
+Read more about [Cosmovisor](/sdk/v0.53/build/tooling/cosmovisor), the tool for automating upgrades.
+
+## Canceling Upgrades
+
+There are two ways to cancel a planned upgrade - with on-chain governance or off-chain social consensus.
+For the first one, there is a `CancelSoftwareUpgrade` governance proposal, which can be voted on and will
+remove the scheduled upgrade plan. Of course this requires that the upgrade was known to be a bad idea
+well before the upgrade itself, to allow time for a vote. If you want to allow such a possibility, you
+should set the upgrade height to be `2 * (votingperiod + depositperiod) + (safety delta)` from the beginning of
+the first upgrade proposal. Safety delta is the time available from the success of an upgrade proposal
+and the realization it was a bad idea (due to external testing). You can also start a `CancelSoftwareUpgrade`
+proposal while the original `SoftwareUpgrade` proposal is still being voted upon, as long as the voting
+period ends after the `SoftwareUpgrade` proposal.
+
+However, let's assume that we don't realize the upgrade has a bug until shortly before it will occur
+(or while we try it out - hitting some panic in the migration). It would seem the blockchain is stuck,
+but we need to allow an escape for social consensus to overrule the planned upgrade. To do so, there's
+a `--unsafe-skip-upgrades` flag to the start command, which will cause the node to mark the upgrade
+as done upon hitting the planned upgrade height(s), without halting and without actually performing a migration.
+If over two-thirds run their nodes with this flag on the old binary, it will allow the chain to continue through
+the upgrade with a manual override. (This must be well-documented for anyone syncing from genesis later on).
+
+Example:
+
+```shell
+ <appd> start --unsafe-skip-upgrades <height1> <optional_height_2> ... <optional_height_N>
+```
+
+## Pre-Upgrade Handling
+
+Cosmovisor supports custom pre-upgrade handling. Use pre-upgrade handling when you need to implement application config changes that are required in the newer version before you perform the upgrade.
+
+Using Cosmovisor pre-upgrade handling is optional. If pre-upgrade handling is not implemented, the upgrade continues.
+
+For example, make the required new-version changes to `app.toml` settings during the pre-upgrade handling. The pre-upgrade handling process means that the file does not have to be manually updated after the upgrade.
+
+Before the application binary is upgraded, Cosmovisor calls a `pre-upgrade` command that can be implemented by the application.
+
+The `pre-upgrade` command does not take in any command-line arguments and is expected to terminate with the following exit codes:
+
+| Exit status code | How it is handled in Cosmovisor |
+| ---------------- | ------------------------------------------------------------------------------------------------------------------- |
+| `0` | Assumes `pre-upgrade` command executed successfully and continues the upgrade. |
+| `1` | Default exit code when `pre-upgrade` command has not been implemented. |
+| `30` | `pre-upgrade` command was executed but failed. This fails the entire upgrade. |
+| `31` | `pre-upgrade` command was executed but failed. But the command is retried until exit code `1` or `30` is returned. |
+
+## Sample
+
+Here is a sample structure of the `pre-upgrade` command:
+
+```go expandable
+func preUpgradeCommand() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "pre-upgrade",
+ Short: "Pre-upgrade command",
+ Long: "Pre-upgrade command to implement custom pre-upgrade handling",
+ Run: func(cmd *cobra.Command, args []string) {
+ err := HandlePreUpgrade()
+ if err != nil {
+ os.Exit(30)
+}
+
+os.Exit(0)
+},
+}
+
+return cmd
+}
+```
+
+Ensure that the pre-upgrade command has been registered in the application:
+
+```go
+rootCmd.AddCommand(
+ // ..
+ preUpgradeCommand(),
+ // ..
+ )
+```
+
+When not using Cosmovisor, ensure to run `<appd> pre-upgrade` before starting the application binary.
diff --git a/sdk/next/build/building-apps/runtime.mdx b/sdk/next/build/building-apps/runtime.mdx
new file mode 100644
index 000000000..b7987ba2b
--- /dev/null
+++ b/sdk/next/build/building-apps/runtime.mdx
@@ -0,0 +1,1877 @@
+---
+title: What is runtime?
+description: >-
+ The runtime package in the Cosmos SDK provides a flexible framework for
+ configuring and managing blockchain applications. It serves as the foundation
+ for creating modular blockchain applications using a declarative configuration
+ approach.
+---
+
+The `runtime` package in the Cosmos SDK provides a flexible framework for configuring and managing blockchain applications. It serves as the foundation for creating modular blockchain applications using a declarative configuration approach.
+
+## Overview
+
+The runtime package acts as a wrapper around the `BaseApp` and `ModuleManager`, offering a hybrid approach where applications can be configured both declaratively through configuration files and programmatically through traditional methods.
+It is a layer of abstraction between `baseapp` and the application modules that simplifies the process of building a Cosmos SDK application.
+
+## Core Components
+
+### App Structure
+
+The runtime App struct contains several key components:
+
+```go
+type App struct {
+ *baseapp.BaseApp
+ ModuleManager *module.Manager
+ configurator module.Configurator
+ config *runtimev1alpha1.Module
+ storeKeys []storetypes.StoreKey
+ // ... other fields
+}
+```
+
+Cosmos SDK applications should embed the `*runtime.App` struct to leverage the runtime module.
+
+```go expandable
+//go:build !app_v1
+
+package simapp
+
+import (
+
+ "io"
+
+ dbm "github.com/cosmos/cosmos-db"
+
+ clienthelpers "cosmossdk.io/client/v2/helpers"
+ "cosmossdk.io/depinject"
+ "cosmossdk.io/log"
+ storetypes "cosmossdk.io/store/types"
+ circuitkeeper "cosmossdk.io/x/circuit/keeper"
+ evidencekeeper "cosmossdk.io/x/evidence/keeper"
+ feegrantkeeper "cosmossdk.io/x/feegrant/keeper"
+ nftkeeper "cosmossdk.io/x/nft/keeper"
+ upgradekeeper "cosmossdk.io/x/upgrade/keeper"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ "github.com/cosmos/cosmos-sdk/server"
+ "github.com/cosmos/cosmos-sdk/server/api"
+ "github.com/cosmos/cosmos-sdk/server/config"
+ servertypes "github.com/cosmos/cosmos-sdk/server/types"
+ testdata_pulsar "github.com/cosmos/cosmos-sdk/testutil/testdata/testpb"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ "github.com/cosmos/cosmos-sdk/x/auth"
+ "github.com/cosmos/cosmos-sdk/x/auth/ante"
+ authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
+ authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ authzkeeper "github.com/cosmos/cosmos-sdk/x/authz/keeper"
+ bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
+ consensuskeeper "github.com/cosmos/cosmos-sdk/x/consensus/keeper"
+ distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper"
+ epochskeeper "github.com/cosmos/cosmos-sdk/x/epochs/keeper"
+ govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper"
+ groupkeeper "github.com/cosmos/cosmos-sdk/x/group/keeper"
+ mintkeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper"
+ protocolpoolkeeper "github.com/cosmos/cosmos-sdk/x/protocolpool/keeper"
+ slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper"
+ stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper"
+)
+
+// DefaultNodeHome default home directories for the application daemon
+var DefaultNodeHome string
+
+var (
+ _ runtime.AppI = (*SimApp)(nil)
+ _ servertypes.Application = (*SimApp)(nil)
+)
+
+// SimApp extends an ABCI application, but with most of its parameters exported.
+// They are exported for convenience in creating helper functions, as object
+// capabilities aren't needed for testing.
+type SimApp struct {
+ *runtime.App
+ legacyAmino *codec.LegacyAmino
+ appCodec codec.Codec
+ txConfig client.TxConfig
+ interfaceRegistry codectypes.InterfaceRegistry
+
+ // essential keepers
+ AccountKeeper authkeeper.AccountKeeper
+ BankKeeper bankkeeper.BaseKeeper
+ StakingKeeper *stakingkeeper.Keeper
+ SlashingKeeper slashingkeeper.Keeper
+ MintKeeper mintkeeper.Keeper
+ DistrKeeper distrkeeper.Keeper
+ GovKeeper *govkeeper.Keeper
+ UpgradeKeeper *upgradekeeper.Keeper
+ EvidenceKeeper evidencekeeper.Keeper
+ ConsensusParamsKeeper consensuskeeper.Keeper
+ CircuitKeeper circuitkeeper.Keeper
+
+ // supplementary keepers
+ FeeGrantKeeper feegrantkeeper.Keeper
+ GroupKeeper groupkeeper.Keeper
+ AuthzKeeper authzkeeper.Keeper
+ NFTKeeper nftkeeper.Keeper
+ EpochsKeeper epochskeeper.Keeper
+ ProtocolPoolKeeper protocolpoolkeeper.Keeper
+
+ // simulation manager
+ sm *module.SimulationManager
+}
+
+func init() {
+ var err error
+ DefaultNodeHome, err = clienthelpers.GetNodeHomeDirectory(".simapp")
+ if err != nil {
+ panic(err)
+}
+}
+
+// NewSimApp returns a reference to an initialized SimApp.
+func NewSimApp(
+ logger log.Logger,
+ db dbm.DB,
+ traceStore io.Writer,
+ loadLatest bool,
+ appOpts servertypes.AppOptions,
+ baseAppOptions ...func(*baseapp.BaseApp),
+) *SimApp {
+ var (
+ app = &SimApp{
+}
+
+appBuilder *runtime.AppBuilder
+
+ // merge the AppConfig and other configuration in one config
+ appConfig = depinject.Configs(
+ AppConfig,
+ depinject.Supply(
+ // supply the application options
+ appOpts,
+ // supply the logger
+ logger,
+
+ // ADVANCED CONFIGURATION
+
+ //
+ // AUTH
+ //
+ // For providing a custom function required in auth to generate custom account types
+ // add it below. By default the auth module uses simulation.RandomGenesisAccounts.
+ //
+ // authtypes.RandomGenesisAccountsFn(simulation.RandomGenesisAccounts),
+ //
+ // For providing a custom a base account type add it below.
+ // By default the auth module uses authtypes.ProtoBaseAccount().
+ //
+ // func()
+
+sdk.AccountI {
+ return authtypes.ProtoBaseAccount()
+},
+ //
+ // For providing a different address codec, add it below.
+ // By default the auth module uses a Bech32 address codec,
+ // with the prefix defined in the auth module configuration.
+ //
+ // func()
+
+address.Codec {
+ return <- custom address codec type ->
+}
+ //
+ // STAKING
+ //
+ // For providing a different validator and consensus address codec, add it below.
+ // By default the staking module uses the bech32 prefix provided in the auth config,
+ // and appends "valoper" and "valcons" for validator and consensus addresses respectively.
+ // When providing a custom address codec in auth, custom address codecs must be provided here as well.
+ //
+ // func()
+
+runtime.ValidatorAddressCodec {
+ return <- custom validator address codec type ->
+}
+ // func()
+
+runtime.ConsensusAddressCodec {
+ return <- custom consensus address codec type ->
+}
+
+ //
+ // MINT
+ //
+
+ // For providing a custom inflation function for x/mint add here your
+ // custom minting function that implements the mintkeeper.MintFn
+ // interface.
+ ),
+ )
+ )
+ if err := depinject.Inject(appConfig,
+ &appBuilder,
+ &app.appCodec,
+ &app.legacyAmino,
+ &app.txConfig,
+ &app.interfaceRegistry,
+ &app.AccountKeeper,
+ &app.BankKeeper,
+ &app.StakingKeeper,
+ &app.SlashingKeeper,
+ &app.MintKeeper,
+ &app.DistrKeeper,
+ &app.GovKeeper,
+ &app.UpgradeKeeper,
+ &app.AuthzKeeper,
+ &app.EvidenceKeeper,
+ &app.FeeGrantKeeper,
+ &app.GroupKeeper,
+ &app.NFTKeeper,
+ &app.ConsensusParamsKeeper,
+ &app.CircuitKeeper,
+ &app.EpochsKeeper,
+ &app.ProtocolPoolKeeper,
+ ); err != nil {
+ panic(err)
+}
+
+ // Below we could construct and set an application specific mempool and
+ // ABCI 1.0 PrepareProposal and ProcessProposal handlers. These defaults are
+ // already set in the SDK's BaseApp, this shows an example of how to override
+ // them.
+ //
+ // Example:
+ //
+ // app.App = appBuilder.Build(...)
+ // nonceMempool := mempool.NewSenderNonceMempool()
+ // abciPropHandler := NewDefaultProposalHandler(nonceMempool, app.App.BaseApp)
+ //
+ // app.App.BaseApp.SetMempool(nonceMempool)
+ // app.App.BaseApp.SetPrepareProposal(abciPropHandler.PrepareProposalHandler())
+ // app.App.BaseApp.SetProcessProposal(abciPropHandler.ProcessProposalHandler())
+ //
+ // Alternatively, you can construct BaseApp options, append those to
+ // baseAppOptions and pass them to the appBuilder.
+ //
+ // Example:
+ //
+ // prepareOpt = func(app *baseapp.BaseApp) {
+ // abciPropHandler := baseapp.NewDefaultProposalHandler(nonceMempool, app)
+ // app.SetPrepareProposal(abciPropHandler.PrepareProposalHandler())
+ //
+}
+ // baseAppOptions = append(baseAppOptions, prepareOpt)
+
+ // create and set dummy vote extension handler
+ voteExtOp := func(bApp *baseapp.BaseApp) {
+ voteExtHandler := NewVoteExtensionHandler()
+
+voteExtHandler.SetHandlers(bApp)
+}
+
+baseAppOptions = append(baseAppOptions, voteExtOp, baseapp.SetOptimisticExecution())
+
+app.App = appBuilder.Build(db, traceStore, baseAppOptions...)
+
+ // register streaming services
+ if err := app.RegisterStreamingServices(appOpts, app.kvStoreKeys()); err != nil {
+ panic(err)
+}
+
+ /**** Module Options ****/
+
+ // RegisterUpgradeHandlers is used for registering any on-chain upgrades.
+ app.RegisterUpgradeHandlers()
+
+ // add test gRPC service for testing gRPC queries in isolation
+ testdata_pulsar.RegisterQueryServer(app.GRPCQueryRouter(), testdata_pulsar.QueryImpl{
+})
+
+ // create the simulation manager and define the order of the modules for deterministic simulations
+ //
+ // NOTE: this is not required apps that don't use the simulator for fuzz testing
+ // transactions
+ overrideModules := map[string]module.AppModuleSimulation{
+ authtypes.ModuleName: auth.NewAppModule(app.appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, nil),
+}
+
+app.sm = module.NewSimulationManagerFromAppModules(app.ModuleManager.Modules, overrideModules)
+
+app.sm.RegisterStoreDecoders()
+
+ // A custom InitChainer can be set if extra pre-init-genesis logic is required.
+ // By default, when using app wiring enabled module, this is not required.
+ // For instance, the upgrade module will set automatically the module version map in its init genesis thanks to app wiring.
+ // However, when registering a module manually (i.e. that does not support app wiring), the module version map
+ // must be set manually as follow. The upgrade module will de-duplicate the module version map.
+ //
+ // app.SetInitChainer(func(ctx sdk.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) {
+ // app.UpgradeKeeper.SetModuleVersionMap(ctx, app.ModuleManager.GetVersionMap())
+ // return app.App.InitChainer(ctx, req)
+ //
+})
+
+ // set custom ante handler
+ app.setAnteHandler(app.txConfig)
+ if err := app.Load(loadLatest); err != nil {
+ panic(err)
+}
+
+return app
+}
+
+// setAnteHandler sets custom ante handlers.
+// "x/auth/tx" pre-defined ante handler have been disabled in app_config.
+func (app *SimApp)
+
+setAnteHandler(txConfig client.TxConfig) {
+ anteHandler, err := NewAnteHandler(
+ HandlerOptions{
+ ante.HandlerOptions{
+ AccountKeeper: app.AccountKeeper,
+ BankKeeper: app.BankKeeper,
+ SignModeHandler: txConfig.SignModeHandler(),
+ FeegrantKeeper: app.FeeGrantKeeper,
+ SigGasConsumer: ante.DefaultSigVerificationGasConsumer,
+},
+ &app.CircuitKeeper,
+},
+ )
+ if err != nil {
+ panic(err)
+}
+
+ // Set the AnteHandler for the app
+ app.SetAnteHandler(anteHandler)
+}
+
+// LegacyAmino returns SimApp's amino codec.
+//
+// NOTE: This is solely to be used for testing purposes as it may be desirable
+// for modules to register their own custom testing types.
+func (app *SimApp)
+
+LegacyAmino() *codec.LegacyAmino {
+ return app.legacyAmino
+}
+
+// AppCodec returns SimApp's app codec.
+//
+// NOTE: This is solely to be used for testing purposes as it may be desirable
+// for modules to register their own custom testing types.
+func (app *SimApp)
+
+AppCodec()
+
+codec.Codec {
+ return app.appCodec
+}
+
+// InterfaceRegistry returns SimApp's InterfaceRegistry.
+func (app *SimApp)
+
+InterfaceRegistry()
+
+codectypes.InterfaceRegistry {
+ return app.interfaceRegistry
+}
+
+// TxConfig returns SimApp's TxConfig
+func (app *SimApp) TxConfig() client.TxConfig {
+	return app.txConfig
+}
+
+// GetKey returns the KVStoreKey for the provided store key.
+//
+// NOTE: This is solely to be used for testing purposes.
+func (app *SimApp) GetKey(storeKey string) *storetypes.KVStoreKey {
+	sk := app.UnsafeFindStoreKey(storeKey)
+	kvStoreKey, ok := sk.(*storetypes.KVStoreKey)
+	if !ok {
+		return nil
+	}
+	return kvStoreKey
+}
+
+func (app *SimApp) kvStoreKeys() map[string]*storetypes.KVStoreKey {
+	keys := make(map[string]*storetypes.KVStoreKey)
+	for _, k := range app.GetStoreKeys() {
+		if kv, ok := k.(*storetypes.KVStoreKey); ok {
+			keys[kv.Name()] = kv
+		}
+	}
+	return keys
+}
+
+// SimulationManager implements the SimulationApp interface
+func (app *SimApp) SimulationManager() *module.SimulationManager {
+	return app.sm
+}
+
+// RegisterAPIRoutes registers all application module routes with the provided
+// API server.
+func (app *SimApp) RegisterAPIRoutes(apiSvr *api.Server, apiConfig config.APIConfig) {
+	app.App.RegisterAPIRoutes(apiSvr, apiConfig)
+	// register swagger API in app.go so that other applications can override easily
+	if err := server.RegisterSwaggerAPI(apiSvr.ClientCtx, apiSvr.Router, apiConfig.Swagger); err != nil {
+		panic(err)
+	}
+}
+
+// GetMaccPerms returns a copy of the module account permissions
+//
+// NOTE: This is solely to be used for testing purposes.
+func GetMaccPerms() map[string][]string {
+	dup := make(map[string][]string)
+	for _, perms := range moduleAccPerms {
+		dup[perms.Account] = perms.Permissions
+	}
+	return dup
+}
+
+// BlockedAddresses returns all the app's blocked account addresses.
+func BlockedAddresses() map[string]bool {
+	result := make(map[string]bool)
+	if len(blockAccAddrs) > 0 {
+		for _, addr := range blockAccAddrs {
+			result[addr] = true
+		}
+	} else {
+		for addr := range GetMaccPerms() {
+			result[addr] = true
+		}
+	}
+	return result
+}
+```
+
+### Configuration
+
+The runtime module is configured using App Wiring. The main configuration object is the [`Module` message](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0-rc.2/proto/cosmos/app/runtime/v1alpha1/module.proto), which supports the following key settings:
+
+* `app_name`: The name of the application
+* `begin_blockers`: List of module names to call during BeginBlock
+* `end_blockers`: List of module names to call during EndBlock
+* `init_genesis`: Order of module initialization during genesis
+* `export_genesis`: Order for exporting module genesis data
+* `pre_blockers`: Modules to execute before block processing
+
+Learn more about wiring `runtime` in the [next section](/sdk/v0.53/build/building-apps/app-go-di).
+
+#### Store Configuration
+
+By default, the runtime module uses the module name as the store key.
+However, it provides a flexible store key configuration through:
+
+* `override_store_keys`: Allows customizing module store keys
+* `skip_store_keys`: Specifies store keys to skip during keeper construction
+
+Example configuration:
+
+```go expandable
+package simapp
+
+import (
+
+ "time"
+ "google.golang.org/protobuf/types/known/durationpb"
+
+ runtimev1alpha1 "cosmossdk.io/api/cosmos/app/runtime/v1alpha1"
+ appv1alpha1 "cosmossdk.io/api/cosmos/app/v1alpha1"
+ authmodulev1 "cosmossdk.io/api/cosmos/auth/module/v1"
+ authzmodulev1 "cosmossdk.io/api/cosmos/authz/module/v1"
+ bankmodulev1 "cosmossdk.io/api/cosmos/bank/module/v1"
+ circuitmodulev1 "cosmossdk.io/api/cosmos/circuit/module/v1"
+ consensusmodulev1 "cosmossdk.io/api/cosmos/consensus/module/v1"
+ distrmodulev1 "cosmossdk.io/api/cosmos/distribution/module/v1"
+ epochsmodulev1 "cosmossdk.io/api/cosmos/epochs/module/v1"
+ evidencemodulev1 "cosmossdk.io/api/cosmos/evidence/module/v1"
+ feegrantmodulev1 "cosmossdk.io/api/cosmos/feegrant/module/v1"
+ genutilmodulev1 "cosmossdk.io/api/cosmos/genutil/module/v1"
+ govmodulev1 "cosmossdk.io/api/cosmos/gov/module/v1"
+ groupmodulev1 "cosmossdk.io/api/cosmos/group/module/v1"
+ mintmodulev1 "cosmossdk.io/api/cosmos/mint/module/v1"
+ nftmodulev1 "cosmossdk.io/api/cosmos/nft/module/v1"
+ protocolpoolmodulev1 "cosmossdk.io/api/cosmos/protocolpool/module/v1"
+ slashingmodulev1 "cosmossdk.io/api/cosmos/slashing/module/v1"
+ stakingmodulev1 "cosmossdk.io/api/cosmos/staking/module/v1"
+ txconfigv1 "cosmossdk.io/api/cosmos/tx/config/v1"
+ upgrademodulev1 "cosmossdk.io/api/cosmos/upgrade/module/v1"
+ vestingmodulev1 "cosmossdk.io/api/cosmos/vesting/module/v1"
+ "cosmossdk.io/core/appconfig"
+ "cosmossdk.io/depinject"
+ _ "cosmossdk.io/x/circuit" // import for side-effects
+ circuittypes "cosmossdk.io/x/circuit/types"
+ _ "cosmossdk.io/x/evidence" // import for side-effects
+ evidencetypes "cosmossdk.io/x/evidence/types"
+ "cosmossdk.io/x/feegrant"
+ _ "cosmossdk.io/x/feegrant/module" // import for side-effects
+ "cosmossdk.io/x/nft"
+ _ "cosmossdk.io/x/nft/module" // import for side-effects
+ _ "cosmossdk.io/x/upgrade" // import for side-effects
+ upgradetypes "cosmossdk.io/x/upgrade/types"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ _ "github.com/cosmos/cosmos-sdk/x/auth/tx/config" // import for side-effects
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ _ "github.com/cosmos/cosmos-sdk/x/auth/vesting" // import for side-effects
+ vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
+ "github.com/cosmos/cosmos-sdk/x/authz"
+ _ "github.com/cosmos/cosmos-sdk/x/authz/module" // import for side-effects
+ _ "github.com/cosmos/cosmos-sdk/x/bank" // import for side-effects
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ _ "github.com/cosmos/cosmos-sdk/x/consensus" // import for side-effects
+ consensustypes "github.com/cosmos/cosmos-sdk/x/consensus/types"
+ _ "github.com/cosmos/cosmos-sdk/x/distribution" // import for side-effects
+ distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
+ _ "github.com/cosmos/cosmos-sdk/x/epochs" // import for side-effects
+ epochstypes "github.com/cosmos/cosmos-sdk/x/epochs/types"
+ "github.com/cosmos/cosmos-sdk/x/genutil"
+ genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
+ "github.com/cosmos/cosmos-sdk/x/gov"
+ govclient "github.com/cosmos/cosmos-sdk/x/gov/client"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ "github.com/cosmos/cosmos-sdk/x/group"
+ _ "github.com/cosmos/cosmos-sdk/x/group/module" // import for side-effects
+ _ "github.com/cosmos/cosmos-sdk/x/mint" // import for side-effects
+ minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
+ _ "github.com/cosmos/cosmos-sdk/x/protocolpool" // import for side-effects
+ protocolpooltypes "github.com/cosmos/cosmos-sdk/x/protocolpool/types"
+ _ "github.com/cosmos/cosmos-sdk/x/slashing" // import for side-effects
+ slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types"
+ _ "github.com/cosmos/cosmos-sdk/x/staking" // import for side-effects
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+)
+
+var (
+ // module account permissions
+ moduleAccPerms = []*authmodulev1.ModuleAccountPermission{
+ {
+ Account: authtypes.FeeCollectorName
+},
+ {
+ Account: distrtypes.ModuleName
+},
+ {
+ Account: minttypes.ModuleName,
+ Permissions: []string{
+ authtypes.Minter
+}},
+ {
+ Account: stakingtypes.BondedPoolName,
+ Permissions: []string{
+ authtypes.Burner, stakingtypes.ModuleName
+}},
+ {
+ Account: stakingtypes.NotBondedPoolName,
+ Permissions: []string{
+ authtypes.Burner, stakingtypes.ModuleName
+}},
+ {
+ Account: govtypes.ModuleName,
+ Permissions: []string{
+ authtypes.Burner
+}},
+ {
+ Account: nft.ModuleName
+},
+ {
+ Account: protocolpooltypes.ModuleName
+},
+ {
+ Account: protocolpooltypes.ProtocolPoolEscrowAccount
+},
+}
+
+ // blocked account addresses
+ blockAccAddrs = []string{
+ authtypes.FeeCollectorName,
+ distrtypes.ModuleName,
+ minttypes.ModuleName,
+ stakingtypes.BondedPoolName,
+ stakingtypes.NotBondedPoolName,
+ nft.ModuleName,
+ // We allow the following module accounts to receive funds:
+ // govtypes.ModuleName
+}
+
+ModuleConfig = []*appv1alpha1.ModuleConfig{
+ {
+ Name: runtime.ModuleName,
+ Config: appconfig.WrapAny(&runtimev1alpha1.Module{
+ AppName: "SimApp",
+ // NOTE: upgrade module is required to be prioritized
+ PreBlockers: []string{
+ upgradetypes.ModuleName,
+ authtypes.ModuleName,
+},
+ // During begin block slashing happens after distr.BeginBlocker so that
+ // there is nothing left over in the validator fee pool, so as to keep the
+ // CanWithdrawInvariant invariant.
+ // NOTE: staking module is required if HistoricalEntries param > 0
+ BeginBlockers: []string{
+ minttypes.ModuleName,
+ distrtypes.ModuleName,
+ protocolpooltypes.ModuleName,
+ slashingtypes.ModuleName,
+ evidencetypes.ModuleName,
+ stakingtypes.ModuleName,
+ authz.ModuleName,
+ epochstypes.ModuleName,
+},
+ EndBlockers: []string{
+ govtypes.ModuleName,
+ stakingtypes.ModuleName,
+ feegrant.ModuleName,
+ group.ModuleName,
+ protocolpooltypes.ModuleName,
+},
+ OverrideStoreKeys: []*runtimev1alpha1.StoreKeyConfig{
+ {
+ ModuleName: authtypes.ModuleName,
+ KvStoreKey: "acc",
+},
+},
+ SkipStoreKeys: []string{
+ "tx",
+},
+ // NOTE: The genutils module must occur after staking so that pools are
+ // properly initialized with tokens from genesis accounts.
+ // NOTE: The genutils module must also occur after auth so that it can access the params from auth.
+ InitGenesis: []string{
+ authtypes.ModuleName,
+ banktypes.ModuleName,
+ distrtypes.ModuleName,
+ stakingtypes.ModuleName,
+ slashingtypes.ModuleName,
+ govtypes.ModuleName,
+ minttypes.ModuleName,
+ genutiltypes.ModuleName,
+ evidencetypes.ModuleName,
+ authz.ModuleName,
+ feegrant.ModuleName,
+ nft.ModuleName,
+ group.ModuleName,
+ upgradetypes.ModuleName,
+ vestingtypes.ModuleName,
+ circuittypes.ModuleName,
+ epochstypes.ModuleName,
+ protocolpooltypes.ModuleName,
+},
+ // When ExportGenesis is not specified, the export genesis module order
+ // is equal to the init genesis order
+ ExportGenesis: []string{
+ consensustypes.ModuleName,
+ authtypes.ModuleName,
+ protocolpooltypes.ModuleName, // Must be exported before bank
+ banktypes.ModuleName,
+ distrtypes.ModuleName,
+ stakingtypes.ModuleName,
+ slashingtypes.ModuleName,
+ govtypes.ModuleName,
+ minttypes.ModuleName,
+ genutiltypes.ModuleName,
+ evidencetypes.ModuleName,
+ authz.ModuleName,
+ feegrant.ModuleName,
+ nft.ModuleName,
+ group.ModuleName,
+ upgradetypes.ModuleName,
+ vestingtypes.ModuleName,
+ circuittypes.ModuleName,
+ epochstypes.ModuleName,
+},
+ // Uncomment if you want to set a custom migration order here.
+ // OrderMigrations: []string{
+},
+}),
+},
+ {
+ Name: authtypes.ModuleName,
+ Config: appconfig.WrapAny(&authmodulev1.Module{
+ Bech32Prefix: "cosmos",
+ ModuleAccountPermissions: moduleAccPerms,
+ // By default modules authority is the governance module. This is configurable with the following:
+ // Authority: "group", // A custom module authority can be set using a module name
+ // Authority: "cosmos1cwwv22j5ca08ggdv9c2uky355k908694z577tv", // or a specific address
+ EnableUnorderedTransactions: true,
+}),
+},
+ {
+ Name: vestingtypes.ModuleName,
+ Config: appconfig.WrapAny(&vestingmodulev1.Module{
+}),
+},
+ {
+ Name: banktypes.ModuleName,
+ Config: appconfig.WrapAny(&bankmodulev1.Module{
+ BlockedModuleAccountsOverride: blockAccAddrs,
+}),
+},
+ {
+ Name: stakingtypes.ModuleName,
+ Config: appconfig.WrapAny(&stakingmodulev1.Module{
+ // NOTE: specifying a prefix is only necessary when using bech32 addresses
+ // If not specified, the auth Bech32Prefix appended with "valoper" and "valcons" is used by default
+ Bech32PrefixValidator: "cosmosvaloper",
+ Bech32PrefixConsensus: "cosmosvalcons",
+}),
+},
+ {
+ Name: slashingtypes.ModuleName,
+ Config: appconfig.WrapAny(&slashingmodulev1.Module{
+}),
+},
+ {
+ Name: "tx",
+ Config: appconfig.WrapAny(&txconfigv1.Config{
+ SkipAnteHandler: true, // Enable this to skip the default ante handlers and set custom ante handlers.
+}),
+},
+ {
+ Name: genutiltypes.ModuleName,
+ Config: appconfig.WrapAny(&genutilmodulev1.Module{
+}),
+},
+ {
+ Name: authz.ModuleName,
+ Config: appconfig.WrapAny(&authzmodulev1.Module{
+}),
+},
+ {
+ Name: upgradetypes.ModuleName,
+ Config: appconfig.WrapAny(&upgrademodulev1.Module{
+}),
+},
+ {
+ Name: distrtypes.ModuleName,
+ Config: appconfig.WrapAny(&distrmodulev1.Module{
+}),
+},
+ {
+ Name: evidencetypes.ModuleName,
+ Config: appconfig.WrapAny(&evidencemodulev1.Module{
+}),
+},
+ {
+ Name: minttypes.ModuleName,
+ Config: appconfig.WrapAny(&mintmodulev1.Module{
+}),
+},
+ {
+ Name: group.ModuleName,
+ Config: appconfig.WrapAny(&groupmodulev1.Module{
+ MaxExecutionPeriod: durationpb.New(time.Second * 1209600),
+ MaxMetadataLen: 255,
+}),
+},
+ {
+ Name: nft.ModuleName,
+ Config: appconfig.WrapAny(&nftmodulev1.Module{
+}),
+},
+ {
+ Name: feegrant.ModuleName,
+ Config: appconfig.WrapAny(&feegrantmodulev1.Module{
+}),
+},
+ {
+ Name: govtypes.ModuleName,
+ Config: appconfig.WrapAny(&govmodulev1.Module{
+}),
+},
+ {
+ Name: consensustypes.ModuleName,
+ Config: appconfig.WrapAny(&consensusmodulev1.Module{
+}),
+},
+ {
+ Name: circuittypes.ModuleName,
+ Config: appconfig.WrapAny(&circuitmodulev1.Module{
+}),
+},
+ {
+ Name: epochstypes.ModuleName,
+ Config: appconfig.WrapAny(&epochsmodulev1.Module{
+}),
+},
+ {
+ Name: protocolpooltypes.ModuleName,
+ Config: appconfig.WrapAny(&protocolpoolmodulev1.Module{
+}),
+},
+}
+
+ // AppConfig is application configuration (used by depinject)
+
+AppConfig = depinject.Configs(appconfig.Compose(&appv1alpha1.Config{
+ Modules: ModuleConfig,
+}),
+ depinject.Supply(
+ // supply custom module basics
+ map[string]module.AppModuleBasic{
+ genutiltypes.ModuleName: genutil.NewAppModuleBasic(genutiltypes.DefaultMessageValidator),
+ govtypes.ModuleName: gov.NewAppModuleBasic(
+ []govclient.ProposalHandler{
+},
+ ),
+},
+ ),
+ )
+)
+```
+
+## Key Features
+
+### 1. BaseApp and other Core SDK components integration
+
+The runtime module integrates with the `BaseApp` and other core SDK components to provide a seamless experience for developers.
+
+The developer only needs to embed the `runtime.App` struct in their application to leverage the runtime module.
+The configuration of the module manager and other core components is handled internally via the [`AppBuilder`](#4-application-building).
+
+### 2. Module Registration
+
+Runtime has built-in support for [`depinject`-enabled modules](/sdk/v0.53/build/building-modules/depinject).
+Such modules can be registered through the configuration file (often named `app_config.go`), with no additional code required.
+
+```go expandable
+package simapp
+
+import (
+
+ "time"
+ "google.golang.org/protobuf/types/known/durationpb"
+
+ runtimev1alpha1 "cosmossdk.io/api/cosmos/app/runtime/v1alpha1"
+ appv1alpha1 "cosmossdk.io/api/cosmos/app/v1alpha1"
+ authmodulev1 "cosmossdk.io/api/cosmos/auth/module/v1"
+ authzmodulev1 "cosmossdk.io/api/cosmos/authz/module/v1"
+ bankmodulev1 "cosmossdk.io/api/cosmos/bank/module/v1"
+ circuitmodulev1 "cosmossdk.io/api/cosmos/circuit/module/v1"
+ consensusmodulev1 "cosmossdk.io/api/cosmos/consensus/module/v1"
+ distrmodulev1 "cosmossdk.io/api/cosmos/distribution/module/v1"
+ epochsmodulev1 "cosmossdk.io/api/cosmos/epochs/module/v1"
+ evidencemodulev1 "cosmossdk.io/api/cosmos/evidence/module/v1"
+ feegrantmodulev1 "cosmossdk.io/api/cosmos/feegrant/module/v1"
+ genutilmodulev1 "cosmossdk.io/api/cosmos/genutil/module/v1"
+ govmodulev1 "cosmossdk.io/api/cosmos/gov/module/v1"
+ groupmodulev1 "cosmossdk.io/api/cosmos/group/module/v1"
+ mintmodulev1 "cosmossdk.io/api/cosmos/mint/module/v1"
+ nftmodulev1 "cosmossdk.io/api/cosmos/nft/module/v1"
+ protocolpoolmodulev1 "cosmossdk.io/api/cosmos/protocolpool/module/v1"
+ slashingmodulev1 "cosmossdk.io/api/cosmos/slashing/module/v1"
+ stakingmodulev1 "cosmossdk.io/api/cosmos/staking/module/v1"
+ txconfigv1 "cosmossdk.io/api/cosmos/tx/config/v1"
+ upgrademodulev1 "cosmossdk.io/api/cosmos/upgrade/module/v1"
+ vestingmodulev1 "cosmossdk.io/api/cosmos/vesting/module/v1"
+ "cosmossdk.io/core/appconfig"
+ "cosmossdk.io/depinject"
+ _ "cosmossdk.io/x/circuit" // import for side-effects
+ circuittypes "cosmossdk.io/x/circuit/types"
+ _ "cosmossdk.io/x/evidence" // import for side-effects
+ evidencetypes "cosmossdk.io/x/evidence/types"
+ "cosmossdk.io/x/feegrant"
+ _ "cosmossdk.io/x/feegrant/module" // import for side-effects
+ "cosmossdk.io/x/nft"
+ _ "cosmossdk.io/x/nft/module" // import for side-effects
+ _ "cosmossdk.io/x/upgrade" // import for side-effects
+ upgradetypes "cosmossdk.io/x/upgrade/types"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ _ "github.com/cosmos/cosmos-sdk/x/auth/tx/config" // import for side-effects
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ _ "github.com/cosmos/cosmos-sdk/x/auth/vesting" // import for side-effects
+ vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
+ "github.com/cosmos/cosmos-sdk/x/authz"
+ _ "github.com/cosmos/cosmos-sdk/x/authz/module" // import for side-effects
+ _ "github.com/cosmos/cosmos-sdk/x/bank" // import for side-effects
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ _ "github.com/cosmos/cosmos-sdk/x/consensus" // import for side-effects
+ consensustypes "github.com/cosmos/cosmos-sdk/x/consensus/types"
+ _ "github.com/cosmos/cosmos-sdk/x/distribution" // import for side-effects
+ distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
+ _ "github.com/cosmos/cosmos-sdk/x/epochs" // import for side-effects
+ epochstypes "github.com/cosmos/cosmos-sdk/x/epochs/types"
+ "github.com/cosmos/cosmos-sdk/x/genutil"
+ genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
+ "github.com/cosmos/cosmos-sdk/x/gov"
+ govclient "github.com/cosmos/cosmos-sdk/x/gov/client"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ "github.com/cosmos/cosmos-sdk/x/group"
+ _ "github.com/cosmos/cosmos-sdk/x/group/module" // import for side-effects
+ _ "github.com/cosmos/cosmos-sdk/x/mint" // import for side-effects
+ minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
+ _ "github.com/cosmos/cosmos-sdk/x/protocolpool" // import for side-effects
+ protocolpooltypes "github.com/cosmos/cosmos-sdk/x/protocolpool/types"
+ _ "github.com/cosmos/cosmos-sdk/x/slashing" // import for side-effects
+ slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types"
+ _ "github.com/cosmos/cosmos-sdk/x/staking" // import for side-effects
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+)
+
+var (
+ // module account permissions
+ moduleAccPerms = []*authmodulev1.ModuleAccountPermission{
+ {
+ Account: authtypes.FeeCollectorName
+},
+ {
+ Account: distrtypes.ModuleName
+},
+ {
+ Account: minttypes.ModuleName,
+ Permissions: []string{
+ authtypes.Minter
+}},
+ {
+ Account: stakingtypes.BondedPoolName,
+ Permissions: []string{
+ authtypes.Burner, stakingtypes.ModuleName
+}},
+ {
+ Account: stakingtypes.NotBondedPoolName,
+ Permissions: []string{
+ authtypes.Burner, stakingtypes.ModuleName
+}},
+ {
+ Account: govtypes.ModuleName,
+ Permissions: []string{
+ authtypes.Burner
+}},
+ {
+ Account: nft.ModuleName
+},
+ {
+ Account: protocolpooltypes.ModuleName
+},
+ {
+ Account: protocolpooltypes.ProtocolPoolEscrowAccount
+},
+}
+
+ // blocked account addresses
+ blockAccAddrs = []string{
+ authtypes.FeeCollectorName,
+ distrtypes.ModuleName,
+ minttypes.ModuleName,
+ stakingtypes.BondedPoolName,
+ stakingtypes.NotBondedPoolName,
+ nft.ModuleName,
+ // We allow the following module accounts to receive funds:
+ // govtypes.ModuleName
+}
+
+ModuleConfig = []*appv1alpha1.ModuleConfig{
+ {
+ Name: runtime.ModuleName,
+ Config: appconfig.WrapAny(&runtimev1alpha1.Module{
+ AppName: "SimApp",
+ // NOTE: upgrade module is required to be prioritized
+ PreBlockers: []string{
+ upgradetypes.ModuleName,
+ authtypes.ModuleName,
+},
+ // During begin block slashing happens after distr.BeginBlocker so that
+ // there is nothing left over in the validator fee pool, so as to keep the
+ // CanWithdrawInvariant invariant.
+ // NOTE: staking module is required if HistoricalEntries param > 0
+ BeginBlockers: []string{
+ minttypes.ModuleName,
+ distrtypes.ModuleName,
+ protocolpooltypes.ModuleName,
+ slashingtypes.ModuleName,
+ evidencetypes.ModuleName,
+ stakingtypes.ModuleName,
+ authz.ModuleName,
+ epochstypes.ModuleName,
+},
+ EndBlockers: []string{
+ govtypes.ModuleName,
+ stakingtypes.ModuleName,
+ feegrant.ModuleName,
+ group.ModuleName,
+ protocolpooltypes.ModuleName,
+},
+ OverrideStoreKeys: []*runtimev1alpha1.StoreKeyConfig{
+ {
+ ModuleName: authtypes.ModuleName,
+ KvStoreKey: "acc",
+},
+},
+ SkipStoreKeys: []string{
+ "tx",
+},
+ // NOTE: The genutils module must occur after staking so that pools are
+ // properly initialized with tokens from genesis accounts.
+ // NOTE: The genutils module must also occur after auth so that it can access the params from auth.
+ InitGenesis: []string{
+ authtypes.ModuleName,
+ banktypes.ModuleName,
+ distrtypes.ModuleName,
+ stakingtypes.ModuleName,
+ slashingtypes.ModuleName,
+ govtypes.ModuleName,
+ minttypes.ModuleName,
+ genutiltypes.ModuleName,
+ evidencetypes.ModuleName,
+ authz.ModuleName,
+ feegrant.ModuleName,
+ nft.ModuleName,
+ group.ModuleName,
+ upgradetypes.ModuleName,
+ vestingtypes.ModuleName,
+ circuittypes.ModuleName,
+ epochstypes.ModuleName,
+ protocolpooltypes.ModuleName,
+},
+ // When ExportGenesis is not specified, the export genesis module order
+ // is equal to the init genesis order
+ ExportGenesis: []string{
+ consensustypes.ModuleName,
+ authtypes.ModuleName,
+ protocolpooltypes.ModuleName, // Must be exported before bank
+ banktypes.ModuleName,
+ distrtypes.ModuleName,
+ stakingtypes.ModuleName,
+ slashingtypes.ModuleName,
+ govtypes.ModuleName,
+ minttypes.ModuleName,
+ genutiltypes.ModuleName,
+ evidencetypes.ModuleName,
+ authz.ModuleName,
+ feegrant.ModuleName,
+ nft.ModuleName,
+ group.ModuleName,
+ upgradetypes.ModuleName,
+ vestingtypes.ModuleName,
+ circuittypes.ModuleName,
+ epochstypes.ModuleName,
+},
+ // Uncomment if you want to set a custom migration order here.
+ // OrderMigrations: []string{
+},
+}),
+},
+ {
+ Name: authtypes.ModuleName,
+ Config: appconfig.WrapAny(&authmodulev1.Module{
+ Bech32Prefix: "cosmos",
+ ModuleAccountPermissions: moduleAccPerms,
+ // By default modules authority is the governance module. This is configurable with the following:
+ // Authority: "group", // A custom module authority can be set using a module name
+ // Authority: "cosmos1cwwv22j5ca08ggdv9c2uky355k908694z577tv", // or a specific address
+ EnableUnorderedTransactions: true,
+}),
+},
+ {
+ Name: vestingtypes.ModuleName,
+ Config: appconfig.WrapAny(&vestingmodulev1.Module{
+}),
+},
+ {
+ Name: banktypes.ModuleName,
+ Config: appconfig.WrapAny(&bankmodulev1.Module{
+ BlockedModuleAccountsOverride: blockAccAddrs,
+}),
+},
+ {
+ Name: stakingtypes.ModuleName,
+ Config: appconfig.WrapAny(&stakingmodulev1.Module{
+ // NOTE: specifying a prefix is only necessary when using bech32 addresses
+ // If not specified, the auth Bech32Prefix appended with "valoper" and "valcons" is used by default
+ Bech32PrefixValidator: "cosmosvaloper",
+ Bech32PrefixConsensus: "cosmosvalcons",
+}),
+},
+ {
+ Name: slashingtypes.ModuleName,
+ Config: appconfig.WrapAny(&slashingmodulev1.Module{
+}),
+},
+ {
+ Name: "tx",
+ Config: appconfig.WrapAny(&txconfigv1.Config{
+ SkipAnteHandler: true, // Enable this to skip the default ante handlers and set custom ante handlers.
+}),
+},
+ {
+ Name: genutiltypes.ModuleName,
+ Config: appconfig.WrapAny(&genutilmodulev1.Module{
+}),
+},
+ {
+ Name: authz.ModuleName,
+ Config: appconfig.WrapAny(&authzmodulev1.Module{
+}),
+},
+ {
+ Name: upgradetypes.ModuleName,
+ Config: appconfig.WrapAny(&upgrademodulev1.Module{
+}),
+},
+ {
+ Name: distrtypes.ModuleName,
+ Config: appconfig.WrapAny(&distrmodulev1.Module{
+}),
+},
+ {
+ Name: evidencetypes.ModuleName,
+ Config: appconfig.WrapAny(&evidencemodulev1.Module{
+}),
+},
+ {
+ Name: minttypes.ModuleName,
+ Config: appconfig.WrapAny(&mintmodulev1.Module{
+}),
+},
+ {
+ Name: group.ModuleName,
+ Config: appconfig.WrapAny(&groupmodulev1.Module{
+ MaxExecutionPeriod: durationpb.New(time.Second * 1209600),
+ MaxMetadataLen: 255,
+}),
+},
+ {
+ Name: nft.ModuleName,
+ Config: appconfig.WrapAny(&nftmodulev1.Module{
+}),
+},
+ {
+ Name: feegrant.ModuleName,
+ Config: appconfig.WrapAny(&feegrantmodulev1.Module{
+}),
+},
+ {
+ Name: govtypes.ModuleName,
+ Config: appconfig.WrapAny(&govmodulev1.Module{
+}),
+},
+ {
+ Name: consensustypes.ModuleName,
+ Config: appconfig.WrapAny(&consensusmodulev1.Module{
+}),
+},
+ {
+ Name: circuittypes.ModuleName,
+ Config: appconfig.WrapAny(&circuitmodulev1.Module{
+}),
+},
+ {
+ Name: epochstypes.ModuleName,
+ Config: appconfig.WrapAny(&epochsmodulev1.Module{
+}),
+},
+ {
+ Name: protocolpooltypes.ModuleName,
+ Config: appconfig.WrapAny(&protocolpoolmodulev1.Module{
+}),
+},
+}
+
+ // AppConfig is application configuration (used by depinject)
+
+AppConfig = depinject.Configs(appconfig.Compose(&appv1alpha1.Config{
+ Modules: ModuleConfig,
+}),
+ depinject.Supply(
+ // supply custom module basics
+ map[string]module.AppModuleBasic{
+ genutiltypes.ModuleName: genutil.NewAppModuleBasic(genutiltypes.DefaultMessageValidator),
+ govtypes.ModuleName: gov.NewAppModuleBasic(
+ []govclient.ProposalHandler{
+},
+ ),
+},
+ ),
+ )
+)
+```
+
+Additionally, the runtime package facilitates manual module registration through the `RegisterModules` method. This is the primary integration point for modules not registered via configuration.
+
+
+Even when using manual registration, the module should still be configured in the `Module` message in AppConfig.
+
+
+```go
+func (a *App)
+
+RegisterModules(modules ...module.AppModule)
+
+error
+```
+
+The SDK recommends using the declarative approach with `depinject` for module registration whenever possible.
+
+### 3. Service Registration
+
+Runtime registers all [core services](https://pkg.go.dev/cosmossdk.io/core) required by modules.
+These services include `store`, `event manager`, `context`, and `logger`.
+Runtime ensures that services are scoped to their respective modules during the wiring process.
+
+```go expandable
+package runtime
+
+import (
+
+ "fmt"
+ "os"
+ "slices"
+ "github.com/cosmos/gogoproto/proto"
+ "google.golang.org/protobuf/reflect/protodesc"
+ "google.golang.org/protobuf/reflect/protoregistry"
+
+ runtimev1alpha1 "cosmossdk.io/api/cosmos/app/runtime/v1alpha1"
+ appv1alpha1 "cosmossdk.io/api/cosmos/app/v1alpha1"
+ authmodulev1 "cosmossdk.io/api/cosmos/auth/module/v1"
+ stakingmodulev1 "cosmossdk.io/api/cosmos/staking/module/v1"
+ "cosmossdk.io/core/address"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/core/comet"
+ "cosmossdk.io/core/event"
+ "cosmossdk.io/core/genesis"
+ "cosmossdk.io/core/header"
+ "cosmossdk.io/core/store"
+ "cosmossdk.io/depinject"
+ "cosmossdk.io/log"
+ storetypes "cosmossdk.io/store/types"
+ "cosmossdk.io/x/tx/signing"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/codec"
+ addresscodec "github.com/cosmos/cosmos-sdk/codec/address"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/std"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ "github.com/cosmos/cosmos-sdk/types/msgservice"
+)
+
+type appModule struct {
+ app *App
+}
+
+func (m appModule) RegisterServices(configurator module.Configurator) {
+	err := m.app.registerRuntimeServices(configurator)
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (m appModule) IsOnePerModuleType() {}
+
+func (m appModule) IsAppModule() {}
+
+var (
+ _ appmodule.AppModule = appModule{
+}
+ _ module.HasServices = appModule{
+}
+)
+
+// BaseAppOption is a depinject.AutoGroupType which can be used to pass
+// BaseApp options into the depinject. It should be used carefully.
+type BaseAppOption func(*baseapp.BaseApp)
+
+// IsManyPerContainerType indicates that this is a depinject.ManyPerContainerType.
+func (b BaseAppOption) IsManyPerContainerType() {}
+
+func init() {
+ appmodule.Register(&runtimev1alpha1.Module{
+},
+ appmodule.Provide(
+ ProvideApp,
+ ProvideInterfaceRegistry,
+ ProvideKVStoreKey,
+ ProvideTransientStoreKey,
+ ProvideMemoryStoreKey,
+ ProvideGenesisTxHandler,
+ ProvideKVStoreService,
+ ProvideMemoryStoreService,
+ ProvideTransientStoreService,
+ ProvideEventService,
+ ProvideHeaderInfoService,
+ ProvideCometInfoService,
+ ProvideBasicManager,
+ ProvideAddressCodec,
+ ),
+ appmodule.Invoke(SetupAppBuilder),
+ )
+}
+
+func ProvideApp(interfaceRegistry codectypes.InterfaceRegistry) (
+	codec.Codec,
+	*codec.LegacyAmino,
+	*AppBuilder,
+	*baseapp.MsgServiceRouter,
+	*baseapp.GRPCQueryRouter,
+	appmodule.AppModule,
+	protodesc.Resolver,
+	protoregistry.MessageTypeResolver,
+	error,
+) {
+	protoFiles := proto.HybridResolver
+	protoTypes := protoregistry.GlobalTypes
+
+	// At startup, check that all proto annotations are correct.
+	if err := msgservice.ValidateProtoAnnotations(protoFiles); err != nil {
+		// Once we switch to using protoreflect-based ante handlers, we might
+		// want to panic here instead of logging a warning.
+		_, _ = fmt.Fprintln(os.Stderr, err.Error())
+	}
+
+	amino := codec.NewLegacyAmino()
+	std.RegisterInterfaces(interfaceRegistry)
+	std.RegisterLegacyAminoCodec(amino)
+
+	cdc := codec.NewProtoCodec(interfaceRegistry)
+	msgServiceRouter := baseapp.NewMsgServiceRouter()
+	grpcQueryRouter := baseapp.NewGRPCQueryRouter()
+	app := &App{
+		storeKeys:         nil,
+		interfaceRegistry: interfaceRegistry,
+		cdc:               cdc,
+		amino:             amino,
+		basicManager:      module.BasicManager{},
+		msgServiceRouter:  msgServiceRouter,
+		grpcQueryRouter:   grpcQueryRouter,
+	}
+	appBuilder := &AppBuilder{app}
+
+	return cdc, amino, appBuilder, msgServiceRouter, grpcQueryRouter, appModule{app}, protoFiles, protoTypes, nil
+}
+
+type AppInputs struct {
+ depinject.In
+
+ AppConfig *appv1alpha1.Config `optional:"true"`
+ Config *runtimev1alpha1.Module
+ AppBuilder *AppBuilder
+ Modules map[string]appmodule.AppModule
+ CustomModuleBasics map[string]module.AppModuleBasic `optional:"true"`
+ BaseAppOptions []BaseAppOption
+ InterfaceRegistry codectypes.InterfaceRegistry
+ LegacyAmino *codec.LegacyAmino
+ Logger log.Logger
+}
+
+func SetupAppBuilder(inputs AppInputs) {
+	app := inputs.AppBuilder.app
+	app.baseAppOptions = inputs.BaseAppOptions
+	app.config = inputs.Config
+	app.appConfig = inputs.AppConfig
+	app.logger = inputs.Logger
+	app.ModuleManager = module.NewManagerFromMap(inputs.Modules)
+
+	for name, mod := range inputs.Modules {
+		if customBasicMod, ok := inputs.CustomModuleBasics[name]; ok {
+			app.basicManager[name] = customBasicMod
+			customBasicMod.RegisterInterfaces(inputs.InterfaceRegistry)
+			customBasicMod.RegisterLegacyAminoCodec(inputs.LegacyAmino)
+			continue
+		}
+
+		coreAppModuleBasic := module.CoreAppModuleBasicAdaptor(name, mod)
+		app.basicManager[name] = coreAppModuleBasic
+		coreAppModuleBasic.RegisterInterfaces(inputs.InterfaceRegistry)
+		coreAppModuleBasic.RegisterLegacyAminoCodec(inputs.LegacyAmino)
+	}
+}
+
+func ProvideInterfaceRegistry(addressCodec address.Codec, validatorAddressCodec ValidatorAddressCodec, customGetSigners []signing.CustomGetSigner) (codectypes.InterfaceRegistry, error) {
+	signingOptions := signing.Options{
+		AddressCodec:          addressCodec,
+		ValidatorAddressCodec: validatorAddressCodec,
+	}
+	for _, signer := range customGetSigners {
+		signingOptions.DefineCustomGetSigners(signer.MsgType, signer.Fn)
+	}
+
+	interfaceRegistry, err := codectypes.NewInterfaceRegistryWithOptions(codectypes.InterfaceRegistryOptions{
+		ProtoFiles:     proto.HybridResolver,
+		SigningOptions: signingOptions,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if err := interfaceRegistry.SigningContext().Validate(); err != nil {
+		return nil, err
+	}
+
+	return interfaceRegistry, nil
+}
+
+func registerStoreKey(wrapper *AppBuilder, key storetypes.StoreKey) {
+ wrapper.app.storeKeys = append(wrapper.app.storeKeys, key)
+}
+
+func storeKeyOverride(config *runtimev1alpha1.Module, moduleName string) *runtimev1alpha1.StoreKeyConfig {
+	for _, cfg := range config.OverrideStoreKeys {
+		if cfg.ModuleName == moduleName {
+			return cfg
+		}
+	}
+	return nil
+}
+
+func ProvideKVStoreKey(config *runtimev1alpha1.Module, key depinject.ModuleKey, app *AppBuilder) *storetypes.KVStoreKey {
+ if slices.Contains(config.SkipStoreKeys, key.Name()) {
+ return nil
+}
+ override := storeKeyOverride(config, key.Name())
+
+var storeKeyName string
+ if override != nil {
+ storeKeyName = override.KvStoreKey
+}
+
+else {
+ storeKeyName = key.Name()
+}
+ storeKey := storetypes.NewKVStoreKey(storeKeyName)
+
+registerStoreKey(app, storeKey)
+
+return storeKey
+}
+
+func ProvideTransientStoreKey(config *runtimev1alpha1.Module, key depinject.ModuleKey, app *AppBuilder) *storetypes.TransientStoreKey {
+ if slices.Contains(config.SkipStoreKeys, key.Name()) {
+ return nil
+}
+ storeKey := storetypes.NewTransientStoreKey(fmt.Sprintf("transient:%s", key.Name()))
+
+registerStoreKey(app, storeKey)
+
+return storeKey
+}
+
+func ProvideMemoryStoreKey(config *runtimev1alpha1.Module, key depinject.ModuleKey, app *AppBuilder) *storetypes.MemoryStoreKey {
+ if slices.Contains(config.SkipStoreKeys, key.Name()) {
+ return nil
+}
+ storeKey := storetypes.NewMemoryStoreKey(fmt.Sprintf("memory:%s", key.Name()))
+
+registerStoreKey(app, storeKey)
+
+return storeKey
+}
+
+func ProvideGenesisTxHandler(appBuilder *AppBuilder)
+
+genesis.TxHandler {
+ return appBuilder.app
+}
+
+func ProvideKVStoreService(config *runtimev1alpha1.Module, key depinject.ModuleKey, app *AppBuilder)
+
+store.KVStoreService {
+ storeKey := ProvideKVStoreKey(config, key, app)
+
+return kvStoreService{
+ key: storeKey
+}
+}
+
+func ProvideMemoryStoreService(config *runtimev1alpha1.Module, key depinject.ModuleKey, app *AppBuilder)
+
+store.MemoryStoreService {
+ storeKey := ProvideMemoryStoreKey(config, key, app)
+
+return memStoreService{
+ key: storeKey
+}
+}
+
+func ProvideTransientStoreService(config *runtimev1alpha1.Module, key depinject.ModuleKey, app *AppBuilder)
+
+store.TransientStoreService {
+ storeKey := ProvideTransientStoreKey(config, key, app)
+
+return transientStoreService{
+ key: storeKey
+}
+}
+
+func ProvideEventService()
+
+event.Service {
+ return EventService{
+}
+}
+
+func ProvideCometInfoService()
+
+comet.BlockInfoService {
+ return cometInfoService{
+}
+}
+
+func ProvideHeaderInfoService(app *AppBuilder)
+
+header.Service {
+ return headerInfoService{
+}
+}
+
+func ProvideBasicManager(app *AppBuilder)
+
+module.BasicManager {
+ return app.app.basicManager
+}
+
+type (
+ // ValidatorAddressCodec is an alias for address.Codec for validator addresses.
+ ValidatorAddressCodec address.Codec
+
+ // ConsensusAddressCodec is an alias for address.Codec for validator consensus addresses.
+ ConsensusAddressCodec address.Codec
+)
+
+type AddressCodecInputs struct {
+ depinject.In
+
+ AuthConfig *authmodulev1.Module `optional:"true"`
+ StakingConfig *stakingmodulev1.Module `optional:"true"`
+
+ AddressCodecFactory func()
+
+address.Codec `optional:"true"`
+ ValidatorAddressCodecFactory func()
+
+ValidatorAddressCodec `optional:"true"`
+ ConsensusAddressCodecFactory func()
+
+ConsensusAddressCodec `optional:"true"`
+}
+
+// ProvideAddressCodec provides an address.Codec to the container for any
+// modules that want to do address string <> bytes conversion.
+func ProvideAddressCodec(in AddressCodecInputs) (address.Codec, ValidatorAddressCodec, ConsensusAddressCodec) {
+ if in.AddressCodecFactory != nil && in.ValidatorAddressCodecFactory != nil && in.ConsensusAddressCodecFactory != nil {
+ return in.AddressCodecFactory(), in.ValidatorAddressCodecFactory(), in.ConsensusAddressCodecFactory()
+}
+ if in.AuthConfig == nil || in.AuthConfig.Bech32Prefix == "" {
+ panic("auth config bech32 prefix cannot be empty if no custom address codec is provided")
+}
+ if in.StakingConfig == nil {
+ in.StakingConfig = &stakingmodulev1.Module{
+}
+
+}
+ if in.StakingConfig.Bech32PrefixValidator == "" {
+ in.StakingConfig.Bech32PrefixValidator = fmt.Sprintf("%svaloper", in.AuthConfig.Bech32Prefix)
+}
+ if in.StakingConfig.Bech32PrefixConsensus == "" {
+ in.StakingConfig.Bech32PrefixConsensus = fmt.Sprintf("%svalcons", in.AuthConfig.Bech32Prefix)
+}
+
+return addresscodec.NewBech32Codec(in.AuthConfig.Bech32Prefix),
+ addresscodec.NewBech32Codec(in.StakingConfig.Bech32PrefixValidator),
+ addresscodec.NewBech32Codec(in.StakingConfig.Bech32PrefixConsensus)
+}
+```
+
+Additionally, runtime provides automatic registration of other essential services (e.g., gRPC routes) available to the App:
+
+* AutoCLI Query Service
+* Reflection Service
+* Custom module services
+
+```go expandable
+package runtime
+
+import (
+
+ "encoding/json"
+ "io"
+
+ dbm "github.com/cosmos/cosmos-db"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ "github.com/cosmos/cosmos-sdk/version"
+)
+
+// AppBuilder is a type that is injected into a container by the runtime module
+// (as *AppBuilder)
+
+// which can be used to create an app which is compatible with
+// the existing app.go initialization conventions.
+type AppBuilder struct {
+ app *App
+}
+
+// DefaultGenesis returns a default genesis from the registered AppModuleBasic's.
+func (a *AppBuilder)
+
+DefaultGenesis()
+
+map[string]json.RawMessage {
+ return a.app.DefaultGenesis()
+}
+
+// Build builds an *App instance.
+func (a *AppBuilder)
+
+Build(db dbm.DB, traceStore io.Writer, baseAppOptions ...func(*baseapp.BaseApp)) *App {
+ for _, option := range a.app.baseAppOptions {
+ baseAppOptions = append(baseAppOptions, option)
+}
+
+ // set routers first in case they get modified by other options
+ baseAppOptions = append(
+ []func(*baseapp.BaseApp) {
+ func(bApp *baseapp.BaseApp) {
+ bApp.SetMsgServiceRouter(a.app.msgServiceRouter)
+
+bApp.SetGRPCQueryRouter(a.app.grpcQueryRouter)
+},
+},
+ baseAppOptions...,
+ )
+ bApp := baseapp.NewBaseApp(a.app.config.AppName, a.app.logger, db, nil, baseAppOptions...)
+
+bApp.SetCommitMultiStoreTracer(traceStore)
+
+bApp.SetVersion(version.Version)
+
+bApp.SetInterfaceRegistry(a.app.interfaceRegistry)
+
+bApp.MountStores(a.app.storeKeys...)
+
+a.app.BaseApp = bApp
+ a.app.configurator = module.NewConfigurator(a.app.cdc, a.app.MsgServiceRouter(), a.app.GRPCQueryRouter())
+ if err := a.app.ModuleManager.RegisterServices(a.app.configurator); err != nil {
+ panic(err)
+}
+
+return a.app
+}
+```
+
+### 4. Application Building
+
+The `AppBuilder` type provides a structured way to build applications:
+
+```go expandable
+package runtime
+
+import (
+
+ "encoding/json"
+ "io"
+
+ dbm "github.com/cosmos/cosmos-db"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ "github.com/cosmos/cosmos-sdk/version"
+)
+
+// AppBuilder is a type that is injected into a container by the runtime module
+// (as *AppBuilder)
+
+// which can be used to create an app which is compatible with
+// the existing app.go initialization conventions.
+type AppBuilder struct {
+ app *App
+}
+
+// DefaultGenesis returns a default genesis from the registered AppModuleBasic's.
+func (a *AppBuilder)
+
+DefaultGenesis()
+
+map[string]json.RawMessage {
+ return a.app.DefaultGenesis()
+}
+
+// Build builds an *App instance.
+func (a *AppBuilder)
+
+Build(db dbm.DB, traceStore io.Writer, baseAppOptions ...func(*baseapp.BaseApp)) *App {
+ for _, option := range a.app.baseAppOptions {
+ baseAppOptions = append(baseAppOptions, option)
+}
+
+ // set routers first in case they get modified by other options
+ baseAppOptions = append(
+ []func(*baseapp.BaseApp) {
+ func(bApp *baseapp.BaseApp) {
+ bApp.SetMsgServiceRouter(a.app.msgServiceRouter)
+
+bApp.SetGRPCQueryRouter(a.app.grpcQueryRouter)
+},
+},
+ baseAppOptions...,
+ )
+ bApp := baseapp.NewBaseApp(a.app.config.AppName, a.app.logger, db, nil, baseAppOptions...)
+
+bApp.SetCommitMultiStoreTracer(traceStore)
+
+bApp.SetVersion(version.Version)
+
+bApp.SetInterfaceRegistry(a.app.interfaceRegistry)
+
+bApp.MountStores(a.app.storeKeys...)
+
+a.app.BaseApp = bApp
+ a.app.configurator = module.NewConfigurator(a.app.cdc, a.app.MsgServiceRouter(), a.app.GRPCQueryRouter())
+ if err := a.app.ModuleManager.RegisterServices(a.app.configurator); err != nil {
+ panic(err)
+}
+
+return a.app
+}
+```
+
+Key building steps:
+
+1. Configuration loading
+2. Module registration
+3. Service setup
+4. Store mounting
+5. Router configuration
+
+An application only needs to call `AppBuilder.Build` to create a fully configured application (`runtime.App`).
+
+```go expandable
+package runtime
+
+import (
+
+ "encoding/json"
+ "io"
+
+ dbm "github.com/cosmos/cosmos-db"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ "github.com/cosmos/cosmos-sdk/version"
+)
+
+// AppBuilder is a type that is injected into a container by the runtime module
+// (as *AppBuilder)
+
+// which can be used to create an app which is compatible with
+// the existing app.go initialization conventions.
+type AppBuilder struct {
+ app *App
+}
+
+// DefaultGenesis returns a default genesis from the registered AppModuleBasic's.
+func (a *AppBuilder)
+
+DefaultGenesis()
+
+map[string]json.RawMessage {
+ return a.app.DefaultGenesis()
+}
+
+// Build builds an *App instance.
+func (a *AppBuilder)
+
+Build(db dbm.DB, traceStore io.Writer, baseAppOptions ...func(*baseapp.BaseApp)) *App {
+ for _, option := range a.app.baseAppOptions {
+ baseAppOptions = append(baseAppOptions, option)
+}
+
+ // set routers first in case they get modified by other options
+ baseAppOptions = append(
+ []func(*baseapp.BaseApp) {
+ func(bApp *baseapp.BaseApp) {
+ bApp.SetMsgServiceRouter(a.app.msgServiceRouter)
+
+bApp.SetGRPCQueryRouter(a.app.grpcQueryRouter)
+},
+},
+ baseAppOptions...,
+ )
+ bApp := baseapp.NewBaseApp(a.app.config.AppName, a.app.logger, db, nil, baseAppOptions...)
+
+bApp.SetCommitMultiStoreTracer(traceStore)
+
+bApp.SetVersion(version.Version)
+
+bApp.SetInterfaceRegistry(a.app.interfaceRegistry)
+
+bApp.MountStores(a.app.storeKeys...)
+
+a.app.BaseApp = bApp
+ a.app.configurator = module.NewConfigurator(a.app.cdc, a.app.MsgServiceRouter(), a.app.GRPCQueryRouter())
+ if err := a.app.ModuleManager.RegisterServices(a.app.configurator); err != nil {
+ panic(err)
+}
+
+return a.app
+}
+```
+
+More information on building applications can be found in the [next section](/sdk/v0.53/build/building-apps/app-go).
+
+## Best Practices
+
+1. Module Order: Carefully consider the order of modules in begin\_blockers, end\_blockers, and pre\_blockers.
+2. Store Keys: Use override\_store\_keys only when necessary to maintain clarity.
+3. Genesis Order: Maintain correct initialization order in init\_genesis.
+4. Migration Management: Use order\_migrations to control upgrade paths.
+
+### Migration Considerations
+
+When upgrading between versions:
+
+1. Review the migration order specified in `order_migrations`
+2. Ensure all required modules are included in the configuration
+3. Validate store key configurations
+4. Test the upgrade path thoroughly
diff --git a/sdk/next/build/building-apps/vote-extensions.mdx b/sdk/next/build/building-apps/vote-extensions.mdx
new file mode 100644
index 000000000..7dfac27b9
--- /dev/null
+++ b/sdk/next/build/building-apps/vote-extensions.mdx
@@ -0,0 +1,186 @@
+---
+title: Vote Extensions
+---
+
+
+**Synopsis**
+This section describes how the application can define and use vote extensions
+defined in ABCI++.
+
+
+## Extend Vote
+
+ABCI++ allows an application to extend a pre-commit vote with arbitrary data. This
+process does NOT have to be deterministic, and the data returned can be unique to the
+validator process. The Cosmos SDK defines `baseapp.ExtendVoteHandler`:
+
+```go
+type ExtendVoteHandler func(Context, *abci.RequestExtendVote) (*abci.ResponseExtendVote, error)
+```
+
+An application can set this handler in `app.go` via the `baseapp.SetExtendVoteHandler`
+`BaseApp` option function. The `sdk.ExtendVoteHandler`, if defined, is called during
+the `ExtendVote` ABCI method. Note, if an application decides to implement
+`baseapp.ExtendVoteHandler`, it MUST return a non-nil `VoteExtension`. However, the vote
+extension can be empty. See [here](https://github.com/cometbft/cometbft/blob/v0.38.0-rc1/spec/abci/abci++_methods.md#extendvote)
+for more details.
+
+There are many decentralized censorship-resistant use cases for vote extensions.
+For example, a validator may want to submit prices for a price oracle or encryption
+shares for an encrypted transaction mempool. Note, an application should be careful
+to consider the size of the vote extensions as they could increase latency in block
+production. See [here](https://github.com/cometbft/cometbft/blob/v0.38.0-rc1/docs/qa/CometBFT-QA-38.md#vote-extensions-testbed)
+for more details.
+
+## Verify Vote Extension
+
+Similar to extending a vote, an application can also verify vote extensions from
+other validators when validating their pre-commits. For a given vote extension,
+this process MUST be deterministic. The Cosmos SDK defines `sdk.VerifyVoteExtensionHandler`:
+
+```go expandable
+package types
+
+import (
+
+ abci "github.com/cometbft/cometbft/abci/types"
+)
+
+// InitChainer initializes application state at genesis
+type InitChainer func(ctx Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error)
+
+// PrepareCheckStater runs code during commit after the block has been committed, and the `checkState`
+// has been branched for the new block.
+type PrepareCheckStater func(ctx Context)
+
+// Precommiter runs code during commit immediately before the `deliverState` is written to the `rootMultiStore`.
+type Precommiter func(ctx Context)
+
+// PeerFilter responds to p2p filtering queries from Tendermint
+type PeerFilter func(info string) *abci.ResponseQuery
+
+// ProcessProposalHandler defines a function type alias for processing a proposer
+type ProcessProposalHandler func(Context, *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error)
+
+// PrepareProposalHandler defines a function type alias for preparing a proposal
+type PrepareProposalHandler func(Context, *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error)
+
+// ExtendVoteHandler defines a function type alias for extending a pre-commit vote.
+type ExtendVoteHandler func(Context, *abci.RequestExtendVote) (*abci.ResponseExtendVote, error)
+
+// VerifyVoteExtensionHandler defines a function type alias for verifying a
+// pre-commit vote extension.
+type VerifyVoteExtensionHandler func(Context, *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error)
+
+// BeginBlocker defines a function type alias for executing application
+// business logic before transactions are executed.
+//
+// Note: The BeginBlock ABCI method no longer exists in the ABCI specification
+// as of CometBFT v0.38.0. This function type alias is provided for backwards
+// compatibility with applications that still use the BeginBlock ABCI method
+// and allows for existing BeginBlock functionality within applications.
+type BeginBlocker func(Context) (BeginBlock, error)
+
+// EndBlocker defines a function type alias for executing application
+// business logic after transactions are executed but before committing.
+//
+// Note: The EndBlock ABCI method no longer exists in the ABCI specification
+// as of CometBFT v0.38.0. This function type alias is provided for backwards
+// compatibility with applications that still use the EndBlock ABCI method
+// and allows for existing EndBlock functionality within applications.
+type EndBlocker func(Context) (EndBlock, error)
+
+// EndBlock defines a type which contains endblock events and validator set updates
+type EndBlock struct {
+ ValidatorUpdates []abci.ValidatorUpdate
+ Events []abci.Event
+}
+
+// BeginBlock defines a type which contains beginBlock events
+type BeginBlock struct {
+ Events []abci.Event
+}
+```
+
+An application can set this handler in `app.go` via the `baseapp.SetVerifyVoteExtensionHandler`
+`BaseApp` option function. The `sdk.VerifyVoteExtensionHandler`, if defined, is called
+during the `VerifyVoteExtension` ABCI method. If an application defines a vote
+extension handler, it should also define a verification handler. Note, not all
+validators will share the same view of what vote extensions they verify depending
+on how votes are propagated. See [here](https://github.com/cometbft/cometbft/blob/v0.38.0-rc1/spec/abci/abci++_methods.md#verifyvoteextension)
+for more details.
+
+## Vote Extension Propagation
+
+The agreed upon vote extensions at height `H` are provided to the proposing validator
+at height `H+1` during `PrepareProposal`. As a result, the vote extensions are
+not natively provided or exposed to the remaining validators during `ProcessProposal`.
+Therefore, if an application requires that the agreed upon vote extensions from
+height `H` are available to all validators at `H+1`, the application must propagate
+these vote extensions manually in the block proposal itself. This can be done by
+"injecting" them into the block proposal, since the `Txs` field in `PrepareProposal`
+is just a slice of byte slices.
+
+`FinalizeBlock` will ignore any byte slice that doesn't implement an `sdk.Tx`, so
+any injected vote extensions will safely be ignored in `FinalizeBlock`. For more
+details on propagation, see the [ABCI++ 2.0 ADR](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-064-abci-2.0.md#vote-extension-propagation--verification).
+
+### Recovery of injected Vote Extensions
+
+As stated before, vote extensions can be injected into a block proposal (along with
+other transactions in the `Txs` field). The Cosmos SDK provides a pre-FinalizeBlock
+hook to allow applications to recover vote extensions, perform any necessary
+computation on them, and then store the results in the cached store. These results
+will be available to the application during the subsequent `FinalizeBlock` call.
+
+An example of what a pre-FinalizeBlock hook could look like is shown below:
+
+```go expandable
+app.SetPreBlocker(func(ctx sdk.Context, req *abci.RequestFinalizeBlock)
+
+error {
+ allVEs := []VE{
+} // store all parsed vote extensions here
+ for _, tx := range req.Txs {
+ // define a custom function that tries to parse the tx as a vote extension
+ ve, ok := parseVoteExtension(tx)
+ if !ok {
+ continue
+}
+
+allVEs = append(allVEs, ve)
+}
+
+ // perform any necessary computation on the vote extensions and store the result
+ // in the cached store
+ result := compute(allVEs)
+ err := storeVEResult(ctx, result)
+ if err != nil {
+ return err
+}
+
+return nil
+})
+```
+
+Then, in an app's module, the application can retrieve the result of the computation
+of vote extensions from the cached store:
+
+```go expandable
+func (k Keeper)
+
+BeginBlocker(ctx context.Context)
+
+error {
+ // retrieve the result of the computation of vote extensions from the cached store
+ result, err := k.GetVEResult(ctx)
+ if err != nil {
+ return err
+}
+
+ // use the result of the computation of vote extensions
+ k.setSomething(result)
+
+return nil
+}
+```
diff --git a/sdk/next/build/building-modules/beginblock-endblock.mdx b/sdk/next/build/building-modules/beginblock-endblock.mdx
new file mode 100644
index 000000000..8674e9104
--- /dev/null
+++ b/sdk/next/build/building-modules/beginblock-endblock.mdx
@@ -0,0 +1,114 @@
+---
+title: BeginBlocker and EndBlocker
+---
+{/* force preview rebuild: 2025-09-30 */}
+
+
+**Synopsis**
+`BeginBlocker` and `EndBlocker` are optional methods module developers can implement in their module. They will be triggered at the beginning and at the end of each block respectively, when the [`BeginBlock`](/sdk/v0.53/learn/advanced/baseapp#beginblock) and [`EndBlock`](/sdk/v0.53/learn/advanced/baseapp#endblock) ABCI messages are received from the underlying consensus engine.
+
+
+
+**Prerequisite Readings**
+
+* [Module Manager](/sdk/v0.53/build/building-modules/module-manager)
+
+
+
+## BeginBlocker and EndBlocker
+
+`BeginBlocker` and `EndBlocker` are a way for module developers to add automatic execution of logic to their module. This is a powerful tool that should be used carefully, as complex automatic functions can slow down or even halt the chain.
+
+In 0.47.0, Prepare and Process Proposal were added that allow app developers to do arbitrary work at those phases, but they do not influence the work that will be done in BeginBlock. If an application requires `BeginBlock` to execute before any other work is done, this is not possible today (0.50.0).
+
+When needed, `BeginBlocker` and `EndBlocker` are implemented as part of the [`HasBeginBlocker`, `HasABCIEndBlocker` and `EndBlocker` interfaces](/sdk/v0.53/build/building-modules/module-manager#appmodule). This means either can be left-out if not required. The `BeginBlock` and `EndBlock` methods of the interface implemented in `module.go` generally defer to `BeginBlocker` and `EndBlocker` methods respectively, which are usually implemented in `abci.go`.
+
+The actual implementation of `BeginBlocker` and `EndBlocker` in `abci.go` are very similar to that of a [`Msg` service](/sdk/v0.53/build/building-modules/msg-services):
+
+* They generally use the [`keeper`](/sdk/v0.53/build/building-modules/keeper) and [`ctx`](/sdk/v0.53/learn/advanced/context) to retrieve information about the latest state.
+* If needed, they use the `keeper` and `ctx` to trigger state-transitions.
+* If needed, they can emit [`events`](/sdk/v0.53/learn/advanced/events) via the `ctx`'s `EventManager`.
+
+A specific type of `EndBlocker` is available to return validator updates to the underlying consensus engine in the form of an [`[]abci.ValidatorUpdates`](/cometbft/v0.38/spec/abci/Methods#endblock). This is the preferred way to implement custom validator changes.
+
+It is possible for developers to define the order of execution between the `BeginBlocker`/`EndBlocker` functions of each of their application's modules via the module's manager `SetOrderBeginBlocker`/`SetOrderEndBlocker` methods. For more on the module manager, click [here](/sdk/v0.53/build/building-modules/module-manager).
+
+See an example implementation of `BeginBlocker` from the `distribution` module:
+
+```go expandable
+package distribution
+
+import (
+
+ "time"
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/distribution/keeper"
+ "github.com/cosmos/cosmos-sdk/x/distribution/types"
+)
+
+// BeginBlocker sets the proposer for determining distribution during endblock
+// and distribute rewards for the previous block.
+func BeginBlocker(ctx sdk.Context, k keeper.Keeper)
+
+error {
+ defer telemetry.ModuleMeasureSince(types.ModuleName, time.Now(), telemetry.MetricKeyBeginBlocker)
+
+ // determine the total power signing the block
+ var previousTotalPower int64
+ for _, voteInfo := range ctx.VoteInfos() {
+ previousTotalPower += voteInfo.Validator.Power
+}
+
+ // TODO this is Tendermint-dependent
+ // ref https://github.com/cosmos/cosmos-sdk/issues/3095
+ if ctx.BlockHeight() > 1 {
+ k.AllocateTokens(ctx, previousTotalPower, ctx.VoteInfos())
+}
+
+ // record the proposer for when we payout on the next block
+ consAddr := sdk.ConsAddress(ctx.BlockHeader().ProposerAddress)
+
+k.SetPreviousProposerConsAddr(ctx, consAddr)
+
+return nil
+}
+```
+
+and an example implementation of `EndBlocker` from the `staking` module:
+
+```go expandable
+package keeper
+
+import (
+
+ "context"
+ "time"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/staking/types"
+)
+
+// BeginBlocker will persist the current header and validator set as a historical entry
+// and prune the oldest entry based on the HistoricalEntries parameter
+func (k *Keeper)
+
+BeginBlocker(ctx sdk.Context) {
+ defer telemetry.ModuleMeasureSince(types.ModuleName, time.Now(), telemetry.MetricKeyBeginBlocker)
+
+k.TrackHistoricalInfo(ctx)
+}
+
+// Called every block, update validator set
+func (k *Keeper)
+
+EndBlocker(ctx context.Context) ([]abci.ValidatorUpdate, error) {
+ defer telemetry.ModuleMeasureSince(types.ModuleName, time.Now(), telemetry.MetricKeyEndBlocker)
+
+return k.BlockValidatorUpdates(sdk.UnwrapSDKContext(ctx)), nil
+}
+```
+
+{/* TODO: leaving this here to update docs with core api changes */}
diff --git a/sdk/next/build/building-modules/depinject.mdx b/sdk/next/build/building-modules/depinject.mdx
new file mode 100644
index 000000000..583d888a8
--- /dev/null
+++ b/sdk/next/build/building-modules/depinject.mdx
@@ -0,0 +1,3495 @@
+---
+title: Modules depinject-ready
+---
+
+
+**Prerequisite Readings**
+
+* [Depinject Documentation](/sdk/v0.53/build/packages/depinject)
+
+
+
+[`depinject`](/sdk/v0.53/build/packages/depinject) is used to wire any module in `app.go`.
+All core modules are already configured to support dependency injection.
+
+To work with `depinject` a module must define its configuration and requirements so that `depinject` can provide the right dependencies.
+
+In brief, as a module developer, the following steps are required:
+
+1. Define the module configuration using Protobuf
+2. Define the module dependencies in `x/{moduleName}/module.go`
+
+A chain developer can then use the module by following these two steps:
+
+1. Configure the module in `app_config.go` or `app.yaml`
+2. Inject the module in `app.go`
+
+## Module Configuration
+
+The module available configuration is defined in a Protobuf file, located at `{moduleName}/module/v1/module.proto`.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/proto/cosmos/group/module/v1/module.proto
+```
+
+* `go_import` must point to the Go package of the custom module.
+* Message fields define the module configuration.
+ That configuration can be set in the `app_config.go` / `app.yaml` file for a chain developer to configure the module.\
+ Taking `group` as example, a chain developer is able to decide, thanks to `uint64 max_metadata_len`, what the maximum metadata length allowed for a group proposal is.
+
+ ```go expandable
+ package simapp
+
+ import (
+
+ "time"
+ "google.golang.org/protobuf/types/known/durationpb"
+
+ runtimev1alpha1 "cosmossdk.io/api/cosmos/app/runtime/v1alpha1"
+ appv1alpha1 "cosmossdk.io/api/cosmos/app/v1alpha1"
+ authmodulev1 "cosmossdk.io/api/cosmos/auth/module/v1"
+ authzmodulev1 "cosmossdk.io/api/cosmos/authz/module/v1"
+ bankmodulev1 "cosmossdk.io/api/cosmos/bank/module/v1"
+ circuitmodulev1 "cosmossdk.io/api/cosmos/circuit/module/v1"
+ consensusmodulev1 "cosmossdk.io/api/cosmos/consensus/module/v1"
+ crisismodulev1 "cosmossdk.io/api/cosmos/crisis/module/v1"
+ distrmodulev1 "cosmossdk.io/api/cosmos/distribution/module/v1"
+ evidencemodulev1 "cosmossdk.io/api/cosmos/evidence/module/v1"
+ feegrantmodulev1 "cosmossdk.io/api/cosmos/feegrant/module/v1"
+ genutilmodulev1 "cosmossdk.io/api/cosmos/genutil/module/v1"
+ govmodulev1 "cosmossdk.io/api/cosmos/gov/module/v1"
+ groupmodulev1 "cosmossdk.io/api/cosmos/group/module/v1"
+ mintmodulev1 "cosmossdk.io/api/cosmos/mint/module/v1"
+ nftmodulev1 "cosmossdk.io/api/cosmos/nft/module/v1"
+ paramsmodulev1 "cosmossdk.io/api/cosmos/params/module/v1"
+ slashingmodulev1 "cosmossdk.io/api/cosmos/slashing/module/v1"
+ stakingmodulev1 "cosmossdk.io/api/cosmos/staking/module/v1"
+ txconfigv1 "cosmossdk.io/api/cosmos/tx/config/v1"
+ upgrademodulev1 "cosmossdk.io/api/cosmos/upgrade/module/v1"
+ vestingmodulev1 "cosmossdk.io/api/cosmos/vesting/module/v1"
+ "cosmossdk.io/depinject"
+
+ _ "cosmossdk.io/x/circuit" // import for side-effects
+ _ "cosmossdk.io/x/evidence" // import for side-effects
+ _ "cosmossdk.io/x/feegrant/module" // import for side-effects
+ _ "cosmossdk.io/x/nft/module" // import for side-effects
+ _ "cosmossdk.io/x/upgrade" // import for side-effects
+ _ "github.com/cosmos/cosmos-sdk/x/auth/tx/config" // import for side-effects
+ _ "github.com/cosmos/cosmos-sdk/x/auth/vesting" // import for side-effects
+ _ "github.com/cosmos/cosmos-sdk/x/authz/module" // import for side-effects
+ _ "github.com/cosmos/cosmos-sdk/x/bank" // import for side-effects
+ _ "github.com/cosmos/cosmos-sdk/x/consensus" // import for side-effects
+ _ "github.com/cosmos/cosmos-sdk/x/crisis" // import for side-effects
+ _ "github.com/cosmos/cosmos-sdk/x/distribution" // import for side-effects
+ "github.com/cosmos/cosmos-sdk/x/genutil"
+ "github.com/cosmos/cosmos-sdk/x/gov"
+ _ "github.com/cosmos/cosmos-sdk/x/group/module" // import for side-effects
+ _ "github.com/cosmos/cosmos-sdk/x/mint" // import for side-effects
+ _ "github.com/cosmos/cosmos-sdk/x/params" // import for side-effects
+ _ "github.com/cosmos/cosmos-sdk/x/slashing" // import for side-effects
+ _ "github.com/cosmos/cosmos-sdk/x/staking" // import for side-effects
+
+ "cosmossdk.io/core/appconfig"
+ circuittypes "cosmossdk.io/x/circuit/types"
+ evidencetypes "cosmossdk.io/x/evidence/types"
+ "cosmossdk.io/x/feegrant"
+ "cosmossdk.io/x/nft"
+ upgradetypes "cosmossdk.io/x/upgrade/types"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
+ "github.com/cosmos/cosmos-sdk/x/authz"
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ consensustypes "github.com/cosmos/cosmos-sdk/x/consensus/types"
+ crisistypes "github.com/cosmos/cosmos-sdk/x/crisis/types"
+ distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
+ genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
+ govclient "github.com/cosmos/cosmos-sdk/x/gov/client"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ "github.com/cosmos/cosmos-sdk/x/group"
+ minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
+ paramsclient "github.com/cosmos/cosmos-sdk/x/params/client"
+ paramstypes "github.com/cosmos/cosmos-sdk/x/params/types"
+ slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types"
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+ )
+
+ var (
+ // module account permissions
+ moduleAccPerms = []*authmodulev1.ModuleAccountPermission{
+ {
+ Account: authtypes.FeeCollectorName
+ },
+ {
+ Account: distrtypes.ModuleName
+ },
+ {
+ Account: minttypes.ModuleName,
+ Permissions: []string{
+ authtypes.Minter
+ }},
+ {
+ Account: stakingtypes.BondedPoolName,
+ Permissions: []string{
+ authtypes.Burner, stakingtypes.ModuleName
+ }},
+ {
+ Account: stakingtypes.NotBondedPoolName,
+ Permissions: []string{
+ authtypes.Burner, stakingtypes.ModuleName
+ }},
+ {
+ Account: govtypes.ModuleName,
+ Permissions: []string{
+ authtypes.Burner
+ }},
+ {
+ Account: nft.ModuleName
+ },
+ }
+
+ // blocked account addresses
+ blockAccAddrs = []string{
+ authtypes.FeeCollectorName,
+ distrtypes.ModuleName,
+ minttypes.ModuleName,
+ stakingtypes.BondedPoolName,
+ stakingtypes.NotBondedPoolName,
+ nft.ModuleName,
+ // We allow the following module accounts to receive funds:
+ // govtypes.ModuleName
+ }
+
+ // application configuration (used by depinject)
+
+ AppConfig = depinject.Configs(appconfig.Compose(&appv1alpha1.Config{
+ Modules: []*appv1alpha1.ModuleConfig{
+ {
+ Name: runtime.ModuleName,
+ Config: appconfig.WrapAny(&runtimev1alpha1.Module{
+ AppName: "SimApp",
+ // During begin block slashing happens after distr.BeginBlocker so that
+ // there is nothing left over in the validator fee pool, so as to keep the
+ // CanWithdrawInvariant invariant.
+ // NOTE: staking module is required if HistoricalEntries param > 0
+ BeginBlockers: []string{
+ upgradetypes.ModuleName,
+ minttypes.ModuleName,
+ distrtypes.ModuleName,
+ slashingtypes.ModuleName,
+ evidencetypes.ModuleName,
+ stakingtypes.ModuleName,
+ genutiltypes.ModuleName,
+ authz.ModuleName,
+ },
+ EndBlockers: []string{
+ crisistypes.ModuleName,
+ govtypes.ModuleName,
+ stakingtypes.ModuleName,
+ genutiltypes.ModuleName,
+ feegrant.ModuleName,
+ group.ModuleName,
+ },
+ OverrideStoreKeys: []*runtimev1alpha1.StoreKeyConfig{
+ {
+ ModuleName: authtypes.ModuleName,
+ KvStoreKey: "acc",
+ },
+ },
+ // NOTE: The genutils module must occur after staking so that pools are
+ // properly initialized with tokens from genesis accounts.
+ // NOTE: The genutils module must also occur after auth so that it can access the params from auth.
+ InitGenesis: []string{
+ authtypes.ModuleName,
+ banktypes.ModuleName,
+ distrtypes.ModuleName,
+ stakingtypes.ModuleName,
+ slashingtypes.ModuleName,
+ govtypes.ModuleName,
+ minttypes.ModuleName,
+ crisistypes.ModuleName,
+ genutiltypes.ModuleName,
+ evidencetypes.ModuleName,
+ authz.ModuleName,
+ feegrant.ModuleName,
+ nft.ModuleName,
+ group.ModuleName,
+ paramstypes.ModuleName,
+ upgradetypes.ModuleName,
+ vestingtypes.ModuleName,
+ consensustypes.ModuleName,
+ circuittypes.ModuleName,
+ },
+ // When ExportGenesis is not specified, the export genesis module order
+ // is equal to the init genesis order
+ // ExportGenesis: []string{
+ },
+ // Uncomment if you want to set a custom migration order here.
+ // OrderMigrations: []string{
+ },
+ }),
+ },
+ {
+ Name: authtypes.ModuleName,
+ Config: appconfig.WrapAny(&authmodulev1.Module{
+ Bech32Prefix: "cosmos",
+ ModuleAccountPermissions: moduleAccPerms,
+ // By default modules authority is the governance module. This is configurable with the following:
+ // Authority: "group", // A custom module authority can be set using a module name
+ // Authority: "cosmos1cwwv22j5ca08ggdv9c2uky355k908694z577tv", // or a specific address
+ }),
+ },
+ {
+ Name: vestingtypes.ModuleName,
+ Config: appconfig.WrapAny(&vestingmodulev1.Module{
+ }),
+ },
+ {
+ Name: banktypes.ModuleName,
+ Config: appconfig.WrapAny(&bankmodulev1.Module{
+ BlockedModuleAccountsOverride: blockAccAddrs,
+ }),
+ },
+ {
+ Name: stakingtypes.ModuleName,
+ Config: appconfig.WrapAny(&stakingmodulev1.Module{
+ }),
+ },
+ {
+ Name: slashingtypes.ModuleName,
+ Config: appconfig.WrapAny(&slashingmodulev1.Module{
+ }),
+ },
+ {
+ Name: paramstypes.ModuleName,
+          Config: appconfig.WrapAny(&paramsmodulev1.Module{
+ }),
+ },
+ {
+ Name: "tx",
+ Config: appconfig.WrapAny(&txconfigv1.Config{
+ }),
+ },
+ {
+ Name: genutiltypes.ModuleName,
+ Config: appconfig.WrapAny(&genutilmodulev1.Module{
+ }),
+ },
+ {
+ Name: authz.ModuleName,
+ Config: appconfig.WrapAny(&authzmodulev1.Module{
+ }),
+ },
+ {
+ Name: upgradetypes.ModuleName,
+ Config: appconfig.WrapAny(&upgrademodulev1.Module{
+ }),
+ },
+ {
+ Name: distrtypes.ModuleName,
+ Config: appconfig.WrapAny(&distrmodulev1.Module{
+ }),
+ },
+ {
+ Name: evidencetypes.ModuleName,
+ Config: appconfig.WrapAny(&evidencemodulev1.Module{
+ }),
+ },
+ {
+ Name: minttypes.ModuleName,
+ Config: appconfig.WrapAny(&mintmodulev1.Module{
+ }),
+ },
+ {
+ Name: group.ModuleName,
+ Config: appconfig.WrapAny(&groupmodulev1.Module{
+ MaxExecutionPeriod: durationpb.New(time.Second * 1209600),
+ MaxMetadataLen: 255,
+ }),
+ },
+ {
+ Name: nft.ModuleName,
+ Config: appconfig.WrapAny(&nftmodulev1.Module{
+ }),
+ },
+ {
+ Name: feegrant.ModuleName,
+ Config: appconfig.WrapAny(&feegrantmodulev1.Module{
+ }),
+ },
+ {
+ Name: govtypes.ModuleName,
+ Config: appconfig.WrapAny(&govmodulev1.Module{
+ }),
+ },
+ {
+ Name: crisistypes.ModuleName,
+ Config: appconfig.WrapAny(&crisismodulev1.Module{
+ }),
+ },
+ {
+ Name: consensustypes.ModuleName,
+ Config: appconfig.WrapAny(&consensusmodulev1.Module{
+ }),
+ },
+ {
+ Name: circuittypes.ModuleName,
+ Config: appconfig.WrapAny(&circuitmodulev1.Module{
+ }),
+ },
+ },
+ }),
+ depinject.Supply(
+ // supply custom module basics
+ map[string]module.AppModuleBasic{
+ genutiltypes.ModuleName: genutil.NewAppModuleBasic(genutiltypes.DefaultMessageValidator),
+ govtypes.ModuleName: gov.NewAppModuleBasic(
+ []govclient.ProposalHandler{
+ paramsclient.ProposalHandler,
+ },
+ ),
+ },
+ ))
+ )
+ ```
+
+That message is generated using [`pulsar`](https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/scripts/protocgen-pulsar.sh) (by running `make proto-gen`).
+In the case of the `group` module, this file is generated here: [Link](https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/api/cosmos/group/module/v1/module.pulsar.go).
+
+The part that is relevant for the module configuration is:
+
+```go expandable
+// Code generated by protoc-gen-go-pulsar. DO NOT EDIT.
+package modulev1
+
+import (
+
+ _ "cosmossdk.io/api/amino"
+ _ "cosmossdk.io/api/cosmos/app/v1alpha1"
+ fmt "fmt"
+ runtime "github.com/cosmos/cosmos-proto/runtime"
+ _ "github.com/cosmos/gogoproto/gogoproto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoiface "google.golang.org/protobuf/runtime/protoiface"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ io "io"
+ reflect "reflect"
+ sync "sync"
+)
+
+var (
+ md_Module protoreflect.MessageDescriptor
+ fd_Module_max_execution_period protoreflect.FieldDescriptor
+ fd_Module_max_metadata_len protoreflect.FieldDescriptor
+)
+
+func init() {
+ file_cosmos_group_module_v1_module_proto_init()
+
+md_Module = File_cosmos_group_module_v1_module_proto.Messages().ByName("Module")
+
+fd_Module_max_execution_period = md_Module.Fields().ByName("max_execution_period")
+
+fd_Module_max_metadata_len = md_Module.Fields().ByName("max_metadata_len")
+}
+
+var _ protoreflect.Message = (*fastReflection_Module)(nil)
+
+type fastReflection_Module Module
+
+func (x *Module)
+
+ProtoReflect()
+
+protoreflect.Message {
+ return (*fastReflection_Module)(x)
+}
+
+func (x *Module)
+
+slowProtoReflect()
+
+protoreflect.Message {
+ mi := &file_cosmos_group_module_v1_module_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+}
+
+return ms
+}
+
+return mi.MessageOf(x)
+}
+
+var _fastReflection_Module_messageType fastReflection_Module_messageType
+var _ protoreflect.MessageType = fastReflection_Module_messageType{
+}
+
+type fastReflection_Module_messageType struct{
+}
+
+func (x fastReflection_Module_messageType)
+
+Zero()
+
+protoreflect.Message {
+ return (*fastReflection_Module)(nil)
+}
+
+func (x fastReflection_Module_messageType)
+
+New()
+
+protoreflect.Message {
+ return new(fastReflection_Module)
+}
+
+func (x fastReflection_Module_messageType)
+
+Descriptor()
+
+protoreflect.MessageDescriptor {
+ return md_Module
+}
+
+// Descriptor returns message descriptor, which contains only the protobuf
+// type information for the message.
+func (x *fastReflection_Module)
+
+Descriptor()
+
+protoreflect.MessageDescriptor {
+ return md_Module
+}
+
+// Type returns the message type, which encapsulates both Go and protobuf
+// type information. If the Go type information is not needed,
+// it is recommended that the message descriptor be used instead.
+func (x *fastReflection_Module)
+
+Type()
+
+protoreflect.MessageType {
+ return _fastReflection_Module_messageType
+}
+
+// New returns a newly allocated and mutable empty message.
+func (x *fastReflection_Module)
+
+New()
+
+protoreflect.Message {
+ return new(fastReflection_Module)
+}
+
+// Interface unwraps the message reflection interface and
+// returns the underlying ProtoMessage interface.
+func (x *fastReflection_Module)
+
+Interface()
+
+protoreflect.ProtoMessage {
+ return (*Module)(x)
+}
+
+// Range iterates over every populated field in an undefined order,
+// calling f for each field descriptor and value encountered.
+// Range returns immediately if f returns false.
+// While iterating, mutating operations may only be performed
+// on the current field descriptor.
+func (x *fastReflection_Module)
+
+Range(f func(protoreflect.FieldDescriptor, protoreflect.Value)
+
+bool) {
+ if x.MaxExecutionPeriod != nil {
+ value := protoreflect.ValueOfMessage(x.MaxExecutionPeriod.ProtoReflect())
+ if !f(fd_Module_max_execution_period, value) {
+ return
+}
+
+}
+ if x.MaxMetadataLen != uint64(0) {
+ value := protoreflect.ValueOfUint64(x.MaxMetadataLen)
+ if !f(fd_Module_max_metadata_len, value) {
+ return
+}
+
+}
+}
+
+// Has reports whether a field is populated.
+//
+// Some fields have the property of nullability where it is possible to
+// distinguish between the default value of a field and whether the field
+// was explicitly populated with the default value. Singular message fields,
+// member fields of a oneof, and proto2 scalar fields are nullable. Such
+// fields are populated only if explicitly set.
+//
+// In other cases (aside from the nullable cases above),
+// a proto3 scalar field is populated if it contains a non-zero value, and
+// a repeated field is populated if it is non-empty.
+func (x *fastReflection_Module)
+
+Has(fd protoreflect.FieldDescriptor)
+
+bool {
+ switch fd.FullName() {
+ case "cosmos.group.module.v1.Module.max_execution_period":
+ return x.MaxExecutionPeriod != nil
+ case "cosmos.group.module.v1.Module.max_metadata_len":
+ return x.MaxMetadataLen != uint64(0)
+
+default:
+ if fd.IsExtension() {
+ panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.group.module.v1.Module"))
+}
+
+panic(fmt.Errorf("message cosmos.group.module.v1.Module does not contain field %s", fd.FullName()))
+}
+}
+
+// Clear clears the field such that a subsequent Has call reports false.
+//
+// Clearing an extension field clears both the extension type and value
+// associated with the given field number.
+//
+// Clear is a mutating operation and unsafe for concurrent use.
+func (x *fastReflection_Module)
+
+Clear(fd protoreflect.FieldDescriptor) {
+ switch fd.FullName() {
+ case "cosmos.group.module.v1.Module.max_execution_period":
+ x.MaxExecutionPeriod = nil
+ case "cosmos.group.module.v1.Module.max_metadata_len":
+ x.MaxMetadataLen = uint64(0)
+
+default:
+ if fd.IsExtension() {
+ panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.group.module.v1.Module"))
+}
+
+panic(fmt.Errorf("message cosmos.group.module.v1.Module does not contain field %s", fd.FullName()))
+}
+}
+
+// Get retrieves the value for a field.
+//
+// For unpopulated scalars, it returns the default value, where
+// the default value of a bytes scalar is guaranteed to be a copy.
+// For unpopulated composite types, it returns an empty, read-only view
+// of the value; to obtain a mutable reference, use Mutable.
+func (x *fastReflection_Module)
+
+Get(descriptor protoreflect.FieldDescriptor)
+
+protoreflect.Value {
+ switch descriptor.FullName() {
+ case "cosmos.group.module.v1.Module.max_execution_period":
+ value := x.MaxExecutionPeriod
+ return protoreflect.ValueOfMessage(value.ProtoReflect())
+ case "cosmos.group.module.v1.Module.max_metadata_len":
+ value := x.MaxMetadataLen
+ return protoreflect.ValueOfUint64(value)
+
+default:
+ if descriptor.IsExtension() {
+ panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.group.module.v1.Module"))
+}
+
+panic(fmt.Errorf("message cosmos.group.module.v1.Module does not contain field %s", descriptor.FullName()))
+}
+}
+
+// Set stores the value for a field.
+//
+// For a field belonging to a oneof, it implicitly clears any other field
+// that may be currently set within the same oneof.
+// For extension fields, it implicitly stores the provided ExtensionType.
+// When setting a composite type, it is unspecified whether the stored value
+// aliases the source's memory in any way. If the composite value is an
+// empty, read-only value, then it panics.
+//
+// Set is a mutating operation and unsafe for concurrent use.
+func (x *fastReflection_Module)
+
+Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) {
+ switch fd.FullName() {
+ case "cosmos.group.module.v1.Module.max_execution_period":
+ x.MaxExecutionPeriod = value.Message().Interface().(*durationpb.Duration)
+ case "cosmos.group.module.v1.Module.max_metadata_len":
+ x.MaxMetadataLen = value.Uint()
+
+default:
+ if fd.IsExtension() {
+ panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.group.module.v1.Module"))
+}
+
+panic(fmt.Errorf("message cosmos.group.module.v1.Module does not contain field %s", fd.FullName()))
+}
+}
+
+// Mutable returns a mutable reference to a composite type.
+//
+// If the field is unpopulated, it may allocate a composite value.
+// For a field belonging to a oneof, it implicitly clears any other field
+// that may be currently set within the same oneof.
+// For extension fields, it implicitly stores the provided ExtensionType
+// if not already stored.
+// It panics if the field does not contain a composite type.
+//
+// Mutable is a mutating operation and unsafe for concurrent use.
+func (x *fastReflection_Module)
+
+Mutable(fd protoreflect.FieldDescriptor)
+
+protoreflect.Value {
+ switch fd.FullName() {
+ case "cosmos.group.module.v1.Module.max_execution_period":
+ if x.MaxExecutionPeriod == nil {
+ x.MaxExecutionPeriod = new(durationpb.Duration)
+}
+
+return protoreflect.ValueOfMessage(x.MaxExecutionPeriod.ProtoReflect())
+ case "cosmos.group.module.v1.Module.max_metadata_len":
+ panic(fmt.Errorf("field max_metadata_len of message cosmos.group.module.v1.Module is not mutable"))
+
+default:
+ if fd.IsExtension() {
+ panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.group.module.v1.Module"))
+}
+
+panic(fmt.Errorf("message cosmos.group.module.v1.Module does not contain field %s", fd.FullName()))
+}
+}
+
+// NewField returns a new value that is assignable to the field
+// for the given descriptor. For scalars, this returns the default value.
+// For lists, maps, and messages, this returns a new, empty, mutable value.
+func (x *fastReflection_Module)
+
+NewField(fd protoreflect.FieldDescriptor)
+
+protoreflect.Value {
+ switch fd.FullName() {
+ case "cosmos.group.module.v1.Module.max_execution_period":
+ m := new(durationpb.Duration)
+
+return protoreflect.ValueOfMessage(m.ProtoReflect())
+ case "cosmos.group.module.v1.Module.max_metadata_len":
+ return protoreflect.ValueOfUint64(uint64(0))
+
+default:
+ if fd.IsExtension() {
+ panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.group.module.v1.Module"))
+}
+
+panic(fmt.Errorf("message cosmos.group.module.v1.Module does not contain field %s", fd.FullName()))
+}
+}
+
+// WhichOneof reports which field within the oneof is populated,
+// returning nil if none are populated.
+// It panics if the oneof descriptor does not belong to this message.
+func (x *fastReflection_Module)
+
+WhichOneof(d protoreflect.OneofDescriptor)
+
+protoreflect.FieldDescriptor {
+ switch d.FullName() {
+ default:
+ panic(fmt.Errorf("%s is not a oneof field in cosmos.group.module.v1.Module", d.FullName()))
+}
+
+panic("unreachable")
+}
+
+// GetUnknown retrieves the entire list of unknown fields.
+// The caller may only mutate the contents of the RawFields
+// if the mutated bytes are stored back into the message with SetUnknown.
+func (x *fastReflection_Module)
+
+GetUnknown()
+
+protoreflect.RawFields {
+ return x.unknownFields
+}
+
+// SetUnknown stores an entire list of unknown fields.
+// The raw fields must be syntactically valid according to the wire format.
+// An implementation may panic if this is not the case.
+// Once stored, the caller must not mutate the content of the RawFields.
+// An empty RawFields may be passed to clear the fields.
+//
+// SetUnknown is a mutating operation and unsafe for concurrent use.
+func (x *fastReflection_Module)
+
+SetUnknown(fields protoreflect.RawFields) {
+ x.unknownFields = fields
+}
+
+// IsValid reports whether the message is valid.
+//
+// An invalid message is an empty, read-only value.
+//
+// An invalid message often corresponds to a nil pointer of the concrete
+// message type, but the details are implementation dependent.
+// Validity is not part of the protobuf data model, and may not
+// be preserved in marshaling or other operations.
+func (x *fastReflection_Module)
+
+IsValid()
+
+bool {
+ return x != nil
+}
+
+// ProtoMethods returns optional fastReflectionFeature-path implementations of various operations.
+// This method may return nil.
+//
+// The returned methods type is identical to
+// "google.golang.org/protobuf/runtime/protoiface".Methods.
+// Consult the protoiface package documentation for details.
+func (x *fastReflection_Module)
+
+ProtoMethods() *protoiface.Methods {
+ size := func(input protoiface.SizeInput)
+
+protoiface.SizeOutput {
+ x := input.Message.Interface().(*Module)
+ if x == nil {
+ return protoiface.SizeOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Size: 0,
+}
+
+}
+ options := runtime.SizeInputToOptions(input)
+ _ = options
+ var n int
+ var l int
+ _ = l
+ if x.MaxExecutionPeriod != nil {
+ l = options.Size(x.MaxExecutionPeriod)
+
+n += 1 + l + runtime.Sov(uint64(l))
+}
+ if x.MaxMetadataLen != 0 {
+ n += 1 + runtime.Sov(uint64(x.MaxMetadataLen))
+}
+ if x.unknownFields != nil {
+ n += len(x.unknownFields)
+}
+
+return protoiface.SizeOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Size: n,
+}
+
+}
+ marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) {
+ x := input.Message.Interface().(*Module)
+ if x == nil {
+ return protoiface.MarshalOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Buf: input.Buf,
+}, nil
+}
+ options := runtime.MarshalInputToOptions(input)
+ _ = options
+ size := options.Size(x)
+ dAtA := make([]byte, size)
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if x.unknownFields != nil {
+ i -= len(x.unknownFields)
+
+copy(dAtA[i:], x.unknownFields)
+}
+ if x.MaxMetadataLen != 0 {
+ i = runtime.EncodeVarint(dAtA, i, uint64(x.MaxMetadataLen))
+
+i--
+ dAtA[i] = 0x10
+}
+ if x.MaxExecutionPeriod != nil {
+ encoded, err := options.Marshal(x.MaxExecutionPeriod)
+ if err != nil {
+ return protoiface.MarshalOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Buf: input.Buf,
+}, err
+}
+
+i -= len(encoded)
+
+copy(dAtA[i:], encoded)
+
+i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded)))
+
+i--
+ dAtA[i] = 0xa
+}
+  if input.Buf != nil {
+    input.Buf = append(input.Buf, dAtA...)
+  } else {
+    input.Buf = dAtA
+  }
+
+return protoiface.MarshalOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Buf: input.Buf,
+}, nil
+}
+ unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) {
+ x := input.Message.Interface().(*Module)
+ if x == nil {
+ return protoiface.UnmarshalOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Flags: input.Flags,
+}, nil
+}
+ options := runtime.UnmarshalInputToOptions(input)
+ _ = options
+ dAtA := input.Buf
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return protoiface.UnmarshalOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Flags: input.Flags
+}, runtime.ErrIntOverflow
+}
+ if iNdEx >= l {
+ return protoiface.UnmarshalOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Flags: input.Flags
+}, io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return protoiface.UnmarshalOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Flags: input.Flags
+}, fmt.Errorf("proto: Module: wiretype end group for non-group")
+}
+ if fieldNum <= 0 {
+ return protoiface.UnmarshalOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Flags: input.Flags
+}, fmt.Errorf("proto: Module: illegal tag %d (wire type %d)", fieldNum, wire)
+}
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return protoiface.UnmarshalOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Flags: input.Flags
+}, fmt.Errorf("proto: wrong wireType = %d for field MaxExecutionPeriod", wireType)
+}
+
+var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return protoiface.UnmarshalOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Flags: input.Flags
+}, runtime.ErrIntOverflow
+}
+ if iNdEx >= l {
+ return protoiface.UnmarshalOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Flags: input.Flags
+}, io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ if msglen < 0 {
+ return protoiface.UnmarshalOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Flags: input.Flags
+}, runtime.ErrInvalidLength
+}
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return protoiface.UnmarshalOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Flags: input.Flags
+}, runtime.ErrInvalidLength
+}
+ if postIndex > l {
+ return protoiface.UnmarshalOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Flags: input.Flags
+}, io.ErrUnexpectedEOF
+}
+ if x.MaxExecutionPeriod == nil {
+ x.MaxExecutionPeriod = &durationpb.Duration{
+}
+
+}
+ if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.MaxExecutionPeriod); err != nil {
+ return protoiface.UnmarshalOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Flags: input.Flags
+}, err
+}
+
+iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return protoiface.UnmarshalOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Flags: input.Flags
+}, fmt.Errorf("proto: wrong wireType = %d for field MaxMetadataLen", wireType)
+}
+
+x.MaxMetadataLen = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return protoiface.UnmarshalOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Flags: input.Flags
+}, runtime.ErrIntOverflow
+}
+ if iNdEx >= l {
+ return protoiface.UnmarshalOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Flags: input.Flags
+}, io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ x.MaxMetadataLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+
+default:
+ iNdEx = preIndex
+ skippy, err := runtime.Skip(dAtA[iNdEx:])
+ if err != nil {
+ return protoiface.UnmarshalOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Flags: input.Flags
+}, err
+}
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return protoiface.UnmarshalOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Flags: input.Flags
+}, runtime.ErrInvalidLength
+}
+ if (iNdEx + skippy) > l {
+ return protoiface.UnmarshalOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Flags: input.Flags
+}, io.ErrUnexpectedEOF
+}
+ if !options.DiscardUnknown {
+ x.unknownFields = append(x.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+}
+
+iNdEx += skippy
+}
+
+}
+ if iNdEx > l {
+ return protoiface.UnmarshalOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Flags: input.Flags
+}, io.ErrUnexpectedEOF
+}
+
+return protoiface.UnmarshalOutput{
+ NoUnkeyedLiterals: input.NoUnkeyedLiterals,
+ Flags: input.Flags
+}, nil
+}
+
+return &protoiface.Methods{
+ NoUnkeyedLiterals: struct{
+}{
+},
+ Flags: protoiface.SupportMarshalDeterministic | protoiface.SupportUnmarshalDiscardUnknown,
+ Size: size,
+ Marshal: marshal,
+ Unmarshal: unmarshal,
+ Merge: nil,
+ CheckInitialized: nil,
+}
+}
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.27.0
+// protoc (unknown)
+// source: cosmos/group/module/v1/module.proto
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Module is the config object of the group module.
+type Module struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // max_execution_period defines the max duration after a proposal's voting period ends that members can send a MsgExec
+ // to execute the proposal.
+ MaxExecutionPeriod *durationpb.Duration `protobuf:"bytes,1,opt,name=max_execution_period,json=maxExecutionPeriod,proto3" json:"max_execution_period,omitempty"`
+ // max_metadata_len defines the max length of the metadata bytes field for various entities within the group module.
+ // Defaults to 255 if not explicitly set.
+ MaxMetadataLen uint64 `protobuf:"varint,2,opt,name=max_metadata_len,json=maxMetadataLen,proto3" json:"max_metadata_len,omitempty"`
+}
+
+func (x *Module)
+
+Reset() {
+ *x = Module{
+}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cosmos_group_module_v1_module_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+
+ms.StoreMessageInfo(mi)
+}
+}
+
+func (x *Module)
+
+String()
+
+string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Module)
+
+ProtoMessage() {
+}
+
+// Deprecated: Use Module.ProtoReflect.Descriptor instead.
+func (*Module) Descriptor() ([]byte, []int) {
+    return file_cosmos_group_module_v1_module_proto_rawDescGZIP(), []int{0}
+}
+
+// GetMaxExecutionPeriod returns the max_execution_period field; it is safe to
+// call on a nil receiver, in which case it returns nil.
+func (x *Module) GetMaxExecutionPeriod() *durationpb.Duration {
+    if x != nil {
+        return x.MaxExecutionPeriod
+    }
+    return nil
+}
+
+// GetMaxMetadataLen returns the max_metadata_len field; it is safe to call on
+// a nil receiver, in which case it returns 0.
+func (x *Module) GetMaxMetadataLen() uint64 {
+    if x != nil {
+        return x.MaxMetadataLen
+    }
+    return 0
+}
+
+var File_cosmos_group_module_v1_module_proto protoreflect.FileDescriptor
+
+var file_cosmos_group_module_v1_module_proto_rawDesc = []byte{
+ 0x0a, 0x23, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2f, 0x6d,
+ 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x16, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x67, 0x72,
+ 0x6f, 0x75, 0x70, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x20, 0x63,
+ 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x31, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x14, 0x67, 0x6f, 0x67, 0x6f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x67, 0x6f, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x11, 0x61, 0x6d, 0x69, 0x6e, 0x6f, 0x2f, 0x61, 0x6d, 0x69,
+ 0x6e, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbc, 0x01, 0x0a, 0x06, 0x4d, 0x6f, 0x64,
+ 0x75, 0x6c, 0x65, 0x12, 0x5a, 0x0a, 0x14, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0d, 0xc8, 0xde,
+ 0x1f, 0x00, 0x98, 0xdf, 0x1f, 0x01, 0xa8, 0xe7, 0xb0, 0x2a, 0x01, 0x52, 0x12, 0x6d, 0x61, 0x78,
+ 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12,
+ 0x28, 0x0a, 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f,
+ 0x6c, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x4d, 0x65,
+ 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4c, 0x65, 0x6e, 0x3a, 0x2c, 0xba, 0xc0, 0x96, 0xda, 0x01,
+ 0x26, 0x0a, 0x24, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f,
+ 0x73, 0x6d, 0x6f, 0x73, 0x2f, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2d, 0x73, 0x64, 0x6b, 0x2f,
+ 0x78, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x42, 0xd6, 0x01, 0x0a, 0x1a, 0x63, 0x6f, 0x6d, 0x2e,
+ 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x6d, 0x6f, 0x64,
+ 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x30, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x73, 0x64, 0x6b,
+ 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2f, 0x67,
+ 0x72, 0x6f, 0x75, 0x70, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x6d,
+ 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x43, 0x47, 0x4d, 0xaa, 0x02, 0x16,
+ 0x43, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x4d, 0x6f, 0x64,
+ 0x75, 0x6c, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x16, 0x43, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x5c,
+ 0x47, 0x72, 0x6f, 0x75, 0x70, 0x5c, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x5c, 0x56, 0x31, 0xe2,
+ 0x02, 0x22, 0x43, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x5c, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x5c, 0x4d,
+ 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61,
+ 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x19, 0x43, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x3a, 0x3a, 0x47,
+ 0x72, 0x6f, 0x75, 0x70, 0x3a, 0x3a, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x31,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_cosmos_group_module_v1_module_proto_rawDescOnce sync.Once
+ file_cosmos_group_module_v1_module_proto_rawDescData = file_cosmos_group_module_v1_module_proto_rawDesc
+)
+
+// file_cosmos_group_module_v1_module_proto_rawDescGZIP lazily gzip-compresses
+// the raw file descriptor exactly once and returns the compressed bytes.
+func file_cosmos_group_module_v1_module_proto_rawDescGZIP() []byte {
+    file_cosmos_group_module_v1_module_proto_rawDescOnce.Do(func() {
+        file_cosmos_group_module_v1_module_proto_rawDescData = protoimpl.X.CompressGZIP(file_cosmos_group_module_v1_module_proto_rawDescData)
+    })
+    return file_cosmos_group_module_v1_module_proto_rawDescData
+}
+
+var file_cosmos_group_module_v1_module_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+
+// goTypes lists the Go types referenced by this file descriptor; entries are
+// addressed by index from the depIdxs table below.
+var file_cosmos_group_module_v1_module_proto_goTypes = []interface{}{
+    (*Module)(nil),              // 0: cosmos.group.module.v1.Module
+    (*durationpb.Duration)(nil), // 1: google.protobuf.Duration
+}
+
+var file_cosmos_group_module_v1_module_proto_depIdxs = []int32{
+ 1, // 0: cosmos.group.module.v1.Module.max_execution_period:type_name -> google.protobuf.Duration
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() {
+ file_cosmos_group_module_v1_module_proto_init()
+}
+
+// file_cosmos_group_module_v1_module_proto_init builds the file descriptor and
+// message type info. It is idempotent: once File_cosmos_group_module_v1_module_proto
+// is set, subsequent calls return immediately.
+func file_cosmos_group_module_v1_module_proto_init() {
+    if File_cosmos_group_module_v1_module_proto != nil {
+        return
+    }
+    if !protoimpl.UnsafeEnabled {
+        // Without the unsafe fast path, register an exporter so the runtime
+        // can reach the unexported state fields by index.
+        file_cosmos_group_module_v1_module_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+            switch v := v.(*Module); i {
+            case 0:
+                return &v.state
+            case 1:
+                return &v.sizeCache
+            case 2:
+                return &v.unknownFields
+            default:
+                return nil
+            }
+        }
+    }
+    type x struct{}
+    out := protoimpl.TypeBuilder{
+        File: protoimpl.DescBuilder{
+            GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+            RawDescriptor: file_cosmos_group_module_v1_module_proto_rawDesc,
+            NumEnums:      0,
+            NumMessages:   1,
+            NumExtensions: 0,
+            NumServices:   0,
+        },
+        GoTypes:           file_cosmos_group_module_v1_module_proto_goTypes,
+        DependencyIndexes: file_cosmos_group_module_v1_module_proto_depIdxs,
+        MessageInfos:      file_cosmos_group_module_v1_module_proto_msgTypes,
+    }.Build()
+    File_cosmos_group_module_v1_module_proto = out.File
+    // Release build inputs so they can be garbage collected.
+    file_cosmos_group_module_v1_module_proto_rawDesc = nil
+    file_cosmos_group_module_v1_module_proto_goTypes = nil
+    file_cosmos_group_module_v1_module_proto_depIdxs = nil
+}
+```
+
+
+Pulsar is optional. The official [`protoc-gen-go`](https://developers.google.com/protocol-buffers/docs/reference/go-generated) can be used as well.
+
+
+## Dependency Definition
+
+Once the configuration proto is defined, the module's `module.go` must define what dependencies are required by the module.
+The boilerplate is similar for all modules.
+
+
+<Tip>
+All methods, structs and their fields must be public for `depinject`.
+</Tip>
+
+
+1. Import the module configuration generated package:
+
+ ```go expandable
+ package module
+
+ import (
+
+ "context"
+ "encoding/json"
+ "fmt"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+
+ modulev1 "cosmossdk.io/api/cosmos/group/module/v1"
+ "cosmossdk.io/core/address"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/depinject"
+
+ store "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ sdkclient "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ cdctypes "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ "github.com/cosmos/cosmos-sdk/x/group"
+ "github.com/cosmos/cosmos-sdk/x/group/client/cli"
+ "github.com/cosmos/cosmos-sdk/x/group/keeper"
+ "github.com/cosmos/cosmos-sdk/x/group/simulation"
+ )
+
+ // ConsensusVersion defines the current x/group module consensus version.
+ const ConsensusVersion = 2
+
+ var (
+ _ module.AppModuleBasic = AppModuleBasic{
+ }
+ _ module.AppModuleSimulation = AppModule{
+ }
+ )
+
+ type AppModule struct {
+ AppModuleBasic
+ keeper keeper.Keeper
+ bankKeeper group.BankKeeper
+ accKeeper group.AccountKeeper
+ registry cdctypes.InterfaceRegistry
+ }
+
+ // NewAppModule creates a new AppModule object
+ func NewAppModule(cdc codec.Codec, keeper keeper.Keeper, ak group.AccountKeeper, bk group.BankKeeper, registry cdctypes.InterfaceRegistry)
+
+ AppModule {
+ return AppModule{
+ AppModuleBasic: AppModuleBasic{
+ cdc: cdc, ac: ak.AddressCodec()
+ },
+ keeper: keeper,
+ bankKeeper: bk,
+ accKeeper: ak,
+ registry: registry,
+ }
+ }
+
+ var (
+ _ appmodule.AppModule = AppModule{
+ }
+ _ appmodule.HasEndBlocker = AppModule{
+ }
+ )
+
+ // IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+ func (am AppModule)
+
+ IsOnePerModuleType() {
+ }
+
+ // IsAppModule implements the appmodule.AppModule interface.
+ func (am AppModule)
+
+ IsAppModule() {
+ }
+
+ type AppModuleBasic struct {
+ cdc codec.Codec
+ ac address.Codec
+ }
+
+ // Name returns the group module's name.
+ func (AppModuleBasic)
+
+ Name()
+
+ string {
+ return group.ModuleName
+ }
+
+ // DefaultGenesis returns default genesis state as raw bytes for the group
+ // module.
+ func (AppModuleBasic)
+
+ DefaultGenesis(cdc codec.JSONCodec)
+
+ json.RawMessage {
+ return cdc.MustMarshalJSON(group.NewGenesisState())
+ }
+
+ // ValidateGenesis performs genesis state validation for the group module.
+ func (AppModuleBasic)
+
+ ValidateGenesis(cdc codec.JSONCodec, config sdkclient.TxEncodingConfig, bz json.RawMessage)
+
+ error {
+ var data group.GenesisState
+ if err := cdc.UnmarshalJSON(bz, &data); err != nil {
+ return fmt.Errorf("failed to unmarshal %s genesis state: %w", group.ModuleName, err)
+ }
+
+ return data.Validate()
+ }
+
+ // GetQueryCmd returns the cli query commands for the group module
+ func (a AppModuleBasic)
+
+ GetQueryCmd() *cobra.Command {
+ return cli.QueryCmd(a.Name())
+ }
+
+ // GetTxCmd returns the transaction commands for the group module
+ func (a AppModuleBasic)
+
+ GetTxCmd() *cobra.Command {
+ return cli.TxCmd(a.Name(), a.ac)
+ }
+
+ // RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the group module.
+ func (a AppModuleBasic)
+
+ RegisterGRPCGatewayRoutes(clientCtx sdkclient.Context, mux *gwruntime.ServeMux) {
+ if err := group.RegisterQueryHandlerClient(context.Background(), mux, group.NewQueryClient(clientCtx)); err != nil {
+ panic(err)
+ }
+ }
+
+ // RegisterInterfaces registers the group module's interface types
+ func (AppModuleBasic)
+
+ RegisterInterfaces(registry cdctypes.InterfaceRegistry) {
+ group.RegisterInterfaces(registry)
+ }
+
+ // RegisterLegacyAminoCodec registers the group module's types for the given codec.
+ func (AppModuleBasic)
+
+ RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ group.RegisterLegacyAminoCodec(cdc)
+ }
+
+ // Name returns the group module's name.
+ func (AppModule)
+
+ Name()
+
+ string {
+ return group.ModuleName
+ }
+
+ // RegisterInvariants does nothing, there are no invariants to enforce
+ func (am AppModule)
+
+ RegisterInvariants(ir sdk.InvariantRegistry) {
+ keeper.RegisterInvariants(ir, am.keeper)
+ }
+
+ // InitGenesis performs genesis initialization for the group module. It returns
+ // no validator updates.
+ func (am AppModule)
+
+ InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) []abci.ValidatorUpdate {
+ am.keeper.InitGenesis(ctx, cdc, data)
+
+ return []abci.ValidatorUpdate{
+ }
+ }
+
+ // ExportGenesis returns the exported genesis state as raw bytes for the group
+ // module.
+ func (am AppModule)
+
+ ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec)
+
+ json.RawMessage {
+ gs := am.keeper.ExportGenesis(ctx, cdc)
+
+ return cdc.MustMarshalJSON(gs)
+ }
+
+ // RegisterServices registers a gRPC query service to respond to the
+ // module-specific gRPC queries.
+ func (am AppModule)
+
+ RegisterServices(cfg module.Configurator) {
+ group.RegisterMsgServer(cfg.MsgServer(), am.keeper)
+
+ group.RegisterQueryServer(cfg.QueryServer(), am.keeper)
+ m := keeper.NewMigrator(am.keeper)
+ if err := cfg.RegisterMigration(group.ModuleName, 1, m.Migrate1to2); err != nil {
+ panic(fmt.Sprintf("failed to migrate x/%s from version 1 to 2: %v", group.ModuleName, err))
+ }
+ }
+
+ // ConsensusVersion implements AppModule/ConsensusVersion.
+ func (AppModule)
+
+ ConsensusVersion()
+
+ uint64 {
+ return ConsensusVersion
+ }
+
+ // EndBlock implements the group module's EndBlock.
+ func (am AppModule)
+
+ EndBlock(ctx context.Context)
+
+ error {
+ c := sdk.UnwrapSDKContext(ctx)
+
+ return EndBlocker(c, am.keeper)
+ }
+
+ // ____________________________________________________________________________
+
+ // AppModuleSimulation functions
+
+ // GenerateGenesisState creates a randomized GenState of the group module.
+ func (AppModule)
+
+ GenerateGenesisState(simState *module.SimulationState) {
+ simulation.RandomizedGenState(simState)
+ }
+
+ // RegisterStoreDecoder registers a decoder for group module's types
+ func (am AppModule)
+
+ RegisterStoreDecoder(sdr simtypes.StoreDecoderRegistry) {
+ sdr[group.StoreKey] = simulation.NewDecodeStore(am.cdc)
+ }
+
+ // WeightedOperations returns the all the gov module operations with their respective weights.
+ func (am AppModule)
+
+ WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation {
+ return simulation.WeightedOperations(
+ am.registry,
+ simState.AppParams, simState.Cdc, simState.TxConfig,
+ am.accKeeper, am.bankKeeper, am.keeper, am.cdc,
+ )
+ }
+
+ //
+ // App Wiring Setup
+ //
+
+ func init() {
+ appmodule.Register(
+ &modulev1.Module{
+ },
+ appmodule.Provide(ProvideModule),
+ )
+ }
+
+ type GroupInputs struct {
+ depinject.In
+
+ Config *modulev1.Module
+ Key *store.KVStoreKey
+ Cdc codec.Codec
+ AccountKeeper group.AccountKeeper
+ BankKeeper group.BankKeeper
+ Registry cdctypes.InterfaceRegistry
+ MsgServiceRouter baseapp.MessageRouter
+ }
+
+ type GroupOutputs struct {
+ depinject.Out
+
+ GroupKeeper keeper.Keeper
+ Module appmodule.AppModule
+ }
+
+ func ProvideModule(in GroupInputs)
+
+ GroupOutputs {
+ /*
+ Example of setting group params:
+ in.Config.MaxMetadataLen = 1000
+ in.Config.MaxExecutionPeriod = "1209600s"
+ */
+ k := keeper.NewKeeper(in.Key, in.Cdc, in.MsgServiceRouter, in.AccountKeeper, group.Config{
+ MaxExecutionPeriod: in.Config.MaxExecutionPeriod.AsDuration(),
+ MaxMetadataLen: in.Config.MaxMetadataLen
+ })
+ m := NewAppModule(in.Cdc, k, in.AccountKeeper, in.BankKeeper, in.Registry)
+
+ return GroupOutputs{
+ GroupKeeper: k,
+ Module: m
+ }
+ }
+ ```
+
+   Define an `init()` function that registers the providers of the module configuration.
+   This registers the module configuration message and the wiring of the module.
+
+ ```go expandable
+ package module
+
+ import (
+
+ "context"
+ "encoding/json"
+ "fmt"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+
+ modulev1 "cosmossdk.io/api/cosmos/group/module/v1"
+ "cosmossdk.io/core/address"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/depinject"
+
+ store "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ sdkclient "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ cdctypes "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ "github.com/cosmos/cosmos-sdk/x/group"
+ "github.com/cosmos/cosmos-sdk/x/group/client/cli"
+ "github.com/cosmos/cosmos-sdk/x/group/keeper"
+ "github.com/cosmos/cosmos-sdk/x/group/simulation"
+ )
+
+ // ConsensusVersion defines the current x/group module consensus version.
+ const ConsensusVersion = 2
+
+ var (
+ _ module.AppModuleBasic = AppModuleBasic{
+ }
+ _ module.AppModuleSimulation = AppModule{
+ }
+ )
+
+ type AppModule struct {
+ AppModuleBasic
+ keeper keeper.Keeper
+ bankKeeper group.BankKeeper
+ accKeeper group.AccountKeeper
+ registry cdctypes.InterfaceRegistry
+ }
+
+ // NewAppModule creates a new AppModule object
+ func NewAppModule(cdc codec.Codec, keeper keeper.Keeper, ak group.AccountKeeper, bk group.BankKeeper, registry cdctypes.InterfaceRegistry)
+
+ AppModule {
+ return AppModule{
+ AppModuleBasic: AppModuleBasic{
+ cdc: cdc, ac: ak.AddressCodec()
+ },
+ keeper: keeper,
+ bankKeeper: bk,
+ accKeeper: ak,
+ registry: registry,
+ }
+ }
+
+ var (
+ _ appmodule.AppModule = AppModule{
+ }
+ _ appmodule.HasEndBlocker = AppModule{
+ }
+ )
+
+ // IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+ func (am AppModule)
+
+ IsOnePerModuleType() {
+ }
+
+ // IsAppModule implements the appmodule.AppModule interface.
+ func (am AppModule)
+
+ IsAppModule() {
+ }
+
+ type AppModuleBasic struct {
+ cdc codec.Codec
+ ac address.Codec
+ }
+
+ // Name returns the group module's name.
+ func (AppModuleBasic)
+
+ Name()
+
+ string {
+ return group.ModuleName
+ }
+
+ // DefaultGenesis returns default genesis state as raw bytes for the group
+ // module.
+ func (AppModuleBasic)
+
+ DefaultGenesis(cdc codec.JSONCodec)
+
+ json.RawMessage {
+ return cdc.MustMarshalJSON(group.NewGenesisState())
+ }
+
+ // ValidateGenesis performs genesis state validation for the group module.
+ func (AppModuleBasic)
+
+ ValidateGenesis(cdc codec.JSONCodec, config sdkclient.TxEncodingConfig, bz json.RawMessage)
+
+ error {
+ var data group.GenesisState
+ if err := cdc.UnmarshalJSON(bz, &data); err != nil {
+ return fmt.Errorf("failed to unmarshal %s genesis state: %w", group.ModuleName, err)
+ }
+
+ return data.Validate()
+ }
+
+ // GetQueryCmd returns the cli query commands for the group module
+ func (a AppModuleBasic)
+
+ GetQueryCmd() *cobra.Command {
+ return cli.QueryCmd(a.Name())
+ }
+
+ // GetTxCmd returns the transaction commands for the group module
+ func (a AppModuleBasic)
+
+ GetTxCmd() *cobra.Command {
+ return cli.TxCmd(a.Name(), a.ac)
+ }
+
+ // RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the group module.
+ func (a AppModuleBasic)
+
+ RegisterGRPCGatewayRoutes(clientCtx sdkclient.Context, mux *gwruntime.ServeMux) {
+ if err := group.RegisterQueryHandlerClient(context.Background(), mux, group.NewQueryClient(clientCtx)); err != nil {
+ panic(err)
+ }
+ }
+
+ // RegisterInterfaces registers the group module's interface types
+ func (AppModuleBasic)
+
+ RegisterInterfaces(registry cdctypes.InterfaceRegistry) {
+ group.RegisterInterfaces(registry)
+ }
+
+ // RegisterLegacyAminoCodec registers the group module's types for the given codec.
+ func (AppModuleBasic)
+
+ RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ group.RegisterLegacyAminoCodec(cdc)
+ }
+
+ // Name returns the group module's name.
+ func (AppModule)
+
+ Name()
+
+ string {
+ return group.ModuleName
+ }
+
+ // RegisterInvariants does nothing, there are no invariants to enforce
+ func (am AppModule)
+
+ RegisterInvariants(ir sdk.InvariantRegistry) {
+ keeper.RegisterInvariants(ir, am.keeper)
+ }
+
+ // InitGenesis performs genesis initialization for the group module. It returns
+ // no validator updates.
+ func (am AppModule)
+
+ InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) []abci.ValidatorUpdate {
+ am.keeper.InitGenesis(ctx, cdc, data)
+
+ return []abci.ValidatorUpdate{
+ }
+ }
+
+ // ExportGenesis returns the exported genesis state as raw bytes for the group
+ // module.
+ func (am AppModule)
+
+ ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec)
+
+ json.RawMessage {
+ gs := am.keeper.ExportGenesis(ctx, cdc)
+
+ return cdc.MustMarshalJSON(gs)
+ }
+
+ // RegisterServices registers a gRPC query service to respond to the
+ // module-specific gRPC queries.
+ func (am AppModule)
+
+ RegisterServices(cfg module.Configurator) {
+ group.RegisterMsgServer(cfg.MsgServer(), am.keeper)
+
+ group.RegisterQueryServer(cfg.QueryServer(), am.keeper)
+ m := keeper.NewMigrator(am.keeper)
+ if err := cfg.RegisterMigration(group.ModuleName, 1, m.Migrate1to2); err != nil {
+ panic(fmt.Sprintf("failed to migrate x/%s from version 1 to 2: %v", group.ModuleName, err))
+ }
+ }
+
+ // ConsensusVersion implements AppModule/ConsensusVersion.
+ func (AppModule)
+
+ ConsensusVersion()
+
+ uint64 {
+ return ConsensusVersion
+ }
+
+ // EndBlock implements the group module's EndBlock.
+ func (am AppModule)
+
+ EndBlock(ctx context.Context)
+
+ error {
+ c := sdk.UnwrapSDKContext(ctx)
+
+ return EndBlocker(c, am.keeper)
+ }
+
+ // ____________________________________________________________________________
+
+ // AppModuleSimulation functions
+
+ // GenerateGenesisState creates a randomized GenState of the group module.
+ func (AppModule)
+
+ GenerateGenesisState(simState *module.SimulationState) {
+ simulation.RandomizedGenState(simState)
+ }
+
+ // RegisterStoreDecoder registers a decoder for group module's types
+ func (am AppModule)
+
+ RegisterStoreDecoder(sdr simtypes.StoreDecoderRegistry) {
+ sdr[group.StoreKey] = simulation.NewDecodeStore(am.cdc)
+ }
+
+ // WeightedOperations returns the all the gov module operations with their respective weights.
+ func (am AppModule)
+
+ WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation {
+ return simulation.WeightedOperations(
+ am.registry,
+ simState.AppParams, simState.Cdc, simState.TxConfig,
+ am.accKeeper, am.bankKeeper, am.keeper, am.cdc,
+ )
+ }
+
+ //
+ // App Wiring Setup
+ //
+
+ func init() {
+ appmodule.Register(
+ &modulev1.Module{
+ },
+ appmodule.Provide(ProvideModule),
+ )
+ }
+
+ type GroupInputs struct {
+ depinject.In
+
+ Config *modulev1.Module
+ Key *store.KVStoreKey
+ Cdc codec.Codec
+ AccountKeeper group.AccountKeeper
+ BankKeeper group.BankKeeper
+ Registry cdctypes.InterfaceRegistry
+ MsgServiceRouter baseapp.MessageRouter
+ }
+
+ type GroupOutputs struct {
+ depinject.Out
+
+ GroupKeeper keeper.Keeper
+ Module appmodule.AppModule
+ }
+
+ func ProvideModule(in GroupInputs)
+
+ GroupOutputs {
+ /*
+ Example of setting group params:
+ in.Config.MaxMetadataLen = 1000
+ in.Config.MaxExecutionPeriod = "1209600s"
+ */
+ k := keeper.NewKeeper(in.Key, in.Cdc, in.MsgServiceRouter, in.AccountKeeper, group.Config{
+ MaxExecutionPeriod: in.Config.MaxExecutionPeriod.AsDuration(),
+ MaxMetadataLen: in.Config.MaxMetadataLen
+ })
+ m := NewAppModule(in.Cdc, k, in.AccountKeeper, in.BankKeeper, in.Registry)
+
+ return GroupOutputs{
+ GroupKeeper: k,
+ Module: m
+ }
+ }
+ ```
+
+2. Ensure that the module implements the `appmodule.AppModule` interface:
+
+ ```go expandable
+ package module
+
+ import (
+
+ "context"
+ "encoding/json"
+ "fmt"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+
+ modulev1 "cosmossdk.io/api/cosmos/group/module/v1"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/depinject"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ sdkclient "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ cdctypes "github.com/cosmos/cosmos-sdk/codec/types"
+ store "github.com/cosmos/cosmos-sdk/store/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ "github.com/cosmos/cosmos-sdk/x/group"
+ "github.com/cosmos/cosmos-sdk/x/group/client/cli"
+ "github.com/cosmos/cosmos-sdk/x/group/keeper"
+ "github.com/cosmos/cosmos-sdk/x/group/simulation"
+ )
+
+ // ConsensusVersion defines the current x/group module consensus version.
+ const ConsensusVersion = 2
+
+ var (
+ _ module.EndBlockAppModule = AppModule{
+ }
+ _ module.AppModuleBasic = AppModuleBasic{
+ }
+ _ module.AppModuleSimulation = AppModule{
+ }
+ )
+
+ type AppModule struct {
+ AppModuleBasic
+ keeper keeper.Keeper
+ bankKeeper group.BankKeeper
+ accKeeper group.AccountKeeper
+ registry cdctypes.InterfaceRegistry
+ }
+
+ // NewAppModule creates a new AppModule object
+ func NewAppModule(cdc codec.Codec, keeper keeper.Keeper, ak group.AccountKeeper, bk group.BankKeeper, registry cdctypes.InterfaceRegistry)
+
+ AppModule {
+ return AppModule{
+ AppModuleBasic: AppModuleBasic{
+ cdc: cdc
+ },
+ keeper: keeper,
+ bankKeeper: bk,
+ accKeeper: ak,
+ registry: registry,
+ }
+ }
+
+ var _ appmodule.AppModule = AppModule{
+ }
+
+ // IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+ func (am AppModule)
+
+ IsOnePerModuleType() {
+ }
+
+ // IsAppModule implements the appmodule.AppModule interface.
+ func (am AppModule)
+
+ IsAppModule() {
+ }
+
+ type AppModuleBasic struct {
+ cdc codec.Codec
+ }
+
+ // Name returns the group module's name.
+ func (AppModuleBasic)
+
+ Name()
+
+ string {
+ return group.ModuleName
+ }
+
+ // DefaultGenesis returns default genesis state as raw bytes for the group
+ // module.
+ func (AppModuleBasic)
+
+ DefaultGenesis(cdc codec.JSONCodec)
+
+ json.RawMessage {
+ return cdc.MustMarshalJSON(group.NewGenesisState())
+ }
+
+ // ValidateGenesis performs genesis state validation for the group module.
+ func (AppModuleBasic)
+
+ ValidateGenesis(cdc codec.JSONCodec, config sdkclient.TxEncodingConfig, bz json.RawMessage)
+
+ error {
+ var data group.GenesisState
+ if err := cdc.UnmarshalJSON(bz, &data); err != nil {
+ return fmt.Errorf("failed to unmarshal %s genesis state: %w", group.ModuleName, err)
+ }
+
+ return data.Validate()
+ }
+
+ // GetQueryCmd returns the cli query commands for the group module
+ func (a AppModuleBasic)
+
+ GetQueryCmd() *cobra.Command {
+ return cli.QueryCmd(a.Name())
+ }
+
+ // GetTxCmd returns the transaction commands for the group module
+ func (a AppModuleBasic)
+
+ GetTxCmd() *cobra.Command {
+ return cli.TxCmd(a.Name())
+ }
+
+ // RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the group module.
+ func (a AppModuleBasic)
+
+ RegisterGRPCGatewayRoutes(clientCtx sdkclient.Context, mux *gwruntime.ServeMux) {
+ if err := group.RegisterQueryHandlerClient(context.Background(), mux, group.NewQueryClient(clientCtx)); err != nil {
+ panic(err)
+ }
+ }
+
+ // RegisterInterfaces registers the group module's interface types
+ func (AppModuleBasic)
+
+ RegisterInterfaces(registry cdctypes.InterfaceRegistry) {
+ group.RegisterInterfaces(registry)
+ }
+
+ // RegisterLegacyAminoCodec registers the group module's types for the given codec.
+ func (AppModuleBasic)
+
+ RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ group.RegisterLegacyAminoCodec(cdc)
+ }
+
+ // Name returns the group module's name.
+ func (AppModule)
+
+ Name()
+
+ string {
+ return group.ModuleName
+ }
+
+ // RegisterInvariants does nothing, there are no invariants to enforce
+ func (am AppModule)
+
+ RegisterInvariants(ir sdk.InvariantRegistry) {
+ keeper.RegisterInvariants(ir, am.keeper)
+ }
+
+ func (am AppModule)
+
+ NewHandler()
+
+ sdk.Handler {
+ return nil
+ }
+
+ // InitGenesis performs genesis initialization for the group module. It returns
+ // no validator updates.
+ func (am AppModule)
+
+ InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) []abci.ValidatorUpdate {
+ am.keeper.InitGenesis(ctx, cdc, data)
+
+ return []abci.ValidatorUpdate{
+ }
+ }
+
+ // ExportGenesis returns the exported genesis state as raw bytes for the group
+ // module.
+ func (am AppModule)
+
+ ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec)
+
+ json.RawMessage {
+ gs := am.keeper.ExportGenesis(ctx, cdc)
+
+ return cdc.MustMarshalJSON(gs)
+ }
+
+ // RegisterServices registers a gRPC query service to respond to the
+ // module-specific gRPC queries.
+ func (am AppModule)
+
+ RegisterServices(cfg module.Configurator) {
+ group.RegisterMsgServer(cfg.MsgServer(), am.keeper)
+
+ group.RegisterQueryServer(cfg.QueryServer(), am.keeper)
+ m := keeper.NewMigrator(am.keeper)
+ if err := cfg.RegisterMigration(group.ModuleName, 1, m.Migrate1to2); err != nil {
+ panic(fmt.Sprintf("failed to migrate x/%s from version 1 to 2: %v", group.ModuleName, err))
+ }
+ }
+
+ // ConsensusVersion implements AppModule/ConsensusVersion.
+ func (AppModule)
+
+ ConsensusVersion()
+
+ uint64 {
+ return ConsensusVersion
+ }
+
+ // EndBlock implements the group module's EndBlock.
+ func (am AppModule)
+
+ EndBlock(ctx sdk.Context, _ abci.RequestEndBlock) []abci.ValidatorUpdate {
+ EndBlocker(ctx, am.keeper)
+
+ return []abci.ValidatorUpdate{
+ }
+ }
+
+ // ____________________________________________________________________________
+
+ // AppModuleSimulation functions
+
+ // GenerateGenesisState creates a randomized GenState of the group module.
+ func (AppModule)
+
+ GenerateGenesisState(simState *module.SimulationState) {
+ simulation.RandomizedGenState(simState)
+ }
+
+ // RegisterStoreDecoder registers a decoder for group module's types
+ func (am AppModule)
+
+ RegisterStoreDecoder(sdr sdk.StoreDecoderRegistry) {
+ sdr[group.StoreKey] = simulation.NewDecodeStore(am.cdc)
+ }
+
+ // WeightedOperations returns the all the gov module operations with their respective weights.
+ func (am AppModule)
+
+ WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation {
+ return simulation.WeightedOperations(
+ am.registry,
+ simState.AppParams, simState.Cdc,
+ am.accKeeper, am.bankKeeper, am.keeper, am.cdc,
+ )
+ }
+
+ //
+ // App Wiring Setup
+ //
+
+ func init() {
+ appmodule.Register(
+ &modulev1.Module{
+ },
+ appmodule.Provide(ProvideModule),
+ )
+ }
+
+ type GroupInputs struct {
+ depinject.In
+
+ Config *modulev1.Module
+ Key *store.KVStoreKey
+ Cdc codec.Codec
+ AccountKeeper group.AccountKeeper
+ BankKeeper group.BankKeeper
+ Registry cdctypes.InterfaceRegistry
+ MsgServiceRouter *baseapp.MsgServiceRouter
+ }
+
+ type GroupOutputs struct {
+ depinject.Out
+
+ GroupKeeper keeper.Keeper
+ Module appmodule.AppModule
+ }
+
+ func ProvideModule(in GroupInputs)
+
+ GroupOutputs {
+ /*
+ Example of setting group params:
+ in.Config.MaxMetadataLen = 1000
+ in.Config.MaxExecutionPeriod = "1209600s"
+ */
+ k := keeper.NewKeeper(in.Key, in.Cdc, in.MsgServiceRouter, in.AccountKeeper, group.Config{
+ MaxExecutionPeriod: in.Config.MaxExecutionPeriod.AsDuration(),
+ MaxMetadataLen: in.Config.MaxMetadataLen
+ })
+ m := NewAppModule(in.Cdc, k, in.AccountKeeper, in.BankKeeper, in.Registry)
+
+ return GroupOutputs{
+ GroupKeeper: k,
+ Module: m
+ }
+ }
+ ```
+
+3. Define a struct that inherits `depinject.In` and define the module inputs (i.e. module dependencies):
+
+ * `depinject` provides the right dependencies to the module.
+ * `depinject` also checks that all dependencies are provided.
+
+   <Tip>
+   For making a dependency optional, add the `optional:"true"` struct tag.
+   </Tip>
+
+ ```go expandable
+ package module
+
+ import (
+
+ "context"
+ "encoding/json"
+ "fmt"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+
+ modulev1 "cosmossdk.io/api/cosmos/group/module/v1"
+ "cosmossdk.io/core/address"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/depinject"
+
+ store "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ sdkclient "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ cdctypes "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ "github.com/cosmos/cosmos-sdk/x/group"
+ "github.com/cosmos/cosmos-sdk/x/group/client/cli"
+ "github.com/cosmos/cosmos-sdk/x/group/keeper"
+ "github.com/cosmos/cosmos-sdk/x/group/simulation"
+ )
+
+ // ConsensusVersion defines the current x/group module consensus version.
+ const ConsensusVersion = 2
+
+ var (
+ _ module.AppModuleBasic = AppModuleBasic{
+ }
+ _ module.AppModuleSimulation = AppModule{
+ }
+ )
+
+ type AppModule struct {
+ AppModuleBasic
+ keeper keeper.Keeper
+ bankKeeper group.BankKeeper
+ accKeeper group.AccountKeeper
+ registry cdctypes.InterfaceRegistry
+ }
+
+ // NewAppModule creates a new AppModule object
+ func NewAppModule(cdc codec.Codec, keeper keeper.Keeper, ak group.AccountKeeper, bk group.BankKeeper, registry cdctypes.InterfaceRegistry)
+
+ AppModule {
+ return AppModule{
+ AppModuleBasic: AppModuleBasic{
+ cdc: cdc, ac: ak.AddressCodec()
+ },
+ keeper: keeper,
+ bankKeeper: bk,
+ accKeeper: ak,
+ registry: registry,
+ }
+ }
+
+ var (
+ _ appmodule.AppModule = AppModule{
+ }
+ _ appmodule.HasEndBlocker = AppModule{
+ }
+ )
+
+ // IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+ func (am AppModule)
+
+ IsOnePerModuleType() {
+ }
+
+ // IsAppModule implements the appmodule.AppModule interface.
+ func (am AppModule)
+
+ IsAppModule() {
+ }
+
+ type AppModuleBasic struct {
+ cdc codec.Codec
+ ac address.Codec
+ }
+
+ // Name returns the group module's name.
+ func (AppModuleBasic)
+
+ Name()
+
+ string {
+ return group.ModuleName
+ }
+
+ // DefaultGenesis returns default genesis state as raw bytes for the group
+ // module.
+ func (AppModuleBasic)
+
+ DefaultGenesis(cdc codec.JSONCodec)
+
+ json.RawMessage {
+ return cdc.MustMarshalJSON(group.NewGenesisState())
+ }
+
+ // ValidateGenesis performs genesis state validation for the group module.
+ func (AppModuleBasic)
+
+ ValidateGenesis(cdc codec.JSONCodec, config sdkclient.TxEncodingConfig, bz json.RawMessage)
+
+ error {
+ var data group.GenesisState
+ if err := cdc.UnmarshalJSON(bz, &data); err != nil {
+ return fmt.Errorf("failed to unmarshal %s genesis state: %w", group.ModuleName, err)
+ }
+
+ return data.Validate()
+ }
+
+ // GetQueryCmd returns the cli query commands for the group module
+ func (a AppModuleBasic)
+
+ GetQueryCmd() *cobra.Command {
+ return cli.QueryCmd(a.Name())
+ }
+
+ // GetTxCmd returns the transaction commands for the group module
+ func (a AppModuleBasic)
+
+ GetTxCmd() *cobra.Command {
+ return cli.TxCmd(a.Name(), a.ac)
+ }
+
+ // RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the group module.
+ func (a AppModuleBasic)
+
+ RegisterGRPCGatewayRoutes(clientCtx sdkclient.Context, mux *gwruntime.ServeMux) {
+ if err := group.RegisterQueryHandlerClient(context.Background(), mux, group.NewQueryClient(clientCtx)); err != nil {
+ panic(err)
+ }
+ }
+
+ // RegisterInterfaces registers the group module's interface types
+ func (AppModuleBasic)
+
+ RegisterInterfaces(registry cdctypes.InterfaceRegistry) {
+ group.RegisterInterfaces(registry)
+ }
+
+ // RegisterLegacyAminoCodec registers the group module's types for the given codec.
+ func (AppModuleBasic)
+
+ RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ group.RegisterLegacyAminoCodec(cdc)
+ }
+
+ // Name returns the group module's name.
+ func (AppModule)
+
+ Name()
+
+ string {
+ return group.ModuleName
+ }
+
+ // RegisterInvariants does nothing, there are no invariants to enforce
+ func (am AppModule)
+
+ RegisterInvariants(ir sdk.InvariantRegistry) {
+ keeper.RegisterInvariants(ir, am.keeper)
+ }
+
+ // InitGenesis performs genesis initialization for the group module. It returns
+ // no validator updates.
+ func (am AppModule)
+
+ InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) []abci.ValidatorUpdate {
+ am.keeper.InitGenesis(ctx, cdc, data)
+
+ return []abci.ValidatorUpdate{
+ }
+ }
+
+ // ExportGenesis returns the exported genesis state as raw bytes for the group
+ // module.
+ func (am AppModule)
+
+ ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec)
+
+ json.RawMessage {
+ gs := am.keeper.ExportGenesis(ctx, cdc)
+
+ return cdc.MustMarshalJSON(gs)
+ }
+
+ // RegisterServices registers a gRPC query service to respond to the
+ // module-specific gRPC queries.
+ func (am AppModule)
+
+ RegisterServices(cfg module.Configurator) {
+ group.RegisterMsgServer(cfg.MsgServer(), am.keeper)
+
+ group.RegisterQueryServer(cfg.QueryServer(), am.keeper)
+ m := keeper.NewMigrator(am.keeper)
+ if err := cfg.RegisterMigration(group.ModuleName, 1, m.Migrate1to2); err != nil {
+ panic(fmt.Sprintf("failed to migrate x/%s from version 1 to 2: %v", group.ModuleName, err))
+ }
+ }
+
+ // ConsensusVersion implements AppModule/ConsensusVersion.
+ func (AppModule)
+
+ ConsensusVersion()
+
+ uint64 {
+ return ConsensusVersion
+ }
+
+ // EndBlock implements the group module's EndBlock.
+ func (am AppModule)
+
+ EndBlock(ctx context.Context)
+
+ error {
+ c := sdk.UnwrapSDKContext(ctx)
+
+ return EndBlocker(c, am.keeper)
+ }
+
+ // ____________________________________________________________________________
+
+ // AppModuleSimulation functions
+
+ // GenerateGenesisState creates a randomized GenState of the group module.
+ func (AppModule)
+
+ GenerateGenesisState(simState *module.SimulationState) {
+ simulation.RandomizedGenState(simState)
+ }
+
+ // RegisterStoreDecoder registers a decoder for group module's types
+ func (am AppModule)
+
+ RegisterStoreDecoder(sdr simtypes.StoreDecoderRegistry) {
+ sdr[group.StoreKey] = simulation.NewDecodeStore(am.cdc)
+ }
+
+ // WeightedOperations returns the all the gov module operations with their respective weights.
+ func (am AppModule)
+
+ WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation {
+ return simulation.WeightedOperations(
+ am.registry,
+ simState.AppParams, simState.Cdc, simState.TxConfig,
+ am.accKeeper, am.bankKeeper, am.keeper, am.cdc,
+ )
+ }
+
+ //
+ // App Wiring Setup
+ //
+
+ func init() {
+ appmodule.Register(
+ &modulev1.Module{
+ },
+ appmodule.Provide(ProvideModule),
+ )
+ }
+
+ type GroupInputs struct {
+ depinject.In
+
+ Config *modulev1.Module
+ Key *store.KVStoreKey
+ Cdc codec.Codec
+ AccountKeeper group.AccountKeeper
+ BankKeeper group.BankKeeper
+ Registry cdctypes.InterfaceRegistry
+ MsgServiceRouter baseapp.MessageRouter
+ }
+
+ type GroupOutputs struct {
+ depinject.Out
+
+ GroupKeeper keeper.Keeper
+ Module appmodule.AppModule
+ }
+
+ func ProvideModule(in GroupInputs)
+
+ GroupOutputs {
+ /*
+ Example of setting group params:
+ in.Config.MaxMetadataLen = 1000
+ in.Config.MaxExecutionPeriod = "1209600s"
+ */
+ k := keeper.NewKeeper(in.Key, in.Cdc, in.MsgServiceRouter, in.AccountKeeper, group.Config{
+ MaxExecutionPeriod: in.Config.MaxExecutionPeriod.AsDuration(),
+ MaxMetadataLen: in.Config.MaxMetadataLen
+ })
+ m := NewAppModule(in.Cdc, k, in.AccountKeeper, in.BankKeeper, in.Registry)
+
+ return GroupOutputs{
+ GroupKeeper: k,
+ Module: m
+ }
+ }
+ ```
+
+4. Define the module outputs with a public struct that inherits `depinject.Out`:
+   The module outputs are the dependencies that the module provides to other modules. These are usually the module itself and its keeper.
+
+ ```go expandable
+ package module
+
+ import (
+
+ "context"
+ "encoding/json"
+ "fmt"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+
+ modulev1 "cosmossdk.io/api/cosmos/group/module/v1"
+ "cosmossdk.io/core/address"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/depinject"
+
+ store "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ sdkclient "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ cdctypes "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ "github.com/cosmos/cosmos-sdk/x/group"
+ "github.com/cosmos/cosmos-sdk/x/group/client/cli"
+ "github.com/cosmos/cosmos-sdk/x/group/keeper"
+ "github.com/cosmos/cosmos-sdk/x/group/simulation"
+ )
+
+ // ConsensusVersion defines the current x/group module consensus version.
+ const ConsensusVersion = 2
+
+ var (
+ _ module.AppModuleBasic = AppModuleBasic{
+ }
+ _ module.AppModuleSimulation = AppModule{
+ }
+ )
+
+ type AppModule struct {
+ AppModuleBasic
+ keeper keeper.Keeper
+ bankKeeper group.BankKeeper
+ accKeeper group.AccountKeeper
+ registry cdctypes.InterfaceRegistry
+ }
+
+ // NewAppModule creates a new AppModule object
+ func NewAppModule(cdc codec.Codec, keeper keeper.Keeper, ak group.AccountKeeper, bk group.BankKeeper, registry cdctypes.InterfaceRegistry)
+
+ AppModule {
+ return AppModule{
+ AppModuleBasic: AppModuleBasic{
+ cdc: cdc, ac: ak.AddressCodec()
+ },
+ keeper: keeper,
+ bankKeeper: bk,
+ accKeeper: ak,
+ registry: registry,
+ }
+ }
+
+ var (
+ _ appmodule.AppModule = AppModule{
+ }
+ _ appmodule.HasEndBlocker = AppModule{
+ }
+ )
+
+ // IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+ func (am AppModule)
+
+ IsOnePerModuleType() {
+ }
+
+ // IsAppModule implements the appmodule.AppModule interface.
+ func (am AppModule)
+
+ IsAppModule() {
+ }
+
+ type AppModuleBasic struct {
+ cdc codec.Codec
+ ac address.Codec
+ }
+
+ // Name returns the group module's name.
+ func (AppModuleBasic)
+
+ Name()
+
+ string {
+ return group.ModuleName
+ }
+
+ // DefaultGenesis returns default genesis state as raw bytes for the group
+ // module.
+ func (AppModuleBasic)
+
+ DefaultGenesis(cdc codec.JSONCodec)
+
+ json.RawMessage {
+ return cdc.MustMarshalJSON(group.NewGenesisState())
+ }
+
+ // ValidateGenesis performs genesis state validation for the group module.
+ func (AppModuleBasic)
+
+ ValidateGenesis(cdc codec.JSONCodec, config sdkclient.TxEncodingConfig, bz json.RawMessage)
+
+ error {
+ var data group.GenesisState
+ if err := cdc.UnmarshalJSON(bz, &data); err != nil {
+ return fmt.Errorf("failed to unmarshal %s genesis state: %w", group.ModuleName, err)
+ }
+
+ return data.Validate()
+ }
+
+ // GetQueryCmd returns the cli query commands for the group module
+ func (a AppModuleBasic)
+
+ GetQueryCmd() *cobra.Command {
+ return cli.QueryCmd(a.Name())
+ }
+
+ // GetTxCmd returns the transaction commands for the group module
+ func (a AppModuleBasic)
+
+ GetTxCmd() *cobra.Command {
+ return cli.TxCmd(a.Name(), a.ac)
+ }
+
+ // RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the group module.
+ func (a AppModuleBasic)
+
+ RegisterGRPCGatewayRoutes(clientCtx sdkclient.Context, mux *gwruntime.ServeMux) {
+ if err := group.RegisterQueryHandlerClient(context.Background(), mux, group.NewQueryClient(clientCtx)); err != nil {
+ panic(err)
+ }
+ }
+
+ // RegisterInterfaces registers the group module's interface types
+ func (AppModuleBasic)
+
+ RegisterInterfaces(registry cdctypes.InterfaceRegistry) {
+ group.RegisterInterfaces(registry)
+ }
+
+ // RegisterLegacyAminoCodec registers the group module's types for the given codec.
+ func (AppModuleBasic)
+
+ RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ group.RegisterLegacyAminoCodec(cdc)
+ }
+
+ // Name returns the group module's name.
+ func (AppModule)
+
+ Name()
+
+ string {
+ return group.ModuleName
+ }
+
+ // RegisterInvariants does nothing, there are no invariants to enforce
+ func (am AppModule)
+
+ RegisterInvariants(ir sdk.InvariantRegistry) {
+ keeper.RegisterInvariants(ir, am.keeper)
+ }
+
+ // InitGenesis performs genesis initialization for the group module. It returns
+ // no validator updates.
+ func (am AppModule)
+
+ InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) []abci.ValidatorUpdate {
+ am.keeper.InitGenesis(ctx, cdc, data)
+
+ return []abci.ValidatorUpdate{
+ }
+ }
+
+ // ExportGenesis returns the exported genesis state as raw bytes for the group
+ // module.
+ func (am AppModule)
+
+ ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec)
+
+ json.RawMessage {
+ gs := am.keeper.ExportGenesis(ctx, cdc)
+
+ return cdc.MustMarshalJSON(gs)
+ }
+
+ // RegisterServices registers a gRPC query service to respond to the
+ // module-specific gRPC queries.
+ func (am AppModule)
+
+ RegisterServices(cfg module.Configurator) {
+ group.RegisterMsgServer(cfg.MsgServer(), am.keeper)
+
+ group.RegisterQueryServer(cfg.QueryServer(), am.keeper)
+ m := keeper.NewMigrator(am.keeper)
+ if err := cfg.RegisterMigration(group.ModuleName, 1, m.Migrate1to2); err != nil {
+ panic(fmt.Sprintf("failed to migrate x/%s from version 1 to 2: %v", group.ModuleName, err))
+ }
+ }
+
+ // ConsensusVersion implements AppModule/ConsensusVersion.
+ func (AppModule)
+
+ ConsensusVersion()
+
+ uint64 {
+ return ConsensusVersion
+ }
+
+ // EndBlock implements the group module's EndBlock.
+ func (am AppModule)
+
+ EndBlock(ctx context.Context)
+
+ error {
+ c := sdk.UnwrapSDKContext(ctx)
+
+ return EndBlocker(c, am.keeper)
+ }
+
+ // ____________________________________________________________________________
+
+ // AppModuleSimulation functions
+
+ // GenerateGenesisState creates a randomized GenState of the group module.
+ func (AppModule)
+
+ GenerateGenesisState(simState *module.SimulationState) {
+ simulation.RandomizedGenState(simState)
+ }
+
+ // RegisterStoreDecoder registers a decoder for group module's types
+ func (am AppModule)
+
+ RegisterStoreDecoder(sdr simtypes.StoreDecoderRegistry) {
+ sdr[group.StoreKey] = simulation.NewDecodeStore(am.cdc)
+ }
+
+ // WeightedOperations returns the all the gov module operations with their respective weights.
+ func (am AppModule)
+
+ WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation {
+ return simulation.WeightedOperations(
+ am.registry,
+ simState.AppParams, simState.Cdc, simState.TxConfig,
+ am.accKeeper, am.bankKeeper, am.keeper, am.cdc,
+ )
+ }
+
+ //
+ // App Wiring Setup
+ //
+
+ func init() {
+ appmodule.Register(
+ &modulev1.Module{
+ },
+ appmodule.Provide(ProvideModule),
+ )
+ }
+
+ type GroupInputs struct {
+ depinject.In
+
+ Config *modulev1.Module
+ Key *store.KVStoreKey
+ Cdc codec.Codec
+ AccountKeeper group.AccountKeeper
+ BankKeeper group.BankKeeper
+ Registry cdctypes.InterfaceRegistry
+ MsgServiceRouter baseapp.MessageRouter
+ }
+
+ type GroupOutputs struct {
+ depinject.Out
+
+ GroupKeeper keeper.Keeper
+ Module appmodule.AppModule
+ }
+
+ func ProvideModule(in GroupInputs)
+
+ GroupOutputs {
+ /*
+ Example of setting group params:
+ in.Config.MaxMetadataLen = 1000
+ in.Config.MaxExecutionPeriod = "1209600s"
+ */
+ k := keeper.NewKeeper(in.Key, in.Cdc, in.MsgServiceRouter, in.AccountKeeper, group.Config{
+ MaxExecutionPeriod: in.Config.MaxExecutionPeriod.AsDuration(),
+ MaxMetadataLen: in.Config.MaxMetadataLen
+ })
+ m := NewAppModule(in.Cdc, k, in.AccountKeeper, in.BankKeeper, in.Registry)
+
+ return GroupOutputs{
+ GroupKeeper: k,
+ Module: m
+ }
+ }
+ ```
+
+5. Create a function named `ProvideModule` (as referenced in step 1) and use the inputs to instantiate the module outputs.
+
+```go expandable
+package module
+
+import (
+
+ "context"
+ "encoding/json"
+ "fmt"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+
+ modulev1 "cosmossdk.io/api/cosmos/group/module/v1"
+ "cosmossdk.io/core/address"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/depinject"
+
+ store "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ sdkclient "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ cdctypes "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ "github.com/cosmos/cosmos-sdk/x/group"
+ "github.com/cosmos/cosmos-sdk/x/group/client/cli"
+ "github.com/cosmos/cosmos-sdk/x/group/keeper"
+ "github.com/cosmos/cosmos-sdk/x/group/simulation"
+)
+
+// ConsensusVersion defines the current x/group module consensus version.
+const ConsensusVersion = 2
+
+var (
+ _ module.AppModuleBasic = AppModuleBasic{
+}
+ _ module.AppModuleSimulation = AppModule{
+}
+)
+
+type AppModule struct {
+ AppModuleBasic
+ keeper keeper.Keeper
+ bankKeeper group.BankKeeper
+ accKeeper group.AccountKeeper
+ registry cdctypes.InterfaceRegistry
+}
+
+// NewAppModule creates a new AppModule object
+func NewAppModule(cdc codec.Codec, keeper keeper.Keeper, ak group.AccountKeeper, bk group.BankKeeper, registry cdctypes.InterfaceRegistry)
+
+AppModule {
+ return AppModule{
+ AppModuleBasic: AppModuleBasic{
+ cdc: cdc, ac: ak.AddressCodec()
+},
+ keeper: keeper,
+ bankKeeper: bk,
+ accKeeper: ak,
+ registry: registry,
+}
+}
+
+var (
+ _ appmodule.AppModule = AppModule{
+}
+ _ appmodule.HasEndBlocker = AppModule{
+}
+)
+
+// IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+func (am AppModule)
+
+IsOnePerModuleType() {
+}
+
+// IsAppModule implements the appmodule.AppModule interface.
+func (am AppModule)
+
+IsAppModule() {
+}
+
+type AppModuleBasic struct {
+ cdc codec.Codec
+ ac address.Codec
+}
+
+// Name returns the group module's name.
+func (AppModuleBasic)
+
+Name()
+
+string {
+ return group.ModuleName
+}
+
+// DefaultGenesis returns default genesis state as raw bytes for the group
+// module.
+func (AppModuleBasic)
+
+DefaultGenesis(cdc codec.JSONCodec)
+
+json.RawMessage {
+ return cdc.MustMarshalJSON(group.NewGenesisState())
+}
+
+// ValidateGenesis performs genesis state validation for the group module.
+func (AppModuleBasic)
+
+ValidateGenesis(cdc codec.JSONCodec, config sdkclient.TxEncodingConfig, bz json.RawMessage)
+
+error {
+ var data group.GenesisState
+ if err := cdc.UnmarshalJSON(bz, &data); err != nil {
+ return fmt.Errorf("failed to unmarshal %s genesis state: %w", group.ModuleName, err)
+}
+
+return data.Validate()
+}
+
+// GetQueryCmd returns the cli query commands for the group module
+func (a AppModuleBasic)
+
+GetQueryCmd() *cobra.Command {
+ return cli.QueryCmd(a.Name())
+}
+
+// GetTxCmd returns the transaction commands for the group module
+func (a AppModuleBasic)
+
+GetTxCmd() *cobra.Command {
+ return cli.TxCmd(a.Name(), a.ac)
+}
+
+// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the group module.
+func (a AppModuleBasic)
+
+RegisterGRPCGatewayRoutes(clientCtx sdkclient.Context, mux *gwruntime.ServeMux) {
+ if err := group.RegisterQueryHandlerClient(context.Background(), mux, group.NewQueryClient(clientCtx)); err != nil {
+ panic(err)
+}
+}
+
+// RegisterInterfaces registers the group module's interface types
+func (AppModuleBasic)
+
+RegisterInterfaces(registry cdctypes.InterfaceRegistry) {
+ group.RegisterInterfaces(registry)
+}
+
+// RegisterLegacyAminoCodec registers the group module's types for the given codec.
+func (AppModuleBasic)
+
+RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ group.RegisterLegacyAminoCodec(cdc)
+}
+
+// Name returns the group module's name.
+func (AppModule)
+
+Name()
+
+string {
+ return group.ModuleName
+}
+
+// RegisterInvariants does nothing, there are no invariants to enforce
+func (am AppModule)
+
+RegisterInvariants(ir sdk.InvariantRegistry) {
+ keeper.RegisterInvariants(ir, am.keeper)
+}
+
+// InitGenesis performs genesis initialization for the group module. It returns
+// no validator updates.
+func (am AppModule)
+
+InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) []abci.ValidatorUpdate {
+ am.keeper.InitGenesis(ctx, cdc, data)
+
+return []abci.ValidatorUpdate{
+}
+}
+
+// ExportGenesis returns the exported genesis state as raw bytes for the group
+// module.
+func (am AppModule)
+
+ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec)
+
+json.RawMessage {
+ gs := am.keeper.ExportGenesis(ctx, cdc)
+
+return cdc.MustMarshalJSON(gs)
+}
+
+// RegisterServices registers a gRPC query service to respond to the
+// module-specific gRPC queries.
+func (am AppModule)
+
+RegisterServices(cfg module.Configurator) {
+ group.RegisterMsgServer(cfg.MsgServer(), am.keeper)
+
+group.RegisterQueryServer(cfg.QueryServer(), am.keeper)
+ m := keeper.NewMigrator(am.keeper)
+ if err := cfg.RegisterMigration(group.ModuleName, 1, m.Migrate1to2); err != nil {
+ panic(fmt.Sprintf("failed to migrate x/%s from version 1 to 2: %v", group.ModuleName, err))
+}
+}
+
+// ConsensusVersion implements AppModule/ConsensusVersion.
+func (AppModule)
+
+ConsensusVersion()
+
+uint64 {
+ return ConsensusVersion
+}
+
+// EndBlock implements the group module's EndBlock.
+func (am AppModule)
+
+EndBlock(ctx context.Context)
+
+error {
+ c := sdk.UnwrapSDKContext(ctx)
+
+return EndBlocker(c, am.keeper)
+}
+
+// ____________________________________________________________________________
+
+// AppModuleSimulation functions
+
+// GenerateGenesisState creates a randomized GenState of the group module.
+func (AppModule)
+
+GenerateGenesisState(simState *module.SimulationState) {
+ simulation.RandomizedGenState(simState)
+}
+
+// RegisterStoreDecoder registers a decoder for group module's types
+func (am AppModule)
+
+RegisterStoreDecoder(sdr simtypes.StoreDecoderRegistry) {
+ sdr[group.StoreKey] = simulation.NewDecodeStore(am.cdc)
+}
+
+// WeightedOperations returns the all the gov module operations with their respective weights.
+func (am AppModule)
+
+WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation {
+ return simulation.WeightedOperations(
+ am.registry,
+ simState.AppParams, simState.Cdc, simState.TxConfig,
+ am.accKeeper, am.bankKeeper, am.keeper, am.cdc,
+ )
+}
+
+//
+// App Wiring Setup
+//
+
+func init() {
+ appmodule.Register(
+ &modulev1.Module{
+},
+ appmodule.Provide(ProvideModule),
+ )
+}
+
+type GroupInputs struct {
+ depinject.In
+
+ Config *modulev1.Module
+ Key *store.KVStoreKey
+ Cdc codec.Codec
+ AccountKeeper group.AccountKeeper
+ BankKeeper group.BankKeeper
+ Registry cdctypes.InterfaceRegistry
+ MsgServiceRouter baseapp.MessageRouter
+}
+
+type GroupOutputs struct {
+ depinject.Out
+
+ GroupKeeper keeper.Keeper
+ Module appmodule.AppModule
+}
+
+func ProvideModule(in GroupInputs)
+
+GroupOutputs {
+ /*
+ Example of setting group params:
+ in.Config.MaxMetadataLen = 1000
+ in.Config.MaxExecutionPeriod = "1209600s"
+ */
+ k := keeper.NewKeeper(in.Key, in.Cdc, in.MsgServiceRouter, in.AccountKeeper, group.Config{
+ MaxExecutionPeriod: in.Config.MaxExecutionPeriod.AsDuration(),
+ MaxMetadataLen: in.Config.MaxMetadataLen
+})
+ m := NewAppModule(in.Cdc, k, in.AccountKeeper, in.BankKeeper, in.Registry)
+
+return GroupOutputs{
+ GroupKeeper: k,
+ Module: m
+}
+}
+```
+
+The `ProvideModule` function should return an instance of `cosmossdk.io/core/appmodule.AppModule` which implements
+one or more app module extension interfaces for initializing the module.
+
+Following is the complete app wiring configuration for `group`:
+
+```go expandable
+package module
+
+import (
+
+ "context"
+ "encoding/json"
+ "fmt"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+
+ modulev1 "cosmossdk.io/api/cosmos/group/module/v1"
+ "cosmossdk.io/core/address"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/depinject"
+
+ store "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ sdkclient "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ cdctypes "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ "github.com/cosmos/cosmos-sdk/x/group"
+ "github.com/cosmos/cosmos-sdk/x/group/client/cli"
+ "github.com/cosmos/cosmos-sdk/x/group/keeper"
+ "github.com/cosmos/cosmos-sdk/x/group/simulation"
+)
+
+// ConsensusVersion defines the current x/group module consensus version.
+const ConsensusVersion = 2
+
+var (
+ _ module.AppModuleBasic = AppModuleBasic{
+}
+ _ module.AppModuleSimulation = AppModule{
+}
+)
+
+type AppModule struct {
+ AppModuleBasic
+ keeper keeper.Keeper
+ bankKeeper group.BankKeeper
+ accKeeper group.AccountKeeper
+ registry cdctypes.InterfaceRegistry
+}
+
+// NewAppModule creates a new AppModule object
+func NewAppModule(cdc codec.Codec, keeper keeper.Keeper, ak group.AccountKeeper, bk group.BankKeeper, registry cdctypes.InterfaceRegistry)
+
+AppModule {
+ return AppModule{
+ AppModuleBasic: AppModuleBasic{
+ cdc: cdc, ac: ak.AddressCodec()
+},
+ keeper: keeper,
+ bankKeeper: bk,
+ accKeeper: ak,
+ registry: registry,
+}
+}
+
+var (
+ _ appmodule.AppModule = AppModule{
+}
+ _ appmodule.HasEndBlocker = AppModule{
+}
+)
+
+// IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+func (am AppModule)
+
+IsOnePerModuleType() {
+}
+
+// IsAppModule implements the appmodule.AppModule interface.
+func (am AppModule)
+
+IsAppModule() {
+}
+
+type AppModuleBasic struct {
+ cdc codec.Codec
+ ac address.Codec
+}
+
+// Name returns the group module's name.
+func (AppModuleBasic)
+
+Name()
+
+string {
+ return group.ModuleName
+}
+
+// DefaultGenesis returns default genesis state as raw bytes for the group
+// module.
+func (AppModuleBasic)
+
+DefaultGenesis(cdc codec.JSONCodec)
+
+json.RawMessage {
+ return cdc.MustMarshalJSON(group.NewGenesisState())
+}
+
+// ValidateGenesis performs genesis state validation for the group module.
+func (AppModuleBasic)
+
+ValidateGenesis(cdc codec.JSONCodec, config sdkclient.TxEncodingConfig, bz json.RawMessage)
+
+error {
+ var data group.GenesisState
+ if err := cdc.UnmarshalJSON(bz, &data); err != nil {
+ return fmt.Errorf("failed to unmarshal %s genesis state: %w", group.ModuleName, err)
+}
+
+return data.Validate()
+}
+
+// GetQueryCmd returns the cli query commands for the group module
+func (a AppModuleBasic)
+
+GetQueryCmd() *cobra.Command {
+ return cli.QueryCmd(a.Name())
+}
+
+// GetTxCmd returns the transaction commands for the group module
+func (a AppModuleBasic)
+
+GetTxCmd() *cobra.Command {
+ return cli.TxCmd(a.Name(), a.ac)
+}
+
+// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the group module.
+func (a AppModuleBasic)
+
+RegisterGRPCGatewayRoutes(clientCtx sdkclient.Context, mux *gwruntime.ServeMux) {
+ if err := group.RegisterQueryHandlerClient(context.Background(), mux, group.NewQueryClient(clientCtx)); err != nil {
+ panic(err)
+}
+}
+
+// RegisterInterfaces registers the group module's interface types
+func (AppModuleBasic)
+
+RegisterInterfaces(registry cdctypes.InterfaceRegistry) {
+ group.RegisterInterfaces(registry)
+}
+
+// RegisterLegacyAminoCodec registers the group module's types for the given codec.
+func (AppModuleBasic)
+
+RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ group.RegisterLegacyAminoCodec(cdc)
+}
+
+// Name returns the group module's name.
+func (AppModule)
+
+Name()
+
+string {
+ return group.ModuleName
+}
+
+// RegisterInvariants does nothing, there are no invariants to enforce
+func (am AppModule)
+
+RegisterInvariants(ir sdk.InvariantRegistry) {
+ keeper.RegisterInvariants(ir, am.keeper)
+}
+
+// InitGenesis performs genesis initialization for the group module. It returns
+// no validator updates.
+func (am AppModule)
+
+InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) []abci.ValidatorUpdate {
+ am.keeper.InitGenesis(ctx, cdc, data)
+
+return []abci.ValidatorUpdate{
+}
+}
+
+// ExportGenesis returns the exported genesis state as raw bytes for the group
+// module.
+func (am AppModule)
+
+ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec)
+
+json.RawMessage {
+ gs := am.keeper.ExportGenesis(ctx, cdc)
+
+return cdc.MustMarshalJSON(gs)
+}
+
+// RegisterServices registers a gRPC query service to respond to the
+// module-specific gRPC queries.
+func (am AppModule)
+
+RegisterServices(cfg module.Configurator) {
+ group.RegisterMsgServer(cfg.MsgServer(), am.keeper)
+
+group.RegisterQueryServer(cfg.QueryServer(), am.keeper)
+ m := keeper.NewMigrator(am.keeper)
+ if err := cfg.RegisterMigration(group.ModuleName, 1, m.Migrate1to2); err != nil {
+ panic(fmt.Sprintf("failed to migrate x/%s from version 1 to 2: %v", group.ModuleName, err))
+}
+}
+
+// ConsensusVersion implements AppModule/ConsensusVersion.
+func (AppModule)
+
+ConsensusVersion()
+
+uint64 {
+ return ConsensusVersion
+}
+
+// EndBlock implements the group module's EndBlock.
+func (am AppModule)
+
+EndBlock(ctx context.Context)
+
+error {
+ c := sdk.UnwrapSDKContext(ctx)
+
+return EndBlocker(c, am.keeper)
+}
+
+// ____________________________________________________________________________
+
+// AppModuleSimulation functions
+
+// GenerateGenesisState creates a randomized GenState of the group module.
+func (AppModule)
+
+GenerateGenesisState(simState *module.SimulationState) {
+ simulation.RandomizedGenState(simState)
+}
+
+// RegisterStoreDecoder registers a decoder for group module's types
+func (am AppModule)
+
+RegisterStoreDecoder(sdr simtypes.StoreDecoderRegistry) {
+ sdr[group.StoreKey] = simulation.NewDecodeStore(am.cdc)
+}
+
+// WeightedOperations returns the all the gov module operations with their respective weights.
+func (am AppModule)
+
+WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation {
+ return simulation.WeightedOperations(
+ am.registry,
+ simState.AppParams, simState.Cdc, simState.TxConfig,
+ am.accKeeper, am.bankKeeper, am.keeper, am.cdc,
+ )
+}
+
+//
+// App Wiring Setup
+//
+
+func init() {
+ appmodule.Register(
+ &modulev1.Module{
+},
+ appmodule.Provide(ProvideModule),
+ )
+}
+
+type GroupInputs struct {
+ depinject.In
+
+ Config *modulev1.Module
+ Key *store.KVStoreKey
+ Cdc codec.Codec
+ AccountKeeper group.AccountKeeper
+ BankKeeper group.BankKeeper
+ Registry cdctypes.InterfaceRegistry
+ MsgServiceRouter baseapp.MessageRouter
+}
+
+type GroupOutputs struct {
+ depinject.Out
+
+ GroupKeeper keeper.Keeper
+ Module appmodule.AppModule
+}
+
+func ProvideModule(in GroupInputs)
+
+GroupOutputs {
+ /*
+ Example of setting group params:
+ in.Config.MaxMetadataLen = 1000
+ in.Config.MaxExecutionPeriod = "1209600s"
+ */
+ k := keeper.NewKeeper(in.Key, in.Cdc, in.MsgServiceRouter, in.AccountKeeper, group.Config{
+ MaxExecutionPeriod: in.Config.MaxExecutionPeriod.AsDuration(),
+ MaxMetadataLen: in.Config.MaxMetadataLen
+})
+ m := NewAppModule(in.Cdc, k, in.AccountKeeper, in.BankKeeper, in.Registry)
+
+return GroupOutputs{
+ GroupKeeper: k,
+ Module: m
+}
+}
+```
+
+The module is now ready to be used with `depinject` by a chain developer.
+
+## Integrate in an application
+
+The App Wiring is done in `app_config.go` / `app.yaml` and `app_di.go` and is explained in detail in the [overview of `app_di.go`](/sdk/v0.53/build/building-apps/app-go-di).
diff --git a/sdk/next/build/building-modules/errors.mdx b/sdk/next/build/building-modules/errors.mdx
new file mode 100644
index 000000000..a08ad33b7
--- /dev/null
+++ b/sdk/next/build/building-modules/errors.mdx
@@ -0,0 +1,702 @@
+---
+title: Errors
+---
+
+
+**Synopsis**
+This document outlines the recommended usage and APIs for error handling in Cosmos SDK modules.
+
+
+Modules are encouraged to define and register their own errors to provide better
+context on failed message or handler execution. Typically, these errors should be
+common or general errors which can be further wrapped to provide additional specific
+execution context.
+
+## Registration
+
+Modules should define and register their custom errors in `x/{module}/errors.go`.
+Registration of errors is handled via the [`errors` package](https://github.com/cosmos/cosmos-sdk/blob/main/errors/errors.go).
+
+Example:
+
+```go expandable
+package types
+
+import "cosmossdk.io/errors"
+
+// x/distribution module sentinel errors
+var (
+ ErrEmptyDelegatorAddr = errors.Register(ModuleName, 2, "delegator address is empty")
+
+ErrEmptyWithdrawAddr = errors.Register(ModuleName, 3, "withdraw address is empty")
+
+ErrEmptyValidatorAddr = errors.Register(ModuleName, 4, "validator address is empty")
+
+ErrEmptyDelegationDistInfo = errors.Register(ModuleName, 5, "no delegation distribution info")
+
+ErrNoValidatorDistInfo = errors.Register(ModuleName, 6, "no validator distribution info")
+
+ErrNoValidatorCommission = errors.Register(ModuleName, 7, "no validator commission to withdraw")
+
+ErrSetWithdrawAddrDisabled = errors.Register(ModuleName, 8, "set withdraw address disabled")
+
+ErrBadDistribution = errors.Register(ModuleName, 9, "community pool does not have sufficient coins to distribute")
+
+ErrInvalidProposalAmount = errors.Register(ModuleName, 10, "invalid community pool spend proposal amount")
+
+ErrEmptyProposalRecipient = errors.Register(ModuleName, 11, "invalid community pool spend proposal recipient")
+
+ErrNoValidatorExists = errors.Register(ModuleName, 12, "validator does not exist")
+
+ErrNoDelegationExists = errors.Register(ModuleName, 13, "delegation does not exist")
+)
+```
+
+Each custom module error must provide the codespace, which is typically the module name
+(e.g. "distribution") and is unique per module, and a uint32 code. Together, the codespace and code
+provide a globally unique Cosmos SDK error. Typically, the code is monotonically increasing but does not
+necessarily have to be. The only restrictions on error codes are the following:
+
+* Must be greater than one, as a code value of one is reserved for internal errors.
+* Must be unique within the module.
+
+Note that the Cosmos SDK provides a core set of common errors. These errors are defined in [`types/errors/errors.go`](https://github.com/cosmos/cosmos-sdk/blob/main/types/errors/errors.go).
+
+## Wrapping
+
+The custom module errors can be returned as their concrete type as they already fulfill the `error`
+interface. However, module errors can be wrapped to provide further context and meaning to failed
+execution.
+
+Example:
+
+```go expandable
+package keeper
+
+import (
+
+ "context"
+ "errors"
+ "fmt"
+ "cosmossdk.io/collections"
+ "cosmossdk.io/core/store"
+ "cosmossdk.io/log"
+ "cosmossdk.io/math"
+
+ errorsmod "cosmossdk.io/errors"
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/types/query"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ "github.com/cosmos/cosmos-sdk/x/bank/types"
+)
+
+var _ Keeper = (*BaseKeeper)(nil)
+
+// Keeper defines a module interface that facilitates the transfer of coins
+// between accounts.
+type Keeper interface {
+ SendKeeper
+ WithMintCoinsRestriction(MintingRestrictionFn)
+
+BaseKeeper
+
+ InitGenesis(context.Context, *types.GenesisState)
+
+ExportGenesis(context.Context) *types.GenesisState
+
+ GetSupply(ctx context.Context, denom string)
+
+sdk.Coin
+ HasSupply(ctx context.Context, denom string)
+
+bool
+ GetPaginatedTotalSupply(ctx context.Context, pagination *query.PageRequest) (sdk.Coins, *query.PageResponse, error)
+
+IterateTotalSupply(ctx context.Context, cb func(sdk.Coin)
+
+bool)
+
+GetDenomMetaData(ctx context.Context, denom string) (types.Metadata, bool)
+
+HasDenomMetaData(ctx context.Context, denom string)
+
+bool
+ SetDenomMetaData(ctx context.Context, denomMetaData types.Metadata)
+
+GetAllDenomMetaData(ctx context.Context) []types.Metadata
+ IterateAllDenomMetaData(ctx context.Context, cb func(types.Metadata)
+
+bool)
+
+SendCoinsFromModuleToAccount(ctx context.Context, senderModule string, recipientAddr sdk.AccAddress, amt sdk.Coins)
+
+error
+ SendCoinsFromModuleToModule(ctx context.Context, senderModule, recipientModule string, amt sdk.Coins)
+
+error
+ SendCoinsFromAccountToModule(ctx context.Context, senderAddr sdk.AccAddress, recipientModule string, amt sdk.Coins)
+
+error
+ DelegateCoinsFromAccountToModule(ctx context.Context, senderAddr sdk.AccAddress, recipientModule string, amt sdk.Coins)
+
+error
+ UndelegateCoinsFromModuleToAccount(ctx context.Context, senderModule string, recipientAddr sdk.AccAddress, amt sdk.Coins)
+
+error
+ MintCoins(ctx context.Context, moduleName string, amt sdk.Coins)
+
+error
+ BurnCoins(ctx context.Context, moduleName string, amt sdk.Coins)
+
+error
+
+ DelegateCoins(ctx context.Context, delegatorAddr, moduleAccAddr sdk.AccAddress, amt sdk.Coins)
+
+error
+ UndelegateCoins(ctx context.Context, moduleAccAddr, delegatorAddr sdk.AccAddress, amt sdk.Coins)
+
+error
+
+ types.QueryServer
+}
+
+// BaseKeeper manages transfers between accounts. It implements the Keeper interface.
+type BaseKeeper struct {
+ BaseSendKeeper
+
+ ak types.AccountKeeper
+ cdc codec.BinaryCodec
+ storeService store.KVStoreService
+ mintCoinsRestrictionFn MintingRestrictionFn
+ logger log.Logger
+}
+
+type MintingRestrictionFn func(ctx context.Context, coins sdk.Coins)
+
+error
+
+// GetPaginatedTotalSupply queries for the supply, ignoring 0 coins, with a given pagination
+func (k BaseKeeper)
+
+GetPaginatedTotalSupply(ctx context.Context, pagination *query.PageRequest) (sdk.Coins, *query.PageResponse, error) {
+ results, pageResp, err := query.CollectionPaginate[string, math.Int](ctx, k.Supply, pagination)
+ if err != nil {
+ return nil, nil, err
+}
+ coins := sdk.NewCoins()
+ for _, res := range results {
+ coins = coins.Add(sdk.NewCoin(res.Key, res.Value))
+}
+
+return coins, pageResp, nil
+}
+
+// NewBaseKeeper returns a new BaseKeeper object with a given codec, dedicated
+// store key, an AccountKeeper implementation, and a parameter Subspace used to
+// store and fetch module parameters. The BaseKeeper also accepts a
+// blocklist map. This blocklist describes the set of addresses that are not allowed
+// to receive funds through direct and explicit actions, for example, by using a MsgSend or
+// by using a SendCoinsFromModuleToAccount execution.
+func NewBaseKeeper(
+ cdc codec.BinaryCodec,
+ storeService store.KVStoreService,
+ ak types.AccountKeeper,
+ blockedAddrs map[string]bool,
+ authority string,
+ logger log.Logger,
+)
+
+BaseKeeper {
+ if _, err := ak.AddressCodec().StringToBytes(authority); err != nil {
+ panic(fmt.Errorf("invalid bank authority address: %w", err))
+}
+
+ // add the module name to the logger
+ logger = logger.With(log.ModuleKey, "x/"+types.ModuleName)
+
+return BaseKeeper{
+ BaseSendKeeper: NewBaseSendKeeper(cdc, storeService, ak, blockedAddrs, authority, logger),
+ ak: ak,
+ cdc: cdc,
+ storeService: storeService,
+ mintCoinsRestrictionFn: func(ctx context.Context, coins sdk.Coins)
+
+error {
+ return nil
+},
+ logger: logger,
+}
+}
+
+// WithMintCoinsRestriction restricts the bank Keeper used within a specific module to
+// have restricted permissions on minting via function passed in parameter.
+// Previous restriction functions can be nested as such:
+//
+// bankKeeper.WithMintCoinsRestriction(restriction1).WithMintCoinsRestriction(restriction2)
+
+func (k BaseKeeper)
+
+WithMintCoinsRestriction(check MintingRestrictionFn)
+
+BaseKeeper {
+ oldRestrictionFn := k.mintCoinsRestrictionFn
+ k.mintCoinsRestrictionFn = func(ctx context.Context, coins sdk.Coins)
+
+error {
+ err := check(ctx, coins)
+ if err != nil {
+ return err
+}
+
+err = oldRestrictionFn(ctx, coins)
+ if err != nil {
+ return err
+}
+
+return nil
+}
+
+return k
+}
+
+// DelegateCoins performs delegation by deducting amt coins from an account with
+// address addr. For vesting accounts, delegations amounts are tracked for both
+// vesting and vested coins. The coins are then transferred from the delegator
+// address to a ModuleAccount address. If any of the delegation amounts are negative,
+// an error is returned.
+func (k BaseKeeper)
+
+DelegateCoins(ctx context.Context, delegatorAddr, moduleAccAddr sdk.AccAddress, amt sdk.Coins)
+
+error {
+ moduleAcc := k.ak.GetAccount(ctx, moduleAccAddr)
+ if moduleAcc == nil {
+ return errorsmod.Wrapf(sdkerrors.ErrUnknownAddress, "module account %s does not exist", moduleAccAddr)
+}
+ if !amt.IsValid() {
+ return errorsmod.Wrap(sdkerrors.ErrInvalidCoins, amt.String())
+}
+ balances := sdk.NewCoins()
+ for _, coin := range amt {
+ balance := k.GetBalance(ctx, delegatorAddr, coin.GetDenom())
+ if balance.IsLT(coin) {
+ return errorsmod.Wrapf(
+ sdkerrors.ErrInsufficientFunds, "failed to delegate; %s is smaller than %s", balance, amt,
+ )
+}
+
+balances = balances.Add(balance)
+ err := k.setBalance(ctx, delegatorAddr, balance.Sub(coin))
+ if err != nil {
+ return err
+}
+
+}
+ if err := k.trackDelegation(ctx, delegatorAddr, balances, amt); err != nil {
+ return errorsmod.Wrap(err, "failed to track delegation")
+}
+ // emit coin spent event
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+
+sdkCtx.EventManager().EmitEvent(
+ types.NewCoinSpentEvent(delegatorAddr, amt),
+ )
+ err := k.addCoins(ctx, moduleAccAddr, amt)
+ if err != nil {
+ return err
+}
+
+return nil
+}
+
+// UndelegateCoins performs undelegation by crediting amt coins to an account with
+// address addr. For vesting accounts, undelegation amounts are tracked for both
+// vesting and vested coins. The coins are then transferred from a ModuleAccount
+// address to the delegator address. If any of the undelegation amounts are
+// negative, an error is returned.
+func (k BaseKeeper)
+
+UndelegateCoins(ctx context.Context, moduleAccAddr, delegatorAddr sdk.AccAddress, amt sdk.Coins)
+
+error {
+ moduleAcc := k.ak.GetAccount(ctx, moduleAccAddr)
+ if moduleAcc == nil {
+ return errorsmod.Wrapf(sdkerrors.ErrUnknownAddress, "module account %s does not exist", moduleAccAddr)
+}
+ if !amt.IsValid() {
+ return errorsmod.Wrap(sdkerrors.ErrInvalidCoins, amt.String())
+}
+ err := k.subUnlockedCoins(ctx, moduleAccAddr, amt)
+ if err != nil {
+ return err
+}
+ if err := k.trackUndelegation(ctx, delegatorAddr, amt); err != nil {
+ return errorsmod.Wrap(err, "failed to track undelegation")
+}
+
+err = k.addCoins(ctx, delegatorAddr, amt)
+ if err != nil {
+ return err
+}
+
+return nil
+}
+
+// GetSupply retrieves the Supply from store
+func (k BaseKeeper)
+
+GetSupply(ctx context.Context, denom string)
+
+sdk.Coin {
+ amt, err := k.Supply.Get(ctx, denom)
+ if err != nil {
+ return sdk.NewCoin(denom, math.ZeroInt())
+}
+
+return sdk.NewCoin(denom, amt)
+}
+
+// HasSupply checks if the supply coin exists in store.
+func (k BaseKeeper)
+
+HasSupply(ctx context.Context, denom string)
+
+bool {
+ has, err := k.Supply.Has(ctx, denom)
+
+return has && err == nil
+}
+
+// GetDenomMetaData retrieves the denomination metadata. returns the metadata and true if the denom exists,
+// false otherwise.
+func (k BaseKeeper)
+
+GetDenomMetaData(ctx context.Context, denom string) (types.Metadata, bool) {
+ m, err := k.BaseViewKeeper.DenomMetadata.Get(ctx, denom)
+
+return m, err == nil
+}
+
+// HasDenomMetaData checks if the denomination metadata exists in store.
+func (k BaseKeeper)
+
+HasDenomMetaData(ctx context.Context, denom string)
+
+bool {
+ has, err := k.BaseViewKeeper.DenomMetadata.Has(ctx, denom)
+
+return has && err == nil
+}
+
+// GetAllDenomMetaData retrieves all denominations metadata
+func (k BaseKeeper)
+
+GetAllDenomMetaData(ctx context.Context) []types.Metadata {
+ denomMetaData := make([]types.Metadata, 0)
+
+k.IterateAllDenomMetaData(ctx, func(metadata types.Metadata)
+
+bool {
+ denomMetaData = append(denomMetaData, metadata)
+
+return false
+})
+
+return denomMetaData
+}
+
+// IterateAllDenomMetaData iterates over all the denominations metadata and
+// provides the metadata to a callback. If true is returned from the
+// callback, iteration is halted.
+func (k BaseKeeper)
+
+IterateAllDenomMetaData(ctx context.Context, cb func(types.Metadata)
+
+bool) {
+ err := k.BaseViewKeeper.DenomMetadata.Walk(ctx, nil, func(_ string, metadata types.Metadata) (stop bool, err error) {
+ return cb(metadata), nil
+})
+ if err != nil && !errors.Is(err, collections.ErrInvalidIterator) {
+ panic(err)
+}
+}
+
+// SetDenomMetaData sets the denominations metadata
+func (k BaseKeeper)
+
+SetDenomMetaData(ctx context.Context, denomMetaData types.Metadata) {
+ _ = k.BaseViewKeeper.DenomMetadata.Set(ctx, denomMetaData.Base, denomMetaData)
+}
+
+// SendCoinsFromModuleToAccount transfers coins from a ModuleAccount to an AccAddress.
+// It will panic if the module account does not exist. An error is returned if
+// the recipient address is black-listed or if sending the tokens fails.
+func (k BaseKeeper)
+
+SendCoinsFromModuleToAccount(
+ ctx context.Context, senderModule string, recipientAddr sdk.AccAddress, amt sdk.Coins,
+)
+
+error {
+ senderAddr := k.ak.GetModuleAddress(senderModule)
+ if senderAddr == nil {
+ panic(errorsmod.Wrapf(sdkerrors.ErrUnknownAddress, "module account %s does not exist", senderModule))
+}
+ if k.BlockedAddr(recipientAddr) {
+ return errorsmod.Wrapf(sdkerrors.ErrUnauthorized, "%s is not allowed to receive funds", recipientAddr)
+}
+
+return k.SendCoins(ctx, senderAddr, recipientAddr, amt)
+}
+
+// SendCoinsFromModuleToModule transfers coins from a ModuleAccount to another.
+// It will panic if either module account does not exist.
+func (k BaseKeeper)
+
+SendCoinsFromModuleToModule(
+ ctx context.Context, senderModule, recipientModule string, amt sdk.Coins,
+)
+
+error {
+ senderAddr := k.ak.GetModuleAddress(senderModule)
+ if senderAddr == nil {
+ panic(errorsmod.Wrapf(sdkerrors.ErrUnknownAddress, "module account %s does not exist", senderModule))
+}
+ recipientAcc := k.ak.GetModuleAccount(ctx, recipientModule)
+ if recipientAcc == nil {
+ panic(errorsmod.Wrapf(sdkerrors.ErrUnknownAddress, "module account %s does not exist", recipientModule))
+}
+
+return k.SendCoins(ctx, senderAddr, recipientAcc.GetAddress(), amt)
+}
+
+// SendCoinsFromAccountToModule transfers coins from an AccAddress to a ModuleAccount.
+// It will panic if the module account does not exist.
+func (k BaseKeeper)
+
+SendCoinsFromAccountToModule(
+ ctx context.Context, senderAddr sdk.AccAddress, recipientModule string, amt sdk.Coins,
+)
+
+error {
+ recipientAcc := k.ak.GetModuleAccount(ctx, recipientModule)
+ if recipientAcc == nil {
+ panic(errorsmod.Wrapf(sdkerrors.ErrUnknownAddress, "module account %s does not exist", recipientModule))
+}
+
+return k.SendCoins(ctx, senderAddr, recipientAcc.GetAddress(), amt)
+}
+
+// DelegateCoinsFromAccountToModule delegates coins and transfers them from a
+// delegator account to a module account. It will panic if the module account
+// does not exist or is unauthorized.
+func (k BaseKeeper)
+
+DelegateCoinsFromAccountToModule(
+ ctx context.Context, senderAddr sdk.AccAddress, recipientModule string, amt sdk.Coins,
+)
+
+error {
+ recipientAcc := k.ak.GetModuleAccount(ctx, recipientModule)
+ if recipientAcc == nil {
+ panic(errorsmod.Wrapf(sdkerrors.ErrUnknownAddress, "module account %s does not exist", recipientModule))
+}
+ if !recipientAcc.HasPermission(authtypes.Staking) {
+ panic(errorsmod.Wrapf(sdkerrors.ErrUnauthorized, "module account %s does not have permissions to receive delegated coins", recipientModule))
+}
+
+return k.DelegateCoins(ctx, senderAddr, recipientAcc.GetAddress(), amt)
+}
+
+// UndelegateCoinsFromModuleToAccount undelegates the unbonding coins and transfers
+// them from a module account to the delegator account. It will panic if the
+// module account does not exist or is unauthorized.
+func (k BaseKeeper)
+
+UndelegateCoinsFromModuleToAccount(
+ ctx context.Context, senderModule string, recipientAddr sdk.AccAddress, amt sdk.Coins,
+)
+
+error {
+ acc := k.ak.GetModuleAccount(ctx, senderModule)
+ if acc == nil {
+ panic(errorsmod.Wrapf(sdkerrors.ErrUnknownAddress, "module account %s does not exist", senderModule))
+}
+ if !acc.HasPermission(authtypes.Staking) {
+ panic(errorsmod.Wrapf(sdkerrors.ErrUnauthorized, "module account %s does not have permissions to undelegate coins", senderModule))
+}
+
+return k.UndelegateCoins(ctx, acc.GetAddress(), recipientAddr, amt)
+}
+
+// MintCoins creates new coins from thin air and adds it to the module account.
+// It will panic if the module account does not exist or is unauthorized.
+func (k BaseKeeper)
+
+MintCoins(ctx context.Context, moduleName string, amounts sdk.Coins)
+
+error {
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ err := k.mintCoinsRestrictionFn(ctx, amounts)
+ if err != nil {
+ k.logger.Error(fmt.Sprintf("Module %q attempted to mint coins %s it doesn't have permission for, error %v", moduleName, amounts, err))
+
+return err
+}
+ acc := k.ak.GetModuleAccount(ctx, moduleName)
+ if acc == nil {
+ panic(errorsmod.Wrapf(sdkerrors.ErrUnknownAddress, "module account %s does not exist", moduleName))
+}
+ if !acc.HasPermission(authtypes.Minter) {
+ panic(errorsmod.Wrapf(sdkerrors.ErrUnauthorized, "module account %s does not have permissions to mint tokens", moduleName))
+}
+
+err = k.addCoins(ctx, acc.GetAddress(), amounts)
+ if err != nil {
+ return err
+}
+ for _, amount := range amounts {
+ supply := k.GetSupply(ctx, amount.GetDenom())
+
+supply = supply.Add(amount)
+
+k.setSupply(ctx, supply)
+}
+
+k.logger.Debug("minted coins from module account", "amount", amounts.String(), "from", moduleName)
+
+ // emit mint event
+ sdkCtx.EventManager().EmitEvent(
+ types.NewCoinMintEvent(acc.GetAddress(), amounts),
+ )
+
+return nil
+}
+
+// BurnCoins burns coins deletes coins from the balance of the module account.
+// It will panic if the module account does not exist or is unauthorized.
+func (k BaseKeeper)
+
+BurnCoins(ctx context.Context, moduleName string, amounts sdk.Coins)
+
+error {
+ acc := k.ak.GetModuleAccount(ctx, moduleName)
+ if acc == nil {
+ panic(errorsmod.Wrapf(sdkerrors.ErrUnknownAddress, "module account %s does not exist", moduleName))
+}
+ if !acc.HasPermission(authtypes.Burner) {
+ panic(errorsmod.Wrapf(sdkerrors.ErrUnauthorized, "module account %s does not have permissions to burn tokens", moduleName))
+}
+ err := k.subUnlockedCoins(ctx, acc.GetAddress(), amounts)
+ if err != nil {
+ return err
+}
+ for _, amount := range amounts {
+ supply := k.GetSupply(ctx, amount.GetDenom())
+
+supply = supply.Sub(amount)
+
+k.setSupply(ctx, supply)
+}
+
+k.logger.Debug("burned tokens from module account", "amount", amounts.String(), "from", moduleName)
+
+ // emit burn event
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+
+sdkCtx.EventManager().EmitEvent(
+ types.NewCoinBurnEvent(acc.GetAddress(), amounts),
+ )
+
+return nil
+}
+
+// setSupply sets the supply for the given coin
+func (k BaseKeeper)
+
+setSupply(ctx context.Context, coin sdk.Coin) {
+ // Bank invariants and IBC requires to remove zero coins.
+ if coin.IsZero() {
+ _ = k.Supply.Remove(ctx, coin.Denom)
+}
+
+else {
+ _ = k.Supply.Set(ctx, coin.Denom, coin.Amount)
+}
+}
+
+// trackDelegation tracks the delegation of the given account if it is a vesting account
+func (k BaseKeeper)
+
+trackDelegation(ctx context.Context, addr sdk.AccAddress, balance, amt sdk.Coins)
+
+error {
+ acc := k.ak.GetAccount(ctx, addr)
+ if acc == nil {
+ return errorsmod.Wrapf(sdkerrors.ErrUnknownAddress, "account %s does not exist", addr)
+}
+
+vacc, ok := acc.(types.VestingAccount)
+ if ok {
+ // TODO: return error on account.TrackDelegation
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+
+vacc.TrackDelegation(sdkCtx.BlockHeader().Time, balance, amt)
+
+k.ak.SetAccount(ctx, acc)
+}
+
+return nil
+}
+
+// trackUndelegation trakcs undelegation of the given account if it is a vesting account
+func (k BaseKeeper)
+
+trackUndelegation(ctx context.Context, addr sdk.AccAddress, amt sdk.Coins)
+
+error {
+ acc := k.ak.GetAccount(ctx, addr)
+ if acc == nil {
+ return errorsmod.Wrapf(sdkerrors.ErrUnknownAddress, "account %s does not exist", addr)
+}
+
+vacc, ok := acc.(types.VestingAccount)
+ if ok {
+ // TODO: return error on account.TrackUndelegation
+ vacc.TrackUndelegation(amt)
+
+k.ak.SetAccount(ctx, acc)
+}
+
+return nil
+}
+
+// IterateTotalSupply iterates over the total supply calling the given cb (callback)
+
+function
+// with the balance of each coin.
+// The iteration stops if the callback returns true.
+func (k BaseViewKeeper)
+
+IterateTotalSupply(ctx context.Context, cb func(sdk.Coin)
+
+bool) {
+ err := k.Supply.Walk(ctx, nil, func(s string, m math.Int) (bool, error) {
+ return cb(sdk.NewCoin(s, m)), nil
+})
+ if err != nil && !errors.Is(err, collections.ErrInvalidIterator) {
+ panic(err)
+}
+}
+```
+
+Regardless of whether an error is wrapped, the Cosmos SDK's `errors` package provides a function to determine if
+an error is of a particular kind via `Is`.
+
+## ABCI
+
+If a module error is registered, the Cosmos SDK `errors` package allows ABCI information to be extracted
+through the `ABCIInfo` function. The package also provides `ResponseCheckTx` and `ResponseDeliverTx` as
+auxiliary functions to automatically get `CheckTx` and `DeliverTx` responses from an error.
diff --git a/sdk/next/build/building-modules/genesis.mdx b/sdk/next/build/building-modules/genesis.mdx
new file mode 100644
index 000000000..893034197
--- /dev/null
+++ b/sdk/next/build/building-modules/genesis.mdx
@@ -0,0 +1,768 @@
+---
+title: Module Genesis
+---
+{/* force preview rebuild: 2025-09-30 */}
+
+
+**Synopsis**
+Modules generally handle a subset of the state and, as such, they need to define the related subset of the genesis file as well as methods to initialize, verify and export it.
+
+
+
+**Prerequisite Readings**
+
+* [Module Manager](/sdk/next/build/building-modules/module-manager)
+* [Keepers](/sdk/next/build/building-modules/keeper)
+
+
+
+## Type Definition
+
+The subset of the genesis state owned by a given module is generally defined in a `genesis.proto` file ([more info](/sdk/v0.53/learn/advanced/encoding#gogoproto) on how to define protobuf messages). The struct representing the module's subset of the genesis state is usually called `GenesisState` and contains all the module-related values that need to be initialized during the genesis process.
+
+See an example of `GenesisState` protobuf message definition from the `auth` module:
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/proto/cosmos/auth/v1beta1/genesis.proto
+```
+
+The following sections present the main genesis-related methods that module developers must implement so that their module can be used in Cosmos SDK applications.
+
+### `DefaultGenesis`
+
+The `DefaultGenesis()` method simply calls the constructor function for `GenesisState` with the default value for each parameter. See an example from the `auth` module:
+
+```go expandable
+package auth
+
+import (
+
+ "context"
+ "encoding/json"
+ "fmt"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+ "cosmossdk.io/depinject"
+
+ authcodec "github.com/cosmos/cosmos-sdk/x/auth/codec"
+ "cosmossdk.io/core/address"
+ "cosmossdk.io/core/appmodule"
+
+ modulev1 "cosmossdk.io/api/cosmos/auth/module/v1"
+ "cosmossdk.io/core/store"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ "github.com/cosmos/cosmos-sdk/x/auth/client/cli"
+ "github.com/cosmos/cosmos-sdk/x/auth/exported"
+ "github.com/cosmos/cosmos-sdk/x/auth/keeper"
+ "github.com/cosmos/cosmos-sdk/x/auth/simulation"
+ "github.com/cosmos/cosmos-sdk/x/auth/types"
+)
+
+// ConsensusVersion defines the current x/auth module consensus version.
+const (
+ ConsensusVersion = 5
+ GovModuleName = "gov"
+)
+
+var (
+ _ module.AppModule = AppModule{
+}
+ _ module.AppModuleBasic = AppModuleBasic{
+}
+ _ module.AppModuleSimulation = AppModule{
+}
+)
+
+// AppModuleBasic defines the basic application module used by the auth module.
+type AppModuleBasic struct {
+ ac address.Codec
+}
+
+// Name returns the auth module's name.
+func (AppModuleBasic)
+
+Name()
+
+string {
+ return types.ModuleName
+}
+
+// RegisterLegacyAminoCodec registers the auth module's types for the given codec.
+func (AppModuleBasic)
+
+RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ types.RegisterLegacyAminoCodec(cdc)
+}
+
+// DefaultGenesis returns default genesis state as raw bytes for the auth
+// module.
+func (AppModuleBasic)
+
+DefaultGenesis(cdc codec.JSONCodec)
+
+json.RawMessage {
+ return cdc.MustMarshalJSON(types.DefaultGenesisState())
+}
+
+// ValidateGenesis performs genesis state validation for the auth module.
+func (AppModuleBasic)
+
+ValidateGenesis(cdc codec.JSONCodec, config client.TxEncodingConfig, bz json.RawMessage)
+
+error {
+ var data types.GenesisState
+ if err := cdc.UnmarshalJSON(bz, &data); err != nil {
+ return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err)
+}
+
+return types.ValidateGenesis(data)
+}
+
+// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the auth module.
+func (AppModuleBasic)
+
+RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *gwruntime.ServeMux) {
+ if err := types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)); err != nil {
+ panic(err)
+}
+}
+
+// GetTxCmd returns the root tx command for the auth module.
+func (AppModuleBasic)
+
+GetTxCmd() *cobra.Command {
+ return nil
+}
+
+// GetQueryCmd returns the root query command for the auth module.
+func (ab AppModuleBasic)
+
+GetQueryCmd() *cobra.Command {
+ return cli.GetQueryCmd(ab.ac)
+}
+
+// RegisterInterfaces registers interfaces and implementations of the auth module.
+func (AppModuleBasic)
+
+RegisterInterfaces(registry codectypes.InterfaceRegistry) {
+ types.RegisterInterfaces(registry)
+}
+
+// AppModule implements an application module for the auth module.
+type AppModule struct {
+ AppModuleBasic
+
+ accountKeeper keeper.AccountKeeper
+ randGenAccountsFn types.RandomGenesisAccountsFn
+
+ // legacySubspace is used solely for migration of x/params managed parameters
+ legacySubspace exported.Subspace
+}
+
+var _ appmodule.AppModule = AppModule{
+}
+
+// IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+func (am AppModule)
+
+IsOnePerModuleType() {
+}
+
+// IsAppModule implements the appmodule.AppModule interface.
+func (am AppModule)
+
+IsAppModule() {
+}
+
+// NewAppModule creates a new AppModule object
+func NewAppModule(cdc codec.Codec, accountKeeper keeper.AccountKeeper, randGenAccountsFn types.RandomGenesisAccountsFn, ss exported.Subspace)
+
+AppModule {
+ return AppModule{
+ AppModuleBasic: AppModuleBasic{
+ ac: accountKeeper.AddressCodec()
+},
+ accountKeeper: accountKeeper,
+ randGenAccountsFn: randGenAccountsFn,
+ legacySubspace: ss,
+}
+}
+
+// Name returns the auth module's name.
+func (AppModule)
+
+Name()
+
+string {
+ return types.ModuleName
+}
+
+// RegisterServices registers a GRPC query service to respond to the
+// module-specific GRPC queries.
+func (am AppModule)
+
+RegisterServices(cfg module.Configurator) {
+ types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.accountKeeper))
+
+types.RegisterQueryServer(cfg.QueryServer(), keeper.NewQueryServer(am.accountKeeper))
+ m := keeper.NewMigrator(am.accountKeeper, cfg.QueryServer(), am.legacySubspace)
+ if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil {
+ panic(fmt.Sprintf("failed to migrate x/%s from version 1 to 2: %v", types.ModuleName, err))
+}
+ if err := cfg.RegisterMigration(types.ModuleName, 2, m.Migrate2to3); err != nil {
+ panic(fmt.Sprintf("failed to migrate x/%s from version 2 to 3: %v", types.ModuleName, err))
+}
+ if err := cfg.RegisterMigration(types.ModuleName, 3, m.Migrate3to4); err != nil {
+ panic(fmt.Sprintf("failed to migrate x/%s from version 3 to 4: %v", types.ModuleName, err))
+}
+ if err := cfg.RegisterMigration(types.ModuleName, 4, m.Migrate4To5); err != nil {
+ panic(fmt.Sprintf("failed to migrate x/%s from version 4 to 5", types.ModuleName))
+}
+}
+
+// InitGenesis performs genesis initialization for the auth module. It returns
+// no validator updates.
+func (am AppModule)
+
+InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) []abci.ValidatorUpdate {
+ var genesisState types.GenesisState
+ cdc.MustUnmarshalJSON(data, &genesisState)
+
+am.accountKeeper.InitGenesis(ctx, genesisState)
+
+return []abci.ValidatorUpdate{
+}
+}
+
+// ExportGenesis returns the exported genesis state as raw bytes for the auth
+// module.
+func (am AppModule)
+
+ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec)
+
+json.RawMessage {
+ gs := am.accountKeeper.ExportGenesis(ctx)
+
+return cdc.MustMarshalJSON(gs)
+}
+
+// ConsensusVersion implements AppModule/ConsensusVersion.
+func (AppModule)
+
+ConsensusVersion()
+
+uint64 {
+ return ConsensusVersion
+}
+
+// AppModuleSimulation functions
+
+// GenerateGenesisState creates a randomized GenState of the auth module
+func (am AppModule)
+
+GenerateGenesisState(simState *module.SimulationState) {
+ simulation.RandomizedGenState(simState, am.randGenAccountsFn)
+}
+
+// ProposalMsgs returns msgs used for governance proposals for simulations.
+func (AppModule)
+
+ProposalMsgs(simState module.SimulationState) []simtypes.WeightedProposalMsg {
+ return simulation.ProposalMsgs()
+}
+
+// RegisterStoreDecoder registers a decoder for auth module's types
+func (am AppModule)
+
+RegisterStoreDecoder(sdr simtypes.StoreDecoderRegistry) {
+ sdr[types.StoreKey] = simtypes.NewStoreDecoderFuncFromCollectionsSchema(am.accountKeeper.Schema)
+}
+
+// WeightedOperations doesn't return any auth module operation.
+func (AppModule)
+
+WeightedOperations(_ module.SimulationState) []simtypes.WeightedOperation {
+ return nil
+}
+
+//
+// App Wiring Setup
+//
+
+func init() {
+ appmodule.Register(&modulev1.Module{
+},
+ appmodule.Provide(ProvideAddressCodec),
+ appmodule.Provide(ProvideModule),
+ )
+}
+
+// ProvideAddressCodec provides an address.Codec to the container for any
+// modules that want to do address string <> bytes conversion.
+func ProvideAddressCodec(config *modulev1.Module)
+
+address.Codec {
+ return authcodec.NewBech32Codec(config.Bech32Prefix)
+}
+
+type ModuleInputs struct {
+ depinject.In
+
+ Config *modulev1.Module
+ StoreService store.KVStoreService
+ Cdc codec.Codec
+
+ RandomGenesisAccountsFn types.RandomGenesisAccountsFn `optional:"true"`
+ AccountI func()
+
+sdk.AccountI `optional:"true"`
+
+ // LegacySubspace is used solely for migration of x/params managed parameters
+ LegacySubspace exported.Subspace `optional:"true"`
+}
+
+type ModuleOutputs struct {
+ depinject.Out
+
+ AccountKeeper keeper.AccountKeeper
+ Module appmodule.AppModule
+}
+
+func ProvideModule(in ModuleInputs)
+
+ModuleOutputs {
+ maccPerms := map[string][]string{
+}
+ for _, permission := range in.Config.ModuleAccountPermissions {
+ maccPerms[permission.Account] = permission.Permissions
+}
+
+ // default to governance authority if not provided
+ authority := types.NewModuleAddress(GovModuleName)
+ if in.Config.Authority != "" {
+ authority = types.NewModuleAddressOrBech32Address(in.Config.Authority)
+}
+ if in.RandomGenesisAccountsFn == nil {
+ in.RandomGenesisAccountsFn = simulation.RandomGenesisAccounts
+}
+ if in.AccountI == nil {
+ in.AccountI = types.ProtoBaseAccount
+}
+ k := keeper.NewAccountKeeper(in.Cdc, in.StoreService, in.AccountI, maccPerms, in.Config.Bech32Prefix, authority.String())
+ m := NewAppModule(in.Cdc, k, in.RandomGenesisAccountsFn, in.LegacySubspace)
+
+return ModuleOutputs{
+ AccountKeeper: k,
+ Module: m
+}
+}
+```
+
+### `ValidateGenesis`
+
+The `ValidateGenesis(data GenesisState)` method is called to verify that the provided `genesisState` is correct. It should perform validity checks on each of the parameters listed in `GenesisState`. See an example from the `auth` module:
+
+```go expandable
+package types
+
+import (
+
+ "encoding/json"
+ "fmt"
+ "sort"
+
+ proto "github.com/cosmos/gogoproto/proto"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+)
+
+var _ types.UnpackInterfacesMessage = GenesisState{
+}
+
+// RandomGenesisAccountsFn defines the function required to generate custom account types
+type RandomGenesisAccountsFn func(simState *module.SimulationState)
+
+GenesisAccounts
+
+// NewGenesisState - Create a new genesis state
+func NewGenesisState(params Params, accounts GenesisAccounts) *GenesisState {
+ genAccounts, err := PackAccounts(accounts)
+ if err != nil {
+ panic(err)
+}
+
+return &GenesisState{
+ Params: params,
+ Accounts: genAccounts,
+}
+}
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (g GenesisState)
+
+UnpackInterfaces(unpacker types.AnyUnpacker)
+
+error {
+ for _, any := range g.Accounts {
+ var account GenesisAccount
+ err := unpacker.UnpackAny(any, &account)
+ if err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+// DefaultGenesisState - Return a default genesis state
+func DefaultGenesisState() *GenesisState {
+ return NewGenesisState(DefaultParams(), GenesisAccounts{
+})
+}
+
+// GetGenesisStateFromAppState returns x/auth GenesisState given raw application
+// genesis state.
+func GetGenesisStateFromAppState(cdc codec.Codec, appState map[string]json.RawMessage)
+
+GenesisState {
+ var genesisState GenesisState
+ if appState[ModuleName] != nil {
+ cdc.MustUnmarshalJSON(appState[ModuleName], &genesisState)
+}
+
+return genesisState
+}
+
+// ValidateGenesis performs basic validation of auth genesis data returning an
+// error for any failed validation criteria.
+func ValidateGenesis(data GenesisState)
+
+error {
+ if err := data.Params.Validate(); err != nil {
+ return err
+}
+
+genAccs, err := UnpackAccounts(data.Accounts)
+ if err != nil {
+ return err
+}
+
+return ValidateGenAccounts(genAccs)
+}
+
+// SanitizeGenesisAccounts sorts accounts and coin sets.
+func SanitizeGenesisAccounts(genAccs GenesisAccounts)
+
+GenesisAccounts {
+ // Make sure there aren't any duplicated account numbers by fixing the duplicates with the lowest unused values.
+ // seenAccNum = easy lookup for used account numbers.
+ seenAccNum := map[uint64]bool{
+}
+ // dupAccNum = a map of account number to accounts with duplicate account numbers (excluding the 1st one seen).
+ dupAccNum := map[uint64]GenesisAccounts{
+}
+ for _, acc := range genAccs {
+ num := acc.GetAccountNumber()
+ if !seenAccNum[num] {
+ seenAccNum[num] = true
+}
+
+else {
+ dupAccNum[num] = append(dupAccNum[num], acc)
+}
+
+}
+
+ // dupAccNums a sorted list of the account numbers with duplicates.
+ var dupAccNums []uint64
+ for num := range dupAccNum {
+ dupAccNums = append(dupAccNums, num)
+}
+
+sort.Slice(dupAccNums, func(i, j int)
+
+bool {
+ return dupAccNums[i] < dupAccNums[j]
+})
+
+ // Change the account number of the duplicated ones to the first unused value.
+ globalNum := uint64(0)
+ for _, dupNum := range dupAccNums {
+ accs := dupAccNum[dupNum]
+ for _, acc := range accs {
+ for seenAccNum[globalNum] {
+ globalNum++
+}
+ if err := acc.SetAccountNumber(globalNum); err != nil {
+ panic(err)
+}
+
+seenAccNum[globalNum] = true
+}
+
+}
+
+ // Then sort them all by account number.
+ sort.Slice(genAccs, func(i, j int)
+
+bool {
+ return genAccs[i].GetAccountNumber() < genAccs[j].GetAccountNumber()
+})
+
+return genAccs
+}
+
+// ValidateGenAccounts validates an array of GenesisAccounts and checks for duplicates
+func ValidateGenAccounts(accounts GenesisAccounts)
+
+error {
+ addrMap := make(map[string]bool, len(accounts))
+ for _, acc := range accounts {
+ // check for duplicated accounts
+ addrStr := acc.GetAddress().String()
+ if _, ok := addrMap[addrStr]; ok {
+ return fmt.Errorf("duplicate account found in genesis state; address: %s", addrStr)
+}
+
+addrMap[addrStr] = true
+
+ // check account specific validation
+ if err := acc.Validate(); err != nil {
+ return fmt.Errorf("invalid account found in genesis state; address: %s, error: %s", addrStr, err.Error())
+}
+
+}
+
+return nil
+}
+
+// GenesisAccountIterator implements genesis account iteration.
+type GenesisAccountIterator struct{
+}
+
+// IterateGenesisAccounts iterates over all the genesis accounts found in
+// appGenesis and invokes a callback on each genesis account. If any call
+// returns true, iteration stops.
+func (GenesisAccountIterator)
+
+IterateGenesisAccounts(
+ cdc codec.Codec, appGenesis map[string]json.RawMessage, cb func(sdk.AccountI) (stop bool),
+) {
+ for _, genAcc := range GetGenesisStateFromAppState(cdc, appGenesis).Accounts {
+ acc, ok := genAcc.GetCachedValue().(sdk.AccountI)
+ if !ok {
+ panic("expected account")
+}
+ if cb(acc) {
+ break
+}
+
+}
+}
+
+// PackAccounts converts GenesisAccounts to Any slice
+func PackAccounts(accounts GenesisAccounts) ([]*types.Any, error) {
+ accountsAny := make([]*types.Any, len(accounts))
+ for i, acc := range accounts {
+ msg, ok := acc.(proto.Message)
+ if !ok {
+ return nil, fmt.Errorf("cannot proto marshal %T", acc)
+}
+
+any, err := types.NewAnyWithValue(msg)
+ if err != nil {
+ return nil, err
+}
+
+accountsAny[i] = any
+}
+
+return accountsAny, nil
+}
+
+// UnpackAccounts converts Any slice to GenesisAccounts
+func UnpackAccounts(accountsAny []*types.Any) (GenesisAccounts, error) {
+ accounts := make(GenesisAccounts, len(accountsAny))
+ for i, any := range accountsAny {
+ acc, ok := any.GetCachedValue().(GenesisAccount)
+ if !ok {
+ return nil, fmt.Errorf("expected genesis account")
+}
+
+accounts[i] = acc
+}
+
+return accounts, nil
+}
+```
+
+## Other Genesis Methods
+
+Other than the methods related directly to `GenesisState`, module developers are expected to implement two other methods as part of the [`AppModuleGenesis` interface](/sdk/v0.53/build/building-modules/module-manager#appmodulegenesis) (only if the module needs to initialize a subset of state in genesis). These methods are [`InitGenesis`](#initgenesis) and [`ExportGenesis`](#exportgenesis).
+
+### `InitGenesis`
+
+The `InitGenesis` method is executed during [`InitChain`](/sdk/v0.53/learn/advanced/baseapp#initchain) when the application is first started. Given a `GenesisState`, it initializes the subset of the state managed by the module by using the module's [`keeper`](/sdk/v0.53/build/building-modules/keeper) setter function on each parameter within the `GenesisState`.
+
+The [module manager](/sdk/v0.53/build/building-modules/module-manager) of the application is responsible for calling the `InitGenesis` method of each of the application's modules in order. This order is set by the application developer via the manager's `SetOrderInitGenesis` method, which is called in the [application's constructor function](/sdk/v0.53/learn/beginner/app-anatomy#constructor-function).
+
+See an example of `InitGenesis` from the `auth` module:
+
+```go expandable
+package keeper
+
+import (
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/auth/types"
+)
+
+// InitGenesis - Init store state from genesis data
+//
+// CONTRACT: old coins from the FeeCollectionKeeper need to be transferred through
+// a genesis port script to the new fee collector account
+func (ak AccountKeeper)
+
+InitGenesis(ctx sdk.Context, data types.GenesisState) {
+ if err := ak.Params.Set(ctx, data.Params); err != nil {
+ panic(err)
+}
+
+accounts, err := types.UnpackAccounts(data.Accounts)
+ if err != nil {
+ panic(err)
+}
+
+accounts = types.SanitizeGenesisAccounts(accounts)
+
+ // Set the accounts and make sure the global account number matches the largest account number (even if zero).
+ var lastAccNum *uint64
+ for _, acc := range accounts {
+ accNum := acc.GetAccountNumber()
+ for lastAccNum == nil || *lastAccNum < accNum {
+ n := ak.NextAccountNumber(ctx)
+
+lastAccNum = &n
+}
+
+ak.SetAccount(ctx, acc)
+}
+
+ak.GetModuleAccount(ctx, types.FeeCollectorName)
+}
+
+// ExportGenesis returns a GenesisState for a given context and keeper
+func (ak AccountKeeper)
+
+ExportGenesis(ctx sdk.Context) *types.GenesisState {
+ params := ak.GetParams(ctx)
+
+var genAccounts types.GenesisAccounts
+ ak.IterateAccounts(ctx, func(account sdk.AccountI)
+
+bool {
+ genAccount := account.(types.GenesisAccount)
+
+genAccounts = append(genAccounts, genAccount)
+
+return false
+})
+
+return types.NewGenesisState(params, genAccounts)
+}
+```
+
+### `ExportGenesis`
+
+The `ExportGenesis` method is executed whenever an export of the state is made. It takes the latest known version of the subset of the state managed by the module and creates a new `GenesisState` out of it. This is mainly used when the chain needs to be upgraded via a hard fork.
+
+See an example of `ExportGenesis` from the `auth` module.
+
+```go expandable
+package keeper
+
+import (
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/auth/types"
+)
+
+// InitGenesis - Init store state from genesis data
+//
+// CONTRACT: old coins from the FeeCollectionKeeper need to be transferred through
+// a genesis port script to the new fee collector account
+func (ak AccountKeeper)
+
+InitGenesis(ctx sdk.Context, data types.GenesisState) {
+ if err := ak.Params.Set(ctx, data.Params); err != nil {
+ panic(err)
+}
+
+accounts, err := types.UnpackAccounts(data.Accounts)
+ if err != nil {
+ panic(err)
+}
+
+accounts = types.SanitizeGenesisAccounts(accounts)
+
+ // Set the accounts and make sure the global account number matches the largest account number (even if zero).
+ var lastAccNum *uint64
+ for _, acc := range accounts {
+ accNum := acc.GetAccountNumber()
+ for lastAccNum == nil || *lastAccNum < accNum {
+ n := ak.NextAccountNumber(ctx)
+
+lastAccNum = &n
+}
+
+ak.SetAccount(ctx, acc)
+}
+
+ak.GetModuleAccount(ctx, types.FeeCollectorName)
+}
+
+// ExportGenesis returns a GenesisState for a given context and keeper
+func (ak AccountKeeper)
+
+ExportGenesis(ctx sdk.Context) *types.GenesisState {
+ params := ak.GetParams(ctx)
+
+var genAccounts types.GenesisAccounts
+ ak.IterateAccounts(ctx, func(account sdk.AccountI)
+
+bool {
+ genAccount := account.(types.GenesisAccount)
+
+genAccounts = append(genAccounts, genAccount)
+
+return false
+})
+
+return types.NewGenesisState(params, genAccounts)
+}
+```
+
+### GenesisTxHandler
+
+`GenesisTxHandler` is a way for modules to submit state transitions prior to the first block. This is used by `x/genutil` to submit the genesis transactions for the validators to be added to staking.
+
+```go
+package genesis
+
+// TxHandler is an interface that modules can implement to provide genesis state transitions
+type TxHandler interface {
+ ExecuteGenesisTx([]byte)
+
+error
+}
+```
diff --git a/sdk/next/build/building-modules/intro.mdx b/sdk/next/build/building-modules/intro.mdx
new file mode 100644
index 000000000..074248f28
--- /dev/null
+++ b/sdk/next/build/building-modules/intro.mdx
@@ -0,0 +1,304 @@
+---
+title: Introduction to Cosmos SDK Modules
+---
+
+
+**Synopsis**
+Modules define most of the logic of Cosmos SDK applications. Developers compose modules together using the Cosmos SDK to build their custom application-specific blockchains. This document outlines the basic concepts behind SDK modules and how to approach module management.
+
+
+
+**Prerequisite Readings**
+
+* [Anatomy of a Cosmos SDK application](/sdk/v0.53/learn/beginner/app-anatomy)
+* [Lifecycle of a Cosmos SDK transaction](/sdk/v0.53/learn/beginner/tx-lifecycle)
+
+
+
+## Role of Modules in a Cosmos SDK Application
+
+The Cosmos SDK can be thought of as the Ruby-on-Rails of blockchain development. It comes with a core that provides the basic functionalities every blockchain application needs, like a [boilerplate implementation of the ABCI](/sdk/v0.53/learn/advanced/baseapp) to communicate with the underlying consensus engine, a [`multistore`](/sdk/v0.53/learn/advanced/store#multistore) to persist state, a [server](/sdk/v0.53/learn/advanced/node) to form a full-node and [interfaces](/sdk/v0.53/build/building-modules/module-interfaces) to handle queries.
+
+On top of this core, the Cosmos SDK enables developers to build modules that implement the business logic of their application. In other words, SDK modules implement the bulk of the logic of applications, while the core does the wiring and enables modules to be composed together. The end goal is to build a robust ecosystem of open-source Cosmos SDK modules, making it increasingly easier to build complex blockchain applications.
+
+Cosmos SDK modules can be seen as little state-machines within the state-machine. They generally define a subset of the state using one or more `KVStore`s in the [main multistore](/sdk/v0.53/learn/advanced/store), as well as a subset of [message types](/sdk/v0.53/build/building-modules/messages-and-queries#messages). These messages are routed by one of the main components of Cosmos SDK core, [`BaseApp`](/sdk/v0.53/learn/advanced/baseapp), to a module Protobuf [`Msg` service](/sdk/v0.53/build/building-modules/msg-services) that defines them.
+
+```mermaid expandable
+flowchart TD
+ A[Transaction relayed from the full-node's consensus engine to the node's application via DeliverTx]
+ A --> B[APPLICATION]
+ B --> C["Using baseapp's methods: Decode the Tx, extract and route the message(s)"]
+ C --> D[Message routed to the correct module to be processed]
+ D --> E[AUTH MODULE]
+ D --> F[BANK MODULE]
+ D --> G[STAKING MODULE]
+ D --> H[GOV MODULE]
+ H --> I[Handles message, Updates state]
+ E --> I
+ F --> I
+ G --> I
+ I --> J["Return result to the underlying consensus engine (e.g. CometBFT) (0=Ok, 1=Err)"]
+```
+
+As a result of this architecture, building a Cosmos SDK application usually revolves around writing modules to implement the specialized logic of the application and composing them with existing modules to complete the application. Developers will generally work on modules that implement logic needed for their specific use case that do not exist yet, and will use existing modules for more generic functionalities like staking, accounts, or token management.
+
+### Modules as super-users
+
+Modules have the ability to perform actions that are not available to regular users. This is because modules are given sudo permissions by the state machine. Modules can reject another module's desire to execute a function, but this logic must be explicit. Examples of this can be seen when modules create functions to modify parameters:
+
+```go expandable
+package keeper
+
+import (
+
+ "context"
+ "github.com/hashicorp/go-metrics"
+
+ errorsmod "cosmossdk.io/errors"
+ "cosmossdk.io/x/bank/types"
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+type msgServer struct {
+ Keeper
+}
+
+var _ types.MsgServer = msgServer{
+}
+
+// NewMsgServerImpl returns an implementation of the bank MsgServer interface
+// for the provided Keeper.
+func NewMsgServerImpl(keeper Keeper)
+
+types.MsgServer {
+ return &msgServer{
+ Keeper: keeper
+}
+}
+
+func (k msgServer)
+
+Send(ctx context.Context, msg *types.MsgSend) (*types.MsgSendResponse, error) {
+ var (
+ from, to []byte
+ err error
+ )
+ if base, ok := k.Keeper.(BaseKeeper); ok {
+ from, err = base.ak.AddressCodec().StringToBytes(msg.FromAddress)
+ if err != nil {
+ return nil, sdkerrors.ErrInvalidAddress.Wrapf("invalid from address: %s", err)
+}
+
+to, err = base.ak.AddressCodec().StringToBytes(msg.ToAddress)
+ if err != nil {
+ return nil, sdkerrors.ErrInvalidAddress.Wrapf("invalid to address: %s", err)
+}
+
+}
+
+else {
+ return nil, sdkerrors.ErrInvalidRequest.Wrapf("invalid keeper type: %T", k.Keeper)
+}
+ if !msg.Amount.IsValid() {
+ return nil, errorsmod.Wrap(sdkerrors.ErrInvalidCoins, msg.Amount.String())
+}
+ if !msg.Amount.IsAllPositive() {
+ return nil, errorsmod.Wrap(sdkerrors.ErrInvalidCoins, msg.Amount.String())
+}
+ if err := k.IsSendEnabledCoins(ctx, msg.Amount...); err != nil {
+ return nil, err
+}
+ if k.BlockedAddr(to) {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrUnauthorized, "%s is not allowed to receive funds", msg.ToAddress)
+}
+
+err = k.SendCoins(ctx, from, to, msg.Amount)
+ if err != nil {
+ return nil, err
+}
+
+defer func() {
+ for _, a := range msg.Amount {
+ if a.Amount.IsInt64() {
+ telemetry.SetGaugeWithLabels(
+ []string{"tx", "msg", "send"
+},
+ float32(a.Amount.Int64()),
+ []metrics.Label{
+ telemetry.NewLabel("denom", a.Denom)
+},
+ )
+}
+
+}
+
+}()
+
+return &types.MsgSendResponse{
+}, nil
+}
+
+func (k msgServer)
+
+MultiSend(ctx context.Context, msg *types.MsgMultiSend) (*types.MsgMultiSendResponse, error) {
+ if len(msg.Inputs) == 0 {
+ return nil, types.ErrNoInputs
+}
+ if len(msg.Inputs) != 1 {
+ return nil, types.ErrMultipleSenders
+}
+ if len(msg.Outputs) == 0 {
+ return nil, types.ErrNoOutputs
+}
+ if err := types.ValidateInputOutputs(msg.Inputs[0], msg.Outputs); err != nil {
+ return nil, err
+}
+
+ // NOTE: totalIn == totalOut should already have been checked
+ for _, in := range msg.Inputs {
+ if err := k.IsSendEnabledCoins(ctx, in.Coins...); err != nil {
+ return nil, err
+}
+
+}
+ for _, out := range msg.Outputs {
+ if base, ok := k.Keeper.(BaseKeeper); ok {
+ accAddr, err := base.ak.AddressCodec().StringToBytes(out.Address)
+ if err != nil {
+ return nil, err
+}
+ if k.BlockedAddr(accAddr) {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrUnauthorized, "%s is not allowed to receive funds", out.Address)
+}
+
+}
+
+else {
+ return nil, sdkerrors.ErrInvalidRequest.Wrapf("invalid keeper type: %T", k.Keeper)
+}
+
+}
+ err := k.InputOutputCoins(ctx, msg.Inputs[0], msg.Outputs)
+ if err != nil {
+ return nil, err
+}
+
+return &types.MsgMultiSendResponse{
+}, nil
+}
+
+func (k msgServer)
+
+UpdateParams(ctx context.Context, req *types.MsgUpdateParams) (*types.MsgUpdateParamsResponse, error) {
+ if k.GetAuthority() != req.Authority {
+ return nil, errorsmod.Wrapf(types.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.GetAuthority(), req.Authority)
+}
+ if err := req.Params.Validate(); err != nil {
+ return nil, err
+}
+ if err := k.SetParams(ctx, req.Params); err != nil {
+ return nil, err
+}
+
+return &types.MsgUpdateParamsResponse{
+}, nil
+}
+
+func (k msgServer)
+
+SetSendEnabled(ctx context.Context, msg *types.MsgSetSendEnabled) (*types.MsgSetSendEnabledResponse, error) {
+ if k.GetAuthority() != msg.Authority {
+ return nil, errorsmod.Wrapf(types.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.GetAuthority(), msg.Authority)
+}
+ seen := map[string]bool{
+}
+ for _, se := range msg.SendEnabled {
+ if _, alreadySeen := seen[se.Denom]; alreadySeen {
+ return nil, sdkerrors.ErrInvalidRequest.Wrapf("duplicate denom entries found for %q", se.Denom)
+}
+
+seen[se.Denom] = true
+ if err := se.Validate(); err != nil {
+ return nil, sdkerrors.ErrInvalidRequest.Wrapf("invalid SendEnabled denom %q: %s", se.Denom, err)
+}
+
+}
+ for _, denom := range msg.UseDefaultFor {
+ if err := sdk.ValidateDenom(denom); err != nil {
+ return nil, sdkerrors.ErrInvalidRequest.Wrapf("invalid UseDefaultFor denom %q: %s", denom, err)
+}
+
+}
+ if len(msg.SendEnabled) > 0 {
+ k.SetAllSendEnabled(ctx, msg.SendEnabled)
+}
+ if len(msg.UseDefaultFor) > 0 {
+ k.DeleteSendEnabled(ctx, msg.UseDefaultFor...)
+}
+
+return &types.MsgSetSendEnabledResponse{
+}, nil
+}
+
+func (k msgServer)
+
+Burn(goCtx context.Context, msg *types.MsgBurn) (*types.MsgBurnResponse, error) {
+ var (
+ from []byte
+ err error
+ )
+
+var coins sdk.Coins
+ for _, coin := range msg.Amount {
+ coins = coins.Add(sdk.NewCoin(coin.Denom, coin.Amount))
+}
+ if base, ok := k.Keeper.(BaseKeeper); ok {
+ from, err = base.ak.AddressCodec().StringToBytes(msg.FromAddress)
+ if err != nil {
+ return nil, sdkerrors.ErrInvalidAddress.Wrapf("invalid from address: %s", err)
+}
+
+}
+
+else {
+ return nil, sdkerrors.ErrInvalidRequest.Wrapf("invalid keeper type: %T", k.Keeper)
+}
+ if !coins.IsValid() {
+ return nil, errorsmod.Wrap(sdkerrors.ErrInvalidCoins, coins.String())
+}
+ if !coins.IsAllPositive() {
+ return nil, errorsmod.Wrap(sdkerrors.ErrInvalidCoins, coins.String())
+}
+
+err = k.BurnCoins(goCtx, from, coins)
+ if err != nil {
+ return nil, err
+}
+
+return &types.MsgBurnResponse{
+}, nil
+}
+```
+
+## How to Approach Building Modules as a Developer
+
+While there are no definitive guidelines for writing modules, here are some important design principles developers should keep in mind when building them:
+
+* **Composability**: Cosmos SDK applications are almost always composed of multiple modules. This means developers need to carefully consider the integration of their module not only with the core of the Cosmos SDK, but also with other modules. The former is achieved by following standard design patterns outlined [here](#main-components-of-cosmos-sdk-modules), while the latter is achieved by properly exposing the store(s) of the module via the [`keeper`](/sdk/v0.53/build/building-modules/keeper).
+* **Specialization**: A direct consequence of the **composability** feature is that modules should be **specialized**. Developers should carefully establish the scope of their module and not batch multiple functionalities into the same module. This separation of concerns enables modules to be re-used in other projects and improves the upgradability of the application. **Specialization** also plays an important role in the [object-capabilities model](/sdk/v0.53/learn/advanced/ocap) of the Cosmos SDK.
+* **Capabilities**: Most modules need to read and/or write to the store(s) of other modules. However, in an open-source environment, it is possible for some modules to be malicious. That is why module developers need to carefully think not only about how their module interacts with other modules, but also about how to give access to the module's store(s). The Cosmos SDK takes a capabilities-oriented approach to inter-module security. This means that each store defined by a module is accessed by a `key`, which is held by the module's [`keeper`](/sdk/v0.53/build/building-modules/keeper). This `keeper` defines how to access the store(s) and under what conditions. Access to the module's store(s) is done by passing a reference to the module's `keeper`.
+
+## Main Components of Cosmos SDK Modules
+
+Modules are by convention defined in the `./x/` subfolder (e.g. the `bank` module will be defined in the `./x/bank` folder). They generally share the same core components:
+
+* A [`keeper`](/sdk/v0.53/build/building-modules/keeper), used to access the module's store(s) and update the state.
+* A [`Msg` service](/sdk/v0.53/build/building-modules/messages-and-queries#messages), used to process messages when they are routed to the module by [`BaseApp`](/sdk/v0.53/learn/advanced/baseapp#message-routing) and trigger state-transitions.
+* A [query service](/sdk/v0.53/build/building-modules/query-services), used to process user queries when they are routed to the module by [`BaseApp`](/sdk/v0.53/learn/advanced/baseapp#query-routing).
+* Interfaces, for end users to query the subset of the state defined by the module and create `message`s of the custom types defined in the module.
+
+In addition to these components, modules implement the `AppModule` interface in order to be managed by the [`module manager`](/sdk/v0.53/build/building-modules/module-manager).
+
+Please refer to the [structure document](/sdk/v0.53/build/building-modules/structure) to learn about the recommended structure of a module's directory.
diff --git a/sdk/next/build/building-modules/invariants.mdx b/sdk/next/build/building-modules/invariants.mdx
new file mode 100644
index 000000000..05e4aa7ad
--- /dev/null
+++ b/sdk/next/build/building-modules/invariants.mdx
@@ -0,0 +1,530 @@
+---
+title: Invariants
+---
+{/* force preview rebuild: 2025-09-30 */}
+
+
+**Synopsis**
+An invariant is a property of the application that should always be true. In the context of the Cosmos SDK, an `Invariant` is a function that checks for a particular invariant. These functions are useful to detect bugs early on and act upon them to limit their potential consequences (e.g. by halting the chain). They are also useful in the development process of the application to detect bugs via simulations.
+
+
+
+**Prerequisite Readings**
+
+* [Keepers](/sdk/v0.53/build/building-modules/keeper)
+
+
+
+## Implementing `Invariant`s
+
+An `Invariant` is a function that checks for a particular invariant within a module. Module `Invariant`s must follow the `Invariant` type:
+
+```go expandable
+package types
+
+import "fmt"
+
+// An Invariant is a function which tests a particular invariant.
+// The invariant returns a descriptive message about what happened
+// and a boolean indicating whether the invariant has been broken.
+// The simulator will then halt and print the logs.
+type Invariant func(ctx Context) (string, bool)
+
+// Invariants defines a group of invariants
+type Invariants []Invariant
+
+// expected interface for registering invariants
+type InvariantRegistry interface {
+ RegisterRoute(moduleName, route string, invar Invariant)
+}
+
+// FormatInvariant returns a standardized invariant message.
+func FormatInvariant(module, name, msg string)
+
+string {
+ return fmt.Sprintf("%s: %s invariant\n%s\n", module, name, msg)
+}
+```
+
+The `string` return value is the invariant message, which can be used when printing logs, and the `bool` return value is the actual result of the invariant check.
+
+In practice, each module implements `Invariant`s in a `keeper/invariants.go` file within the module's folder. The standard is to implement one `Invariant` function per logical grouping of invariants with the following model:
+
+```go
+// Example for an Invariant that checks balance-related invariants
+
+func BalanceInvariants(k Keeper)
+
+sdk.Invariant {
+ return func(ctx context.Context) (string, bool) {
+ // Implement checks for balance-related invariants
+}
+}
+```
+
+Additionally, module developers should generally implement an `AllInvariants` function that runs all the `Invariant`s functions of the module:
+
+```go expandable
+// AllInvariants runs all invariants of the module.
+// In this example, the module implements two Invariants: BalanceInvariants and DepositsInvariants
+
+func AllInvariants(k Keeper)
+
+sdk.Invariant {
+ return func(ctx context.Context) (string, bool) {
+ res, stop := BalanceInvariants(k)(ctx)
+ if stop {
+ return res, stop
+}
+
+return DepositsInvariant(k)(ctx)
+}
+}
+```
+
+Finally, module developers need to implement the `RegisterInvariants` method as part of the [`AppModule` interface](/sdk/v0.53/build/building-modules/module-manager#appmodule). Indeed, the `RegisterInvariants` method of the module, implemented in the `module/module.go` file, typically only defers the call to a `RegisterInvariants` method implemented in the `keeper/invariants.go` file. The `RegisterInvariants` method registers a route for each `Invariant` function in the [`InvariantRegistry`](#invariant-registry):
+
+```go expandable
+package keeper
+
+import (
+
+ "bytes"
+ "fmt"
+ "cosmossdk.io/math"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/staking/types"
+)
+
+// RegisterInvariants registers all staking invariants
+func RegisterInvariants(ir sdk.InvariantRegistry, k *Keeper) {
+ ir.RegisterRoute(types.ModuleName, "module-accounts",
+ ModuleAccountInvariants(k))
+
+ir.RegisterRoute(types.ModuleName, "nonnegative-power",
+ NonNegativePowerInvariant(k))
+
+ir.RegisterRoute(types.ModuleName, "positive-delegation",
+ PositiveDelegationInvariant(k))
+
+ir.RegisterRoute(types.ModuleName, "delegator-shares",
+ DelegatorSharesInvariant(k))
+}
+
+// AllInvariants runs all invariants of the staking module.
+func AllInvariants(k *Keeper)
+
+sdk.Invariant {
+ return func(ctx sdk.Context) (string, bool) {
+ res, stop := ModuleAccountInvariants(k)(ctx)
+ if stop {
+ return res, stop
+}
+
+res, stop = NonNegativePowerInvariant(k)(ctx)
+ if stop {
+ return res, stop
+}
+
+res, stop = PositiveDelegationInvariant(k)(ctx)
+ if stop {
+ return res, stop
+}
+
+return DelegatorSharesInvariant(k)(ctx)
+}
+}
+
+// ModuleAccountInvariants checks that the bonded and notBonded ModuleAccounts pools
+// reflects the tokens actively bonded and not bonded
+func ModuleAccountInvariants(k *Keeper)
+
+sdk.Invariant {
+ return func(ctx sdk.Context) (string, bool) {
+ bonded := math.ZeroInt()
+ notBonded := math.ZeroInt()
+ bondedPool := k.GetBondedPool(ctx)
+ notBondedPool := k.GetNotBondedPool(ctx)
+ bondDenom := k.BondDenom(ctx)
+
+k.IterateValidators(ctx, func(_ int64, validator types.ValidatorI)
+
+bool {
+ switch validator.GetStatus() {
+ case types.Bonded:
+ bonded = bonded.Add(validator.GetTokens())
+ case types.Unbonding, types.Unbonded:
+ notBonded = notBonded.Add(validator.GetTokens())
+
+default:
+ panic("invalid validator status")
+}
+
+return false
+})
+
+k.IterateUnbondingDelegations(ctx, func(_ int64, ubd types.UnbondingDelegation)
+
+bool {
+ for _, entry := range ubd.Entries {
+ notBonded = notBonded.Add(entry.Balance)
+}
+
+return false
+})
+ poolBonded := k.bankKeeper.GetBalance(ctx, bondedPool.GetAddress(), bondDenom)
+ poolNotBonded := k.bankKeeper.GetBalance(ctx, notBondedPool.GetAddress(), bondDenom)
+ broken := !poolBonded.Amount.Equal(bonded) || !poolNotBonded.Amount.Equal(notBonded)
+
+ // Bonded tokens should equal sum of tokens with bonded validators
+ // Not-bonded tokens should equal unbonding delegations plus tokens on unbonded validators
+ return sdk.FormatInvariant(types.ModuleName, "bonded and not bonded module account coins", fmt.Sprintf(
+ "\tPool's bonded tokens: %v\n"+
+ "\tsum of bonded tokens: %v\n"+
+ "not bonded token invariance:\n"+
+ "\tPool's not bonded tokens: %v\n"+
+ "\tsum of not bonded tokens: %v\n"+
+ "module accounts total (bonded + not bonded):\n"+
+ "\tModule Accounts' tokens: %v\n"+
+ "\tsum tokens: %v\n",
+ poolBonded, bonded, poolNotBonded, notBonded, poolBonded.Add(poolNotBonded), bonded.Add(notBonded))), broken
+}
+}
+
+// NonNegativePowerInvariant checks that all stored validators have >= 0 power.
+func NonNegativePowerInvariant(k *Keeper)
+
+sdk.Invariant {
+ return func(ctx sdk.Context) (string, bool) {
+ var (
+ msg string
+ broken bool
+ )
+ iterator := k.ValidatorsPowerStoreIterator(ctx)
+ for ; iterator.Valid(); iterator.Next() {
+ validator, found := k.GetValidator(ctx, iterator.Value())
+ if !found {
+ panic(fmt.Sprintf("validator record not found for address: %X\n", iterator.Value()))
+}
+ powerKey := types.GetValidatorsByPowerIndexKey(validator, k.PowerReduction(ctx))
+ if !bytes.Equal(iterator.Key(), powerKey) {
+ broken = true
+ msg += fmt.Sprintf("power store invariance:\n\tvalidator.Power: %v"+
+ "\n\tkey should be: %v\n\tkey in store: %v\n",
+ validator.GetConsensusPower(k.PowerReduction(ctx)), powerKey, iterator.Key())
+}
+ if validator.Tokens.IsNegative() {
+ broken = true
+ msg += fmt.Sprintf("\tnegative tokens for validator: %v\n", validator)
+}
+
+}
+
+iterator.Close()
+
+return sdk.FormatInvariant(types.ModuleName, "nonnegative power", fmt.Sprintf("found invalid validator powers\n%s", msg)), broken
+}
+}
+
+// PositiveDelegationInvariant checks that all stored delegations have > 0 shares.
+func PositiveDelegationInvariant(k *Keeper)
+
+sdk.Invariant {
+ return func(ctx sdk.Context) (string, bool) {
+ var (
+ msg string
+ count int
+ )
+ delegations := k.GetAllDelegations(ctx)
+ for _, delegation := range delegations {
+ if delegation.Shares.IsNegative() {
+ count++
+ msg += fmt.Sprintf("\tdelegation with negative shares: %+v\n", delegation)
+}
+ if delegation.Shares.IsZero() {
+ count++
+ msg += fmt.Sprintf("\tdelegation with zero shares: %+v\n", delegation)
+}
+
+}
+ broken := count != 0
+
+ return sdk.FormatInvariant(types.ModuleName, "positive delegations", fmt.Sprintf(
+ "%d invalid delegations found\n%s", count, msg)), broken
+}
+}
+
+// DelegatorSharesInvariant checks whether all the delegator shares which persist
+// in the delegator object add up to the correct total delegator shares
+// amount stored in each validator.
+func DelegatorSharesInvariant(k *Keeper)
+
+sdk.Invariant {
+ return func(ctx sdk.Context) (string, bool) {
+ var (
+ msg string
+ broken bool
+ )
+ validators := k.GetAllValidators(ctx)
+ validatorsDelegationShares := map[string]math.LegacyDec{
+}
+
+ // initialize a map: validator -> its delegation shares
+ for _, validator := range validators {
+ validatorsDelegationShares[validator.GetOperator().String()] = math.LegacyZeroDec()
+}
+
+ // iterate through all the delegations to calculate the total delegation shares for each validator
+ delegations := k.GetAllDelegations(ctx)
+ for _, delegation := range delegations {
+ delegationValidatorAddr := delegation.GetValidatorAddr().String()
+ validatorDelegationShares := validatorsDelegationShares[delegationValidatorAddr]
+ validatorsDelegationShares[delegationValidatorAddr] = validatorDelegationShares.Add(delegation.Shares)
+}
+
+ // for each validator, check if its total delegation shares calculated from the step above equals to its expected delegation shares
+ for _, validator := range validators {
+ expValTotalDelShares := validator.GetDelegatorShares()
+ calculatedValTotalDelShares := validatorsDelegationShares[validator.GetOperator().String()]
+ if !calculatedValTotalDelShares.Equal(expValTotalDelShares) {
+ broken = true
+ msg += fmt.Sprintf("broken delegator shares invariance:\n"+
+ "\tvalidator.DelegatorShares: %v\n"+
+ "\tsum of Delegator.Shares: %v\n", expValTotalDelShares, calculatedValTotalDelShares)
+}
+
+}
+
+return sdk.FormatInvariant(types.ModuleName, "delegator shares", msg), broken
+}
+}
+```
+
+For more, see an example of [`Invariant`s implementation from the `staking` module](https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/x/staking/keeper/invariants.go).
+
+## Invariant Registry
+
+The `InvariantRegistry` is a registry where the `Invariant`s of all the modules of an application are registered. There is only one `InvariantRegistry` per **application**, meaning module developers need not implement their own `InvariantRegistry` when building a module. **All module developers need to do is to register their modules' invariants in the `InvariantRegistry`, as explained in the section above**. The rest of this section gives more information on the `InvariantRegistry` itself, and does not contain anything directly relevant to module developers.
+
+At its core, the `InvariantRegistry` is defined in the Cosmos SDK as an interface:
+
+```go expandable
+package types
+
+import "fmt"
+
+// An Invariant is a function which tests a particular invariant.
+// The invariant returns a descriptive message about what happened
+// and a boolean indicating whether the invariant has been broken.
+// The simulator will then halt and print the logs.
+type Invariant func(ctx Context) (string, bool)
+
+// Invariants defines a group of invariants
+type Invariants []Invariant
+
+// expected interface for registering invariants
+type InvariantRegistry interface {
+ RegisterRoute(moduleName, route string, invar Invariant)
+}
+
+// FormatInvariant returns a standardized invariant message.
+func FormatInvariant(module, name, msg string)
+
+string {
+ return fmt.Sprintf("%s: %s invariant\n%s\n", module, name, msg)
+}
+```
+
+Typically, this interface is implemented in the `keeper` of a specific module. The most used implementation of an `InvariantRegistry` can be found in the `crisis` module:
+
+```go expandable
+package keeper
+
+import (
+
+ "context"
+ "fmt"
+ "time"
+ "cosmossdk.io/collections"
+ "cosmossdk.io/core/address"
+ "cosmossdk.io/log"
+
+ storetypes "cosmossdk.io/core/store"
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/crisis/types"
+)
+
+// Keeper - crisis keeper
+type Keeper struct {
+ routes []types.InvarRoute
+ invCheckPeriod uint
+ storeService storetypes.KVStoreService
+ cdc codec.BinaryCodec
+
+ // the address capable of executing a MsgUpdateParams message. Typically, this
+ // should be the x/gov module account.
+ authority string
+
+ supplyKeeper types.SupplyKeeper
+
+ feeCollectorName string // name of the FeeCollector ModuleAccount
+
+ addressCodec address.Codec
+
+ Schema collections.Schema
+ ConstantFee collections.Item[sdk.Coin]
+}
+
+// NewKeeper creates a new Keeper object
+func NewKeeper(
+ cdc codec.BinaryCodec, storeService storetypes.KVStoreService, invCheckPeriod uint,
+ supplyKeeper types.SupplyKeeper, feeCollectorName, authority string, ac address.Codec,
+) *Keeper {
+ sb := collections.NewSchemaBuilder(storeService)
+ k := &Keeper{
+ storeService: storeService,
+ cdc: cdc,
+ routes: make([]types.InvarRoute, 0),
+ invCheckPeriod: invCheckPeriod,
+ supplyKeeper: supplyKeeper,
+ feeCollectorName: feeCollectorName,
+ authority: authority,
+ addressCodec: ac,
+ ConstantFee: collections.NewItem(sb, types.ConstantFeeKey, "constant_fee", codec.CollValue[sdk.Coin](cdc)),
+}
+
+schema, err := sb.Build()
+ if err != nil {
+ panic(err)
+}
+
+k.Schema = schema
+ return k
+}
+
+// GetAuthority returns the x/crisis module's authority.
+func (k *Keeper)
+
+GetAuthority()
+
+string {
+ return k.authority
+}
+
+// Logger returns a module-specific logger.
+func (k *Keeper)
+
+Logger(ctx context.Context)
+
+log.Logger {
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+
+return sdkCtx.Logger().With("module", "x/"+types.ModuleName)
+}
+
+// RegisterRoute register the routes for each of the invariants
+func (k *Keeper)
+
+RegisterRoute(moduleName, route string, invar sdk.Invariant) {
+ invarRoute := types.NewInvarRoute(moduleName, route, invar)
+
+k.routes = append(k.routes, invarRoute)
+}
+
+// Routes - return the keeper's invariant routes
+func (k *Keeper)
+
+Routes() []types.InvarRoute {
+ return k.routes
+}
+
+// Invariants returns a copy of all registered Crisis keeper invariants.
+func (k *Keeper)
+
+Invariants() []sdk.Invariant {
+ invars := make([]sdk.Invariant, len(k.routes))
+ for i, route := range k.routes {
+ invars[i] = route.Invar
+}
+
+return invars
+}
+
+// AssertInvariants asserts all registered invariants. If any invariant fails,
+// the method panics.
+func (k *Keeper)
+
+AssertInvariants(ctx sdk.Context) {
+ logger := k.Logger(ctx)
+ start := time.Now()
+ invarRoutes := k.Routes()
+ n := len(invarRoutes)
+ for i, ir := range invarRoutes {
+ logger.Info("asserting crisis invariants", "inv", fmt.Sprint(i+1, "/", n), "name", ir.FullRoute())
+
+invCtx, _ := ctx.CacheContext()
+ if res, stop := ir.Invar(invCtx); stop {
+ // TODO: Include app name as part of context to allow for this to be
+ // variable.
+ panic(fmt.Errorf("invariant broken: %s\n"+
+ "\tCRITICAL please submit the following transaction:\n"+
+ "\t\t tx crisis invariant-broken %s %s", res, ir.ModuleName, ir.Route))
+}
+
+}
+ diff := time.Since(start)
+
+logger.Info("asserted all invariants", "duration", diff, "height", ctx.BlockHeight())
+}
+
+// InvCheckPeriod returns the invariant checks period.
+func (k *Keeper)
+
+InvCheckPeriod()
+
+uint {
+ return k.invCheckPeriod
+}
+
+// SendCoinsFromAccountToFeeCollector transfers amt to the fee collector account.
+func (k *Keeper)
+
+SendCoinsFromAccountToFeeCollector(ctx context.Context, senderAddr sdk.AccAddress, amt sdk.Coins)
+
+error {
+ return k.supplyKeeper.SendCoinsFromAccountToModule(ctx, senderAddr, k.feeCollectorName, amt)
+}
+```
+
+The `InvariantRegistry` is therefore typically instantiated by instantiating the `keeper` of the `crisis` module in the [application's constructor function](/sdk/v0.53/learn/beginner/app-anatomy#constructor-function).
+
+`Invariant`s can be checked manually via [`message`s](/sdk/v0.53/build/building-modules/messages-and-queries), but most often they are checked automatically at the end of each block. Here is an example from the `crisis` module:
+
+```go expandable
+package crisis
+
+import (
+
+ "context"
+ "time"
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/crisis/keeper"
+ "github.com/cosmos/cosmos-sdk/x/crisis/types"
+)
+
+// check all registered invariants
+func EndBlocker(ctx context.Context, k keeper.Keeper) {
+ defer telemetry.ModuleMeasureSince(types.ModuleName, time.Now(), telemetry.MetricKeyEndBlocker)
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ if k.InvCheckPeriod() == 0 || sdkCtx.BlockHeight()%int64(k.InvCheckPeriod()) != 0 {
+ // skip running the invariant check
+ return
+}
+
+k.AssertInvariants(sdkCtx)
+}
+```
+
+In both cases, if one of the `Invariant`s returns false, the `InvariantRegistry` can trigger special logic (e.g. have the application panic and print the `Invariant`s message in the log).
diff --git a/sdk/next/build/building-modules/keeper.mdx b/sdk/next/build/building-modules/keeper.mdx
new file mode 100644
index 000000000..68e1681d2
--- /dev/null
+++ b/sdk/next/build/building-modules/keeper.mdx
@@ -0,0 +1,372 @@
+---
+title: Keepers
+---
+{/* force preview rebuild: 2025-09-30 */}
+
+
+**Synopsis**
+`Keeper`s refer to a Cosmos SDK abstraction whose role is to manage access to the subset of the state defined by various modules. `Keeper`s are module-specific, i.e. the subset of state defined by a module can only be accessed by a `keeper` defined in said module. If a module needs to access the subset of state defined by another module, a reference to the second module's internal `keeper` needs to be passed to the first one. This is done in `app.go` during the instantiation of module keepers.
+
+
+
+**Prerequisite Readings**
+
+* [Introduction to Cosmos SDK Modules](/sdk/v0.53/build/building-modules/intro)
+
+
+
+## Motivation
+
+The Cosmos SDK is a framework that makes it easy for developers to build complex decentralized applications from scratch, mainly by composing modules together. As the ecosystem of open-source modules for the Cosmos SDK expands, it will become increasingly likely that some of these modules contain vulnerabilities, as a result of the negligence or malice of their developer.
+
+The Cosmos SDK adopts an [object-capabilities-based approach](/sdk/v0.53/learn/advanced/ocap) to help developers better protect their application from unwanted inter-module interactions, and `keeper`s are at the core of this approach. A `keeper` can be considered quite literally to be the gatekeeper of a module's store(s). Each store (typically an [`IAVL` Store](/sdk/v0.53/learn/advanced/store#iavl-store)) defined within a module comes with a `storeKey`, which grants unlimited access to it. The module's `keeper` holds this `storeKey` (which should otherwise remain unexposed), and defines [methods](#implementing-methods) for reading and writing to the store(s).
+
+The core idea behind the object-capabilities approach is to only reveal what is necessary to get the work done. In practice, this means that instead of handling permissions of modules through access-control lists, module `keeper`s are passed a reference to the specific instance of the other modules' `keeper`s that they need to access (this is done in the [application's constructor function](/sdk/v0.53/learn/beginner/app-anatomy#constructor-function)). As a consequence, a module can only interact with the subset of state defined in another module via the methods exposed by the instance of the other module's `keeper`. This is a great way for developers to control the interactions that their own module can have with modules developed by external developers.
+
+## Type Definition
+
+`keeper`s are generally implemented in a `/keeper/keeper.go` file located in the module's folder. By convention, the type `keeper` of a module is simply named `Keeper` and usually follows the following structure:
+
+```go
+type Keeper struct {
+ // External keepers, if any
+
+ // Store key(s)
+
+ // codec
+
+ // authority
+}
+```
+
+For example, here is the type definition of the `keeper` from the `staking` module:
+
+```go expandable
+package keeper
+
+import (
+
+ "fmt"
+ "cosmossdk.io/log"
+ "cosmossdk.io/math"
+ abci "github.com/cometbft/cometbft/abci/types"
+
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/staking/types"
+)
+
+// Implements ValidatorSet interface
+var _ types.ValidatorSet = Keeper{
+}
+
+// Implements DelegationSet interface
+var _ types.DelegationSet = Keeper{
+}
+
+// Keeper of the x/staking store
+type Keeper struct {
+ storeKey storetypes.StoreKey
+ cdc codec.BinaryCodec
+ authKeeper types.AccountKeeper
+ bankKeeper types.BankKeeper
+ hooks types.StakingHooks
+ authority string
+}
+
+// NewKeeper creates a new staking Keeper instance
+func NewKeeper(
+ cdc codec.BinaryCodec,
+ key storetypes.StoreKey,
+ ak types.AccountKeeper,
+ bk types.BankKeeper,
+ authority string,
+) *Keeper {
+ // ensure bonded and not bonded module accounts are set
+ if addr := ak.GetModuleAddress(types.BondedPoolName); addr == nil {
+ panic(fmt.Sprintf("%s module account has not been set", types.BondedPoolName))
+}
+ if addr := ak.GetModuleAddress(types.NotBondedPoolName); addr == nil {
+ panic(fmt.Sprintf("%s module account has not been set", types.NotBondedPoolName))
+}
+
+ // ensure that authority is a valid AccAddress
+ if _, err := ak.AddressCodec().StringToBytes(authority); err != nil {
+ panic("authority is not a valid acc address")
+}
+
+return &Keeper{
+ storeKey: key,
+ cdc: cdc,
+ authKeeper: ak,
+ bankKeeper: bk,
+ hooks: nil,
+ authority: authority,
+}
+}
+
+// Logger returns a module-specific logger.
+func (k Keeper)
+
+Logger(ctx sdk.Context)
+
+log.Logger {
+ return ctx.Logger().With("module", "x/"+types.ModuleName)
+}
+
+// Hooks gets the hooks for staking *Keeper {
+ func (k *Keeper)
+
+Hooks()
+
+types.StakingHooks {
+ if k.hooks == nil {
+ // return a no-op implementation if no hooks are set
+ return types.MultiStakingHooks{
+}
+
+}
+
+return k.hooks
+}
+
+// SetHooks Set the validator hooks. In contrast to other receivers, this method must take a pointer due to nature
+// of the hooks interface and SDK start up sequence.
+func (k *Keeper)
+
+SetHooks(sh types.StakingHooks) {
+ if k.hooks != nil {
+ panic("cannot set validator hooks twice")
+}
+
+k.hooks = sh
+}
+
+// GetLastTotalPower Load the last total validator power.
+func (k Keeper)
+
+GetLastTotalPower(ctx sdk.Context)
+
+math.Int {
+ store := ctx.KVStore(k.storeKey)
+ bz := store.Get(types.LastTotalPowerKey)
+ if bz == nil {
+ return math.ZeroInt()
+}
+ ip := sdk.IntProto{
+}
+
+k.cdc.MustUnmarshal(bz, &ip)
+
+return ip.Int
+}
+
+// SetLastTotalPower Set the last total validator power.
+func (k Keeper)
+
+SetLastTotalPower(ctx sdk.Context, power math.Int) {
+ store := ctx.KVStore(k.storeKey)
+ bz := k.cdc.MustMarshal(&sdk.IntProto{
+ Int: power
+})
+
+store.Set(types.LastTotalPowerKey, bz)
+}
+
+// GetAuthority returns the x/staking module's authority.
+func (k Keeper)
+
+GetAuthority()
+
+string {
+ return k.authority
+}
+
+// SetValidatorUpdates sets the ABCI validator power updates for the current block.
+func (k Keeper)
+
+SetValidatorUpdates(ctx sdk.Context, valUpdates []abci.ValidatorUpdate) {
+ store := ctx.KVStore(k.storeKey)
+ bz := k.cdc.MustMarshal(&types.ValidatorUpdates{
+ Updates: valUpdates
+})
+
+store.Set(types.ValidatorUpdatesKey, bz)
+}
+
+// GetValidatorUpdates returns the ABCI validator power updates within the current block.
+func (k Keeper)
+
+GetValidatorUpdates(ctx sdk.Context) []abci.ValidatorUpdate {
+ store := ctx.KVStore(k.storeKey)
+ bz := store.Get(types.ValidatorUpdatesKey)
+
+var valUpdates types.ValidatorUpdates
+ k.cdc.MustUnmarshal(bz, &valUpdates)
+
+return valUpdates.Updates
+}
+```
+
+Let us go through the different parameters:
+
+* An expected `keeper` is a `keeper` external to a module that is required by the internal `keeper` of said module. External `keeper`s are listed in the internal `keeper`'s type definition as interfaces. These interfaces are themselves defined in an `expected_keepers.go` file in the root of the module's folder. In this context, interfaces are used to reduce the number of dependencies, as well as to facilitate the maintenance of the module itself.
+* `storeKey`s grant access to the store(s) of the [multistore](/sdk/v0.53/learn/advanced/store) managed by the module. They should always remain unexposed to external modules.
+* `cdc` is the [codec](/sdk/v0.53/learn/advanced/encoding) used to marshal and unmarshal structs to/from `[]byte`. The `cdc` can be any of `codec.BinaryCodec`, `codec.JSONCodec` or `codec.Codec` based on your requirements. It can be either a proto or amino codec as long as they implement these interfaces.
+* The authority listed is a module account or user account that has the right to change module level parameters. Previously this was handled by the param module, which has been deprecated.
+
+Of course, it is possible to define different types of internal `keeper`s for the same module (e.g. a read-only `keeper`). Each type of `keeper` comes with its own constructor function, which is called from the [application's constructor function](/sdk/v0.53/learn/beginner/app-anatomy). This is where `keeper`s are instantiated, and where developers make sure to pass correct instances of modules' `keeper`s to other modules that require them.
+
+## Implementing Methods
+
+`Keeper`s primarily expose getter and setter methods for the store(s) managed by their module. These methods should remain as simple as possible and strictly be limited to getting or setting the requested value, as validity checks should have already been performed by the [`Msg` server](/sdk/v0.53/build/building-modules/msg-services) when `keeper`s' methods are called.
+
+Typically, a *getter* method will have the following signature
+
+```go
+func (k Keeper)
+
+Get(ctx context.Context, key string)
+
+returnType
+```
+
+and the method will go through the following steps:
+
+1. Retrieve the appropriate store from the `ctx` using the `storeKey`. This is done through the `KVStore(storeKey sdk.StoreKey)` method of the `ctx`. Then it's preferred to use the `prefix.Store` to access only the desired limited subset of the store for convenience and safety.
+2. If it exists, get the `[]byte` value stored at location `[]byte(key)` using the `Get(key []byte)` method of the store.
+3. Unmarshal the retrieved value from `[]byte` to `returnType` using the codec `cdc`. Return the value.
+
+Similarly, a *setter* method will have the following signature
+
+```go
+func (k Keeper)
+
+Set(ctx context.Context, key string, value valueType)
+```
+
+and the method will go through the following steps:
+
+1. Retrieve the appropriate store from the `ctx` using the `storeKey`. This is done through the `KVStore(storeKey sdk.StoreKey)` method of the `ctx`. It's preferred to use the `prefix.Store` to access only the desired limited subset of the store for convenience and safety.
+2. Marshal `value` to `[]byte` using the codec `cdc`.
+3. Set the encoded value in the store at location `key` using the `Set(key []byte, value []byte)` method of the store.
+
+For more, see an example of `keeper`'s [methods implementation from the `staking` module](https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/x/staking/keeper/keeper.go).
+
+The [module `KVStore`](/sdk/v0.53/learn/advanced/store#kvstore-and-commitkvstore-interfaces) also provides an `Iterator()` method which returns an `Iterator` object to iterate over a domain of keys.
+
+This is an example from the `auth` module to iterate accounts:
+
+```go expandable
+package keeper
+
+import (
+
+ "context"
+ "errors"
+ "cosmossdk.io/collections"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+// NewAccountWithAddress implements AccountKeeperI.
+func (ak AccountKeeper)
+
+NewAccountWithAddress(ctx context.Context, addr sdk.AccAddress)
+
+sdk.AccountI {
+ acc := ak.proto()
+ err := acc.SetAddress(addr)
+ if err != nil {
+ panic(err)
+}
+
+return ak.NewAccount(ctx, acc)
+}
+
+// NewAccount sets the next account number to a given account interface
+func (ak AccountKeeper)
+
+NewAccount(ctx context.Context, acc sdk.AccountI)
+
+sdk.AccountI {
+ if err := acc.SetAccountNumber(ak.NextAccountNumber(ctx)); err != nil {
+ panic(err)
+}
+
+return acc
+}
+
+// HasAccount implements AccountKeeperI.
+func (ak AccountKeeper)
+
+HasAccount(ctx context.Context, addr sdk.AccAddress)
+
+bool {
+ has, _ := ak.Accounts.Has(ctx, addr)
+
+return has
+}
+
+// GetAccount implements AccountKeeperI.
+func (ak AccountKeeper)
+
+GetAccount(ctx context.Context, addr sdk.AccAddress)
+
+sdk.AccountI {
+ acc, err := ak.Accounts.Get(ctx, addr)
+ if err != nil && !errors.Is(err, collections.ErrNotFound) {
+ panic(err)
+}
+
+return acc
+}
+
+// GetAllAccounts returns all accounts in the accountKeeper.
+func (ak AccountKeeper)
+
+GetAllAccounts(ctx context.Context) (accounts []sdk.AccountI) {
+ ak.IterateAccounts(ctx, func(acc sdk.AccountI) (stop bool) {
+ accounts = append(accounts, acc)
+
+return false
+})
+
+return accounts
+}
+
+// SetAccount implements AccountKeeperI.
+func (ak AccountKeeper)
+
+SetAccount(ctx context.Context, acc sdk.AccountI) {
+ err := ak.Accounts.Set(ctx, acc.GetAddress(), acc)
+ if err != nil {
+ panic(err)
+}
+}
+
+// RemoveAccount removes an account for the account mapper store.
+// NOTE: this will cause supply invariant violation if called
+func (ak AccountKeeper)
+
+RemoveAccount(ctx context.Context, acc sdk.AccountI) {
+ err := ak.Accounts.Remove(ctx, acc.GetAddress())
+ if err != nil {
+ panic(err)
+}
+}
+
+// IterateAccounts iterates over all the stored accounts and performs a callback function.
+// Stops iteration when callback returns true.
+func (ak AccountKeeper)
+
+IterateAccounts(ctx context.Context, cb func(account sdk.AccountI) (stop bool)) {
+ err := ak.Accounts.Walk(ctx, nil, func(_ sdk.AccAddress, value sdk.AccountI) (bool, error) {
+ return cb(value), nil
+})
+ if err != nil {
+ panic(err)
+}
+}
+```
diff --git a/sdk/next/build/building-modules/messages-and-queries.mdx b/sdk/next/build/building-modules/messages-and-queries.mdx
new file mode 100644
index 000000000..737969106
--- /dev/null
+++ b/sdk/next/build/building-modules/messages-and-queries.mdx
@@ -0,0 +1,1607 @@
+---
+title: Messages and Queries
+---
+{/* force preview rebuild: 2025-09-30 */}
+
+
+**Synopsis**
+`Msg`s and `Queries` are the two primary objects handled by modules. Most of the core components defined in a module, like `Msg` services, `keeper`s and `Query` services, exist to process `message`s and `queries`.
+
+
+
+**Prerequisite Readings**
+
+* [Introduction to Cosmos SDK Modules](/sdk/v0.53/build/building-modules/intro)
+
+
+
+## Messages
+
+`Msg`s are objects whose end-goal is to trigger state-transitions. They are wrapped in [transactions](/sdk/v0.53/learn/advanced/transactions), which may contain one or more of them.
+
+When a transaction is relayed from the underlying consensus engine to the Cosmos SDK application, it is first decoded by [`BaseApp`](/sdk/v0.53/learn/advanced/baseapp). Then, each message contained in the transaction is extracted and routed to the appropriate module via `BaseApp`'s `MsgServiceRouter` so that it can be processed by the module's [`Msg` service](/sdk/v0.53/build/building-modules/msg-services). For a more detailed explanation of the lifecycle of a transaction, click [here](/sdk/v0.53/learn/beginner/tx-lifecycle).
+
+### `Msg` Services
+
+Defining Protobuf `Msg` services is the recommended way to handle messages. A Protobuf `Msg` service should be created for each module, typically in `tx.proto` (see more info about [conventions and naming](/sdk/v0.53/learn/advanced/encoding#faq)). It must have an RPC service method defined for each message in the module.
+
+Each `Msg` service method must have exactly one argument, which must implement the `sdk.Msg` interface, and a Protobuf response. The naming convention is to call the RPC argument `Msg` and the RPC response `MsgResponse`. For example:
+
+```protobuf
+ rpc Send(MsgSend) returns (MsgSendResponse);
+```
+
+See an example of a `Msg` service definition from `x/bank` module:
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/proto/cosmos/bank/v1beta1/tx.proto#L13-L36
+```
+
+### `sdk.Msg` Interface
+
+`sdk.Msg` is an alias of `proto.Message`.
+
+To attach a `ValidateBasic()` method to a message, add methods to the type adhering to the `HasValidateBasic` interface.
+
+```go expandable
+package types
+
+import (
+
+ "encoding/json"
+ fmt "fmt"
+ strings "strings"
+ "github.com/cosmos/gogoproto/proto"
+ protov2 "google.golang.org/protobuf/proto"
+ "github.com/cosmos/cosmos-sdk/codec"
+ cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
+)
+
+type (
+ // Msg defines the interface a transaction message needed to fulfill.
+ Msg = proto.Message
+
+ // LegacyMsg defines the interface a transaction message needed to fulfill up through
+ // v0.47.
+ LegacyMsg interface {
+ Msg
+
+ // GetSigners returns the addrs of signers that must sign.
+ // CONTRACT: All signatures must be present to be valid.
+ // CONTRACT: Returns addrs in some deterministic order.
+ GetSigners() []AccAddress
+}
+
+ // Fee defines an interface for an application application-defined concrete
+ // transaction type to be able to set and return the transaction fee.
+ Fee interface {
+ GetGas()
+
+uint64
+ GetAmount()
+
+Coins
+}
+
+ // Signature defines an interface for an application application-defined
+ // concrete transaction type to be able to set and return transaction signatures.
+ Signature interface {
+ GetPubKey()
+
+cryptotypes.PubKey
+ GetSignature() []byte
+}
+
+ // HasMsgs defines an interface a transaction must fulfill.
+ HasMsgs interface {
+ // GetMsgs gets the all the transaction's messages.
+ GetMsgs() []Msg
+}
+
+ // Tx defines an interface a transaction must fulfill.
+ Tx interface {
+ HasMsgs
+
+ // GetMsgsV2 gets the transaction's messages as google.golang.org/protobuf/proto.Message's.
+ GetMsgsV2() ([]protov2.Message, error)
+}
+
+ // FeeTx defines the interface to be implemented by Tx to use the FeeDecorators
+ FeeTx interface {
+ Tx
+ GetGas()
+
+uint64
+ GetFee()
+
+Coins
+ FeePayer() []byte
+ FeeGranter() []byte
+}
+
+ // TxWithMemo must have GetMemo()
+
+method to use ValidateMemoDecorator
+ TxWithMemo interface {
+ Tx
+ GetMemo()
+
+string
+}
+
+ // TxWithTimeoutHeight extends the Tx interface by allowing a transaction to
+ // set a height timeout.
+ TxWithTimeoutHeight interface {
+ Tx
+
+ GetTimeoutHeight()
+
+uint64
+}
+
+ // HasValidateBasic defines a type that has a ValidateBasic method.
+ // ValidateBasic is deprecated and now optional.
+ // Prefer validating messages directly in the msg server.
+ HasValidateBasic interface {
+ // ValidateBasic does a simple validation check that
+ // doesn't require access to any other information.
+ ValidateBasic()
+
+error
+}
+)
+
+// TxDecoder unmarshals transaction bytes
+type TxDecoder func(txBytes []byte) (Tx, error)
+
+// TxEncoder marshals transaction to bytes
+type TxEncoder func(tx Tx) ([]byte, error)
+
+// MsgTypeURL returns the TypeURL of a `sdk.Msg`.
+func MsgTypeURL(msg proto.Message)
+
+string {
+ if m, ok := msg.(protov2.Message); ok {
+ return "/" + string(m.ProtoReflect().Descriptor().FullName())
+}
+
+return "/" + proto.MessageName(msg)
+}
+
+// GetMsgFromTypeURL returns a `sdk.Msg` message type from a type URL
+func GetMsgFromTypeURL(cdc codec.Codec, input string) (Msg, error) {
+ var msg Msg
+ bz, err := json.Marshal(struct {
+ Type string `json:"@type"`
+}{
+ Type: input,
+})
+ if err != nil {
+ return nil, err
+}
+ if err := cdc.UnmarshalInterfaceJSON(bz, &msg); err != nil {
+ return nil, fmt.Errorf("failed to determine sdk.Msg for %s URL : %w", input, err)
+}
+
+return msg, nil
+}
+
+// GetModuleNameFromTypeURL assumes that module name is the second element of the msg type URL
+// e.g. "cosmos.bank.v1beta1.MsgSend" => "bank"
+// It returns an empty string if the input is not a valid type URL
+func GetModuleNameFromTypeURL(input string)
+
+string {
+ moduleName := strings.Split(input, ".")
+ if len(moduleName) > 1 {
+ return moduleName[1]
+}
+
+return ""
+}
+```
+
+In 0.50+, retrieving the signers of a message, previously done via the `GetSigners()` call, is automated via a protobuf annotation.
+
+Read more about the signer field [here](/sdk/v0.53/build/building-modules/protobuf-annotations).
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/e6848d99b55a65d014375b295bdd7f9641aac95e/proto/cosmos/bank/v1beta1/tx.proto#L40
+```
+
+If there is a need for custom signers then there is an alternative path which can be taken. A function which returns `signing.CustomGetSigner` for a specific message can be defined.
+
+```go expandable
+func ProvideBankSendTransactionGetSigners()
+
+signing.CustomGetSigner {
+
+ // Extract the signer from the signature.
+ signer, err := coretypes.LatestSigner(Tx).Sender(ethTx)
+ if err != nil {
+ return nil, err
+}
+
+ // Return the signer in the required format.
+ return [][]byte{
+ signer.Bytes()
+}, nil
+}
+```
+
+When using dependency injection (depinject) this can be provided to the application via the provide method.
+
+```go
+depinject.Provide(banktypes.ProvideBankSendTransactionGetSigners)
+```
+
+The Cosmos SDK uses Protobuf definitions to generate client and server code:
+
+* `MsgServer` interface defines the server API for the `Msg` service and its implementation is described as part of the [`Msg` services](/sdk/v0.53/build/building-modules/msg-services) documentation.
+* Structures are generated for all RPC request and response types.
+
+A `RegisterMsgServer` method is also generated and should be used to register the module's `MsgServer` implementation in `RegisterServices` method from the [`AppModule` interface](/sdk/v0.53/build/building-modules/module-manager#appmodule).
+
+In order for clients (CLI and grpc-gateway) to have these URLs registered, the Cosmos SDK provides the function `RegisterMsgServiceDesc(registry codectypes.InterfaceRegistry, sd *grpc.ServiceDesc)` that should be called inside module's [`RegisterInterfaces`](/sdk/v0.53/build/building-modules/module-manager#appmodulebasic) method, using the proto-generated `&_Msg_serviceDesc` as `*grpc.ServiceDesc` argument.
+
+## Queries
+
+A `query` is a request for information made by end-users of applications through an interface and processed by a full-node. A `query` is received by a full-node through its consensus engine and relayed to the application via the ABCI. It is then routed to the appropriate module via `BaseApp`'s `QueryRouter` so that it can be processed by the module's [query service](/sdk/v0.53/build/building-modules/query-services). For a deeper look at the lifecycle of a `query`, click [here](/sdk/v0.53/learn/beginner/query-lifecycle).
+
+### gRPC Queries
+
+Queries should be defined using [Protobuf services](https://developers.google.com/protocol-buffers/docs/proto#services). A `Query` service should be created per module in `query.proto`. This service lists endpoints starting with `rpc`.
+
+Here's an example of such a `Query` service definition:
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/proto/cosmos/auth/v1beta1/query.proto#L14-L89
+```
+
+As `proto.Message`s, generated `Response` types implement by default `String()` method of [`fmt.Stringer`](https://pkg.go.dev/fmt#Stringer).
+
+A `RegisterQueryServer` method is also generated and should be used to register the module's query server in the `RegisterServices` method from the [`AppModule` interface](/sdk/v0.53/build/building-modules/module-manager#appmodule).
+
+### Legacy Queries
+
+Before the introduction of Protobuf and gRPC in the Cosmos SDK, there was usually no specific `query` object defined by module developers, contrary to `message`s. Instead, the Cosmos SDK took the simpler approach of using a simple `path` to define each `query`. The `path` contains the `query` type and all the arguments needed to process it. For most module queries, the `path` should look like the following:
+
+```text
+queryCategory/queryRoute/queryType/arg1/arg2/...
+```
+
+where:
+
+* `queryCategory` is the category of the `query`, typically `custom` for module queries. It is used to differentiate between different kinds of queries within `BaseApp`'s [`Query` method](/sdk/v0.53/learn/advanced/baseapp#query).
+* `queryRoute` is used by `BaseApp`'s [`queryRouter`](/sdk/v0.53/learn/advanced/baseapp#query-routing) to map the `query` to its module. Usually, `queryRoute` should be the name of the module.
+* `queryType` is used by the module's [`querier`](/sdk/v0.53/build/building-modules/query-services#legacy-queriers) to map the `query` to the appropriate `querier function` within the module.
+* `args` are the actual arguments needed to process the `query`. They are filled out by the end-user. Note that for bigger queries, you might prefer passing arguments in the `Data` field of the request `req` instead of the `path`.
+
+The `path` for each `query` must be defined by the module developer in the module's [command-line interface file](/sdk/v0.53/build/building-modules/module-interfaces#query-commands). Overall, there are 3 main components module developers need to implement in order to make the subset of the state defined by their module queryable:
+
+* A [`querier`](/sdk/v0.53/build/building-modules/query-services#legacy-queriers), to process the `query` once it has been [routed to the module](/sdk/v0.53/learn/advanced/baseapp#query-routing).
+* [Query commands](/sdk/v0.53/build/building-modules/module-interfaces#query-commands) in the module's CLI file, where the `path` for each `query` is specified.
+* `query` return types. Typically defined in a file `types/querier.go`, they specify the result type of each of the module's `queries`. These custom types must implement the `String()` method of [`fmt.Stringer`](https://pkg.go.dev/fmt#Stringer).
+
+### Store Queries
+
+Store queries query directly for store keys. They use `clientCtx.QueryABCI(req abci.RequestQuery)` to return the full `abci.ResponseQuery` with inclusion Merkle proofs.
+
+See following examples:
+
+```go expandable
+package baseapp
+
+import (
+
+ "context"
+ "crypto/sha256"
+ "fmt"
+ "os"
+ "sort"
+ "strings"
+ "syscall"
+ "time"
+
+ errorsmod "cosmossdk.io/errors"
+ "cosmossdk.io/store/rootmulti"
+ snapshottypes "cosmossdk.io/store/snapshots/types"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cockroachdb/errors"
+ abci "github.com/cometbft/cometbft/abci/types"
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ "github.com/cosmos/gogoproto/proto"
+ "google.golang.org/grpc/codes"
+ grpcstatus "google.golang.org/grpc/status"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// Supported ABCI Query prefixes and paths
+const (
+ QueryPathApp = "app"
+ QueryPathCustom = "custom"
+ QueryPathP2P = "p2p"
+ QueryPathStore = "store"
+
+ QueryPathBroadcastTx = "/cosmos.tx.v1beta1.Service/BroadcastTx"
+)
+
+func (app *BaseApp)
+
+InitChain(req *abci.RequestInitChain) (*abci.ResponseInitChain, error) {
+ if req.ChainId != app.chainID {
+ return nil, fmt.Errorf("invalid chain-id on InitChain; expected: %s, got: %s", app.chainID, req.ChainId)
+}
+
+ // On a new chain, we consider the init chain block height as 0, even though
+ // req.InitialHeight is 1 by default.
+ initHeader := cmtproto.Header{
+ ChainID: req.ChainId,
+ Time: req.Time
+}
+
+app.initialHeight = req.InitialHeight
+
+ app.logger.Info("InitChain", "initialHeight", req.InitialHeight, "chainID", req.ChainId)
+
+ // Set the initial height, which will be used to determine if we are proposing
+ // or processing the first block or not.
+ app.initialHeight = req.InitialHeight
+
+ // if req.InitialHeight is > 1, then we set the initial version on all stores
+ if req.InitialHeight > 1 {
+ initHeader.Height = req.InitialHeight
+ if err := app.cms.SetInitialVersion(req.InitialHeight); err != nil {
+ return nil, err
+}
+
+}
+
+ // initialize states with a correct header
+ app.setState(execModeFinalize, initHeader)
+
+app.setState(execModeCheck, initHeader)
+
+ // Store the consensus params in the BaseApp's param store. Note, this must be
+ // done after the finalizeBlockState and context have been set as it's persisted
+ // to state.
+ if req.ConsensusParams != nil {
+ err := app.StoreConsensusParams(app.finalizeBlockState.ctx, *req.ConsensusParams)
+ if err != nil {
+ return nil, err
+}
+
+}
+
+defer func() {
+ // InitChain represents the state of the application BEFORE the first block,
+ // i.e. the genesis block. This means that when processing the app's InitChain
+ // handler, the block height is zero by default. However, after Commit is called
+ // the height needs to reflect the true block height.
+ initHeader.Height = req.InitialHeight
+ app.checkState.ctx = app.checkState.ctx.WithBlockHeader(initHeader)
+
+app.finalizeBlockState.ctx = app.finalizeBlockState.ctx.WithBlockHeader(initHeader)
+}()
+ if app.initChainer == nil {
+ return &abci.ResponseInitChain{
+}, nil
+}
+
+ // add block gas meter for any genesis transactions (allow infinite gas)
+
+app.finalizeBlockState.ctx = app.finalizeBlockState.ctx.WithBlockGasMeter(storetypes.NewInfiniteGasMeter())
+
+res, err := app.initChainer(app.finalizeBlockState.ctx, req)
+ if err != nil {
+ return nil, err
+}
+ if len(req.Validators) > 0 {
+ if len(req.Validators) != len(res.Validators) {
+ return nil, fmt.Errorf(
+ "len(RequestInitChain.Validators) != len(GenesisValidators) (%d != %d)",
+ len(req.Validators), len(res.Validators),
+ )
+}
+
+sort.Sort(abci.ValidatorUpdates(req.Validators))
+
+sort.Sort(abci.ValidatorUpdates(res.Validators))
+ for i := range res.Validators {
+ if !proto.Equal(&res.Validators[i], &req.Validators[i]) {
+ return nil, fmt.Errorf("genesisValidators[%d] != req.Validators[%d] ", i, i)
+}
+
+}
+
+}
+
+ // In the case of a new chain, AppHash will be the hash of an empty string.
+ // During an upgrade, it'll be the hash of the last committed block.
+ var appHash []byte
+ if !app.LastCommitID().IsZero() {
+ appHash = app.LastCommitID().Hash
+}
+
+else {
+ // $ echo -n '' | sha256sum
+ // e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+ emptyHash := sha256.Sum256([]byte{
+})
+
+appHash = emptyHash[:]
+}
+
+ // NOTE: We don't commit, but FinalizeBlock for block InitialHeight starts from
+ // this FinalizeBlockState.
+ return &abci.ResponseInitChain{
+ ConsensusParams: res.ConsensusParams,
+ Validators: res.Validators,
+ AppHash: appHash,
+}, nil
+}
+
+func (app *BaseApp)
+
+Info(req *abci.RequestInfo) (*abci.ResponseInfo, error) {
+ lastCommitID := app.cms.LastCommitID()
+
+return &abci.ResponseInfo{
+ Data: app.name,
+ Version: app.version,
+ AppVersion: app.appVersion,
+ LastBlockHeight: lastCommitID.Version,
+ LastBlockAppHash: lastCommitID.Hash,
+}, nil
+}
+
+// Query implements the ABCI interface. It delegates to CommitMultiStore if it
+// implements Queryable.
+func (app *BaseApp)
+
+Query(_ context.Context, req *abci.RequestQuery) (resp *abci.ResponseQuery, err error) {
+ // add panic recovery for all queries
+ //
+ // Ref: https://github.com/cosmos/cosmos-sdk/pull/8039
+ defer func() {
+ if r := recover(); r != nil {
+ resp = sdkerrors.QueryResult(errorsmod.Wrapf(sdkerrors.ErrPanic, "%v", r), app.trace)
+}
+
+}()
+
+ // when a client did not provide a query height, manually inject the latest
+ if req.Height == 0 {
+ req.Height = app.LastBlockHeight()
+}
+
+telemetry.IncrCounter(1, "query", "count")
+
+telemetry.IncrCounter(1, "query", req.Path)
+
+defer telemetry.MeasureSince(time.Now(), req.Path)
+ if req.Path == QueryPathBroadcastTx {
+ return sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "can't route a broadcast tx message"), app.trace), nil
+}
+
+ // handle gRPC routes first rather than calling splitPath because '/' characters
+ // are used as part of gRPC paths
+ if grpcHandler := app.grpcQueryRouter.Route(req.Path); grpcHandler != nil {
+ return app.handleQueryGRPC(grpcHandler, req), nil
+}
+ path := SplitABCIQueryPath(req.Path)
+ if len(path) == 0 {
+ return sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "no query path provided"), app.trace), nil
+}
+ switch path[0] {
+ case QueryPathApp:
+ // "/app" prefix for special application queries
+ resp = handleQueryApp(app, path, req)
+ case QueryPathStore:
+ resp = handleQueryStore(app, path, *req)
+ case QueryPathP2P:
+ resp = handleQueryP2P(app, path)
+
+default:
+ resp = sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "unknown query path"), app.trace)
+}
+
+return resp, nil
+}
+
+// ListSnapshots implements the ABCI interface. It delegates to app.snapshotManager if set.
+func (app *BaseApp)
+
+ListSnapshots(req *abci.RequestListSnapshots) (*abci.ResponseListSnapshots, error) {
+ resp := &abci.ResponseListSnapshots{
+ Snapshots: []*abci.Snapshot{
+}}
+ if app.snapshotManager == nil {
+ return resp, nil
+}
+
+snapshots, err := app.snapshotManager.List()
+ if err != nil {
+ app.logger.Error("failed to list snapshots", "err", err)
+
+return nil, err
+}
+ for _, snapshot := range snapshots {
+ abciSnapshot, err := snapshot.ToABCI()
+ if err != nil {
+ app.logger.Error("failed to convert ABCI snapshots", "err", err)
+
+return nil, err
+}
+
+resp.Snapshots = append(resp.Snapshots, &abciSnapshot)
+}
+
+return resp, nil
+}
+
+// LoadSnapshotChunk implements the ABCI interface. It delegates to app.snapshotManager if set.
+func (app *BaseApp)
+
+LoadSnapshotChunk(req *abci.RequestLoadSnapshotChunk) (*abci.ResponseLoadSnapshotChunk, error) {
+ if app.snapshotManager == nil {
+ return &abci.ResponseLoadSnapshotChunk{
+}, nil
+}
+
+chunk, err := app.snapshotManager.LoadChunk(req.Height, req.Format, req.Chunk)
+ if err != nil {
+ app.logger.Error(
+ "failed to load snapshot chunk",
+ "height", req.Height,
+ "format", req.Format,
+ "chunk", req.Chunk,
+ "err", err,
+ )
+
+return nil, err
+}
+
+return &abci.ResponseLoadSnapshotChunk{
+ Chunk: chunk
+}, nil
+}
+
+// OfferSnapshot implements the ABCI interface. It delegates to app.snapshotManager if set.
+func (app *BaseApp)
+
+OfferSnapshot(req *abci.RequestOfferSnapshot) (*abci.ResponseOfferSnapshot, error) {
+ if app.snapshotManager == nil {
+ app.logger.Error("snapshot manager not configured")
+
+return &abci.ResponseOfferSnapshot{
+ Result: abci.ResponseOfferSnapshot_ABORT
+}, nil
+}
+ if req.Snapshot == nil {
+ app.logger.Error("received nil snapshot")
+
+return &abci.ResponseOfferSnapshot{
+ Result: abci.ResponseOfferSnapshot_REJECT
+}, nil
+}
+
+snapshot, err := snapshottypes.SnapshotFromABCI(req.Snapshot)
+ if err != nil {
+ app.logger.Error("failed to decode snapshot metadata", "err", err)
+
+return &abci.ResponseOfferSnapshot{
+ Result: abci.ResponseOfferSnapshot_REJECT
+}, nil
+}
+
+err = app.snapshotManager.Restore(snapshot)
+ switch {
+ case err == nil:
+ return &abci.ResponseOfferSnapshot{
+ Result: abci.ResponseOfferSnapshot_ACCEPT
+}, nil
+ case errors.Is(err, snapshottypes.ErrUnknownFormat):
+ return &abci.ResponseOfferSnapshot{
+ Result: abci.ResponseOfferSnapshot_REJECT_FORMAT
+}, nil
+ case errors.Is(err, snapshottypes.ErrInvalidMetadata):
+ app.logger.Error(
+ "rejecting invalid snapshot",
+ "height", req.Snapshot.Height,
+ "format", req.Snapshot.Format,
+ "err", err,
+ )
+
+return &abci.ResponseOfferSnapshot{
+ Result: abci.ResponseOfferSnapshot_REJECT
+}, nil
+
+ default:
+ app.logger.Error(
+ "failed to restore snapshot",
+ "height", req.Snapshot.Height,
+ "format", req.Snapshot.Format,
+ "err", err,
+ )
+
+ // We currently don't support resetting the IAVL stores and retrying a
+ // different snapshot, so we ask CometBFT to abort all snapshot restoration.
+ return &abci.ResponseOfferSnapshot{
+ Result: abci.ResponseOfferSnapshot_ABORT
+}, nil
+}
+}
+
+// ApplySnapshotChunk implements the ABCI interface. It delegates to app.snapshotManager if set.
+func (app *BaseApp)
+
+ApplySnapshotChunk(req *abci.RequestApplySnapshotChunk) (*abci.ResponseApplySnapshotChunk, error) {
+ if app.snapshotManager == nil {
+ app.logger.Error("snapshot manager not configured")
+
+return &abci.ResponseApplySnapshotChunk{
+ Result: abci.ResponseApplySnapshotChunk_ABORT
+}, nil
+}
+
+ _, err := app.snapshotManager.RestoreChunk(req.Chunk)
+ switch {
+ case err == nil:
+ return &abci.ResponseApplySnapshotChunk{
+ Result: abci.ResponseApplySnapshotChunk_ACCEPT
+}, nil
+ case errors.Is(err, snapshottypes.ErrChunkHashMismatch):
+ app.logger.Error(
+ "chunk checksum mismatch; rejecting sender and requesting refetch",
+ "chunk", req.Index,
+ "sender", req.Sender,
+ "err", err,
+ )
+
+return &abci.ResponseApplySnapshotChunk{
+ Result: abci.ResponseApplySnapshotChunk_RETRY,
+ RefetchChunks: []uint32{
+ req.Index
+},
+ RejectSenders: []string{
+ req.Sender
+},
+}, nil
+
+ default:
+ app.logger.Error("failed to restore snapshot", "err", err)
+
+return &abci.ResponseApplySnapshotChunk{
+ Result: abci.ResponseApplySnapshotChunk_ABORT
+}, nil
+}
+}
+
+// CheckTx implements the ABCI interface and executes a tx in CheckTx mode. In
+// CheckTx mode, messages are not executed. This means messages are only validated
+// and only the AnteHandler is executed. State is persisted to the BaseApp's
+// internal CheckTx state if the AnteHandler passes. Otherwise, the ResponseCheckTx
+// will contain relevant error information. Regardless of tx execution outcome,
+// the ResponseCheckTx will contain relevant gas execution context.
+func (app *BaseApp)
+
+CheckTx(req *abci.RequestCheckTx) (*abci.ResponseCheckTx, error) {
+ var mode execMode
+ switch {
+ case req.Type == abci.CheckTxType_New:
+ mode = execModeCheck
+ case req.Type == abci.CheckTxType_Recheck:
+ mode = execModeReCheck
+
+ default:
+ return nil, fmt.Errorf("unknown RequestCheckTx type: %s", req.Type)
+}
+
+gInfo, result, anteEvents, err := app.runTx(mode, req.Tx)
+ if err != nil {
+ return sdkerrors.ResponseCheckTxWithEvents(err, gInfo.GasWanted, gInfo.GasUsed, anteEvents, app.trace), nil
+}
+
+return &abci.ResponseCheckTx{
+ GasWanted: int64(gInfo.GasWanted), // TODO: Should type accept unsigned ints?
+ GasUsed: int64(gInfo.GasUsed), // TODO: Should type accept unsigned ints?
+ Log: result.Log,
+ Data: result.Data,
+ Events: sdk.MarkEventsToIndex(result.Events, app.indexEvents),
+}, nil
+}
+
+// PrepareProposal implements the PrepareProposal ABCI method and returns a
+// ResponsePrepareProposal object to the client. The PrepareProposal method is
+// responsible for allowing the block proposer to perform application-dependent
+// work in a block before proposing it.
+//
+// Transactions can be modified, removed, or added by the application. Since the
+// application maintains its own local mempool, it will ignore the transactions
+// provided to it in RequestPrepareProposal. Instead, it will determine which
+// transactions to return based on the mempool's semantics and the MaxTxBytes
+// provided by the client's request.
+//
+// Ref: https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-060-abci-1.0.md
+// Ref: https://github.com/cometbft/cometbft/blob/main/spec/abci/abci%2B%2B_basic_concepts.md
+func (app *BaseApp)
+
+PrepareProposal(req *abci.RequestPrepareProposal) (resp *abci.ResponsePrepareProposal, err error) {
+ if app.prepareProposal == nil {
+ return nil, errors.New("PrepareProposal handler not set")
+}
+
+ // Always reset state given that PrepareProposal can timeout and be called
+ // again in a subsequent round.
+ header := cmtproto.Header{
+ ChainID: app.chainID,
+ Height: req.Height,
+ Time: req.Time,
+ ProposerAddress: req.ProposerAddress,
+ NextValidatorsHash: req.NextValidatorsHash,
+}
+
+app.setState(execModePrepareProposal, header)
+
+ // CometBFT must never call PrepareProposal with a height of 0.
+ //
+ // Ref: https://github.com/cometbft/cometbft/blob/059798a4f5b0c9f52aa8655fa619054a0154088c/spec/core/state.md?plain=1#L37-L38
+ if req.Height < 1 {
+ return nil, errors.New("PrepareProposal called with invalid height")
+}
+
+app.prepareProposalState.ctx = app.getContextForProposal(app.prepareProposalState.ctx, req.Height).
+ WithVoteInfos(toVoteInfo(req.LocalLastCommit.Votes)). // this is a set of votes that are not finalized yet, wait for commit
+ WithBlockHeight(req.Height).
+ WithBlockTime(req.Time).
+ WithProposer(req.ProposerAddress).
+ WithExecMode(sdk.ExecModePrepareProposal).
+ WithCometInfo(prepareProposalInfo{
+ req
+})
+
+app.prepareProposalState.ctx = app.prepareProposalState.ctx.
+ WithConsensusParams(app.GetConsensusParams(app.prepareProposalState.ctx)).
+ WithBlockGasMeter(app.getBlockGasMeter(app.prepareProposalState.ctx))
+
+defer func() {
+ if err := recover(); err != nil {
+ app.logger.Error(
+ "panic recovered in PrepareProposal",
+ "height", req.Height,
+ "time", req.Time,
+ "panic", err,
+ )
+
+resp = &abci.ResponsePrepareProposal{
+}
+
+}
+
+}()
+
+resp, err = app.prepareProposal(app.prepareProposalState.ctx, req)
+ if err != nil {
+ app.logger.Error("failed to prepare proposal", "height", req.Height, "error", err)
+
+return &abci.ResponsePrepareProposal{
+}, nil
+}
+
+return resp, nil
+}
+
+// ProcessProposal implements the ProcessProposal ABCI method and returns a
+// ResponseProcessProposal object to the client. The ProcessProposal method is
+// responsible for allowing execution of application-dependent work in a proposed
+// block. Note, the application defines the exact implementation details of
+// ProcessProposal. In general, the application must at the very least ensure
+// that all transactions are valid. If all transactions are valid, then we inform
+// CometBFT that the Status is ACCEPT. However, the application is also able
+// to implement optimizations such as executing the entire proposed block
+// immediately.
+//
+// If a panic is detected during execution of an application's ProcessProposal
+// handler, it will be recovered and we will reject the proposal.
+//
+// Ref: https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-060-abci-1.0.md
+// Ref: https://github.com/cometbft/cometbft/blob/main/spec/abci/abci%2B%2B_basic_concepts.md
+func (app *BaseApp)
+
+ProcessProposal(req *abci.RequestProcessProposal) (resp *abci.ResponseProcessProposal, err error) {
+ if app.processProposal == nil {
+ return nil, errors.New("ProcessProposal handler not set")
+}
+
+ // CometBFT must never call ProcessProposal with a height of 0.
+ // Ref: https://github.com/cometbft/cometbft/blob/059798a4f5b0c9f52aa8655fa619054a0154088c/spec/core/state.md?plain=1#L37-L38
+ if req.Height < 1 {
+ return nil, errors.New("ProcessProposal called with invalid height")
+}
+
+ // Always reset state given that ProcessProposal can timeout and be called
+ // again in a subsequent round.
+ header := cmtproto.Header{
+ ChainID: app.chainID,
+ Height: req.Height,
+ Time: req.Time,
+ ProposerAddress: req.ProposerAddress,
+ NextValidatorsHash: req.NextValidatorsHash,
+}
+
+app.setState(execModeProcessProposal, header)
+
+ // Since the application can get access to FinalizeBlock state and write to it,
+ // we must be sure to reset it in case ProcessProposal timeouts and is called
+ // again in a subsequent round. However, we only want to do this after we've
+ // processed the first block, as we want to avoid overwriting the finalizeState
+ // after state changes during InitChain.
+ if req.Height > app.initialHeight {
+ app.setState(execModeFinalize, header)
+}
+
+app.processProposalState.ctx = app.getContextForProposal(app.processProposalState.ctx, req.Height).
+ WithVoteInfos(req.ProposedLastCommit.Votes). // this is a set of votes that are not finalized yet, wait for commit
+ WithBlockHeight(req.Height).
+ WithBlockTime(req.Time).
+ WithHeaderHash(req.Hash).
+ WithProposer(req.ProposerAddress).
+ WithCometInfo(cometInfo{
+ ProposerAddress: req.ProposerAddress,
+ ValidatorsHash: req.NextValidatorsHash,
+ Misbehavior: req.Misbehavior,
+ LastCommit: req.ProposedLastCommit
+}).
+ WithExecMode(sdk.ExecModeProcessProposal)
+
+app.processProposalState.ctx = app.processProposalState.ctx.
+ WithConsensusParams(app.GetConsensusParams(app.processProposalState.ctx)).
+ WithBlockGasMeter(app.getBlockGasMeter(app.processProposalState.ctx))
+
+defer func() {
+ if err := recover(); err != nil {
+ app.logger.Error(
+ "panic recovered in ProcessProposal",
+ "height", req.Height,
+ "time", req.Time,
+ "hash", fmt.Sprintf("%X", req.Hash),
+ "panic", err,
+ )
+
+resp = &abci.ResponseProcessProposal{
+ Status: abci.ResponseProcessProposal_REJECT
+}
+
+}
+
+}()
+
+resp, err = app.processProposal(app.processProposalState.ctx, req)
+ if err != nil {
+ app.logger.Error("failed to process proposal", "height", req.Height, "error", err)
+
+return &abci.ResponseProcessProposal{
+ Status: abci.ResponseProcessProposal_REJECT
+}, nil
+}
+
+return resp, nil
+}
+
+// ExtendVote implements the ExtendVote ABCI method and returns a ResponseExtendVote.
+// It calls the application's ExtendVote handler which is responsible for performing
+// application-specific business logic when sending a pre-commit for the NEXT
+// block height. The extensions response may be non-deterministic but must always
+// be returned, even if empty.
+//
+// Agreed upon vote extensions are made available to the proposer of the next
+// height and are committed in the subsequent height, i.e. H+2. An error is
+// returned if vote extensions are not enabled or if extendVote fails or panics.
+func (app *BaseApp)
+
+ExtendVote(_ context.Context, req *abci.RequestExtendVote) (resp *abci.ResponseExtendVote, err error) {
+ // Always reset state given that ExtendVote and VerifyVoteExtension can timeout
+ // and be called again in a subsequent round.
+ emptyHeader := cmtproto.Header{
+ ChainID: app.chainID,
+ Height: req.Height
+}
+
+app.setState(execModeVoteExtension, emptyHeader)
+ if app.extendVote == nil {
+ return nil, errors.New("application ExtendVote handler not set")
+}
+
+ // If vote extensions are not enabled, as a safety precaution, we return an
+ // error.
+ cp := app.GetConsensusParams(app.voteExtensionState.ctx)
+ if cp.Abci != nil && cp.Abci.VoteExtensionsEnableHeight <= 0 {
+ return nil, fmt.Errorf("vote extensions are not enabled; unexpected call to ExtendVote at height %d", req.Height)
+}
+
+app.voteExtensionState.ctx = app.voteExtensionState.ctx.
+ WithConsensusParams(cp).
+ WithBlockGasMeter(storetypes.NewInfiniteGasMeter()).
+ WithBlockHeight(req.Height).
+ WithHeaderHash(req.Hash).
+ WithExecMode(sdk.ExecModeVoteExtension)
+
+ // add a deferred recover handler in case extendVote panics
+ defer func() {
+ if r := recover(); r != nil {
+ app.logger.Error(
+ "panic recovered in ExtendVote",
+ "height", req.Height,
+ "hash", fmt.Sprintf("%X", req.Hash),
+ "panic", err,
+ )
+
+err = fmt.Errorf("recovered application panic in ExtendVote: %v", r)
+}
+
+}()
+
+resp, err = app.extendVote(app.voteExtensionState.ctx, req)
+ if err != nil {
+ app.logger.Error("failed to extend vote", "height", req.Height, "error", err)
+
+return &abci.ResponseExtendVote{
+ VoteExtension: []byte{
+}}, nil
+}
+
+return resp, err
+}
+
+// VerifyVoteExtension implements the VerifyVoteExtension ABCI method and returns
+// a ResponseVerifyVoteExtension. It calls the applications' VerifyVoteExtension
+// handler which is responsible for performing application-specific business
+// logic in verifying a vote extension from another validator during the pre-commit
+// phase. The response MUST be deterministic. An error is returned if vote
+// extensions are not enabled or if verifyVoteExt fails or panics.
+func (app *BaseApp)
+
+VerifyVoteExtension(req *abci.RequestVerifyVoteExtension) (resp *abci.ResponseVerifyVoteExtension, err error) {
+ if app.verifyVoteExt == nil {
+ return nil, errors.New("application VerifyVoteExtension handler not set")
+}
+
+ // If vote extensions are not enabled, as a safety precaution, we return an
+ // error.
+ cp := app.GetConsensusParams(app.voteExtensionState.ctx)
+ if cp.Abci != nil && cp.Abci.VoteExtensionsEnableHeight <= 0 {
+ return nil, fmt.Errorf("vote extensions are not enabled; unexpected call to VerifyVoteExtension at height %d", req.Height)
+}
+
+ // add a deferred recover handler in case verifyVoteExt panics
+ defer func() {
+ if r := recover(); r != nil {
+ app.logger.Error(
+ "panic recovered in VerifyVoteExtension",
+ "height", req.Height,
+ "hash", fmt.Sprintf("%X", req.Hash),
+ "validator", fmt.Sprintf("%X", req.ValidatorAddress),
+ "panic", r,
+ )
+
+err = fmt.Errorf("recovered application panic in VerifyVoteExtension: %v", r)
+}
+
+}()
+
+resp, err = app.verifyVoteExt(app.voteExtensionState.ctx, req)
+ if err != nil {
+ app.logger.Error("failed to verify vote extension", "height", req.Height, "error", err)
+
+return &abci.ResponseVerifyVoteExtension{
+ Status: abci.ResponseVerifyVoteExtension_REJECT
+}, nil
+}
+
+return resp, err
+}
+
+// FinalizeBlock will execute the block proposal provided by RequestFinalizeBlock.
+// Specifically, it will execute an application's BeginBlock (if defined), followed
+// by the transactions in the proposal, finally followed by the application's
+// EndBlock (if defined).
+//
+// For each raw transaction, i.e. a byte slice, BaseApp will only execute it if
+// it adheres to the sdk.Tx interface. Otherwise, the raw transaction will be
+// skipped. This is to support compatibility with proposers injecting vote
+// extensions into the proposal, which should not themselves be executed in cases
+// where they adhere to the sdk.Tx interface.
+func (app *BaseApp)
+
+FinalizeBlock(req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) {
+ var events []abci.Event
+ if err := app.validateFinalizeBlockHeight(req); err != nil {
+ return nil, err
+}
+ if app.cms.TracingEnabled() {
+ app.cms.SetTracingContext(storetypes.TraceContext(
+ map[string]any{"blockHeight": req.Height
+},
+ ))
+}
+ header := cmtproto.Header{
+ ChainID: app.chainID,
+ Height: req.Height,
+ Time: req.Time,
+ ProposerAddress: req.ProposerAddress,
+ NextValidatorsHash: req.NextValidatorsHash,
+}
+
+ // Initialize the FinalizeBlock state. If this is the first block, it should
+ // already be initialized in InitChain. Otherwise app.finalizeBlockState will be
+ // nil, since it is reset on Commit.
+ if app.finalizeBlockState == nil {
+ app.setState(execModeFinalize, header)
+}
+
+else {
+ // In the first block, app.finalizeBlockState.ctx will already be initialized
+ // by InitChain. Context is now updated with Header information.
+ app.finalizeBlockState.ctx = app.finalizeBlockState.ctx.
+ WithBlockHeader(header).
+ WithBlockHeight(req.Height)
+}
+ gasMeter := app.getBlockGasMeter(app.finalizeBlockState.ctx)
+
+app.finalizeBlockState.ctx = app.finalizeBlockState.ctx.
+ WithBlockGasMeter(gasMeter).
+ WithHeaderHash(req.Hash).
+ WithConsensusParams(app.GetConsensusParams(app.finalizeBlockState.ctx)).
+ WithVoteInfos(req.DecidedLastCommit.Votes).
+ WithExecMode(sdk.ExecModeFinalize)
+ if app.checkState != nil {
+ app.checkState.ctx = app.checkState.ctx.
+ WithBlockGasMeter(gasMeter).
+ WithHeaderHash(req.Hash)
+}
+ beginBlock := app.beginBlock(req)
+
+events = append(events, beginBlock.Events...)
+
+ // Iterate over all raw transactions in the proposal and attempt to execute
+ // them, gathering the execution results.
+ //
+ // NOTE: Not all raw transactions may adhere to the sdk.Tx interface, e.g.
+ // vote extensions, so skip those.
+ txResults := make([]*abci.ExecTxResult, 0, len(req.Txs))
+ for _, rawTx := range req.Txs {
+ if _, err := app.txDecoder(rawTx); err == nil {
+ txResults = append(txResults, app.deliverTx(rawTx))
+}
+
+}
+ if app.finalizeBlockState.ms.TracingEnabled() {
+ app.finalizeBlockState.ms = app.finalizeBlockState.ms.SetTracingContext(nil).(storetypes.CacheMultiStore)
+}
+
+endBlock, err := app.endBlock(app.finalizeBlockState.ctx)
+ if err != nil {
+ return nil, err
+}
+
+events = append(events, endBlock.Events...)
+ cp := app.GetConsensusParams(app.finalizeBlockState.ctx)
+
+return &abci.ResponseFinalizeBlock{
+ Events: events,
+ TxResults: txResults,
+ ValidatorUpdates: endBlock.ValidatorUpdates,
+ ConsensusParamUpdates: &cp,
+ AppHash: app.workingHash(),
+}, nil
+}
+
+// Commit implements the ABCI interface. It will commit all state that exists in
+// the deliver state's multi-store and includes the resulting commit ID in the
+// returned abci.ResponseCommit. Commit will set the check state based on the
+// latest header and reset the deliver state. Also, if a non-zero halt height is
+// defined in config, Commit will execute a deferred function call to check
+// against that height and gracefully halt if it matches the latest committed
+// height.
+func (app *BaseApp)
+
+Commit() (*abci.ResponseCommit, error) {
+ header := app.finalizeBlockState.ctx.BlockHeader()
+ retainHeight := app.GetBlockRetentionHeight(header.Height)
+ if app.precommiter != nil {
+ app.precommiter(app.finalizeBlockState.ctx)
+}
+
+rms, ok := app.cms.(*rootmulti.Store)
+ if ok {
+ rms.SetCommitHeader(header)
+}
+
+app.cms.Commit()
+ resp := &abci.ResponseCommit{
+ RetainHeight: retainHeight,
+}
+ abciListeners := app.streamingManager.ABCIListeners
+ if len(abciListeners) > 0 {
+ ctx := app.finalizeBlockState.ctx
+ blockHeight := ctx.BlockHeight()
+ changeSet := app.cms.PopStateCache()
+ for _, abciListener := range abciListeners {
+ if err := abciListener.ListenCommit(ctx, *resp, changeSet); err != nil {
+ app.logger.Error("Commit listening hook failed", "height", blockHeight, "err", err)
+}
+
+}
+
+}
+
+ // Reset the CheckTx state to the latest committed.
+ //
+ // NOTE: This is safe because CometBFT holds a lock on the mempool for
+ // Commit. Use the header from this latest block.
+ app.setState(execModeCheck, header)
+
+app.finalizeBlockState = nil
+ if app.prepareCheckStater != nil {
+ app.prepareCheckStater(app.checkState.ctx)
+}
+
+var halt bool
+ switch {
+ case app.haltHeight > 0 && uint64(header.Height) >= app.haltHeight:
+ halt = true
+ case app.haltTime > 0 && header.Time.Unix() >= int64(app.haltTime):
+ halt = true
+}
+ if halt {
+ // Halt the binary and allow CometBFT to receive the ResponseCommit
+ // response with the commit ID hash. This will allow the node to successfully
+ // restart and process blocks assuming the halt configuration has been
+ // reset or moved to a more distant value.
+ app.halt()
+}
+
+go app.snapshotManager.SnapshotIfApplicable(header.Height)
+
+return resp, nil
+}
+
+// workingHash gets the apphash that will be finalized in commit.
+// These writes will be persisted to the root multi-store (app.cms)
+// and flushed to disk in the Commit phase.
+// This means that when the ABCI client requests Commit(),
+// the application state transitions will be flushed to disk
+// and, as a result, we already have
+// an application Merkle root.
+func (app *BaseApp)
+
+workingHash() []byte {
+ // Write the FinalizeBlock state into branched storage and commit the MultiStore.
+ // The write to the FinalizeBlock state writes all state transitions to the root
+ // MultiStore (app.cms),
+ // so when Commit()
+ // is called
+ // it persists
+ // those values.
+ app.finalizeBlockState.ms.Write()
+
+ // Get the hash of all writes in order to return the apphash to the comet in finalizeBlock.
+ commitHash := app.cms.WorkingHash()
+
+app.logger.Debug("hash of all writes", "workingHash", fmt.Sprintf("%X", commitHash))
+
+return commitHash
+}
+
+// halt attempts to gracefully shutdown the node via SIGINT and SIGTERM falling
+// back on os.Exit if both fail.
+func (app *BaseApp)
+
+halt() {
+ app.logger.Info("halting node per configuration", "height", app.haltHeight, "time", app.haltTime)
+
+p, err := os.FindProcess(os.Getpid())
+ if err == nil {
+ // attempt cascading signals in case SIGINT fails (os dependent)
+ sigIntErr := p.Signal(syscall.SIGINT)
+ sigTermErr := p.Signal(syscall.SIGTERM)
+ if sigIntErr == nil || sigTermErr == nil {
+ return
+}
+
+}
+
+ // Resort to exiting immediately if the process could not be found or killed
+ // via SIGINT/SIGTERM signals.
+ app.logger.Info("failed to send SIGINT/SIGTERM; exiting...")
+
+os.Exit(0)
+}
+
+func handleQueryApp(app *BaseApp, path []string, req *abci.RequestQuery) *abci.ResponseQuery {
+ if len(path) >= 2 {
+ switch path[1] {
+ case "simulate":
+ txBytes := req.Data
+
+ gInfo, res, err := app.Simulate(txBytes)
+ if err != nil {
+ return sdkerrors.QueryResult(errorsmod.Wrap(err, "failed to simulate tx"), app.trace)
+}
+ simRes := &sdk.SimulationResponse{
+ GasInfo: gInfo,
+ Result: res,
+}
+
+bz, err := codec.ProtoMarshalJSON(simRes, app.interfaceRegistry)
+ if err != nil {
+ return sdkerrors.QueryResult(errorsmod.Wrap(err, "failed to JSON encode simulation response"), app.trace)
+}
+
+return &abci.ResponseQuery{
+ Codespace: sdkerrors.RootCodespace,
+ Height: req.Height,
+ Value: bz,
+}
+ case "version":
+ return &abci.ResponseQuery{
+ Codespace: sdkerrors.RootCodespace,
+ Height: req.Height,
+ Value: []byte(app.version),
+}
+
+default:
+ return sdkerrors.QueryResult(errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "unknown query: %s", path), app.trace)
+}
+
+}
+
+return sdkerrors.QueryResult(
+ errorsmod.Wrap(
+ sdkerrors.ErrUnknownRequest,
+ "expected second parameter to be either 'simulate' or 'version', neither was present",
+ ), app.trace)
+}
+
+func handleQueryStore(app *BaseApp, path []string, req abci.RequestQuery) *abci.ResponseQuery {
+ // "/store" prefix for store queries
+ queryable, ok := app.cms.(storetypes.Queryable)
+ if !ok {
+ return sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "multi-store does not support queries"), app.trace)
+}
+
+req.Path = "/" + strings.Join(path[1:], "/")
+ if req.Height <= 1 && req.Prove {
+ return sdkerrors.QueryResult(
+ errorsmod.Wrap(
+ sdkerrors.ErrInvalidRequest,
+ "cannot query with proof when height <= 1; please provide a valid height",
+ ), app.trace)
+}
+ sdkReq := storetypes.RequestQuery(req)
+
+resp, err := queryable.Query(&sdkReq)
+ if err != nil {
+ return sdkerrors.QueryResult(err, app.trace)
+}
+
+resp.Height = req.Height
+ abciResp := abci.ResponseQuery(*resp)
+
+return &abciResp
+}
+
+func handleQueryP2P(app *BaseApp, path []string) *abci.ResponseQuery {
+ // "/p2p" prefix for p2p queries
+ if len(path) < 4 {
+ return sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "path should be p2p filter "), app.trace)
+}
+
+var resp *abci.ResponseQuery
+
+ cmd, typ, arg := path[1], path[2], path[3]
+ switch cmd {
+ case "filter":
+ switch typ {
+ case "addr":
+ resp = app.FilterPeerByAddrPort(arg)
+ case "id":
+ resp = app.FilterPeerByID(arg)
+}
+
+default:
+ resp = sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "expected second parameter to be 'filter'"), app.trace)
+}
+
+return resp
+}
+
+// SplitABCIQueryPath splits a string path using the delimiter '/'.
+//
+// e.g. "this/is/funny" becomes []string{"this", "is", "funny"}
+
+func SplitABCIQueryPath(requestPath string) (path []string) {
+ path = strings.Split(requestPath, "/")
+
+ // first element is empty string
+ if len(path) > 0 && path[0] == "" {
+ path = path[1:]
+}
+
+return path
+}
+
+// FilterPeerByAddrPort filters peers by address/port.
+func (app *BaseApp)
+
+FilterPeerByAddrPort(info string) *abci.ResponseQuery {
+ if app.addrPeerFilter != nil {
+ return app.addrPeerFilter(info)
+}
+
+return &abci.ResponseQuery{
+}
+}
+
+// FilterPeerByID filters peers by node ID.
+func (app *BaseApp)
+
+FilterPeerByID(info string) *abci.ResponseQuery {
+ if app.idPeerFilter != nil {
+ return app.idPeerFilter(info)
+}
+
+return &abci.ResponseQuery{
+}
+}
+
+// getContextForProposal returns the correct Context for PrepareProposal and
+// ProcessProposal. We use finalizeBlockState on the first block to be able to
+// access any state changes made in InitChain.
+func (app *BaseApp)
+
+getContextForProposal(ctx sdk.Context, height int64)
+
+sdk.Context {
+ if height == app.initialHeight {
+ ctx, _ = app.finalizeBlockState.ctx.CacheContext()
+
+ // clear all context data set during InitChain to avoid inconsistent behavior
+ ctx = ctx.WithBlockHeader(cmtproto.Header{
+})
+
+return ctx
+}
+
+return ctx
+}
+
+func (app *BaseApp)
+
+handleQueryGRPC(handler GRPCQueryHandler, req *abci.RequestQuery) *abci.ResponseQuery {
+ ctx, err := app.CreateQueryContext(req.Height, req.Prove)
+ if err != nil {
+ return sdkerrors.QueryResult(err, app.trace)
+}
+
+resp, err := handler(ctx, req)
+ if err != nil {
+ resp = sdkerrors.QueryResult(gRPCErrorToSDKError(err), app.trace)
+
+resp.Height = req.Height
+ return resp
+}
+
+return resp
+}
+
+func gRPCErrorToSDKError(err error)
+
+error {
+ status, ok := grpcstatus.FromError(err)
+ if !ok {
+ return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, err.Error())
+}
+ switch status.Code() {
+ case codes.NotFound:
+ return errorsmod.Wrap(sdkerrors.ErrKeyNotFound, err.Error())
+ case codes.InvalidArgument:
+ return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, err.Error())
+ case codes.FailedPrecondition:
+ return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, err.Error())
+ case codes.Unauthenticated:
+ return errorsmod.Wrap(sdkerrors.ErrUnauthorized, err.Error())
+
+default:
+ return errorsmod.Wrap(sdkerrors.ErrUnknownRequest, err.Error())
+}
+}
+
+func checkNegativeHeight(height int64)
+
+error {
+ if height < 0 {
+ return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "cannot query with height < 0; please provide a valid height")
+}
+
+return nil
+}
+
+// createQueryContext creates a new sdk.Context for a query, taking as args
+// the block height and whether the query needs a proof or not.
+func (app *BaseApp)
+
+CreateQueryContext(height int64, prove bool) (sdk.Context, error) {
+ if err := checkNegativeHeight(height); err != nil {
+ return sdk.Context{
+}, err
+}
+
+ // use custom query multi-store if provided
+ qms := app.qms
+ if qms == nil {
+ qms = app.cms.(storetypes.MultiStore)
+}
+ lastBlockHeight := qms.LatestVersion()
+ if lastBlockHeight == 0 {
+ return sdk.Context{
+}, errorsmod.Wrapf(sdkerrors.ErrInvalidHeight, "%s is not ready; please wait for first block", app.Name())
+}
+ if height > lastBlockHeight {
+ return sdk.Context{
+},
+ errorsmod.Wrap(
+ sdkerrors.ErrInvalidHeight,
+ "cannot query with height in the future; please provide a valid height",
+ )
+}
+
+ // when a client did not provide a query height, manually inject the latest
+ if height == 0 {
+ height = lastBlockHeight
+}
+ if height <= 1 && prove {
+ return sdk.Context{
+},
+ errorsmod.Wrap(
+ sdkerrors.ErrInvalidRequest,
+ "cannot query with proof when height <= 1; please provide a valid height",
+ )
+}
+
+cacheMS, err := qms.CacheMultiStoreWithVersion(height)
+ if err != nil {
+ return sdk.Context{
+},
+ errorsmod.Wrapf(
+ sdkerrors.ErrInvalidRequest,
+ "failed to load state at height %d; %s (latest height: %d)", height, err, lastBlockHeight,
+ )
+}
+
+ // branch the commit multi-store for safety
+ ctx := sdk.NewContext(cacheMS, app.checkState.ctx.BlockHeader(), true, app.logger).
+ WithMinGasPrices(app.minGasPrices).
+ WithBlockHeight(height)
+ if height != lastBlockHeight {
+ rms, ok := app.cms.(*rootmulti.Store)
+ if ok {
+ cInfo, err := rms.GetCommitInfo(height)
+ if cInfo != nil && err == nil {
+ ctx = ctx.WithBlockTime(cInfo.Timestamp)
+}
+
+}
+
+}
+
+return ctx, nil
+}
+
+// GetBlockRetentionHeight returns the height for which all blocks below this height
+// are pruned from CometBFT. Given a commitment height and a non-zero local
+// minRetainBlocks configuration, the retentionHeight is the smallest height that
+// satisfies:
+//
+// - Unbonding (safety threshold) time: The block interval in which validators
+// can be economically punished for misbehavior. Blocks in this interval must be
+// auditable e.g. by the light client.
+//
+// - Logical store snapshot interval: The block interval at which the underlying
+// logical store database is persisted to disk, e.g. every 10000 heights. Blocks
+// since the last IAVL snapshot must be available for replay on application restart.
+//
+// - State sync snapshots: Blocks since the oldest available snapshot must be
+// available for state sync nodes to catch up (oldest because a node may be
+// restoring an old snapshot while a new snapshot was taken).
+//
+// - Local (minRetainBlocks) config: Archive nodes may want to retain more or
+// all blocks, e.g. via a local config option min-retain-blocks. There may also
+// be a need to vary retention for other nodes, e.g. sentry nodes which do not
+// need historical blocks.
+func (app *BaseApp)
+
+GetBlockRetentionHeight(commitHeight int64)
+
+int64 {
+ // pruning is disabled if minRetainBlocks is zero
+ if app.minRetainBlocks == 0 {
+ return 0
+}
+ minNonZero := func(x, y int64)
+
+int64 {
+ switch {
+ case x == 0:
+ return y
+ case y == 0:
+ return x
+ case x < y:
+ return x
+
+ default:
+ return y
+}
+
+}
+
+ // Define retentionHeight as the minimum value that satisfies all non-zero
+ // constraints. All blocks below (commitHeight-retentionHeight) are pruned
+ // from CometBFT.
+ var retentionHeight int64
+
+ // Define the number of blocks needed to protect against misbehaving validators
+ // which allows light clients to operate safely. Note, we piggy back of the
+ // evidence parameters instead of computing an estimated number of blocks based
+ // on the unbonding period and block commitment time as the two should be
+ // equivalent.
+ cp := app.GetConsensusParams(app.finalizeBlockState.ctx)
+ if cp.Evidence != nil && cp.Evidence.MaxAgeNumBlocks > 0 {
+ retentionHeight = commitHeight - cp.Evidence.MaxAgeNumBlocks
+}
+ if app.snapshotManager != nil {
+ snapshotRetentionHeights := app.snapshotManager.GetSnapshotBlockRetentionHeights()
+ if snapshotRetentionHeights > 0 {
+ retentionHeight = minNonZero(retentionHeight, commitHeight-snapshotRetentionHeights)
+}
+
+}
+ v := commitHeight - int64(app.minRetainBlocks)
+
+retentionHeight = minNonZero(retentionHeight, v)
+ if retentionHeight <= 0 {
+ // prune nothing in the case of a non-positive height
+ return 0
+}
+
+return retentionHeight
+}
+
+// toVoteInfo converts the new ExtendedVoteInfo to VoteInfo.
+func toVoteInfo(votes []abci.ExtendedVoteInfo) []abci.VoteInfo {
+ legacyVotes := make([]abci.VoteInfo, len(votes))
+ for i, vote := range votes {
+ legacyVotes[i] = abci.VoteInfo{
+ Validator: abci.Validator{
+ Address: vote.Validator.Address,
+ Power: vote.Validator.Power,
+},
+ BlockIdFlag: vote.BlockIdFlag,
+}
+
+}
+
+return legacyVotes
+}
+```
diff --git a/sdk/next/build/building-modules/module-interfaces.mdx b/sdk/next/build/building-modules/module-interfaces.mdx
new file mode 100644
index 000000000..f3752777f
--- /dev/null
+++ b/sdk/next/build/building-modules/module-interfaces.mdx
@@ -0,0 +1,1084 @@
+---
+title: Module Interfaces
+---
+{/* force preview rebuild: 2025-09-30 */}
+
+
+**Synopsis**
+This document details how to build CLI and REST interfaces for a module. Examples from various Cosmos SDK modules are included.
+
+
+
+**Prerequisite Readings**
+
+* [Building Modules Intro](/sdk/v0.53/build/building-modules/intro)
+
+
+
+## CLI
+
+One of the main interfaces for an application is the [command-line interface](/sdk/v0.53/learn/advanced/cli). This entrypoint adds commands from the application's modules enabling end-users to create [**messages**](/sdk/v0.53/build/building-modules/messages-and-queries#messages) wrapped in transactions and [**queries**](/sdk/v0.53/build/building-modules/messages-and-queries#queries). The CLI files are typically found in the module's `./client/cli` folder.
+
+### Transaction Commands
+
+In order to create messages that trigger state changes, end-users must create [transactions](/sdk/v0.53/learn/advanced/transactions) that wrap and deliver the messages. A transaction command creates a transaction that includes one or more messages.
+
+Transaction commands typically have their own `tx.go` file that lives within the module's `./client/cli` folder. The commands are specified in getter functions and the name of the function should include the name of the command.
+
+Here is an example from the `x/bank` module:
+
+```go expandable
+package cli
+
+import (
+
+ "fmt"
+ "cosmossdk.io/core/address"
+ sdkmath "cosmossdk.io/math"
+ "github.com/spf13/cobra"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/client/tx"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/bank/types"
+)
+
+var FlagSplit = "split"
+
+// NewTxCmd returns a root CLI command handler for all x/bank transaction commands.
+func NewTxCmd(ac address.Codec) *cobra.Command {
+ txCmd := &cobra.Command{
+ Use: types.ModuleName,
+ Short: "Bank transaction subcommands",
+ DisableFlagParsing: true,
+ SuggestionsMinimumDistance: 2,
+ RunE: client.ValidateCmd,
+}
+
+txCmd.AddCommand(
+ NewSendTxCmd(ac),
+ NewMultiSendTxCmd(ac),
+ )
+
+return txCmd
+}
+
+// NewSendTxCmd returns a CLI command handler for creating a MsgSend transaction.
+func NewSendTxCmd(ac address.Codec) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "send [from_key_or_address] [to_address] [amount]",
+ Short: "Send funds from one account to another.",
+ Long: `Send funds from one account to another.
+Note, the '--from' flag is ignored as it is implied from [from_key_or_address].
+When using '--dry-run' a key name cannot be used, only a bech32 address.
+`,
+ Args: cobra.ExactArgs(3),
+ RunE: func(cmd *cobra.Command, args []string)
+
+error {
+ cmd.Flags().Set(flags.FlagFrom, args[0])
+
+clientCtx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+}
+
+toAddr, err := ac.StringToBytes(args[1])
+ if err != nil {
+ return err
+}
+
+coins, err := sdk.ParseCoinsNormalized(args[2])
+ if err != nil {
+ return err
+}
+ if len(coins) == 0 {
+ return fmt.Errorf("invalid coins")
+}
+ msg := types.NewMsgSend(clientCtx.GetFromAddress(), toAddr, coins)
+
+return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg)
+},
+}
+
+flags.AddTxFlagsToCmd(cmd)
+
+return cmd
+}
+
+// NewMultiSendTxCmd returns a CLI command handler for creating a MsgMultiSend transaction.
+// For a better UX this command is limited to send funds from one account to two or more accounts.
+func NewMultiSendTxCmd(ac address.Codec) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "multi-send [from_key_or_address] [to_address_1, to_address_2, ...] [amount]",
+ Short: "Send funds from one account to two or more accounts.",
+ Long: `Send funds from one account to two or more accounts.
+By default, sends the [amount] to each address of the list.
+Using the '--split' flag, the [amount] is split equally between the addresses.
+Note, the '--from' flag is ignored as it is implied from [from_key_or_address].
+When using '--dry-run' a key name cannot be used, only a bech32 address.
+`,
+ Args: cobra.MinimumNArgs(4),
+ RunE: func(cmd *cobra.Command, args []string)
+
+error {
+ cmd.Flags().Set(flags.FlagFrom, args[0])
+
+clientCtx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+}
+
+coins, err := sdk.ParseCoinsNormalized(args[len(args)-1])
+ if err != nil {
+ return err
+}
+ if coins.IsZero() {
+ return fmt.Errorf("must send positive amount")
+}
+
+split, err := cmd.Flags().GetBool(FlagSplit)
+ if err != nil {
+ return err
+}
+ totalAddrs := sdkmath.NewInt(int64(len(args) - 2))
+ // coins to be received by the addresses
+ sendCoins := coins
+ if split {
+ sendCoins = coins.QuoInt(totalAddrs)
+}
+
+var output []types.Output
+ for _, arg := range args[1 : len(args)-1] {
+ toAddr, err := ac.StringToBytes(arg)
+ if err != nil {
+ return err
+}
+
+output = append(output, types.NewOutput(toAddr, sendCoins))
+}
+
+ // amount to be sent from the from address
+ var amount sdk.Coins
+ if split {
+ // user input: 1000stake to send to 3 addresses
+ // actual: 333stake to each address (=> 999stake actually sent)
+
+amount = sendCoins.MulInt(totalAddrs)
+}
+
+else {
+ amount = coins.MulInt(totalAddrs)
+}
+ msg := types.NewMsgMultiSend(types.NewInput(clientCtx.FromAddress, amount), output)
+
+return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg)
+},
+}
+
+cmd.Flags().Bool(FlagSplit, false, "Send the equally split token amount to each address")
+
+flags.AddTxFlagsToCmd(cmd)
+
+return cmd
+}
+```
+
+In the example, `NewSendTxCmd()` creates and returns the transaction command for a transaction that wraps and delivers `MsgSend`. `MsgSend` is the message used to send tokens from one account to another.
+
+In general, the getter function does the following:
+
+* **Constructs the command:** Read the [Cobra Documentation](https://pkg.go.dev/github.com/spf13/cobra) for more detailed information on how to create commands.
+ * **Use:** Specifies the format of the user input required to invoke the command. In the example above, `send` is the name of the transaction command and `[from_key_or_address]`, `[to_address]`, and `[amount]` are the arguments.
+ * **Args:** The number of arguments the user provides. In this case, there are exactly three: `[from_key_or_address]`, `[to_address]`, and `[amount]`.
+ * **Short and Long:** Descriptions for the command. A `Short` description is expected. A `Long` description can be used to provide additional information that is displayed when a user adds the `--help` flag.
+ * **RunE:** Defines a function that can return an error. This is the function that is called when the command is executed. This function encapsulates all of the logic to create a new transaction.
+ * The function typically starts by getting the `clientCtx`, which can be done with `client.GetClientTxContext(cmd)`. The `clientCtx` contains information relevant to transaction handling, including information about the user. In this example, the `clientCtx` is used to retrieve the address of the sender by calling `clientCtx.GetFromAddress()`.
+ * If applicable, the command's arguments are parsed. In this example, the arguments `[to_address]` and `[amount]` are both parsed.
+ * A [message](/sdk/v0.53/build/building-modules/messages-and-queries) is created using the parsed arguments and information from the `clientCtx`. The constructor function of the message type is called directly. In this case, `types.NewMsgSend(fromAddr, toAddr, amount)`. It is good practice to call, if possible, the necessary [message validation methods](/sdk/v0.53/build/building-modules/msg-services#Validation) before broadcasting the message.
+ * Depending on what the user wants, the transaction is either generated offline or signed and broadcasted to the preconfigured node using `tx.GenerateOrBroadcastTxCLI(clientCtx, flags, msg)`.
+* **Adds transaction flags:** All transaction commands must add a set of transaction [flags](#flags). The transaction flags are used to collect additional information from the user (e.g. the amount of fees the user is willing to pay). The transaction flags are added to the constructed command using `AddTxFlagsToCmd(cmd)`.
+* **Returns the command:** Finally, the transaction command is returned.
+
+Each module can implement `NewTxCmd()`, which aggregates all of the transaction commands of the module. Here is an example from the `x/bank` module:
+
+```go expandable
+package cli
+
+import (
+
+ "fmt"
+ "cosmossdk.io/core/address"
+ sdkmath "cosmossdk.io/math"
+ "github.com/spf13/cobra"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/client/tx"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/bank/types"
+)
+
+var FlagSplit = "split"
+
+// NewTxCmd returns a root CLI command handler for all x/bank transaction commands.
+func NewTxCmd(ac address.Codec) *cobra.Command {
+ txCmd := &cobra.Command{
+ Use: types.ModuleName,
+ Short: "Bank transaction subcommands",
+ DisableFlagParsing: true,
+ SuggestionsMinimumDistance: 2,
+ RunE: client.ValidateCmd,
+}
+
+txCmd.AddCommand(
+ NewSendTxCmd(ac),
+ NewMultiSendTxCmd(ac),
+ )
+
+return txCmd
+}
+
+// NewSendTxCmd returns a CLI command handler for creating a MsgSend transaction.
+func NewSendTxCmd(ac address.Codec) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "send [from_key_or_address] [to_address] [amount]",
+ Short: "Send funds from one account to another.",
+ Long: `Send funds from one account to another.
+Note, the '--from' flag is ignored as it is implied from [from_key_or_address].
+When using '--dry-run' a key name cannot be used, only a bech32 address.
+`,
+ Args: cobra.ExactArgs(3),
+ RunE: func(cmd *cobra.Command, args []string)
+
+error {
+ cmd.Flags().Set(flags.FlagFrom, args[0])
+
+clientCtx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+}
+
+toAddr, err := ac.StringToBytes(args[1])
+ if err != nil {
+ return err
+}
+
+coins, err := sdk.ParseCoinsNormalized(args[2])
+ if err != nil {
+ return err
+}
+ if len(coins) == 0 {
+ return fmt.Errorf("invalid coins")
+}
+ msg := types.NewMsgSend(clientCtx.GetFromAddress(), toAddr, coins)
+
+return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg)
+},
+}
+
+flags.AddTxFlagsToCmd(cmd)
+
+return cmd
+}
+
+// NewMultiSendTxCmd returns a CLI command handler for creating a MsgMultiSend transaction.
+// For a better UX this command is limited to send funds from one account to two or more accounts.
+func NewMultiSendTxCmd(ac address.Codec) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "multi-send [from_key_or_address] [to_address_1, to_address_2, ...] [amount]",
+ Short: "Send funds from one account to two or more accounts.",
+ Long: `Send funds from one account to two or more accounts.
+By default, sends the [amount] to each address of the list.
+Using the '--split' flag, the [amount] is split equally between the addresses.
+Note, the '--from' flag is ignored as it is implied from [from_key_or_address].
+When using '--dry-run' a key name cannot be used, only a bech32 address.
+`,
+ Args: cobra.MinimumNArgs(4),
+ RunE: func(cmd *cobra.Command, args []string)
+
+error {
+ cmd.Flags().Set(flags.FlagFrom, args[0])
+
+clientCtx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+}
+
+coins, err := sdk.ParseCoinsNormalized(args[len(args)-1])
+ if err != nil {
+ return err
+}
+ if coins.IsZero() {
+ return fmt.Errorf("must send positive amount")
+}
+
+split, err := cmd.Flags().GetBool(FlagSplit)
+ if err != nil {
+ return err
+}
+ totalAddrs := sdkmath.NewInt(int64(len(args) - 2))
+ // coins to be received by the addresses
+ sendCoins := coins
+ if split {
+ sendCoins = coins.QuoInt(totalAddrs)
+}
+
+var output []types.Output
+ for _, arg := range args[1 : len(args)-1] {
+ toAddr, err := ac.StringToBytes(arg)
+ if err != nil {
+ return err
+}
+
+output = append(output, types.NewOutput(toAddr, sendCoins))
+}
+
+ // amount to be sent from the from address
+ var amount sdk.Coins
+ if split {
+ // user input: 1000stake to send to 3 addresses
+ // actual: 333stake to each address (=> 999stake actually sent)
+
+amount = sendCoins.MulInt(totalAddrs)
+}
+
+else {
+ amount = coins.MulInt(totalAddrs)
+}
+ msg := types.NewMsgMultiSend(types.NewInput(clientCtx.FromAddress, amount), output)
+
+return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg)
+},
+}
+
+cmd.Flags().Bool(FlagSplit, false, "Send the equally split token amount to each address")
+
+flags.AddTxFlagsToCmd(cmd)
+
+return cmd
+}
+```
+
+Each module then can also implement a `GetTxCmd()` method that simply returns `NewTxCmd()`. This allows the root command to easily aggregate all of the transaction commands for each module. Here is an example:
+
+```go expandable
+package bank
+
+import (
+
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ modulev1 "cosmossdk.io/api/cosmos/bank/module/v1"
+ "cosmossdk.io/core/address"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/depinject"
+ "cosmossdk.io/log"
+ abci "github.com/cometbft/cometbft/abci/types"
+ gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+
+ corestore "cosmossdk.io/core/store"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ "github.com/cosmos/cosmos-sdk/x/bank/client/cli"
+ "github.com/cosmos/cosmos-sdk/x/bank/exported"
+ "github.com/cosmos/cosmos-sdk/x/bank/keeper"
+ v1bank "github.com/cosmos/cosmos-sdk/x/bank/migrations/v1"
+ "github.com/cosmos/cosmos-sdk/x/bank/simulation"
+ "github.com/cosmos/cosmos-sdk/x/bank/types"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+)
+
+// ConsensusVersion defines the current x/bank module consensus version.
+const ConsensusVersion = 4
+
+var (
+ _ module.AppModule = AppModule{
+}
+ _ module.AppModuleBasic = AppModuleBasic{
+}
+ _ module.AppModuleSimulation = AppModule{
+}
+)
+
+// AppModuleBasic defines the basic application module used by the bank module.
+type AppModuleBasic struct {
+ cdc codec.Codec
+ ac address.Codec
+}
+
+// Name returns the bank module's name.
+func (AppModuleBasic)
+
+Name()
+
+string {
+ return types.ModuleName
+}
+
+// RegisterLegacyAminoCodec registers the bank module's types on the LegacyAmino codec.
+func (AppModuleBasic)
+
+RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ types.RegisterLegacyAminoCodec(cdc)
+}
+
+// DefaultGenesis returns default genesis state as raw bytes for the bank
+// module.
+func (AppModuleBasic)
+
+DefaultGenesis(cdc codec.JSONCodec)
+
+json.RawMessage {
+ return cdc.MustMarshalJSON(types.DefaultGenesisState())
+}
+
+// ValidateGenesis performs genesis state validation for the bank module.
+func (AppModuleBasic)
+
+ValidateGenesis(cdc codec.JSONCodec, _ client.TxEncodingConfig, bz json.RawMessage)
+
+error {
+ var data types.GenesisState
+ if err := cdc.UnmarshalJSON(bz, &data); err != nil {
+ return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err)
+}
+
+return data.Validate()
+}
+
+// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the bank module.
+func (AppModuleBasic)
+
+RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *gwruntime.ServeMux) {
+ if err := types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)); err != nil {
+ panic(err)
+}
+}
+
+// GetTxCmd returns the root tx command for the bank module.
+func (ab AppModuleBasic)
+
+GetTxCmd() *cobra.Command {
+ return cli.NewTxCmd(ab.ac)
+}
+
+// GetQueryCmd returns no root query command for the bank module.
+func (ab AppModuleBasic)
+
+GetQueryCmd() *cobra.Command {
+ return cli.GetQueryCmd(ab.ac)
+}
+
+// RegisterInterfaces registers interfaces and implementations of the bank module.
+func (AppModuleBasic)
+
+RegisterInterfaces(registry codectypes.InterfaceRegistry) {
+ types.RegisterInterfaces(registry)
+
+ // Register legacy interfaces for migration scripts.
+ v1bank.RegisterInterfaces(registry)
+}
+
+// AppModule implements an application module for the bank module.
+type AppModule struct {
+ AppModuleBasic
+
+ keeper keeper.Keeper
+ accountKeeper types.AccountKeeper
+
+ // legacySubspace is used solely for migration of x/params managed parameters
+ legacySubspace exported.Subspace
+}
+
+var _ appmodule.AppModule = AppModule{
+}
+
+// IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+func (am AppModule)
+
+IsOnePerModuleType() {
+}
+
+// IsAppModule implements the appmodule.AppModule interface.
+func (am AppModule)
+
+IsAppModule() {
+}
+
+// RegisterServices registers module services.
+func (am AppModule)
+
+RegisterServices(cfg module.Configurator) {
+ types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper))
+
+types.RegisterQueryServer(cfg.QueryServer(), am.keeper)
+ m := keeper.NewMigrator(am.keeper.(keeper.BaseKeeper), am.legacySubspace)
+ if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil {
+ panic(fmt.Sprintf("failed to migrate x/bank from version 1 to 2: %v", err))
+}
+ if err := cfg.RegisterMigration(types.ModuleName, 2, m.Migrate2to3); err != nil {
+ panic(fmt.Sprintf("failed to migrate x/bank from version 2 to 3: %v", err))
+}
+ if err := cfg.RegisterMigration(types.ModuleName, 3, m.Migrate3to4); err != nil {
+ panic(fmt.Sprintf("failed to migrate x/bank from version 3 to 4: %v", err))
+}
+}
+
+// NewAppModule creates a new AppModule object
+func NewAppModule(cdc codec.Codec, keeper keeper.Keeper, accountKeeper types.AccountKeeper, ss exported.Subspace)
+
+AppModule {
+ return AppModule{
+ AppModuleBasic: AppModuleBasic{
+ cdc: cdc, ac: accountKeeper.AddressCodec()
+},
+ keeper: keeper,
+ accountKeeper: accountKeeper,
+ legacySubspace: ss,
+}
+}
+
+// Name returns the bank module's name.
+func (AppModule)
+
+Name()
+
+string {
+ return types.ModuleName
+}
+
+// RegisterInvariants registers the bank module invariants.
+func (am AppModule)
+
+RegisterInvariants(ir sdk.InvariantRegistry) {
+ keeper.RegisterInvariants(ir, am.keeper)
+}
+
+// QuerierRoute returns the bank module's querier route name.
+func (AppModule)
+
+QuerierRoute()
+
+string {
+ return types.RouterKey
+}
+
+// InitGenesis performs genesis initialization for the bank module. It returns
+// no validator updates.
+func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) []abci.ValidatorUpdate {
+	start := time.Now()
+
+	var genesisState types.GenesisState
+	cdc.MustUnmarshalJSON(data, &genesisState)
+
+	// NOTE(review): the telemetry label says "crisis" in the upstream source even
+	// though this is the bank module; kept as-is for fidelity with upstream.
+	telemetry.MeasureSince(start, "InitGenesis", "crisis", "unmarshal")
+
+	am.keeper.InitGenesis(ctx, &genesisState)
+
+	return []abci.ValidatorUpdate{}
+}
+
+// ExportGenesis returns the exported genesis state as raw bytes for the bank
+// module.
+func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage {
+	gs := am.keeper.ExportGenesis(ctx)
+	return cdc.MustMarshalJSON(gs)
+}
+
+// ConsensusVersion implements AppModule/ConsensusVersion.
+func (AppModule) ConsensusVersion() uint64 {
+	return ConsensusVersion
+}
+
+// AppModuleSimulation functions
+
+// GenerateGenesisState creates a randomized GenState of the bank module.
+func (AppModule) GenerateGenesisState(simState *module.SimulationState) {
+	simulation.RandomizedGenState(simState)
+}
+
+// ProposalMsgs returns msgs used for governance proposals for simulations.
+func (AppModule) ProposalMsgs(simState module.SimulationState) []simtypes.WeightedProposalMsg {
+	return simulation.ProposalMsgs()
+}
+
+// RegisterStoreDecoder registers a decoder for supply module's types
+func (am AppModule) RegisterStoreDecoder(sdr simtypes.StoreDecoderRegistry) {
+	sdr[types.StoreKey] = simtypes.NewStoreDecoderFuncFromCollectionsSchema(am.keeper.(keeper.BaseKeeper).Schema)
+}
+
+// WeightedOperations returns all the bank module operations with their respective weights.
+func (am AppModule) WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation {
+	return simulation.WeightedOperations(
+		simState.AppParams, simState.Cdc, simState.TxConfig, am.accountKeeper, am.keeper,
+	)
+}
+
+// App Wiring Setup
+
+func init() {
+	appmodule.Register(&modulev1.Module{},
+		appmodule.Provide(ProvideModule),
+	)
+}
+
+// ModuleInputs defines the dependencies injected into ProvideModule.
+type ModuleInputs struct {
+	depinject.In
+
+	Config       *modulev1.Module
+	Cdc          codec.Codec
+	StoreService corestore.KVStoreService
+	Logger       log.Logger
+
+	AccountKeeper types.AccountKeeper
+
+	// LegacySubspace is used solely for migration of x/params managed parameters
+	LegacySubspace exported.Subspace `optional:"true"`
+}
+
+// ModuleOutputs defines what ProvideModule supplies back to the app container.
+type ModuleOutputs struct {
+	depinject.Out
+
+	BankKeeper keeper.BaseKeeper
+	Module     appmodule.AppModule
+}
+
+// ProvideModule wires up the bank keeper and app module for dependency injection.
+func ProvideModule(in ModuleInputs) ModuleOutputs {
+	// Configure blocked module accounts.
+	//
+	// Default behavior for blockedAddresses is to regard any module mentioned in
+	// AccountKeeper's module account permissions as blocked.
+	blockedAddresses := make(map[string]bool)
+	if len(in.Config.BlockedModuleAccountsOverride) > 0 {
+		for _, moduleName := range in.Config.BlockedModuleAccountsOverride {
+			blockedAddresses[authtypes.NewModuleAddress(moduleName).String()] = true
+		}
+	} else {
+		for _, permission := range in.AccountKeeper.GetModulePermissions() {
+			blockedAddresses[permission.GetAddress().String()] = true
+		}
+	}
+
+	// default to governance authority if not provided
+	authority := authtypes.NewModuleAddress(govtypes.ModuleName)
+	if in.Config.Authority != "" {
+		authority = authtypes.NewModuleAddressOrBech32Address(in.Config.Authority)
+	}
+
+	bankKeeper := keeper.NewBaseKeeper(
+		in.Cdc,
+		in.StoreService,
+		in.AccountKeeper,
+		blockedAddresses,
+		authority.String(),
+		in.Logger,
+	)
+	m := NewAppModule(in.Cdc, bankKeeper, in.AccountKeeper, in.LegacySubspace)
+
+	return ModuleOutputs{
+		BankKeeper: bankKeeper,
+		Module:     m,
+	}
+}
+```
+
+### Query Commands
+
+{/*
+
+This section is being rewritten. Refer to [AutoCLI](/sdk/v0.53/learn/advanced/autocli) while this section is being updated.
+
+*/}
+
+## gRPC
+
+[gRPC](https://grpc.io/) is a Remote Procedure Call (RPC) framework. RPC is the preferred way for external clients like wallets and exchanges to interact with a blockchain.
+
+In addition to providing an ABCI query pathway, the Cosmos SDK provides a gRPC proxy server that routes gRPC query requests to ABCI query requests.
+
+In order to do that, modules must implement `RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux)` on `AppModuleBasic` to wire the client gRPC requests to the correct handler inside the module.
+
+Here's an example from the `x/auth` module:
+
+```go expandable
+package auth
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	abci "github.com/cometbft/cometbft/abci/types"
+	gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime"
+	"github.com/spf13/cobra"
+
+	modulev1 "cosmossdk.io/api/cosmos/auth/module/v1"
+	"cosmossdk.io/core/address"
+	"cosmossdk.io/core/appmodule"
+	"cosmossdk.io/core/store"
+	"cosmossdk.io/depinject"
+
+	"github.com/cosmos/cosmos-sdk/client"
+	"github.com/cosmos/cosmos-sdk/codec"
+	codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	"github.com/cosmos/cosmos-sdk/types/module"
+	simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+	"github.com/cosmos/cosmos-sdk/x/auth/client/cli"
+	authcodec "github.com/cosmos/cosmos-sdk/x/auth/codec"
+	"github.com/cosmos/cosmos-sdk/x/auth/exported"
+	"github.com/cosmos/cosmos-sdk/x/auth/keeper"
+	"github.com/cosmos/cosmos-sdk/x/auth/simulation"
+	"github.com/cosmos/cosmos-sdk/x/auth/types"
+)
+
+// ConsensusVersion defines the current x/auth module consensus version.
+const (
+	ConsensusVersion = 5
+	GovModuleName    = "gov"
+)
+
+var (
+	_ module.AppModule           = AppModule{}
+	_ module.AppModuleBasic      = AppModuleBasic{}
+	_ module.AppModuleSimulation = AppModule{}
+)
+
+// AppModuleBasic defines the basic application module used by the auth module.
+type AppModuleBasic struct {
+	ac address.Codec
+}
+
+// Name returns the auth module's name.
+func (AppModuleBasic) Name() string {
+	return types.ModuleName
+}
+
+// RegisterLegacyAminoCodec registers the auth module's types for the given codec.
+func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+	types.RegisterLegacyAminoCodec(cdc)
+}
+
+// DefaultGenesis returns default genesis state as raw bytes for the auth
+// module.
+func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage {
+	return cdc.MustMarshalJSON(types.DefaultGenesisState())
+}
+
+// ValidateGenesis performs genesis state validation for the auth module.
+func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, config client.TxEncodingConfig, bz json.RawMessage) error {
+	var data types.GenesisState
+	if err := cdc.UnmarshalJSON(bz, &data); err != nil {
+		return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err)
+	}
+
+	return types.ValidateGenesis(data)
+}
+
+// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the auth module.
+func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *gwruntime.ServeMux) {
+	if err := types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)); err != nil {
+		panic(err)
+	}
+}
+
+// GetTxCmd returns the root tx command for the auth module.
+func (AppModuleBasic) GetTxCmd() *cobra.Command {
+	return nil
+}
+
+// GetQueryCmd returns the root query command for the auth module.
+func (ab AppModuleBasic) GetQueryCmd() *cobra.Command {
+	return cli.GetQueryCmd(ab.ac)
+}
+
+// RegisterInterfaces registers interfaces and implementations of the auth module.
+func (AppModuleBasic) RegisterInterfaces(registry codectypes.InterfaceRegistry) {
+	types.RegisterInterfaces(registry)
+}
+
+// AppModule implements an application module for the auth module.
+type AppModule struct {
+	AppModuleBasic
+
+	accountKeeper     keeper.AccountKeeper
+	randGenAccountsFn types.RandomGenesisAccountsFn
+
+	// legacySubspace is used solely for migration of x/params managed parameters
+	legacySubspace exported.Subspace
+}
+
+var _ appmodule.AppModule = AppModule{}
+
+// IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+func (am AppModule) IsOnePerModuleType() {}
+
+// IsAppModule implements the appmodule.AppModule interface.
+func (am AppModule) IsAppModule() {}
+
+// NewAppModule creates a new AppModule object
+func NewAppModule(cdc codec.Codec, accountKeeper keeper.AccountKeeper, randGenAccountsFn types.RandomGenesisAccountsFn, ss exported.Subspace) AppModule {
+	return AppModule{
+		AppModuleBasic: AppModuleBasic{
+			ac: accountKeeper.AddressCodec(),
+		},
+		accountKeeper:     accountKeeper,
+		randGenAccountsFn: randGenAccountsFn,
+		legacySubspace:    ss,
+	}
+}
+
+// Name returns the auth module's name.
+func (AppModule) Name() string {
+	return types.ModuleName
+}
+
+// RegisterServices registers a GRPC query service to respond to the
+// module-specific GRPC queries.
+func (am AppModule) RegisterServices(cfg module.Configurator) {
+	types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.accountKeeper))
+	types.RegisterQueryServer(cfg.QueryServer(), keeper.NewQueryServer(am.accountKeeper))
+
+	m := keeper.NewMigrator(am.accountKeeper, cfg.QueryServer(), am.legacySubspace)
+	if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil {
+		panic(fmt.Sprintf("failed to migrate x/%s from version 1 to 2: %v", types.ModuleName, err))
+	}
+	if err := cfg.RegisterMigration(types.ModuleName, 2, m.Migrate2to3); err != nil {
+		panic(fmt.Sprintf("failed to migrate x/%s from version 2 to 3: %v", types.ModuleName, err))
+	}
+	if err := cfg.RegisterMigration(types.ModuleName, 3, m.Migrate3to4); err != nil {
+		panic(fmt.Sprintf("failed to migrate x/%s from version 3 to 4: %v", types.ModuleName, err))
+	}
+	// include the underlying error, consistent with the other migration panics
+	if err := cfg.RegisterMigration(types.ModuleName, 4, m.Migrate4To5); err != nil {
+		panic(fmt.Sprintf("failed to migrate x/%s from version 4 to 5: %v", types.ModuleName, err))
+	}
+}
+
+// InitGenesis performs genesis initialization for the auth module. It returns
+// no validator updates.
+func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) []abci.ValidatorUpdate {
+	var genesisState types.GenesisState
+	cdc.MustUnmarshalJSON(data, &genesisState)
+
+	am.accountKeeper.InitGenesis(ctx, genesisState)
+
+	return []abci.ValidatorUpdate{}
+}
+
+// ExportGenesis returns the exported genesis state as raw bytes for the auth
+// module.
+func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage {
+	gs := am.accountKeeper.ExportGenesis(ctx)
+	return cdc.MustMarshalJSON(gs)
+}
+
+// ConsensusVersion implements AppModule/ConsensusVersion.
+func (AppModule) ConsensusVersion() uint64 {
+	return ConsensusVersion
+}
+
+// AppModuleSimulation functions
+
+// GenerateGenesisState creates a randomized GenState of the auth module
+func (am AppModule) GenerateGenesisState(simState *module.SimulationState) {
+	simulation.RandomizedGenState(simState, am.randGenAccountsFn)
+}
+
+// ProposalMsgs returns msgs used for governance proposals for simulations.
+func (AppModule) ProposalMsgs(simState module.SimulationState) []simtypes.WeightedProposalMsg {
+	return simulation.ProposalMsgs()
+}
+
+// RegisterStoreDecoder registers a decoder for auth module's types
+func (am AppModule) RegisterStoreDecoder(sdr simtypes.StoreDecoderRegistry) {
+	sdr[types.StoreKey] = simtypes.NewStoreDecoderFuncFromCollectionsSchema(am.accountKeeper.Schema)
+}
+
+// WeightedOperations doesn't return any auth module operation.
+func (AppModule) WeightedOperations(_ module.SimulationState) []simtypes.WeightedOperation {
+	return nil
+}
+
+//
+// App Wiring Setup
+//
+
+func init() {
+	appmodule.Register(&modulev1.Module{},
+		appmodule.Provide(ProvideAddressCodec),
+		appmodule.Provide(ProvideModule),
+	)
+}
+
+// ProvideAddressCodec provides an address.Codec to the container for any
+// modules that want to do address string <> bytes conversion.
+func ProvideAddressCodec(config *modulev1.Module) address.Codec {
+	return authcodec.NewBech32Codec(config.Bech32Prefix)
+}
+
+// ModuleInputs defines the dependencies injected into ProvideModule.
+type ModuleInputs struct {
+	depinject.In
+
+	Config       *modulev1.Module
+	StoreService store.KVStoreService
+	Cdc          codec.Codec
+
+	RandomGenesisAccountsFn types.RandomGenesisAccountsFn `optional:"true"`
+	AccountI                func() sdk.AccountI           `optional:"true"`
+
+	// LegacySubspace is used solely for migration of x/params managed parameters
+	LegacySubspace exported.Subspace `optional:"true"`
+}
+
+// ModuleOutputs defines what ProvideModule supplies back to the app container.
+type ModuleOutputs struct {
+	depinject.Out
+
+	AccountKeeper keeper.AccountKeeper
+	Module        appmodule.AppModule
+}
+
+// ProvideModule wires up the auth keeper and app module for dependency injection.
+func ProvideModule(in ModuleInputs) ModuleOutputs {
+	maccPerms := map[string][]string{}
+	for _, permission := range in.Config.ModuleAccountPermissions {
+		maccPerms[permission.Account] = permission.Permissions
+	}
+
+	// default to governance authority if not provided
+	authority := types.NewModuleAddress(GovModuleName)
+	if in.Config.Authority != "" {
+		authority = types.NewModuleAddressOrBech32Address(in.Config.Authority)
+	}
+
+	if in.RandomGenesisAccountsFn == nil {
+		in.RandomGenesisAccountsFn = simulation.RandomGenesisAccounts
+	}
+	if in.AccountI == nil {
+		in.AccountI = types.ProtoBaseAccount
+	}
+
+	k := keeper.NewAccountKeeper(in.Cdc, in.StoreService, in.AccountI, maccPerms, in.Config.Bech32Prefix, authority.String())
+	m := NewAppModule(in.Cdc, k, in.RandomGenesisAccountsFn, in.LegacySubspace)
+
+	return ModuleOutputs{
+		AccountKeeper: k,
+		Module:        m,
+	}
+}
+```
+
+## gRPC-gateway REST
+
+Applications need to support web services that use HTTP requests (e.g. a web wallet like [Keplr](https://keplr.app)). [grpc-gateway](https://github.com/grpc-ecosystem/grpc-gateway) translates REST calls into gRPC calls, which might be useful for clients that do not use gRPC.
+
+Modules that want to expose REST queries should add `google.api.http` annotations to their `rpc` methods, such as in the example below from the `x/auth` module:
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/proto/cosmos/auth/v1beta1/query.proto#L14-L89
+```
+
+gRPC gateway is started in-process along with the application and CometBFT. It can be enabled or disabled by setting gRPC Configuration `enable` in [`app.toml`](/sdk/v0.50/user/run-node/run-node#configuring-the-node-using-apptoml-and-configtoml).
+
+The Cosmos SDK provides a command for generating [Swagger](https://swagger.io/) documentation (`protoc-gen-swagger`). Setting `swagger` in [`app.toml`](/sdk/v0.50/user/run-node/run-node#configuring-the-node-using-apptoml-and-configtoml) defines if swagger documentation should be automatically registered.
diff --git a/sdk/next/build/building-modules/module-manager.mdx b/sdk/next/build/building-modules/module-manager.mdx
new file mode 100644
index 000000000..a0ac9ecdd
--- /dev/null
+++ b/sdk/next/build/building-modules/module-manager.mdx
@@ -0,0 +1,16223 @@
+---
+title: Module Manager
+---
+{/* force preview rebuild: 2025-09-30 */}
+
+
+**Synopsis**
+Cosmos SDK modules need to implement the [`AppModule` interfaces](#application-module-interfaces), in order to be managed by the application's [module manager](/sdk/v0.53/build/building-modules/module-manager). The module manager plays an important role in [`message` and `query` routing](/sdk/v0.53/learn/advanced/baseapp#routing), and allows application developers to set the order of execution of a variety of functions like [`PreBlocker`](/sdk/v0.53/learn/beginner/app-anatomy#preblocker) and [`BeginBlocker` and `EndBlocker`](/sdk/v0.53/learn/beginner/app-anatomy#beginblocker-and-endblocker).
+
+
+
+**Prerequisite Readings**
+
+* [Introduction to Cosmos SDK Modules](/sdk/v0.53/build/building-modules/intro)
+
+
+
+## Application Module Interfaces
+
+Application module interfaces exist to facilitate the composition of modules together to form a functional Cosmos SDK application.
+
+
+
+It is recommended to implement interfaces from the [Core API](/sdk/v0.53/build/architecture/adr-063-core-module-api) `appmodule` package. This makes modules less dependent on the SDK.
+For legacy reasons, modules can still implement interfaces from the SDK `module` package.
+
+
+There are 2 main application module interfaces:
+
+* [`appmodule.AppModule` / `module.AppModule`](#appmodule) for inter-dependent module functionalities (except genesis-related functionalities).
+* (legacy) [`module.AppModuleBasic`](#appmodulebasic) for independent module functionalities. New modules can use `module.CoreAppModuleBasicAdaptor` instead.
+
+The above interfaces mostly embed smaller interfaces (extension interfaces) that define specific functionalities:
+
+* (legacy) `module.HasName`: Allows the module to provide its own name for legacy purposes.
+* (legacy) [`module.HasGenesisBasics`](#modulehasgenesisbasics): The legacy interface for stateless genesis methods.
+* [`module.HasGenesis`](#modulehasgenesis) for inter-dependent genesis-related module functionalities.
+* [`module.HasABCIGenesis`](#modulehasabcigenesis) for inter-dependent genesis-related module functionalities.
+* [`appmodule.HasGenesis` / `module.HasGenesis`](#appmodulehasgenesis): The extension interface for stateful genesis methods.
+* [`appmodule.HasPreBlocker`](#haspreblocker): The extension interface that contains information about the `AppModule` and `PreBlock`.
+* [`appmodule.HasBeginBlocker`](#hasbeginblocker): The extension interface that contains information about the `AppModule` and `BeginBlock`.
+* [`appmodule.HasEndBlocker`](#hasendblocker): The extension interface that contains information about the `AppModule` and `EndBlock`.
+* [`appmodule.HasPrecommit`](#hasprecommit): The extension interface that contains information about the `AppModule` and `Precommit`.
+* [`appmodule.HasPrepareCheckState`](#haspreparecheckstate): The extension interface that contains information about the `AppModule` and `PrepareCheckState`.
+* [`appmodule.HasService` / `module.HasServices`](#hasservices): The extension interface for modules to register services.
+* [`module.HasABCIEndBlock`](#hasabciendblock): The extension interface that contains information about the `AppModule`, `EndBlock` and returns an updated validator set.
+* (legacy) [`module.HasInvariants`](#hasinvariants): The extension interface for registering invariants.
+* (legacy) [`module.HasConsensusVersion`](#hasconsensusversion): The extension interface for declaring a module consensus version.
+
+The `AppModuleBasic` interface exists to define independent methods of the module, i.e. those that do not depend on other modules in the application. This allows for the construction of the basic application structure early in the application definition, generally in the `init()` function of the [main application file](/sdk/v0.53/learn/beginner/app-anatomy#core-application-file).
+
+The `AppModule` interface exists to define inter-dependent module methods. Many modules need to interact with other modules, typically through [`keeper`s](/sdk/v0.53/build/building-modules/keeper), which means there is a need for an interface where modules list their `keeper`s and other methods that require a reference to another module's object. `AppModule` interface extension, such as `HasBeginBlocker` and `HasEndBlocker`, also enables the module manager to set the order of execution between module's methods like `BeginBlock` and `EndBlock`, which is important in cases where the order of execution between modules matters in the context of the application.
+
+The usage of extension interfaces allows modules to define only the functionalities they need. For example, a module that does not need an `EndBlock` does not need to define the `HasEndBlocker` interface and thus the `EndBlock` method. `AppModule` and `AppModuleGenesis` are deliberately small interfaces that can take advantage of the `Module` patterns without having to define many placeholder functions.
+
+### `AppModuleBasic`
+
+
+Use `module.CoreAppModuleBasicAdaptor` instead for creating an `AppModuleBasic` from an `appmodule.AppModule`.
+
+
+The `AppModuleBasic` interface defines the independent methods modules need to implement.
+
+```go expandable
+/*
+Package module contains application module patterns and associated "manager" functionality.
+The module pattern has been broken down by:
+ - independent module functionality (AppModuleBasic)
+ - inter-dependent module simulation functionality (AppModuleSimulation)
+ - inter-dependent module full functionality (AppModule)
+
+inter-dependent module functionality is module functionality which somehow
+depends on other modules, typically through the module keeper. Many of the
+module keepers are dependent on each other, thus in order to access the full
+set of module functionality we need to define all the keepers/params-store/keys
+etc. This full set of advanced functionality is defined by the AppModule interface.
+
+Independent module functions are separated to allow for the construction of the
+basic application structures required early on in the application definition
+and used to enable the definition of full module functionality later in the
+process. This separation is necessary, however we still want to allow for a
+high level pattern for modules to follow - for instance, such that we don't
+have to manually register all of the codecs for all the modules. This basic
+procedure as well as other basic patterns are handled through the use of
+BasicManager.
+
+Lastly the interface for genesis functionality (HasGenesis & HasABCIGenesis)
+
+has been
+separated out from full module functionality (AppModule)
+
+so that modules which
+are only used for genesis can take advantage of the Module patterns without
+needlessly defining many placeholder functions
+*/
+package module
+
+import (
+
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "maps"
+ "slices"
+ "sort"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/core/genesis"
+ errorsmod "cosmossdk.io/errors"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// AppModuleBasic is the standard form for basic non-dependant elements of an application module.
+type AppModuleBasic interface {
+ HasName
+ RegisterLegacyAminoCodec(*codec.LegacyAmino)
+
+RegisterInterfaces(types.InterfaceRegistry)
+
+RegisterGRPCGatewayRoutes(client.Context, *runtime.ServeMux)
+}
+
+// HasName allows the module to provide its own name for legacy purposes.
+// Newer apps should specify the name for their modules using a map
+// using NewManagerFromMap.
+type HasName interface {
+ Name()
+
+string
+}
+
+// HasGenesisBasics is the legacy interface for stateless genesis methods.
+type HasGenesisBasics interface {
+ DefaultGenesis(codec.JSONCodec)
+
+json.RawMessage
+ ValidateGenesis(codec.JSONCodec, client.TxEncodingConfig, json.RawMessage)
+
+error
+}
+
+// BasicManager is a collection of AppModuleBasic
+type BasicManager map[string]AppModuleBasic
+
+// NewBasicManager creates a new BasicManager object
+func NewBasicManager(modules ...AppModuleBasic)
+
+BasicManager {
+ moduleMap := make(map[string]AppModuleBasic)
+ for _, module := range modules {
+ moduleMap[module.Name()] = module
+}
+
+return moduleMap
+}
+
+// NewBasicManagerFromManager creates a new BasicManager from a Manager
+// The BasicManager will contain all AppModuleBasic from the AppModule Manager
+// Module's AppModuleBasic can be overridden by passing a custom AppModuleBasic map
+func NewBasicManagerFromManager(manager *Manager, customModuleBasics map[string]AppModuleBasic)
+
+BasicManager {
+ moduleMap := make(map[string]AppModuleBasic)
+ for name, module := range manager.Modules {
+ if customBasicMod, ok := customModuleBasics[name]; ok {
+ moduleMap[name] = customBasicMod
+ continue
+}
+ if appModule, ok := module.(appmodule.AppModule); ok {
+ moduleMap[name] = CoreAppModuleBasicAdaptor(name, appModule)
+
+continue
+}
+ if basicMod, ok := module.(AppModuleBasic); ok {
+ moduleMap[name] = basicMod
+}
+
+}
+
+return moduleMap
+}
+
+// RegisterLegacyAminoCodec registers all module codecs
+func (bm BasicManager)
+
+RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ for _, b := range bm {
+ b.RegisterLegacyAminoCodec(cdc)
+}
+}
+
+// RegisterInterfaces registers all module interface types
+func (bm BasicManager)
+
+RegisterInterfaces(registry types.InterfaceRegistry) {
+ for _, m := range bm {
+ m.RegisterInterfaces(registry)
+}
+}
+
+// DefaultGenesis provides default genesis information for all modules
+func (bm BasicManager)
+
+DefaultGenesis(cdc codec.JSONCodec)
+
+map[string]json.RawMessage {
+ genesisData := make(map[string]json.RawMessage)
+ for _, b := range bm {
+ if mod, ok := b.(HasGenesisBasics); ok {
+ genesisData[b.Name()] = mod.DefaultGenesis(cdc)
+}
+
+}
+
+return genesisData
+}
+
+// ValidateGenesis performs genesis state validation for all modules
+func (bm BasicManager)
+
+ValidateGenesis(cdc codec.JSONCodec, txEncCfg client.TxEncodingConfig, genesisData map[string]json.RawMessage)
+
+error {
+ for _, b := range bm {
+ // first check if the module is an adapted Core API Module
+ if mod, ok := b.(HasGenesisBasics); ok {
+ if err := mod.ValidateGenesis(cdc, txEncCfg, genesisData[b.Name()]); err != nil {
+ return err
+}
+
+}
+
+}
+
+return nil
+}
+
+// RegisterGRPCGatewayRoutes registers all module rest routes
+func (bm BasicManager)
+
+RegisterGRPCGatewayRoutes(clientCtx client.Context, rtr *runtime.ServeMux) {
+ for _, b := range bm {
+ b.RegisterGRPCGatewayRoutes(clientCtx, rtr)
+}
+}
+
+// AddTxCommands adds all tx commands to the rootTxCmd.
+func (bm BasicManager)
+
+AddTxCommands(rootTxCmd *cobra.Command) {
+ for _, b := range bm {
+ if mod, ok := b.(interface {
+ GetTxCmd() *cobra.Command
+}); ok {
+ if cmd := mod.GetTxCmd(); cmd != nil {
+ rootTxCmd.AddCommand(cmd)
+}
+
+}
+
+}
+}
+
+// AddQueryCommands adds all query commands to the rootQueryCmd.
+func (bm BasicManager)
+
+AddQueryCommands(rootQueryCmd *cobra.Command) {
+ for _, b := range bm {
+ if mod, ok := b.(interface {
+ GetQueryCmd() *cobra.Command
+}); ok {
+ if cmd := mod.GetQueryCmd(); cmd != nil {
+ rootQueryCmd.AddCommand(cmd)
+}
+
+}
+
+}
+}
+
+// HasGenesis is the extension interface for stateful genesis methods.
+type HasGenesis interface {
+ HasGenesisBasics
+ InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage)
+
+ExportGenesis(sdk.Context, codec.JSONCodec)
+
+json.RawMessage
+}
+
+// HasABCIGenesis is the extension interface for stateful genesis methods which returns validator updates.
+type HasABCIGenesis interface {
+ HasGenesisBasics
+ InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage) []abci.ValidatorUpdate
+ ExportGenesis(sdk.Context, codec.JSONCodec)
+
+json.RawMessage
+}
+
+// AppModule is the form for an application module. Most of
+// its functionality has been moved to extension interfaces.
+// Deprecated: use appmodule.AppModule with a combination of extension interfaes interfaces instead.
+type AppModule interface {
+ appmodule.AppModule
+
+ AppModuleBasic
+}
+
+// HasInvariants is the interface for registering invariants.
+//
+// Deprecated: this will be removed in the next Cosmos SDK release.
+type HasInvariants interface {
+ // RegisterInvariants registers module invariants.
+ RegisterInvariants(sdk.InvariantRegistry)
+}
+
+// HasServices is the interface for modules to register services.
+type HasServices interface {
+ // RegisterServices allows a module to register services.
+ RegisterServices(Configurator)
+}
+
+// HasConsensusVersion is the interface for declaring a module consensus version.
+type HasConsensusVersion interface {
+ // ConsensusVersion is a sequence number for state-breaking change of the
+ // module. It should be incremented on each consensus-breaking change
+ // introduced by the module. To avoid wrong/empty versions, the initial version
+ // should be set to 1.
+ ConsensusVersion()
+
+uint64
+}
+
+// HasABCIEndblock is a released typo of HasABCIEndBlock.
+// Deprecated: use HasABCIEndBlock instead.
+type HasABCIEndblock HasABCIEndBlock
+
+// HasABCIEndBlock is the interface for modules that need to run code at the end of the block.
+type HasABCIEndBlock interface {
+ AppModule
+ EndBlock(context.Context) ([]abci.ValidatorUpdate, error)
+}
+
+var (
+ _ appmodule.AppModule = (*GenesisOnlyAppModule)(nil)
+ _ AppModuleBasic = (*GenesisOnlyAppModule)(nil)
+)
+
+// AppModuleGenesis is the standard form for an application module genesis functions
+type AppModuleGenesis interface {
+ AppModuleBasic
+ HasABCIGenesis
+}
+
+// GenesisOnlyAppModule is an AppModule that only has import/export functionality
+type GenesisOnlyAppModule struct {
+ AppModuleGenesis
+}
+
+// NewGenesisOnlyAppModule creates a new GenesisOnlyAppModule object
+func NewGenesisOnlyAppModule(amg AppModuleGenesis)
+
+GenesisOnlyAppModule {
+ return GenesisOnlyAppModule{
+ AppModuleGenesis: amg,
+}
+}
+
+// IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+func (GenesisOnlyAppModule)
+
+IsOnePerModuleType() {
+}
+
+// IsAppModule implements the appmodule.AppModule interface.
+func (GenesisOnlyAppModule)
+
+IsAppModule() {
+}
+
+// RegisterInvariants is a placeholder function register no invariants
+func (GenesisOnlyAppModule)
+
+RegisterInvariants(_ sdk.InvariantRegistry) {
+}
+
+// ConsensusVersion implements AppModule/ConsensusVersion.
+func (gam GenesisOnlyAppModule)
+
+ConsensusVersion()
+
+uint64 {
+ return 1
+}
+
+// Manager defines a module manager that provides the high level utility for managing and executing
+// operations for a group of modules
+type Manager struct {
+ Modules map[string]any // interface{
+}
+
+is used now to support the legacy AppModule as well as new core appmodule.AppModule.
+ OrderInitGenesis []string
+ OrderExportGenesis []string
+ OrderPreBlockers []string
+ OrderBeginBlockers []string
+ OrderEndBlockers []string
+ OrderPrepareCheckStaters []string
+ OrderPrecommiters []string
+ OrderMigrations []string
+}
+
+// NewManager creates a new Manager object.
+func NewManager(modules ...AppModule) *Manager {
+ moduleMap := make(map[string]any)
+ modulesStr := make([]string, 0, len(modules))
+ preBlockModulesStr := make([]string, 0)
+ for _, module := range modules {
+ if _, ok := module.(appmodule.AppModule); !ok {
+ panic(fmt.Sprintf("module %s does not implement appmodule.AppModule", module.Name()))
+}
+
+moduleMap[module.Name()] = module
+ modulesStr = append(modulesStr, module.Name())
+ if _, ok := module.(appmodule.HasPreBlocker); ok {
+ preBlockModulesStr = append(preBlockModulesStr, module.Name())
+}
+
+}
+
+return &Manager{
+ Modules: moduleMap,
+ OrderInitGenesis: modulesStr,
+ OrderExportGenesis: modulesStr,
+ OrderPreBlockers: preBlockModulesStr,
+ OrderBeginBlockers: modulesStr,
+ OrderPrepareCheckStaters: modulesStr,
+ OrderPrecommiters: modulesStr,
+ OrderEndBlockers: modulesStr,
+}
+}
+
+// NewManagerFromMap creates a new Manager object from a map of module names to module implementations.
+// This method should be used for apps and modules which have migrated to the cosmossdk.io/core.appmodule.AppModule API.
+func NewManagerFromMap(moduleMap map[string]appmodule.AppModule) *Manager {
+ simpleModuleMap := make(map[string]any)
+ modulesStr := make([]string, 0, len(simpleModuleMap))
+ preBlockModulesStr := make([]string, 0)
+ for name, module := range moduleMap {
+ simpleModuleMap[name] = module
+ modulesStr = append(modulesStr, name)
+ if _, ok := module.(appmodule.HasPreBlocker); ok {
+ preBlockModulesStr = append(preBlockModulesStr, name)
+}
+
+}
+
+ // Sort the modules by name. Given that we are using a map above we can't guarantee the order.
+ sort.Strings(modulesStr)
+
+return &Manager{
+ Modules: simpleModuleMap,
+ OrderInitGenesis: modulesStr,
+ OrderExportGenesis: modulesStr,
+ OrderPreBlockers: preBlockModulesStr,
+ OrderBeginBlockers: modulesStr,
+ OrderEndBlockers: modulesStr,
+ OrderPrecommiters: modulesStr,
+ OrderPrepareCheckStaters: modulesStr,
+}
+}
+
+// SetOrderInitGenesis sets the order of init genesis calls
+func (m *Manager)
+
+SetOrderInitGenesis(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderInitGenesis", moduleNames, func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+ return !hasGenesis
+}
+ if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+ return !hasABCIGenesis
+}
+
+ _, hasGenesis := module.(HasGenesis)
+
+return !hasGenesis
+})
+
+m.OrderInitGenesis = moduleNames
+}
+
+// SetOrderExportGenesis sets the order of export genesis calls
+func (m *Manager)
+
+SetOrderExportGenesis(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderExportGenesis", moduleNames, func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+ return !hasGenesis
+}
+ if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+ return !hasABCIGenesis
+}
+
+ _, hasGenesis := module.(HasGenesis)
+
+return !hasGenesis
+})
+
+m.OrderExportGenesis = moduleNames
+}
+
+// SetOrderPreBlockers sets the order of set pre-blocker calls
+func (m *Manager)
+
+SetOrderPreBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPreBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasBlock := module.(appmodule.HasPreBlocker)
+
+return !hasBlock
+})
+
+m.OrderPreBlockers = moduleNames
+}
+
+// SetOrderBeginBlockers sets the order of set begin-blocker calls.
+func (m *Manager) SetOrderBeginBlockers(moduleNames ...string) {
+	m.assertNoForgottenModules("SetOrderBeginBlockers", moduleNames,
+		func(moduleName string) bool {
+			module := m.Modules[moduleName]
+			_, hasBeginBlock := module.(appmodule.HasBeginBlocker)
+			return !hasBeginBlock
+		})
+	m.OrderBeginBlockers = moduleNames
+}
+
+// SetOrderEndBlockers sets the order of set end-blocker calls.
+func (m *Manager) SetOrderEndBlockers(moduleNames ...string) {
+	m.assertNoForgottenModules("SetOrderEndBlockers", moduleNames,
+		func(moduleName string) bool {
+			module := m.Modules[moduleName]
+			// Either end-blocker interface requires the module to be listed.
+			if _, hasEndBlock := module.(appmodule.HasEndBlocker); hasEndBlock {
+				return !hasEndBlock
+			}
+			_, hasABCIEndBlock := module.(HasABCIEndBlock)
+			return !hasABCIEndBlock
+		})
+	m.OrderEndBlockers = moduleNames
+}
+
+// SetOrderPrepareCheckStaters sets the order of set prepare-check-stater calls.
+func (m *Manager) SetOrderPrepareCheckStaters(moduleNames ...string) {
+	m.assertNoForgottenModules("SetOrderPrepareCheckStaters", moduleNames,
+		func(moduleName string) bool {
+			module := m.Modules[moduleName]
+			_, hasPrepareCheckState := module.(appmodule.HasPrepareCheckState)
+			return !hasPrepareCheckState
+		})
+	m.OrderPrepareCheckStaters = moduleNames
+}
+
+// SetOrderPrecommiters sets the order of set precommiter calls.
+func (m *Manager) SetOrderPrecommiters(moduleNames ...string) {
+	m.assertNoForgottenModules("SetOrderPrecommiters", moduleNames,
+		func(moduleName string) bool {
+			module := m.Modules[moduleName]
+			_, hasPrecommit := module.(appmodule.HasPrecommit)
+			return !hasPrecommit
+		})
+	m.OrderPrecommiters = moduleNames
+}
+
+// SetOrderMigrations sets the order of migrations to be run. If not set
+// then migrations will be run with an order defined in `DefaultMigrationsOrder`.
+func (m *Manager) SetOrderMigrations(moduleNames ...string) {
+	m.assertNoForgottenModules("SetOrderMigrations", moduleNames, nil)
+	m.OrderMigrations = moduleNames
+}
+
+// RegisterInvariants registers all module invariants.
+//
+// Deprecated: this function is a no-op and will be removed in the next release of the Cosmos SDK.
+func (m *Manager) RegisterInvariants(_ sdk.InvariantRegistry) {}
+
+// RegisterServices registers all module services.
+func (m *Manager) RegisterServices(cfg Configurator) error {
+	for _, module := range m.Modules {
+		// Legacy modules register without returning an error.
+		if module, ok := module.(HasServices); ok {
+			module.RegisterServices(cfg)
+		}
+		// Core API modules surface registration errors directly.
+		if module, ok := module.(appmodule.HasServices); ok {
+			if err := module.RegisterServices(cfg); err != nil {
+				return err
+			}
+		}
+		// Legacy registration reports failures through the configurator.
+		if cfg.Error() != nil {
+			return cfg.Error()
+		}
+	}
+	return nil
+}
+
+// InitGenesis performs init genesis functionality for modules. Exactly one
+// module must return a non-empty validator set update to correctly initialize
+// the chain.
+func (m *Manager) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, genesisData map[string]json.RawMessage) (*abci.ResponseInitChain, error) {
+	var validatorUpdates []abci.ValidatorUpdate
+	ctx.Logger().Info("initializing blockchain state from genesis.json")
+	for _, moduleName := range m.OrderInitGenesis {
+		if genesisData[moduleName] == nil {
+			continue
+		}
+		mod := m.Modules[moduleName]
+		// we might get an adapted module, a native core API module or a legacy module
+		if module, ok := mod.(appmodule.HasGenesis); ok {
+			ctx.Logger().Debug("running initialization for module", "module", moduleName)
+			// core API genesis
+			source, err := genesis.SourceFromRawJSON(genesisData[moduleName])
+			if err != nil {
+				return &abci.ResponseInitChain{}, err
+			}
+			err = module.InitGenesis(ctx, source)
+			if err != nil {
+				return &abci.ResponseInitChain{}, err
+			}
+		} else if module, ok := mod.(HasGenesis); ok {
+			ctx.Logger().Debug("running initialization for module", "module", moduleName)
+			module.InitGenesis(ctx, cdc, genesisData[moduleName])
+		} else if module, ok := mod.(HasABCIGenesis); ok {
+			ctx.Logger().Debug("running initialization for module", "module", moduleName)
+			moduleValUpdates := module.InitGenesis(ctx, cdc, genesisData[moduleName])
+			// use these validator updates if provided, the module manager assumes
+			// only one module will update the validator set
+			if len(moduleValUpdates) > 0 {
+				if len(validatorUpdates) > 0 {
+					return &abci.ResponseInitChain{}, errors.New("validator InitGenesis updates already set by a previous module")
+				}
+				validatorUpdates = moduleValUpdates
+			}
+		}
+	}
+
+	// a chain must initialize with a non-empty validator set
+	if len(validatorUpdates) == 0 {
+		return &abci.ResponseInitChain{}, fmt.Errorf("validator set is empty after InitGenesis, please ensure at least one validator is initialized with a delegation greater than or equal to the DefaultPowerReduction (%d)", sdk.DefaultPowerReduction)
+	}
+
+	return &abci.ResponseInitChain{
+		Validators: validatorUpdates,
+	}, nil
+}
+
+// ExportGenesis performs export genesis functionality for all modules.
+func (m *Manager) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) (map[string]json.RawMessage, error) {
+	// An empty module list exports every module in OrderExportGenesis.
+	return m.ExportGenesisForModules(ctx, cdc, []string{})
+}
+
+// ExportGenesisForModules performs export genesis functionality for the given modules.
+func (m *Manager) ExportGenesisForModules(ctx sdk.Context, cdc codec.JSONCodec, modulesToExport []string) (map[string]json.RawMessage, error) {
+	if len(modulesToExport) == 0 {
+		modulesToExport = m.OrderExportGenesis
+	}
+	// verify modules exists in app, so that we don't panic in the middle of an export
+	if err := m.checkModulesExists(modulesToExport); err != nil {
+		return nil, err
+	}
+
+	// Each module exports concurrently; results are collected over channels.
+	type genesisResult struct {
+		bz  json.RawMessage
+		err error
+	}
+	channels := make(map[string]chan genesisResult)
+	for _, moduleName := range modulesToExport {
+		mod := m.Modules[moduleName]
+		if module, ok := mod.(appmodule.HasGenesis); ok {
+			// core API genesis
+			channels[moduleName] = make(chan genesisResult)
+			go func(module appmodule.HasGenesis, ch chan genesisResult) {
+				ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+				target := genesis.RawJSONTarget{}
+				err := module.ExportGenesis(ctx, target.Target())
+				if err != nil {
+					ch <- genesisResult{nil, err}
+					return
+				}
+				rawJSON, err := target.JSON()
+				if err != nil {
+					ch <- genesisResult{nil, err}
+					return
+				}
+				ch <- genesisResult{rawJSON, nil}
+			}(module, channels[moduleName])
+		} else if module, ok := mod.(HasGenesis); ok {
+			channels[moduleName] = make(chan genesisResult)
+			go func(module HasGenesis, ch chan genesisResult) {
+				ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+				ch <- genesisResult{module.ExportGenesis(ctx, cdc), nil}
+			}(module, channels[moduleName])
+		} else if module, ok := mod.(HasABCIGenesis); ok {
+			channels[moduleName] = make(chan genesisResult)
+			go func(module HasABCIGenesis, ch chan genesisResult) {
+				ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+				ch <- genesisResult{module.ExportGenesis(ctx, cdc), nil}
+			}(module, channels[moduleName])
+		}
+	}
+
+	genesisData := make(map[string]json.RawMessage)
+	for moduleName := range channels {
+		res := <-channels[moduleName]
+		if res.err != nil {
+			return nil, fmt.Errorf("genesis export error in %s: %w", moduleName, res.err)
+		}
+		genesisData[moduleName] = res.bz
+	}
+	return genesisData, nil
+}
+
+// checkModulesExists verifies that all modules in the list exist in the app.
+func (m *Manager) checkModulesExists(moduleName []string) error {
+	for _, name := range moduleName {
+		if _, ok := m.Modules[name]; !ok {
+			return fmt.Errorf("module %s does not exist", name)
+		}
+	}
+	return nil
+}
+
+// assertNoForgottenModules checks that we didn't forget any modules in the SetOrder* functions.
+// `pass` is a closure which allows one to omit modules from `moduleNames`.
+// If you provide non-nil `pass` and it returns true, the module would not be subject of the assertion.
+func (m *Manager) assertNoForgottenModules(setOrderFnName string, moduleNames []string, pass func(moduleName string) bool) {
+	ms := make(map[string]bool)
+	for _, m := range moduleNames {
+		ms[m] = true
+	}
+	var missing []string
+	for m := range m.Modules {
+		if pass != nil && pass(m) {
+			continue
+		}
+		if !ms[m] {
+			missing = append(missing, m)
+		}
+	}
+	if len(missing) != 0 {
+		// Sorted for a deterministic panic message.
+		sort.Strings(missing)
+		panic(fmt.Sprintf(
+			"all modules must be defined when setting %s, missing: %v", setOrderFnName, missing))
+	}
+}
+
+// MigrationHandler is the migration function that each module registers.
+type MigrationHandler func(sdk.Context) error
+
+// VersionMap is a map of moduleName -> version.
+type VersionMap map[string]uint64
+
+// RunMigrations performs in-place store migrations for all modules. This
+// function MUST be called inside an x/upgrade UpgradeHandler.
+//
+// Recall that in an upgrade handler, the `fromVM` VersionMap is retrieved from
+// x/upgrade's store, and the function needs to return the target VersionMap
+// that will in turn be persisted to the x/upgrade's store. In general,
+// returning RunMigrations should be enough:
+//
+// Example:
+//
+//	cfg := module.NewConfigurator(...)
+//	app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+//	    return app.mm.RunMigrations(ctx, cfg, fromVM)
+//	})
+//
+// Internally, RunMigrations will perform the following steps:
+//   - create an `updatedVM` VersionMap of modules with their latest ConsensusVersion
+//   - make a diff of `fromVM` and `updatedVM`, and for each module:
+//   - if the module's `fromVM` version is less than its `updatedVM` version,
+//     then run in-place store migrations for that module between those versions.
+//   - if the module does not exist in the `fromVM` (which means that it's a new module,
+//     because it was not in the previous x/upgrade's store), then run
+//     `InitGenesis` on that module.
+//   - return the `updatedVM` to be persisted in the x/upgrade's store.
+//
+// Migrations are run in an order defined by `Manager.OrderMigrations` or (if not set) defined by
+// the `DefaultMigrationsOrder` function.
+//
+// As an app developer, if you wish to skip running InitGenesis for your new
+// module "foo", you need to manually pass a `fromVM` argument to this function
+// with foo's module version set to its latest ConsensusVersion. That way, the diff
+// between the function's `fromVM` and `updatedVM` will be empty, hence not
+// running anything for foo.
+//
+// Example:
+//
+//	cfg := module.NewConfigurator(...)
+//	app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+//	    // Assume "foo" is a new module.
+//	    // `fromVM` is fetched from existing x/upgrade store. Since foo didn't exist
+//	    // before this upgrade, `v, exists := fromVM["foo"]; exists == false`, and RunMigration will by default
+//	    // run InitGenesis on foo.
+//	    // To skip running foo's InitGenesis, you need to set `fromVM`'s foo to its latest
+//	    // consensus version:
+//	    fromVM["foo"] = foo.AppModule{}.ConsensusVersion()
+//
+//	    return app.mm.RunMigrations(ctx, cfg, fromVM)
+//	})
+//
+// Please also refer to /sdk/v0.53/build/migrations/upgrading for more information.
+func (m Manager) RunMigrations(ctx context.Context, cfg Configurator, fromVM VersionMap) (VersionMap, error) {
+	c, ok := cfg.(*configurator)
+	if !ok {
+		return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidType, "expected %T, got %T", &configurator{}, cfg)
+	}
+	modules := m.OrderMigrations
+	if modules == nil {
+		modules = DefaultMigrationsOrder(m.ModuleNames())
+	}
+	sdkCtx := sdk.UnwrapSDKContext(ctx)
+	updatedVM := VersionMap{}
+	for _, moduleName := range modules {
+		module := m.Modules[moduleName]
+		fromVersion, exists := fromVM[moduleName]
+		toVersion := uint64(0)
+		if module, ok := module.(HasConsensusVersion); ok {
+			toVersion = module.ConsensusVersion()
+		}
+
+		// We run migration if the module is specified in `fromVM`.
+		// Otherwise we run InitGenesis.
+		//
+		// The module won't exist in the fromVM in two cases:
+		// 1. A new module is added. In this case we run InitGenesis with an
+		// empty genesis state.
+		// 2. An existing chain is upgrading from version < 0.43 to v0.43+ for the first time.
+		// In this case, all modules have yet to be added to x/upgrade's VersionMap store.
+		if exists {
+			err := c.runModuleMigrations(sdkCtx, moduleName, fromVersion, toVersion)
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			sdkCtx.Logger().Info(fmt.Sprintf("adding a new module: %s", moduleName))
+			if module, ok := m.Modules[moduleName].(HasGenesis); ok {
+				module.InitGenesis(sdkCtx, c.cdc, module.DefaultGenesis(c.cdc))
+			}
+			if module, ok := m.Modules[moduleName].(HasABCIGenesis); ok {
+				moduleValUpdates := module.InitGenesis(sdkCtx, c.cdc, module.DefaultGenesis(c.cdc))
+				// The module manager assumes only one module will update the
+				// validator set, and it can't be a new module.
+				if len(moduleValUpdates) > 0 {
+					return nil, errorsmod.Wrapf(sdkerrors.ErrLogic, "validator InitGenesis update is already set by another module")
+				}
+			}
+		}
+		updatedVM[moduleName] = toVersion
+	}
+	return updatedVM, nil
+}
+
+// PreBlock performs begin block functionality for the upgrade module.
+// It takes the current context as a parameter and returns a response
+// indicating whether consensus parameters were changed by any pre-blocker.
+func (m *Manager) PreBlock(ctx sdk.Context) (*sdk.ResponsePreBlock, error) {
+	paramsChanged := false
+	for _, moduleName := range m.OrderPreBlockers {
+		if module, ok := m.Modules[moduleName].(appmodule.HasPreBlocker); ok {
+			rsp, err := module.PreBlock(ctx)
+			if err != nil {
+				return nil, err
+			}
+			if rsp.IsConsensusParamsChanged() {
+				paramsChanged = true
+			}
+		}
+	}
+	return &sdk.ResponsePreBlock{
+		ConsensusParamsChanged: paramsChanged,
+	}, nil
+}
+
+// BeginBlock performs begin block functionality for all modules. It creates a
+// child context with an event manager to aggregate events emitted from all
+// modules.
+func (m *Manager) BeginBlock(ctx sdk.Context) (sdk.BeginBlock, error) {
+	ctx = ctx.WithEventManager(sdk.NewEventManager())
+	for _, moduleName := range m.OrderBeginBlockers {
+		if module, ok := m.Modules[moduleName].(appmodule.HasBeginBlocker); ok {
+			if err := module.BeginBlock(ctx); err != nil {
+				return sdk.BeginBlock{}, err
+			}
+		}
+	}
+	return sdk.BeginBlock{
+		Events: ctx.EventManager().ABCIEvents(),
+	}, nil
+}
+
+// EndBlock performs end block functionality for all modules. It creates a
+// child context with an event manager to aggregate events emitted from all
+// modules.
+func (m *Manager) EndBlock(ctx sdk.Context) (sdk.EndBlock, error) {
+	ctx = ctx.WithEventManager(sdk.NewEventManager())
+	validatorUpdates := []abci.ValidatorUpdate{}
+	for _, moduleName := range m.OrderEndBlockers {
+		if module, ok := m.Modules[moduleName].(appmodule.HasEndBlocker); ok {
+			err := module.EndBlock(ctx)
+			if err != nil {
+				return sdk.EndBlock{}, err
+			}
+		} else if module, ok := m.Modules[moduleName].(HasABCIEndBlock); ok {
+			moduleValUpdates, err := module.EndBlock(ctx)
+			if err != nil {
+				return sdk.EndBlock{}, err
+			}
+			// use these validator updates if provided, the module manager assumes
+			// only one module will update the validator set
+			if len(moduleValUpdates) > 0 {
+				if len(validatorUpdates) > 0 {
+					return sdk.EndBlock{}, errors.New("validator EndBlock updates already set by a previous module")
+				}
+				for _, updates := range moduleValUpdates {
+					validatorUpdates = append(validatorUpdates, abci.ValidatorUpdate{
+						PubKey: updates.PubKey,
+						Power:  updates.Power,
+					})
+				}
+			}
+		} else {
+			continue
+		}
+	}
+	return sdk.EndBlock{
+		ValidatorUpdates: validatorUpdates,
+		Events:           ctx.EventManager().ABCIEvents(),
+	}, nil
+}
+
+// Precommit performs precommit functionality for all modules.
+func (m *Manager) Precommit(ctx sdk.Context) error {
+	for _, moduleName := range m.OrderPrecommiters {
+		module, ok := m.Modules[moduleName].(appmodule.HasPrecommit)
+		if !ok {
+			continue
+		}
+		if err := module.Precommit(ctx); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// PrepareCheckState performs functionality for preparing the check state for all modules.
+func (m *Manager) PrepareCheckState(ctx sdk.Context) error {
+	for _, moduleName := range m.OrderPrepareCheckStaters {
+		module, ok := m.Modules[moduleName].(appmodule.HasPrepareCheckState)
+		if !ok {
+			continue
+		}
+		if err := module.PrepareCheckState(ctx); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// GetVersionMap gets the consensus version from all modules.
+func (m *Manager) GetVersionMap() VersionMap {
+	vermap := make(VersionMap)
+	for name, v := range m.Modules {
+		// Modules without HasConsensusVersion report version 0.
+		version := uint64(0)
+		if v, ok := v.(HasConsensusVersion); ok {
+			version = v.ConsensusVersion()
+		}
+		vermap[name] = version
+	}
+	return vermap
+}
+
+// ModuleNames returns a list of all module names, without any particular order.
+func (m *Manager) ModuleNames() []string {
+	return slices.Collect(maps.Keys(m.Modules))
+}
+
+// DefaultMigrationsOrder returns a default migrations order: ascending alphabetical by module name,
+// except x/auth which will run last, see:
+// https://github.com/cosmos/cosmos-sdk/issues/10591
+func DefaultMigrationsOrder(modules []string) []string {
+	const authName = "auth"
+	out := make([]string, 0, len(modules))
+	hasAuth := false
+	for _, m := range modules {
+		if m == authName {
+			hasAuth = true
+		} else {
+			out = append(out, m)
+		}
+	}
+	sort.Strings(out)
+	// auth must migrate last because other modules' migrations may create accounts.
+	if hasAuth {
+		out = append(out, authName)
+	}
+	return out
+}
+```
+
+* `RegisterLegacyAminoCodec(*codec.LegacyAmino)`: Registers the `amino` codec for the module, which is used to marshal and unmarshal structs to/from `[]byte` in order to persist them in the module's `KVStore`.
+* `RegisterInterfaces(codectypes.InterfaceRegistry)`: Registers a module's interface types and their concrete implementations as `proto.Message`.
+* `RegisterGRPCGatewayRoutes(client.Context, *runtime.ServeMux)`: Registers gRPC routes for the module.
+
+All of an application's `AppModuleBasic` implementations are managed by the [`BasicManager`](#basicmanager).
+
+### `HasName`
+
+```go expandable
+/*
+Package module contains application module patterns and associated "manager" functionality.
+The module pattern has been broken down by:
+  - independent module functionality (AppModuleBasic)
+  - inter-dependent module simulation functionality (AppModuleSimulation)
+  - inter-dependent module full functionality (AppModule)
+
+inter-dependent module functionality is module functionality which somehow
+depends on other modules, typically through the module keeper. Many of the
+module keepers are dependent on each other, thus in order to access the full
+set of module functionality we need to define all the keepers/params-store/keys
+etc. This full set of advanced functionality is defined by the AppModule interface.
+
+Independent module functions are separated to allow for the construction of the
+basic application structures required early on in the application definition
+and used to enable the definition of full module functionality later in the
+process. This separation is necessary, however we still want to allow for a
+high level pattern for modules to follow - for instance, such that we don't
+have to manually register all of the codecs for all the modules. This basic
+procedure as well as other basic patterns are handled through the use of
+BasicManager.
+
+Lastly the interface for genesis functionality (HasGenesis & HasABCIGenesis)
+has been separated out from full module functionality (AppModule) so that
+modules which are only used for genesis can take advantage of the Module
+patterns without needlessly defining many placeholder functions.
+*/
+package module
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"maps"
+	"slices"
+	"sort"
+
+	abci "github.com/cometbft/cometbft/abci/types"
+	"github.com/grpc-ecosystem/grpc-gateway/runtime"
+	"github.com/spf13/cobra"
+
+	"cosmossdk.io/core/appmodule"
+	"cosmossdk.io/core/genesis"
+	errorsmod "cosmossdk.io/errors"
+	storetypes "cosmossdk.io/store/types"
+
+	"github.com/cosmos/cosmos-sdk/client"
+	"github.com/cosmos/cosmos-sdk/codec"
+	"github.com/cosmos/cosmos-sdk/codec/types"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// AppModuleBasic is the standard form for basic non-dependant elements of an application module.
+type AppModuleBasic interface {
+	HasName
+	RegisterLegacyAminoCodec(*codec.LegacyAmino)
+	RegisterInterfaces(types.InterfaceRegistry)
+	RegisterGRPCGatewayRoutes(client.Context, *runtime.ServeMux)
+}
+
+// HasName allows the module to provide its own name for legacy purposes.
+// Newer apps should specify the name for their modules using a map
+// using NewManagerFromMap.
+type HasName interface {
+	Name() string
+}
+
+// HasGenesisBasics is the legacy interface for stateless genesis methods.
+type HasGenesisBasics interface {
+	DefaultGenesis(codec.JSONCodec) json.RawMessage
+	ValidateGenesis(codec.JSONCodec, client.TxEncodingConfig, json.RawMessage) error
+}
+
+// BasicManager is a collection of AppModuleBasic keyed by module name.
+type BasicManager map[string]AppModuleBasic
+
+// NewBasicManager creates a new BasicManager object.
+func NewBasicManager(modules ...AppModuleBasic) BasicManager {
+	moduleMap := make(map[string]AppModuleBasic)
+	for _, module := range modules {
+		moduleMap[module.Name()] = module
+	}
+	return moduleMap
+}
+
+// NewBasicManagerFromManager creates a new BasicManager from a Manager.
+// The BasicManager will contain all AppModuleBasic from the AppModule Manager.
+// A module's AppModuleBasic can be overridden by passing a custom AppModuleBasic map.
+func NewBasicManagerFromManager(manager *Manager, customModuleBasics map[string]AppModuleBasic) BasicManager {
+	moduleMap := make(map[string]AppModuleBasic)
+	for name, module := range manager.Modules {
+		// Explicit overrides win over any adaptation.
+		if customBasicMod, ok := customModuleBasics[name]; ok {
+			moduleMap[name] = customBasicMod
+			continue
+		}
+		// Core API modules are wrapped in a compatibility adaptor.
+		if appModule, ok := module.(appmodule.AppModule); ok {
+			moduleMap[name] = CoreAppModuleBasicAdaptor(name, appModule)
+			continue
+		}
+		if basicMod, ok := module.(AppModuleBasic); ok {
+			moduleMap[name] = basicMod
+		}
+	}
+	return moduleMap
+}
+
+// RegisterLegacyAminoCodec registers all module codecs.
+func (bm BasicManager) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+	for _, b := range bm {
+		b.RegisterLegacyAminoCodec(cdc)
+	}
+}
+
+// RegisterInterfaces registers all module interface types.
+func (bm BasicManager) RegisterInterfaces(registry types.InterfaceRegistry) {
+	for _, m := range bm {
+		m.RegisterInterfaces(registry)
+	}
+}
+
+// DefaultGenesis provides default genesis information for all modules.
+func (bm BasicManager) DefaultGenesis(cdc codec.JSONCodec) map[string]json.RawMessage {
+	genesisData := make(map[string]json.RawMessage)
+	for _, b := range bm {
+		// Only modules with genesis methods contribute to the genesis document.
+		if mod, ok := b.(HasGenesisBasics); ok {
+			genesisData[b.Name()] = mod.DefaultGenesis(cdc)
+		}
+	}
+	return genesisData
+}
+
+// ValidateGenesis performs genesis state validation for all modules.
+func (bm BasicManager) ValidateGenesis(cdc codec.JSONCodec, txEncCfg client.TxEncodingConfig, genesisData map[string]json.RawMessage) error {
+	for _, b := range bm {
+		// first check if the module is an adapted Core API Module
+		if mod, ok := b.(HasGenesisBasics); ok {
+			if err := mod.ValidateGenesis(cdc, txEncCfg, genesisData[b.Name()]); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// RegisterGRPCGatewayRoutes registers all module rest routes.
+func (bm BasicManager) RegisterGRPCGatewayRoutes(clientCtx client.Context, rtr *runtime.ServeMux) {
+	for _, b := range bm {
+		b.RegisterGRPCGatewayRoutes(clientCtx, rtr)
+	}
+}
+
+// AddTxCommands adds all tx commands to the rootTxCmd.
+func (bm BasicManager) AddTxCommands(rootTxCmd *cobra.Command) {
+	for _, b := range bm {
+		// Modules expose tx commands via an optional GetTxCmd method.
+		if mod, ok := b.(interface {
+			GetTxCmd() *cobra.Command
+		}); ok {
+			if cmd := mod.GetTxCmd(); cmd != nil {
+				rootTxCmd.AddCommand(cmd)
+			}
+		}
+	}
+}
+
+// AddQueryCommands adds all query commands to the rootQueryCmd.
+func (bm BasicManager) AddQueryCommands(rootQueryCmd *cobra.Command) {
+	for _, b := range bm {
+		// Modules expose query commands via an optional GetQueryCmd method.
+		if mod, ok := b.(interface {
+			GetQueryCmd() *cobra.Command
+		}); ok {
+			if cmd := mod.GetQueryCmd(); cmd != nil {
+				rootQueryCmd.AddCommand(cmd)
+			}
+		}
+	}
+}
+
+// HasGenesis is the extension interface for stateful genesis methods.
+type HasGenesis interface {
+	HasGenesisBasics
+	InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage)
+	ExportGenesis(sdk.Context, codec.JSONCodec) json.RawMessage
+}
+
+// HasABCIGenesis is the extension interface for stateful genesis methods which returns validator updates.
+type HasABCIGenesis interface {
+	HasGenesisBasics
+	InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage) []abci.ValidatorUpdate
+	ExportGenesis(sdk.Context, codec.JSONCodec) json.RawMessage
+}
+
+// AppModule is the form for an application module. Most of
+// its functionality has been moved to extension interfaces.
+// Deprecated: use appmodule.AppModule with a combination of extension interfaces instead.
+type AppModule interface {
+	appmodule.AppModule
+
+	AppModuleBasic
+}
+
+// HasInvariants is the interface for registering invariants.
+//
+// Deprecated: this will be removed in the next Cosmos SDK release.
+type HasInvariants interface {
+	// RegisterInvariants registers module invariants.
+	RegisterInvariants(sdk.InvariantRegistry)
+}
+
+// HasServices is the interface for modules to register services.
+type HasServices interface {
+	// RegisterServices allows a module to register services.
+	RegisterServices(Configurator)
+}
+
+// HasConsensusVersion is the interface for declaring a module consensus version.
+type HasConsensusVersion interface {
+	// ConsensusVersion is a sequence number for state-breaking change of the
+	// module. It should be incremented on each consensus-breaking change
+	// introduced by the module. To avoid wrong/empty versions, the initial version
+	// should be set to 1.
+	ConsensusVersion() uint64
+}
+
+// HasABCIEndblock is a released typo of HasABCIEndBlock.
+// Deprecated: use HasABCIEndBlock instead.
+type HasABCIEndblock HasABCIEndBlock
+
+// HasABCIEndBlock is the interface for modules that need to run code at the end of the block.
+type HasABCIEndBlock interface {
+	AppModule
+	EndBlock(context.Context) ([]abci.ValidatorUpdate, error)
+}
+
+// Compile-time interface assertions for GenesisOnlyAppModule.
+var (
+	_ appmodule.AppModule = (*GenesisOnlyAppModule)(nil)
+	_ AppModuleBasic      = (*GenesisOnlyAppModule)(nil)
+)
+
+// AppModuleGenesis is the standard form for an application module genesis functions.
+type AppModuleGenesis interface {
+	AppModuleBasic
+	HasABCIGenesis
+}
+
+// GenesisOnlyAppModule is an AppModule that only has import/export functionality.
+type GenesisOnlyAppModule struct {
+	AppModuleGenesis
+}
+
+// NewGenesisOnlyAppModule creates a new GenesisOnlyAppModule object.
+func NewGenesisOnlyAppModule(amg AppModuleGenesis) GenesisOnlyAppModule {
+	return GenesisOnlyAppModule{
+		AppModuleGenesis: amg,
+	}
+}
+
+// IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+func (GenesisOnlyAppModule) IsOnePerModuleType() {}
+
+// IsAppModule implements the appmodule.AppModule interface.
+func (GenesisOnlyAppModule) IsAppModule() {}
+
+// RegisterInvariants is a placeholder function registering no invariants.
+func (GenesisOnlyAppModule) RegisterInvariants(_ sdk.InvariantRegistry) {}
+
+// ConsensusVersion implements AppModule/ConsensusVersion.
+func (gam GenesisOnlyAppModule) ConsensusVersion() uint64 {
+	return 1
+}
+
+// Manager defines a module manager that provides the high level utility for managing and executing
+// operations for a group of modules.
+type Manager struct {
+	// Modules uses `any` to support the legacy AppModule as well as the new core appmodule.AppModule.
+	Modules                  map[string]any
+	OrderInitGenesis         []string
+	OrderExportGenesis       []string
+	OrderPreBlockers         []string
+	OrderBeginBlockers       []string
+	OrderEndBlockers         []string
+	OrderPrepareCheckStaters []string
+	OrderPrecommiters        []string
+	OrderMigrations          []string
+}
+
+// NewManager creates a new Manager object.
+func NewManager(modules ...AppModule) *Manager {
+	moduleMap := make(map[string]any)
+	modulesStr := make([]string, 0, len(modules))
+	preBlockModulesStr := make([]string, 0)
+	for _, module := range modules {
+		if _, ok := module.(appmodule.AppModule); !ok {
+			panic(fmt.Sprintf("module %s does not implement appmodule.AppModule", module.Name()))
+		}
+		moduleMap[module.Name()] = module
+		modulesStr = append(modulesStr, module.Name())
+		if _, ok := module.(appmodule.HasPreBlocker); ok {
+			preBlockModulesStr = append(preBlockModulesStr, module.Name())
+		}
+	}
+
+	return &Manager{
+		Modules:                  moduleMap,
+		OrderInitGenesis:         modulesStr,
+		OrderExportGenesis:       modulesStr,
+		OrderPreBlockers:         preBlockModulesStr,
+		OrderBeginBlockers:       modulesStr,
+		OrderPrepareCheckStaters: modulesStr,
+		OrderPrecommiters:        modulesStr,
+		OrderEndBlockers:         modulesStr,
+	}
+}
+
+// NewManagerFromMap creates a new Manager object from a map of module names to module implementations.
+// This method should be used for apps and modules which have migrated to the cosmossdk.io/core.appmodule.AppModule API.
+func NewManagerFromMap(moduleMap map[string]appmodule.AppModule) *Manager {
+	simpleModuleMap := make(map[string]any)
+	modulesStr := make([]string, 0, len(simpleModuleMap))
+	preBlockModulesStr := make([]string, 0)
+	for name, module := range moduleMap {
+		simpleModuleMap[name] = module
+		modulesStr = append(modulesStr, name)
+		if _, ok := module.(appmodule.HasPreBlocker); ok {
+			preBlockModulesStr = append(preBlockModulesStr, name)
+		}
+	}
+
+	// Sort the modules by name. Given that we are using a map above we can't guarantee the order.
+	sort.Strings(modulesStr)
+
+	return &Manager{
+		Modules:                  simpleModuleMap,
+		OrderInitGenesis:         modulesStr,
+		OrderExportGenesis:       modulesStr,
+		OrderPreBlockers:         preBlockModulesStr,
+		OrderBeginBlockers:       modulesStr,
+		OrderEndBlockers:         modulesStr,
+		OrderPrecommiters:        modulesStr,
+		OrderPrepareCheckStaters: modulesStr,
+	}
+}
+
+// SetOrderInitGenesis sets the order of init genesis calls.
+func (m *Manager) SetOrderInitGenesis(moduleNames ...string) {
+	m.assertNoForgottenModules("SetOrderInitGenesis", moduleNames, func(moduleName string) bool {
+		module := m.Modules[moduleName]
+		// Only modules implementing one of the genesis interfaces must appear
+		// in moduleNames; all others may be omitted from the ordering.
+		if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+			return !hasGenesis
+		}
+		if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+			return !hasABCIGenesis
+		}
+		_, hasGenesis := module.(HasGenesis)
+		return !hasGenesis
+	})
+	m.OrderInitGenesis = moduleNames
+}
+
+// SetOrderExportGenesis sets the order of export genesis calls.
+func (m *Manager) SetOrderExportGenesis(moduleNames ...string) {
+	m.assertNoForgottenModules("SetOrderExportGenesis", moduleNames, func(moduleName string) bool {
+		module := m.Modules[moduleName]
+		// Only modules implementing one of the genesis interfaces must appear
+		// in moduleNames; all others may be omitted from the ordering.
+		if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+			return !hasGenesis
+		}
+		if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+			return !hasABCIGenesis
+		}
+		_, hasGenesis := module.(HasGenesis)
+		return !hasGenesis
+	})
+	m.OrderExportGenesis = moduleNames
+}
+
+// SetOrderPreBlockers sets the order of set pre-blocker calls.
+func (m *Manager) SetOrderPreBlockers(moduleNames ...string) {
+	m.assertNoForgottenModules("SetOrderPreBlockers", moduleNames,
+		func(moduleName string) bool {
+			module := m.Modules[moduleName]
+			_, hasBlock := module.(appmodule.HasPreBlocker)
+			return !hasBlock
+		})
+	m.OrderPreBlockers = moduleNames
+}
+
+// SetOrderBeginBlockers sets the order of set begin-blocker calls.
+func (m *Manager) SetOrderBeginBlockers(moduleNames ...string) {
+	m.assertNoForgottenModules("SetOrderBeginBlockers", moduleNames,
+		func(moduleName string) bool {
+			module := m.Modules[moduleName]
+			_, hasBeginBlock := module.(appmodule.HasBeginBlocker)
+			return !hasBeginBlock
+		})
+	m.OrderBeginBlockers = moduleNames
+}
+
+// SetOrderEndBlockers sets the order of set end-blocker calls
+func (m *Manager)
+
+SetOrderEndBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderEndBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasEndBlock := module.(appmodule.HasEndBlocker); hasEndBlock {
+ return !hasEndBlock
+}
+
+ _, hasABCIEndBlock := module.(HasABCIEndBlock)
+
+return !hasABCIEndBlock
+})
+
+m.OrderEndBlockers = moduleNames
+}
+
+// SetOrderPrepareCheckStaters sets the order of set prepare-check-stater calls
+func (m *Manager)
+
+SetOrderPrepareCheckStaters(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPrepareCheckStaters", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasPrepareCheckState := module.(appmodule.HasPrepareCheckState)
+
+return !hasPrepareCheckState
+})
+
+m.OrderPrepareCheckStaters = moduleNames
+}
+
+// SetOrderPrecommiters sets the order of set precommiter calls
+func (m *Manager)
+
+SetOrderPrecommiters(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPrecommiters", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasPrecommit := module.(appmodule.HasPrecommit)
+
+return !hasPrecommit
+})
+
+m.OrderPrecommiters = moduleNames
+}
+
+// SetOrderMigrations sets the order of migrations to be run. If not set
+// then migrations will be run with an order defined in `DefaultMigrationsOrder`.
+func (m *Manager)
+
+SetOrderMigrations(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderMigrations", moduleNames, nil)
+
+m.OrderMigrations = moduleNames
+}
+
+// RegisterInvariants registers all module invariants
+//
+// Deprecated: this function is a no-op and will be removed in the next release of the Cosmos SDK.
+func (m *Manager)
+
+RegisterInvariants(_ sdk.InvariantRegistry) {
+}
+
+// RegisterServices registers all module services
+func (m *Manager)
+
+RegisterServices(cfg Configurator)
+
+error {
+ for _, module := range m.Modules {
+ if module, ok := module.(HasServices); ok {
+ module.RegisterServices(cfg)
+}
+ if module, ok := module.(appmodule.HasServices); ok {
+ err := module.RegisterServices(cfg)
+ if err != nil {
+ return err
+}
+
+}
+ if cfg.Error() != nil {
+ return cfg.Error()
+}
+
+}
+
+return nil
+}
+
+// InitGenesis performs init genesis functionality for modules. Exactly one
+// module must return a non-empty validator set update to correctly initialize
+// the chain.
+func (m *Manager)
+
+InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, genesisData map[string]json.RawMessage) (*abci.ResponseInitChain, error) {
+ var validatorUpdates []abci.ValidatorUpdate
+ ctx.Logger().Info("initializing blockchain state from genesis.json")
+ for _, moduleName := range m.OrderInitGenesis {
+ if genesisData[moduleName] == nil {
+ continue
+}
+ mod := m.Modules[moduleName]
+ // we might get an adapted module, a native core API module or a legacy module
+ if module, ok := mod.(appmodule.HasGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+ // core API genesis
+ source, err := genesis.SourceFromRawJSON(genesisData[moduleName])
+ if err != nil {
+ return &abci.ResponseInitChain{
+}, err
+}
+
+err = module.InitGenesis(ctx, source)
+ if err != nil {
+ return &abci.ResponseInitChain{
+}, err
+}
+
+}
+
+else if module, ok := mod.(HasGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+
+module.InitGenesis(ctx, cdc, genesisData[moduleName])
+}
+
+else if module, ok := mod.(HasABCIGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+ moduleValUpdates := module.InitGenesis(ctx, cdc, genesisData[moduleName])
+
+ // use these validator updates if provided, the module manager assumes
+ // only one module will update the validator set
+ if len(moduleValUpdates) > 0 {
+ if len(validatorUpdates) > 0 {
+ return &abci.ResponseInitChain{
+}, errors.New("validator InitGenesis updates already set by a previous module")
+}
+
+validatorUpdates = moduleValUpdates
+}
+
+}
+
+}
+
+ // a chain must initialize with a non-empty validator set
+ if len(validatorUpdates) == 0 {
+ return &abci.ResponseInitChain{
+}, fmt.Errorf("validator set is empty after InitGenesis, please ensure at least one validator is initialized with a delegation greater than or equal to the DefaultPowerReduction (%d)", sdk.DefaultPowerReduction)
+}
+
+return &abci.ResponseInitChain{
+ Validators: validatorUpdates,
+}, nil
+}
+
+// ExportGenesis performs export genesis functionality for modules
+func (m *Manager)
+
+ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) (map[string]json.RawMessage, error) {
+ return m.ExportGenesisForModules(ctx, cdc, []string{
+})
+}
+
+// ExportGenesisForModules performs export genesis functionality for modules
+func (m *Manager)
+
+ExportGenesisForModules(ctx sdk.Context, cdc codec.JSONCodec, modulesToExport []string) (map[string]json.RawMessage, error) {
+ if len(modulesToExport) == 0 {
+ modulesToExport = m.OrderExportGenesis
+}
+ // verify modules exists in app, so that we don't panic in the middle of an export
+ if err := m.checkModulesExists(modulesToExport); err != nil {
+ return nil, err
+}
+
+type genesisResult struct {
+ bz json.RawMessage
+ err error
+}
+ channels := make(map[string]chan genesisResult)
+ for _, moduleName := range modulesToExport {
+ mod := m.Modules[moduleName]
+ if module, ok := mod.(appmodule.HasGenesis); ok {
+ // core API genesis
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module appmodule.HasGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ target := genesis.RawJSONTarget{
+}
+ err := module.ExportGenesis(ctx, target.Target())
+ if err != nil {
+ ch <- genesisResult{
+ nil, err
+}
+
+return
+}
+
+rawJSON, err := target.JSON()
+ if err != nil {
+ ch <- genesisResult{
+ nil, err
+}
+
+return
+}
+
+ch <- genesisResult{
+ rawJSON, nil
+}
+
+}(module, channels[moduleName])
+}
+
+else if module, ok := mod.(HasGenesis); ok {
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module HasGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ ch <- genesisResult{
+ module.ExportGenesis(ctx, cdc), nil
+}
+
+}(module, channels[moduleName])
+}
+
+else if module, ok := mod.(HasABCIGenesis); ok {
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module HasABCIGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ ch <- genesisResult{
+ module.ExportGenesis(ctx, cdc), nil
+}
+
+}(module, channels[moduleName])
+}
+
+}
+ genesisData := make(map[string]json.RawMessage)
+ for moduleName := range channels {
+ res := <-channels[moduleName]
+ if res.err != nil {
+ return nil, fmt.Errorf("genesis export error in %s: %w", moduleName, res.err)
+}
+
+genesisData[moduleName] = res.bz
+}
+
+return genesisData, nil
+}
+
+// checkModulesExists verifies that all modules in the list exist in the app
+func (m *Manager)
+
+checkModulesExists(moduleName []string)
+
+error {
+ for _, name := range moduleName {
+ if _, ok := m.Modules[name]; !ok {
+ return fmt.Errorf("module %s does not exist", name)
+}
+
+}
+
+return nil
+}
+
+// assertNoForgottenModules checks that we didn't forget any modules in the SetOrder* functions.
+// `pass` is a closure which allows one to omit modules from `moduleNames`.
+// If you provide non-nil `pass` and it returns true, the module would not be subject of the assertion.
+func (m *Manager)
+
+assertNoForgottenModules(setOrderFnName string, moduleNames []string, pass func(moduleName string)
+
+bool) {
+ ms := make(map[string]bool)
+ for _, m := range moduleNames {
+ ms[m] = true
+}
+
+var missing []string
+ for m := range m.Modules {
+ if pass != nil && pass(m) {
+ continue
+}
+ if !ms[m] {
+ missing = append(missing, m)
+}
+
+}
+ if len(missing) != 0 {
+ sort.Strings(missing)
+
+panic(fmt.Sprintf(
+ "all modules must be defined when setting %s, missing: %v", setOrderFnName, missing))
+}
+}
+
+// MigrationHandler is the migration function that each module registers.
+type MigrationHandler func(sdk.Context)
+
+error
+
+// VersionMap is a map of moduleName -> version
+type VersionMap map[string]uint64
+
+// RunMigrations performs in-place store migrations for all modules. This
+// function MUST be called inside an x/upgrade UpgradeHandler.
+//
+// Recall that in an upgrade handler, the `fromVM` VersionMap is retrieved from
+// x/upgrade's store, and the function needs to return the target VersionMap
+// that will in turn be persisted to the x/upgrade's store. In general,
+// returning RunMigrations should be enough:
+//
+// Example:
+//
+// cfg := module.NewConfigurator(...)
+// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+// return app.mm.RunMigrations(ctx, cfg, fromVM)
+//
+})
+//
+// Internally, RunMigrations will perform the following steps:
+// - create an `updatedVM` VersionMap of module with their latest ConsensusVersion
+// - make a diff of `fromVM` and `updatedVM`, and for each module:
+// - if the module's `fromVM` version is less than its `updatedVM` version,
+// then run in-place store migrations for that module between those versions.
+// - if the module does not exist in the `fromVM` (which means that it's a new module,
+// because it was not in the previous x/upgrade's store), then run
+// `InitGenesis` on that module.
+//
+// - return the `updatedVM` to be persisted in the x/upgrade's store.
+//
+// Migrations are run in an order defined by `Manager.OrderMigrations` or (if not set)
+
+defined by
+// `DefaultMigrationsOrder` function.
+//
+// As an app developer, if you wish to skip running InitGenesis for your new
+// module "foo", you need to manually pass a `fromVM` argument to this function
+// with foo's module version set to its latest ConsensusVersion. That way, the diff
+// between the function's `fromVM` and `updatedVM` will be empty, hence not
+// running anything for foo.
+//
+// Example:
+//
+// cfg := module.NewConfigurator(...)
+// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+// // Assume "foo" is a new module.
+// // `fromVM` is fetched from existing x/upgrade store. Since foo didn't exist
+// // before this upgrade, `v, exists := fromVM["foo"]; exists == false`, and RunMigration will by default
+// // run InitGenesis on foo.
+// // To skip running foo's InitGenesis, you need set `fromVM`'s foo to its latest
+// // consensus version:
+// fromVM["foo"] = foo.AppModule{
+}.ConsensusVersion()
+//
+// return app.mm.RunMigrations(ctx, cfg, fromVM)
+//
+})
+//
+// Please also refer to /sdk/v0.53/build/migrations/upgrading for more information.
+func (m Manager)
+
+RunMigrations(ctx context.Context, cfg Configurator, fromVM VersionMap) (VersionMap, error) {
+ c, ok := cfg.(*configurator)
+ if !ok {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidType, "expected %T, got %T", &configurator{
+}, cfg)
+}
+ modules := m.OrderMigrations
+ if modules == nil {
+ modules = DefaultMigrationsOrder(m.ModuleNames())
+}
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ updatedVM := VersionMap{
+}
+ for _, moduleName := range modules {
+ module := m.Modules[moduleName]
+ fromVersion, exists := fromVM[moduleName]
+ toVersion := uint64(0)
+ if module, ok := module.(HasConsensusVersion); ok {
+ toVersion = module.ConsensusVersion()
+}
+
+ // We run migration if the module is specified in `fromVM`.
+ // Otherwise we run InitGenesis.
+ //
+ // The module won't exist in the fromVM in two cases:
+ // 1. A new module is added. In this case we run InitGenesis with an
+ // empty genesis state.
+ // 2. An existing chain is upgrading from version < 0.43 to v0.43+ for the first time.
+ // In this case, all modules have yet to be added to x/upgrade's VersionMap store.
+ if exists {
+ err := c.runModuleMigrations(sdkCtx, moduleName, fromVersion, toVersion)
+ if err != nil {
+ return nil, err
+}
+
+}
+
+else {
+ sdkCtx.Logger().Info(fmt.Sprintf("adding a new module: %s", moduleName))
+ if module, ok := m.Modules[moduleName].(HasGenesis); ok {
+ module.InitGenesis(sdkCtx, c.cdc, module.DefaultGenesis(c.cdc))
+}
+ if module, ok := m.Modules[moduleName].(HasABCIGenesis); ok {
+ moduleValUpdates := module.InitGenesis(sdkCtx, c.cdc, module.DefaultGenesis(c.cdc))
+ // The module manager assumes only one module will update the
+ // validator set, and it can't be a new module.
+ if len(moduleValUpdates) > 0 {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrLogic, "validator InitGenesis update is already set by another module")
+}
+
+}
+
+}
+
+updatedVM[moduleName] = toVersion
+}
+
+return updatedVM, nil
+}
+
+// PreBlock performs begin block functionality for upgrade module.
+// It takes the current context as a parameter and returns a boolean value
+// indicating whether the migration was successfully executed or not.
+func (m *Manager)
+
+PreBlock(ctx sdk.Context) (*sdk.ResponsePreBlock, error) {
+ paramsChanged := false
+ for _, moduleName := range m.OrderPreBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasPreBlocker); ok {
+ rsp, err := module.PreBlock(ctx)
+ if err != nil {
+ return nil, err
+}
+ if rsp.IsConsensusParamsChanged() {
+ paramsChanged = true
+}
+
+}
+
+}
+
+return &sdk.ResponsePreBlock{
+ ConsensusParamsChanged: paramsChanged,
+}, nil
+}
+
+// BeginBlock performs begin block functionality for all modules. It creates a
+// child context with an event manager to aggregate events emitted from all
+// modules.
+func (m *Manager)
+
+BeginBlock(ctx sdk.Context) (sdk.BeginBlock, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+ for _, moduleName := range m.OrderBeginBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasBeginBlocker); ok {
+ if err := module.BeginBlock(ctx); err != nil {
+ return sdk.BeginBlock{
+}, err
+}
+
+}
+
+}
+
+return sdk.BeginBlock{
+ Events: ctx.EventManager().ABCIEvents(),
+}, nil
+}
+
+// EndBlock performs end block functionality for all modules. It creates a
+// child context with an event manager to aggregate events emitted from all
+// modules.
+func (m *Manager)
+
+EndBlock(ctx sdk.Context) (sdk.EndBlock, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+ validatorUpdates := []abci.ValidatorUpdate{
+}
+ for _, moduleName := range m.OrderEndBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasEndBlocker); ok {
+ err := module.EndBlock(ctx)
+ if err != nil {
+ return sdk.EndBlock{
+}, err
+}
+
+}
+
+else if module, ok := m.Modules[moduleName].(HasABCIEndBlock); ok {
+ moduleValUpdates, err := module.EndBlock(ctx)
+ if err != nil {
+ return sdk.EndBlock{
+}, err
+}
+ // use these validator updates if provided, the module manager assumes
+ // only one module will update the validator set
+ if len(moduleValUpdates) > 0 {
+ if len(validatorUpdates) > 0 {
+ return sdk.EndBlock{
+}, errors.New("validator EndBlock updates already set by a previous module")
+}
+ for _, updates := range moduleValUpdates {
+ validatorUpdates = append(validatorUpdates, abci.ValidatorUpdate{
+ PubKey: updates.PubKey,
+ Power: updates.Power
+})
+}
+
+}
+
+}
+
+else {
+ continue
+}
+
+}
+
+return sdk.EndBlock{
+ ValidatorUpdates: validatorUpdates,
+ Events: ctx.EventManager().ABCIEvents(),
+}, nil
+}
+
+// Precommit performs precommit functionality for all modules.
+func (m *Manager)
+
+Precommit(ctx sdk.Context)
+
+error {
+ for _, moduleName := range m.OrderPrecommiters {
+ module, ok := m.Modules[moduleName].(appmodule.HasPrecommit)
+ if !ok {
+ continue
+}
+ if err := module.Precommit(ctx); err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+// PrepareCheckState performs functionality for preparing the check state for all modules.
+func (m *Manager)
+
+PrepareCheckState(ctx sdk.Context)
+
+error {
+ for _, moduleName := range m.OrderPrepareCheckStaters {
+ module, ok := m.Modules[moduleName].(appmodule.HasPrepareCheckState)
+ if !ok {
+ continue
+}
+ if err := module.PrepareCheckState(ctx); err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+// GetVersionMap gets consensus version from all modules
+func (m *Manager)
+
+GetVersionMap()
+
+VersionMap {
+ vermap := make(VersionMap)
+ for name, v := range m.Modules {
+ version := uint64(0)
+ if v, ok := v.(HasConsensusVersion); ok {
+ version = v.ConsensusVersion()
+}
+
+vermap[name] = version
+}
+
+return vermap
+}
+
+// ModuleNames returns list of all module names, without any particular order.
+func (m *Manager)
+
+ModuleNames() []string {
+ return slices.Collect(maps.Keys(m.Modules))
+}
+
+// DefaultMigrationsOrder returns a default migrations order: ascending alphabetical by module name,
+// except x/auth which will run last, see:
+// https://github.com/cosmos/cosmos-sdk/issues/10591
+func DefaultMigrationsOrder(modules []string) []string {
+ const authName = "auth"
+ out := make([]string, 0, len(modules))
+ hasAuth := false
+ for _, m := range modules {
+ if m == authName {
+ hasAuth = true
+}
+
+else {
+ out = append(out, m)
+}
+
+}
+
+sort.Strings(out)
+ if hasAuth {
+ out = append(out, authName)
+}
+
+return out
+}
+```
+
+* `HasName` is an interface that has a method `Name()`. This method returns the name of the module as a `string`.
+
+### Genesis
+
+
+For easily creating an `AppModule` that only has genesis functionalities, use `module.GenesisOnlyAppModule`.
+
+
+#### `module.HasGenesisBasics`
+
+```go expandable
+/*
+Package module contains application module patterns and associated "manager" functionality.
+The module pattern has been broken down by:
+ - independent module functionality (AppModuleBasic)
+ - inter-dependent module simulation functionality (AppModuleSimulation)
+ - inter-dependent module full functionality (AppModule)
+
+inter-dependent module functionality is module functionality which somehow
+depends on other modules, typically through the module keeper. Many of the
+module keepers are dependent on each other, thus in order to access the full
+set of module functionality we need to define all the keepers/params-store/keys
+etc. This full set of advanced functionality is defined by the AppModule interface.
+
+Independent module functions are separated to allow for the construction of the
+basic application structures required early on in the application definition
+and used to enable the definition of full module functionality later in the
+process. This separation is necessary, however we still want to allow for a
+high level pattern for modules to follow - for instance, such that we don't
+have to manually register all of the codecs for all the modules. This basic
+procedure as well as other basic patterns are handled through the use of
+BasicManager.
+
+Lastly the interface for genesis functionality (HasGenesis & HasABCIGenesis)
+
+has been
+separated out from full module functionality (AppModule)
+
+so that modules which
+are only used for genesis can take advantage of the Module patterns without
+needlessly defining many placeholder functions
+*/
+package module
+
+import (
+
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "maps"
+ "slices"
+ "sort"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/core/genesis"
+ errorsmod "cosmossdk.io/errors"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// AppModuleBasic is the standard form for basic non-dependent elements of an application module.
+type AppModuleBasic interface {
+ HasName
+ RegisterLegacyAminoCodec(*codec.LegacyAmino)
+
+RegisterInterfaces(types.InterfaceRegistry)
+
+RegisterGRPCGatewayRoutes(client.Context, *runtime.ServeMux)
+}
+
+// HasName allows the module to provide its own name for legacy purposes.
+// Newer apps should specify the name for their modules using a map
+// using NewManagerFromMap.
+type HasName interface {
+ Name()
+
+string
+}
+
+// HasGenesisBasics is the legacy interface for stateless genesis methods.
+type HasGenesisBasics interface {
+ DefaultGenesis(codec.JSONCodec)
+
+json.RawMessage
+ ValidateGenesis(codec.JSONCodec, client.TxEncodingConfig, json.RawMessage)
+
+error
+}
+
+// BasicManager is a collection of AppModuleBasic
+type BasicManager map[string]AppModuleBasic
+
+// NewBasicManager creates a new BasicManager object
+func NewBasicManager(modules ...AppModuleBasic)
+
+BasicManager {
+ moduleMap := make(map[string]AppModuleBasic)
+ for _, module := range modules {
+ moduleMap[module.Name()] = module
+}
+
+return moduleMap
+}
+
+// NewBasicManagerFromManager creates a new BasicManager from a Manager
+// The BasicManager will contain all AppModuleBasic from the AppModule Manager
+// Module's AppModuleBasic can be overridden by passing a custom AppModuleBasic map
+func NewBasicManagerFromManager(manager *Manager, customModuleBasics map[string]AppModuleBasic)
+
+BasicManager {
+ moduleMap := make(map[string]AppModuleBasic)
+ for name, module := range manager.Modules {
+ if customBasicMod, ok := customModuleBasics[name]; ok {
+ moduleMap[name] = customBasicMod
+ continue
+}
+ if appModule, ok := module.(appmodule.AppModule); ok {
+ moduleMap[name] = CoreAppModuleBasicAdaptor(name, appModule)
+
+continue
+}
+ if basicMod, ok := module.(AppModuleBasic); ok {
+ moduleMap[name] = basicMod
+}
+
+}
+
+return moduleMap
+}
+
+// RegisterLegacyAminoCodec registers all module codecs
+func (bm BasicManager)
+
+RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ for _, b := range bm {
+ b.RegisterLegacyAminoCodec(cdc)
+}
+}
+
+// RegisterInterfaces registers all module interface types
+func (bm BasicManager)
+
+RegisterInterfaces(registry types.InterfaceRegistry) {
+ for _, m := range bm {
+ m.RegisterInterfaces(registry)
+}
+}
+
+// DefaultGenesis provides default genesis information for all modules
+func (bm BasicManager)
+
+DefaultGenesis(cdc codec.JSONCodec)
+
+map[string]json.RawMessage {
+ genesisData := make(map[string]json.RawMessage)
+ for _, b := range bm {
+ if mod, ok := b.(HasGenesisBasics); ok {
+ genesisData[b.Name()] = mod.DefaultGenesis(cdc)
+}
+
+}
+
+return genesisData
+}
+
+// ValidateGenesis performs genesis state validation for all modules
+func (bm BasicManager)
+
+ValidateGenesis(cdc codec.JSONCodec, txEncCfg client.TxEncodingConfig, genesisData map[string]json.RawMessage)
+
+error {
+ for _, b := range bm {
+ // first check if the module is an adapted Core API Module
+ if mod, ok := b.(HasGenesisBasics); ok {
+ if err := mod.ValidateGenesis(cdc, txEncCfg, genesisData[b.Name()]); err != nil {
+ return err
+}
+
+}
+
+}
+
+return nil
+}
+
+// RegisterGRPCGatewayRoutes registers all module rest routes
+func (bm BasicManager)
+
+RegisterGRPCGatewayRoutes(clientCtx client.Context, rtr *runtime.ServeMux) {
+ for _, b := range bm {
+ b.RegisterGRPCGatewayRoutes(clientCtx, rtr)
+}
+}
+
+// AddTxCommands adds all tx commands to the rootTxCmd.
+func (bm BasicManager)
+
+AddTxCommands(rootTxCmd *cobra.Command) {
+ for _, b := range bm {
+ if mod, ok := b.(interface {
+ GetTxCmd() *cobra.Command
+}); ok {
+ if cmd := mod.GetTxCmd(); cmd != nil {
+ rootTxCmd.AddCommand(cmd)
+}
+
+}
+
+}
+}
+
+// AddQueryCommands adds all query commands to the rootQueryCmd.
+func (bm BasicManager)
+
+AddQueryCommands(rootQueryCmd *cobra.Command) {
+ for _, b := range bm {
+ if mod, ok := b.(interface {
+ GetQueryCmd() *cobra.Command
+}); ok {
+ if cmd := mod.GetQueryCmd(); cmd != nil {
+ rootQueryCmd.AddCommand(cmd)
+}
+
+}
+
+}
+}
+
+// HasGenesis is the extension interface for stateful genesis methods.
+type HasGenesis interface {
+ HasGenesisBasics
+ InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage)
+
+ExportGenesis(sdk.Context, codec.JSONCodec)
+
+json.RawMessage
+}
+
+// HasABCIGenesis is the extension interface for stateful genesis methods which returns validator updates.
+type HasABCIGenesis interface {
+ HasGenesisBasics
+ InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage) []abci.ValidatorUpdate
+ ExportGenesis(sdk.Context, codec.JSONCodec)
+
+json.RawMessage
+}
+
+// AppModule is the form for an application module. Most of
+// its functionality has been moved to extension interfaces.
+// Deprecated: use appmodule.AppModule with a combination of extension interfaces instead.
+type AppModule interface {
+ appmodule.AppModule
+
+ AppModuleBasic
+}
+
+// HasInvariants is the interface for registering invariants.
+//
+// Deprecated: this will be removed in the next Cosmos SDK release.
+type HasInvariants interface {
+ // RegisterInvariants registers module invariants.
+ RegisterInvariants(sdk.InvariantRegistry)
+}
+
+// HasServices is the interface for modules to register services.
+type HasServices interface {
+ // RegisterServices allows a module to register services.
+ RegisterServices(Configurator)
+}
+
+// HasConsensusVersion is the interface for declaring a module consensus version.
+type HasConsensusVersion interface {
+ // ConsensusVersion is a sequence number for state-breaking change of the
+ // module. It should be incremented on each consensus-breaking change
+ // introduced by the module. To avoid wrong/empty versions, the initial version
+ // should be set to 1.
+ ConsensusVersion()
+
+uint64
+}
+
+// HasABCIEndblock is a released typo of HasABCIEndBlock.
+// Deprecated: use HasABCIEndBlock instead.
+type HasABCIEndblock HasABCIEndBlock
+
+// HasABCIEndBlock is the interface for modules that need to run code at the end of the block.
+type HasABCIEndBlock interface {
+ AppModule
+ EndBlock(context.Context) ([]abci.ValidatorUpdate, error)
+}
+
+var (
+ _ appmodule.AppModule = (*GenesisOnlyAppModule)(nil)
+ _ AppModuleBasic = (*GenesisOnlyAppModule)(nil)
+)
+
+// AppModuleGenesis is the standard form for an application module genesis functions
+type AppModuleGenesis interface {
+ AppModuleBasic
+ HasABCIGenesis
+}
+
+// GenesisOnlyAppModule is an AppModule that only has import/export functionality
+type GenesisOnlyAppModule struct {
+ AppModuleGenesis
+}
+
+// NewGenesisOnlyAppModule creates a new GenesisOnlyAppModule object
+func NewGenesisOnlyAppModule(amg AppModuleGenesis)
+
+GenesisOnlyAppModule {
+ return GenesisOnlyAppModule{
+ AppModuleGenesis: amg,
+}
+}
+
+// IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+func (GenesisOnlyAppModule)
+
+IsOnePerModuleType() {
+}
+
+// IsAppModule implements the appmodule.AppModule interface.
+func (GenesisOnlyAppModule)
+
+IsAppModule() {
+}
+
+// RegisterInvariants is a placeholder function register no invariants
+func (GenesisOnlyAppModule)
+
+RegisterInvariants(_ sdk.InvariantRegistry) {
+}
+
+// ConsensusVersion implements AppModule/ConsensusVersion.
+func (gam GenesisOnlyAppModule)
+
+ConsensusVersion()
+
+uint64 {
+ return 1
+}
+
+// Manager defines a module manager that provides the high level utility for managing and executing
+// operations for a group of modules
+type Manager struct {
+ Modules map[string]any // interface{
+}
+
+is used now to support the legacy AppModule as well as new core appmodule.AppModule.
+ OrderInitGenesis []string
+ OrderExportGenesis []string
+ OrderPreBlockers []string
+ OrderBeginBlockers []string
+ OrderEndBlockers []string
+ OrderPrepareCheckStaters []string
+ OrderPrecommiters []string
+ OrderMigrations []string
+}
+
+// NewManager creates a new Manager object.
+func NewManager(modules ...AppModule) *Manager {
+ moduleMap := make(map[string]any)
+ modulesStr := make([]string, 0, len(modules))
+ preBlockModulesStr := make([]string, 0)
+ for _, module := range modules {
+ if _, ok := module.(appmodule.AppModule); !ok {
+ panic(fmt.Sprintf("module %s does not implement appmodule.AppModule", module.Name()))
+}
+
+moduleMap[module.Name()] = module
+ modulesStr = append(modulesStr, module.Name())
+ if _, ok := module.(appmodule.HasPreBlocker); ok {
+ preBlockModulesStr = append(preBlockModulesStr, module.Name())
+}
+
+}
+
+return &Manager{
+ Modules: moduleMap,
+ OrderInitGenesis: modulesStr,
+ OrderExportGenesis: modulesStr,
+ OrderPreBlockers: preBlockModulesStr,
+ OrderBeginBlockers: modulesStr,
+ OrderPrepareCheckStaters: modulesStr,
+ OrderPrecommiters: modulesStr,
+ OrderEndBlockers: modulesStr,
+}
+}
+
+// NewManagerFromMap creates a new Manager object from a map of module names to module implementations.
+// This method should be used for apps and modules which have migrated to the cosmossdk.io/core.appmodule.AppModule API.
+func NewManagerFromMap(moduleMap map[string]appmodule.AppModule) *Manager {
+ simpleModuleMap := make(map[string]any)
+ modulesStr := make([]string, 0, len(simpleModuleMap))
+ preBlockModulesStr := make([]string, 0)
+ for name, module := range moduleMap {
+ simpleModuleMap[name] = module
+ modulesStr = append(modulesStr, name)
+ if _, ok := module.(appmodule.HasPreBlocker); ok {
+ preBlockModulesStr = append(preBlockModulesStr, name)
+}
+
+}
+
+ // Sort the modules by name. Given that we are using a map above we can't guarantee the order.
+ sort.Strings(modulesStr)
+
+return &Manager{
+ Modules: simpleModuleMap,
+ OrderInitGenesis: modulesStr,
+ OrderExportGenesis: modulesStr,
+ OrderPreBlockers: preBlockModulesStr,
+ OrderBeginBlockers: modulesStr,
+ OrderEndBlockers: modulesStr,
+ OrderPrecommiters: modulesStr,
+ OrderPrepareCheckStaters: modulesStr,
+}
+}
+
+// SetOrderInitGenesis sets the order of init genesis calls
+func (m *Manager)
+
+SetOrderInitGenesis(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderInitGenesis", moduleNames, func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+ return !hasGenesis
+}
+ if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+ return !hasABCIGenesis
+}
+
+ _, hasGenesis := module.(HasGenesis)
+
+return !hasGenesis
+})
+
+m.OrderInitGenesis = moduleNames
+}
+
+// SetOrderExportGenesis sets the order of export genesis calls
+func (m *Manager)
+
+SetOrderExportGenesis(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderExportGenesis", moduleNames, func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+ return !hasGenesis
+}
+ if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+ return !hasABCIGenesis
+}
+
+ _, hasGenesis := module.(HasGenesis)
+
+return !hasGenesis
+})
+
+m.OrderExportGenesis = moduleNames
+}
+
+// SetOrderPreBlockers sets the order of set pre-blocker calls
+func (m *Manager)
+
+SetOrderPreBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPreBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasBlock := module.(appmodule.HasPreBlocker)
+
+return !hasBlock
+})
+
+m.OrderPreBlockers = moduleNames
+}
+
+// SetOrderBeginBlockers sets the order of set begin-blocker calls
+func (m *Manager)
+
+SetOrderBeginBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderBeginBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasBeginBlock := module.(appmodule.HasBeginBlocker)
+
+return !hasBeginBlock
+})
+
+m.OrderBeginBlockers = moduleNames
+}
+
+// SetOrderEndBlockers sets the order of set end-blocker calls
+func (m *Manager)
+
+SetOrderEndBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderEndBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasEndBlock := module.(appmodule.HasEndBlocker); hasEndBlock {
+ return !hasEndBlock
+}
+
+ _, hasABCIEndBlock := module.(HasABCIEndBlock)
+
+return !hasABCIEndBlock
+})
+
+m.OrderEndBlockers = moduleNames
+}
+
+// SetOrderPrepareCheckStaters sets the order of set prepare-check-stater calls
+func (m *Manager)
+
+SetOrderPrepareCheckStaters(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPrepareCheckStaters", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasPrepareCheckState := module.(appmodule.HasPrepareCheckState)
+
+return !hasPrepareCheckState
+})
+
+m.OrderPrepareCheckStaters = moduleNames
+}
+
+// SetOrderPrecommiters sets the order of set precommiter calls
+func (m *Manager)
+
+SetOrderPrecommiters(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPrecommiters", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasPrecommit := module.(appmodule.HasPrecommit)
+
+return !hasPrecommit
+})
+
+m.OrderPrecommiters = moduleNames
+}
+
+// SetOrderMigrations sets the order of migrations to be run. If not set
+// then migrations will be run with an order defined in `DefaultMigrationsOrder`.
+func (m *Manager)
+
+SetOrderMigrations(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderMigrations", moduleNames, nil)
+
+m.OrderMigrations = moduleNames
+}
+
+// RegisterInvariants registers all module invariants
+//
+// Deprecated: this function is a no-op and will be removed in the next release of the Cosmos SDK.
+func (m *Manager)
+
+RegisterInvariants(_ sdk.InvariantRegistry) {
+}
+
+// RegisterServices registers all module services
+func (m *Manager)
+
+RegisterServices(cfg Configurator)
+
+error {
+ for _, module := range m.Modules {
+ if module, ok := module.(HasServices); ok {
+ module.RegisterServices(cfg)
+}
+ if module, ok := module.(appmodule.HasServices); ok {
+ err := module.RegisterServices(cfg)
+ if err != nil {
+ return err
+}
+
+}
+ if cfg.Error() != nil {
+ return cfg.Error()
+}
+
+}
+
+return nil
+}
+
+// InitGenesis performs init genesis functionality for modules. Exactly one
+// module must return a non-empty validator set update to correctly initialize
+// the chain.
+func (m *Manager)
+
+InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, genesisData map[string]json.RawMessage) (*abci.ResponseInitChain, error) {
+ var validatorUpdates []abci.ValidatorUpdate
+ ctx.Logger().Info("initializing blockchain state from genesis.json")
+ for _, moduleName := range m.OrderInitGenesis {
+ if genesisData[moduleName] == nil {
+ continue
+}
+ mod := m.Modules[moduleName]
+ // we might get an adapted module, a native core API module or a legacy module
+ if module, ok := mod.(appmodule.HasGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+ // core API genesis
+ source, err := genesis.SourceFromRawJSON(genesisData[moduleName])
+ if err != nil {
+ return &abci.ResponseInitChain{
+}, err
+}
+
+err = module.InitGenesis(ctx, source)
+ if err != nil {
+ return &abci.ResponseInitChain{
+}, err
+}
+
+}
+
+else if module, ok := mod.(HasGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+
+module.InitGenesis(ctx, cdc, genesisData[moduleName])
+}
+
+else if module, ok := mod.(HasABCIGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+ moduleValUpdates := module.InitGenesis(ctx, cdc, genesisData[moduleName])
+
+ // use these validator updates if provided, the module manager assumes
+ // only one module will update the validator set
+ if len(moduleValUpdates) > 0 {
+ if len(validatorUpdates) > 0 {
+ return &abci.ResponseInitChain{
+}, errors.New("validator InitGenesis updates already set by a previous module")
+}
+
+validatorUpdates = moduleValUpdates
+}
+
+}
+
+}
+
+ // a chain must initialize with a non-empty validator set
+ if len(validatorUpdates) == 0 {
+ return &abci.ResponseInitChain{
+}, fmt.Errorf("validator set is empty after InitGenesis, please ensure at least one validator is initialized with a delegation greater than or equal to the DefaultPowerReduction (%d)", sdk.DefaultPowerReduction)
+}
+
+return &abci.ResponseInitChain{
+ Validators: validatorUpdates,
+}, nil
+}
+
+// ExportGenesis performs export genesis functionality for modules
+func (m *Manager)
+
+ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) (map[string]json.RawMessage, error) {
+ return m.ExportGenesisForModules(ctx, cdc, []string{
+})
+}
+
+// ExportGenesisForModules performs export genesis functionality for modules
+func (m *Manager)
+
+ExportGenesisForModules(ctx sdk.Context, cdc codec.JSONCodec, modulesToExport []string) (map[string]json.RawMessage, error) {
+ if len(modulesToExport) == 0 {
+ modulesToExport = m.OrderExportGenesis
+}
+ // verify modules exists in app, so that we don't panic in the middle of an export
+ if err := m.checkModulesExists(modulesToExport); err != nil {
+ return nil, err
+}
+
+type genesisResult struct {
+ bz json.RawMessage
+ err error
+}
+ channels := make(map[string]chan genesisResult)
+ for _, moduleName := range modulesToExport {
+ mod := m.Modules[moduleName]
+ if module, ok := mod.(appmodule.HasGenesis); ok {
+ // core API genesis
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module appmodule.HasGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ target := genesis.RawJSONTarget{
+}
+ err := module.ExportGenesis(ctx, target.Target())
+ if err != nil {
+ ch <- genesisResult{
+ nil, err
+}
+
+return
+}
+
+rawJSON, err := target.JSON()
+ if err != nil {
+ ch <- genesisResult{
+ nil, err
+}
+
+return
+}
+
+ch <- genesisResult{
+ rawJSON, nil
+}
+
+}(module, channels[moduleName])
+}
+
+else if module, ok := mod.(HasGenesis); ok {
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module HasGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ ch <- genesisResult{
+ module.ExportGenesis(ctx, cdc), nil
+}
+
+}(module, channels[moduleName])
+}
+
+else if module, ok := mod.(HasABCIGenesis); ok {
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module HasABCIGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ ch <- genesisResult{
+ module.ExportGenesis(ctx, cdc), nil
+}
+
+}(module, channels[moduleName])
+}
+
+}
+ genesisData := make(map[string]json.RawMessage)
+ for moduleName := range channels {
+ res := <-channels[moduleName]
+ if res.err != nil {
+ return nil, fmt.Errorf("genesis export error in %s: %w", moduleName, res.err)
+}
+
+genesisData[moduleName] = res.bz
+}
+
+return genesisData, nil
+}
+
+// checkModulesExists verifies that all modules in the list exist in the app
+func (m *Manager)
+
+checkModulesExists(moduleName []string)
+
+error {
+ for _, name := range moduleName {
+ if _, ok := m.Modules[name]; !ok {
+ return fmt.Errorf("module %s does not exist", name)
+}
+
+}
+
+return nil
+}
+
+// assertNoForgottenModules checks that we didn't forget any modules in the SetOrder* functions.
+// `pass` is a closure which allows one to omit modules from `moduleNames`.
+// If you provide non-nil `pass` and it returns true, the module would not be subject of the assertion.
+func (m *Manager)
+
+assertNoForgottenModules(setOrderFnName string, moduleNames []string, pass func(moduleName string)
+
+bool) {
+ ms := make(map[string]bool)
+ for _, m := range moduleNames {
+ ms[m] = true
+}
+
+var missing []string
+ for m := range m.Modules {
+ if pass != nil && pass(m) {
+ continue
+}
+ if !ms[m] {
+ missing = append(missing, m)
+}
+
+}
+ if len(missing) != 0 {
+ sort.Strings(missing)
+
+panic(fmt.Sprintf(
+ "all modules must be defined when setting %s, missing: %v", setOrderFnName, missing))
+}
+}
+
+// MigrationHandler is the migration function that each module registers.
+type MigrationHandler func(sdk.Context)
+
+error
+
+// VersionMap is a map of moduleName -> version
+type VersionMap map[string]uint64
+
+// RunMigrations performs in-place store migrations for all modules. This
+// function MUST be called insde an x/upgrade UpgradeHandler.
+//
+// Recall that in an upgrade handler, the `fromVM` VersionMap is retrieved from
+// x/upgrade's store, and the function needs to return the target VersionMap
+// that will in turn be persisted to the x/upgrade's store. In general,
+// returning RunMigrations should be enough:
+//
+// Example:
+//
+// cfg := module.NewConfigurator(...)
+// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+// return app.mm.RunMigrations(ctx, cfg, fromVM)
+//
+})
+//
+// Internally, RunMigrations will perform the following steps:
+// - create an `updatedVM` VersionMap of module with their latest ConsensusVersion
+// - make a diff of `fromVM` and `udpatedVM`, and for each module:
+// - if the module's `fromVM` version is less than its `updatedVM` version,
+// then run in-place store migrations for that module between those versions.
+// - if the module does not exist in the `fromVM` (which means that it's a new module,
+// because it was not in the previous x/upgrade's store), then run
+// `InitGenesis` on that module.
+//
+// - return the `updatedVM` to be persisted in the x/upgrade's store.
+//
+// Migrations are run in an order defined by `Manager.OrderMigrations` or (if not set)
+
+defined by
+// `DefaultMigrationsOrder` function.
+//
+// As an app developer, if you wish to skip running InitGenesis for your new
+// module "foo", you need to manually pass a `fromVM` argument to this function
+// foo's module version set to its latest ConsensusVersion. That way, the diff
+// between the function's `fromVM` and `udpatedVM` will be empty, hence not
+// running anything for foo.
+//
+// Example:
+//
+// cfg := module.NewConfigurator(...)
+// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+// // Assume "foo" is a new module.
+// // `fromVM` is fetched from existing x/upgrade store. Since foo didn't exist
+// // before this upgrade, `v, exists := fromVM["foo"]; exists == false`, and RunMigration will by default
+// // run InitGenesis on foo.
+// // To skip running foo's InitGenesis, you need set `fromVM`'s foo to its latest
+// // consensus version:
+// fromVM["foo"] = foo.AppModule{
+}.ConsensusVersion()
+//
+// return app.mm.RunMigrations(ctx, cfg, fromVM)
+//
+})
+//
+// Please also refer to /sdk/v0.53/build/migrations/upgrading for more information.
+func (m Manager)
+
+RunMigrations(ctx context.Context, cfg Configurator, fromVM VersionMap) (VersionMap, error) {
+ c, ok := cfg.(*configurator)
+ if !ok {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidType, "expected %T, got %T", &configurator{
+}, cfg)
+}
+ modules := m.OrderMigrations
+ if modules == nil {
+ modules = DefaultMigrationsOrder(m.ModuleNames())
+}
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ updatedVM := VersionMap{
+}
+ for _, moduleName := range modules {
+ module := m.Modules[moduleName]
+ fromVersion, exists := fromVM[moduleName]
+ toVersion := uint64(0)
+ if module, ok := module.(HasConsensusVersion); ok {
+ toVersion = module.ConsensusVersion()
+}
+
+ // We run migration if the module is specified in `fromVM`.
+ // Otherwise we run InitGenesis.
+ //
+ // The module won't exist in the fromVM in two cases:
+ // 1. A new module is added. In this case we run InitGenesis with an
+ // empty genesis state.
+ // 2. An existing chain is upgrading from version < 0.43 to v0.43+ for the first time.
+ // In this case, all modules have yet to be added to x/upgrade's VersionMap store.
+ if exists {
+ err := c.runModuleMigrations(sdkCtx, moduleName, fromVersion, toVersion)
+ if err != nil {
+ return nil, err
+}
+
+}
+
+else {
+ sdkCtx.Logger().Info(fmt.Sprintf("adding a new module: %s", moduleName))
+ if module, ok := m.Modules[moduleName].(HasGenesis); ok {
+ module.InitGenesis(sdkCtx, c.cdc, module.DefaultGenesis(c.cdc))
+}
+ if module, ok := m.Modules[moduleName].(HasABCIGenesis); ok {
+ moduleValUpdates := module.InitGenesis(sdkCtx, c.cdc, module.DefaultGenesis(c.cdc))
+ // The module manager assumes only one module will update the
+ // validator set, and it can't be a new module.
+ if len(moduleValUpdates) > 0 {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrLogic, "validator InitGenesis update is already set by another module")
+}
+
+}
+
+}
+
+updatedVM[moduleName] = toVersion
+}
+
+return updatedVM, nil
+}
+
+// PreBlock performs begin block functionality for upgrade module.
+// It takes the current context as a parameter and returns a boolean value
+// indicating whether the migration was successfully executed or not.
+func (m *Manager)
+
+PreBlock(ctx sdk.Context) (*sdk.ResponsePreBlock, error) {
+ paramsChanged := false
+ for _, moduleName := range m.OrderPreBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasPreBlocker); ok {
+ rsp, err := module.PreBlock(ctx)
+ if err != nil {
+ return nil, err
+}
+ if rsp.IsConsensusParamsChanged() {
+ paramsChanged = true
+}
+
+}
+
+}
+
+return &sdk.ResponsePreBlock{
+ ConsensusParamsChanged: paramsChanged,
+}, nil
+}
+
+// BeginBlock performs begin block functionality for all modules. It creates a
+// child context with an event manager to aggregate events emitted from all
+// modules.
+func (m *Manager)
+
+BeginBlock(ctx sdk.Context) (sdk.BeginBlock, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+ for _, moduleName := range m.OrderBeginBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasBeginBlocker); ok {
+ if err := module.BeginBlock(ctx); err != nil {
+ return sdk.BeginBlock{
+}, err
+}
+
+}
+
+}
+
+return sdk.BeginBlock{
+ Events: ctx.EventManager().ABCIEvents(),
+}, nil
+}
+
+// EndBlock performs end block functionality for all modules. It creates a
+// child context with an event manager to aggregate events emitted from all
+// modules.
+func (m *Manager)
+
+EndBlock(ctx sdk.Context) (sdk.EndBlock, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+ validatorUpdates := []abci.ValidatorUpdate{
+}
+ for _, moduleName := range m.OrderEndBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasEndBlocker); ok {
+ err := module.EndBlock(ctx)
+ if err != nil {
+ return sdk.EndBlock{
+}, err
+}
+
+}
+
+else if module, ok := m.Modules[moduleName].(HasABCIEndBlock); ok {
+ moduleValUpdates, err := module.EndBlock(ctx)
+ if err != nil {
+ return sdk.EndBlock{
+}, err
+}
+ // use these validator updates if provided, the module manager assumes
+ // only one module will update the validator set
+ if len(moduleValUpdates) > 0 {
+ if len(validatorUpdates) > 0 {
+ return sdk.EndBlock{
+}, errors.New("validator EndBlock updates already set by a previous module")
+}
+ for _, updates := range moduleValUpdates {
+ validatorUpdates = append(validatorUpdates, abci.ValidatorUpdate{
+ PubKey: updates.PubKey,
+ Power: updates.Power
+})
+}
+
+}
+
+}
+
+else {
+ continue
+}
+
+}
+
+return sdk.EndBlock{
+ ValidatorUpdates: validatorUpdates,
+ Events: ctx.EventManager().ABCIEvents(),
+}, nil
+}
+
+// Precommit performs precommit functionality for all modules.
+func (m *Manager)
+
+Precommit(ctx sdk.Context)
+
+error {
+ for _, moduleName := range m.OrderPrecommiters {
+ module, ok := m.Modules[moduleName].(appmodule.HasPrecommit)
+ if !ok {
+ continue
+}
+ if err := module.Precommit(ctx); err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+// PrepareCheckState performs functionality for preparing the check state for all modules.
+func (m *Manager)
+
+PrepareCheckState(ctx sdk.Context)
+
+error {
+ for _, moduleName := range m.OrderPrepareCheckStaters {
+ module, ok := m.Modules[moduleName].(appmodule.HasPrepareCheckState)
+ if !ok {
+ continue
+}
+ if err := module.PrepareCheckState(ctx); err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+// GetVersionMap gets consensus version from all modules
+func (m *Manager)
+
+GetVersionMap()
+
+VersionMap {
+ vermap := make(VersionMap)
+ for name, v := range m.Modules {
+ version := uint64(0)
+ if v, ok := v.(HasConsensusVersion); ok {
+ version = v.ConsensusVersion()
+}
+
+vermap[name] = version
+}
+
+return vermap
+}
+
+// ModuleNames returns list of all module names, without any particular order.
+func (m *Manager)
+
+ModuleNames() []string {
+ return slices.Collect(maps.Keys(m.Modules))
+}
+
// DefaultMigrationsOrder returns a default migrations order: ascending alphabetical by module name,
// except x/auth which will run last, see:
// https://github.com/cosmos/cosmos-sdk/issues/10591
func DefaultMigrationsOrder(modules []string) []string {
	const authName = "auth"
	out := make([]string, 0, len(modules))
	hasAuth := false
	for _, m := range modules {
		if m == authName {
			hasAuth = true
		} else {
			out = append(out, m)
		}
	}

	sort.Strings(out)
	// auth always migrates last because other modules depend on its state
	if hasAuth {
		out = append(out, authName)
	}

	return out
}
+```
+
+Let us go through the methods:
+
+* `DefaultGenesis(codec.JSONCodec)`: Returns a default [`GenesisState`](/sdk/v0.53/build/building-modules/genesis) for the module, marshalled to `json.RawMessage`. The default `GenesisState` needs to be defined by the module developer and is primarily used for testing.
+* `ValidateGenesis(codec.JSONCodec, client.TxEncodingConfig, json.RawMessage)`: Used to validate the `GenesisState` defined by a module, given in its `json.RawMessage` form. It will usually unmarshal the JSON before running a custom [`ValidateGenesis`](/sdk/v0.53/build/building-modules/genesis#validategenesis) function defined by the module developer.
+
+#### `module.HasGenesis`
+
+`HasGenesis` is an extension interface for allowing modules to implement genesis functionalities.
+
+```go expandable
+/*
+Package module contains application module patterns and associated "manager" functionality.
+The module pattern has been broken down by:
+ - independent module functionality (AppModuleBasic)
+ - inter-dependent module simulation functionality (AppModuleSimulation)
+ - inter-dependent module full functionality (AppModule)
+
+inter-dependent module functionality is module functionality which somehow
+depends on other modules, typically through the module keeper. Many of the
+module keepers are dependent on each other, thus in order to access the full
+set of module functionality we need to define all the keepers/params-store/keys
+etc. This full set of advanced functionality is defined by the AppModule interface.
+
+Independent module functions are separated to allow for the construction of the
+basic application structures required early on in the application definition
+and used to enable the definition of full module functionality later in the
+process. This separation is necessary, however we still want to allow for a
+high level pattern for modules to follow - for instance, such that we don't
+have to manually register all of the codecs for all the modules. This basic
+procedure as well as other basic patterns are handled through the use of
+BasicManager.
+
+Lastly the interface for genesis functionality (HasGenesis & HasABCIGenesis)
+
+has been
+separated out from full module functionality (AppModule)
+
+so that modules which
+are only used for genesis can take advantage of the Module patterns without
+needlessly defining many placeholder functions
+*/
+package module
+
+import (
+
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "sort"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+ "golang.org/x/exp/maps"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/core/genesis"
+ errorsmod "cosmossdk.io/errors"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// AppModuleBasic is the standard form for basic non-dependant elements of an application module.
+type AppModuleBasic interface {
+ HasName
+ RegisterLegacyAminoCodec(*codec.LegacyAmino)
+
+RegisterInterfaces(types.InterfaceRegistry)
+
+RegisterGRPCGatewayRoutes(client.Context, *runtime.ServeMux)
+}
+
+// HasName allows the module to provide its own name for legacy purposes.
+// Newer apps should specify the name for their modules using a map
+// using NewManagerFromMap.
+type HasName interface {
+ Name()
+
+string
+}
+
+// HasGenesisBasics is the legacy interface for stateless genesis methods.
+type HasGenesisBasics interface {
+ DefaultGenesis(codec.JSONCodec)
+
+json.RawMessage
+ ValidateGenesis(codec.JSONCodec, client.TxEncodingConfig, json.RawMessage)
+
+error
+}
+
+// BasicManager is a collection of AppModuleBasic
+type BasicManager map[string]AppModuleBasic
+
+// NewBasicManager creates a new BasicManager object
+func NewBasicManager(modules ...AppModuleBasic)
+
+BasicManager {
+ moduleMap := make(map[string]AppModuleBasic)
+ for _, module := range modules {
+ moduleMap[module.Name()] = module
+}
+
+return moduleMap
+}
+
+// NewBasicManagerFromManager creates a new BasicManager from a Manager
+// The BasicManager will contain all AppModuleBasic from the AppModule Manager
+// Module's AppModuleBasic can be overridden by passing a custom AppModuleBasic map
+func NewBasicManagerFromManager(manager *Manager, customModuleBasics map[string]AppModuleBasic)
+
+BasicManager {
+ moduleMap := make(map[string]AppModuleBasic)
+ for name, module := range manager.Modules {
+ if customBasicMod, ok := customModuleBasics[name]; ok {
+ moduleMap[name] = customBasicMod
+ continue
+}
+ if appModule, ok := module.(appmodule.AppModule); ok {
+ moduleMap[name] = CoreAppModuleBasicAdaptor(name, appModule)
+
+continue
+}
+ if basicMod, ok := module.(AppModuleBasic); ok {
+ moduleMap[name] = basicMod
+}
+
+}
+
+return moduleMap
+}
+
+// RegisterLegacyAminoCodec registers all module codecs
+func (bm BasicManager)
+
+RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ for _, b := range bm {
+ b.RegisterLegacyAminoCodec(cdc)
+}
+}
+
+// RegisterInterfaces registers all module interface types
+func (bm BasicManager)
+
+RegisterInterfaces(registry types.InterfaceRegistry) {
+ for _, m := range bm {
+ m.RegisterInterfaces(registry)
+}
+}
+
+// DefaultGenesis provides default genesis information for all modules
+func (bm BasicManager)
+
+DefaultGenesis(cdc codec.JSONCodec)
+
+map[string]json.RawMessage {
+ genesisData := make(map[string]json.RawMessage)
+ for _, b := range bm {
+ if mod, ok := b.(HasGenesisBasics); ok {
+ genesisData[b.Name()] = mod.DefaultGenesis(cdc)
+}
+
+}
+
+return genesisData
+}
+
+// ValidateGenesis performs genesis state validation for all modules
+func (bm BasicManager)
+
+ValidateGenesis(cdc codec.JSONCodec, txEncCfg client.TxEncodingConfig, genesisData map[string]json.RawMessage)
+
+error {
+ for _, b := range bm {
+ // first check if the module is an adapted Core API Module
+ if mod, ok := b.(HasGenesisBasics); ok {
+ if err := mod.ValidateGenesis(cdc, txEncCfg, genesisData[b.Name()]); err != nil {
+ return err
+}
+
+}
+
+}
+
+return nil
+}
+
+// RegisterGRPCGatewayRoutes registers all module rest routes
+func (bm BasicManager)
+
+RegisterGRPCGatewayRoutes(clientCtx client.Context, rtr *runtime.ServeMux) {
+ for _, b := range bm {
+ b.RegisterGRPCGatewayRoutes(clientCtx, rtr)
+}
+}
+
+// AddTxCommands adds all tx commands to the rootTxCmd.
+func (bm BasicManager)
+
+AddTxCommands(rootTxCmd *cobra.Command) {
+ for _, b := range bm {
+ if mod, ok := b.(interface {
+ GetTxCmd() *cobra.Command
+}); ok {
+ if cmd := mod.GetTxCmd(); cmd != nil {
+ rootTxCmd.AddCommand(cmd)
+}
+
+}
+
+}
+}
+
+// AddQueryCommands adds all query commands to the rootQueryCmd.
+func (bm BasicManager)
+
+AddQueryCommands(rootQueryCmd *cobra.Command) {
+ for _, b := range bm {
+ if mod, ok := b.(interface {
+ GetQueryCmd() *cobra.Command
+}); ok {
+ if cmd := mod.GetQueryCmd(); cmd != nil {
+ rootQueryCmd.AddCommand(cmd)
+}
+
+}
+
+}
+}
+
+// HasGenesis is the extension interface for stateful genesis methods.
+type HasGenesis interface {
+ HasGenesisBasics
+ InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage)
+
+ExportGenesis(sdk.Context, codec.JSONCodec)
+
+json.RawMessage
+}
+
+// HasABCIGenesis is the extension interface for stateful genesis methods which returns validator updates.
+type HasABCIGenesis interface {
+ HasGenesisBasics
+ InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage) []abci.ValidatorUpdate
+ ExportGenesis(sdk.Context, codec.JSONCodec)
+
+json.RawMessage
+}
+
+// AppModule is the form for an application module. Most of
+// its functionality has been moved to extension interfaces.
+type AppModule interface {
+ AppModuleBasic
+}
+
+// HasInvariants is the interface for registering invariants.
+type HasInvariants interface {
+ // RegisterInvariants registers module invariants.
+ RegisterInvariants(sdk.InvariantRegistry)
+}
+
+// HasServices is the interface for modules to register services.
+type HasServices interface {
+ // RegisterServices allows a module to register services.
+ RegisterServices(Configurator)
+}
+
+// HasConsensusVersion is the interface for declaring a module consensus version.
+type HasConsensusVersion interface {
+ // ConsensusVersion is a sequence number for state-breaking change of the
+ // module. It should be incremented on each consensus-breaking change
+ // introduced by the module. To avoid wrong/empty versions, the initial version
+ // should be set to 1.
+ ConsensusVersion()
+
+uint64
+}
+
+type HasABCIEndblock interface {
+ AppModule
+ EndBlock(context.Context) ([]abci.ValidatorUpdate, error)
+}
+
+var (
+ _ appmodule.AppModule = (*GenesisOnlyAppModule)(nil)
+ _ AppModuleBasic = (*GenesisOnlyAppModule)(nil)
+)
+
+// genesisOnlyModule is an interface need to return GenesisOnlyAppModule struct in order to wrap two interfaces
+type genesisOnlyModule interface {
+ AppModuleBasic
+ HasABCIGenesis
+}
+
+// GenesisOnlyAppModule is an AppModule that only has import/export functionality
+type GenesisOnlyAppModule struct {
+ genesisOnlyModule
+}
+
+// NewGenesisOnlyAppModule creates a new GenesisOnlyAppModule object
+func NewGenesisOnlyAppModule(amg genesisOnlyModule)
+
+GenesisOnlyAppModule {
+ return GenesisOnlyAppModule{
+ genesisOnlyModule: amg,
+}
+}
+
+// IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+func (GenesisOnlyAppModule)
+
+IsOnePerModuleType() {
+}
+
+// IsAppModule implements the appmodule.AppModule interface.
+func (GenesisOnlyAppModule)
+
+IsAppModule() {
+}
+
+// RegisterInvariants is a placeholder function register no invariants
+func (GenesisOnlyAppModule)
+
+RegisterInvariants(_ sdk.InvariantRegistry) {
+}
+
+// ConsensusVersion implements AppModule/ConsensusVersion.
+func (gam GenesisOnlyAppModule)
+
+ConsensusVersion()
+
+uint64 {
+ return 1
+}
+
+// Manager defines a module manager that provides the high level utility for managing and executing
+// operations for a group of modules
+type Manager struct {
+ Modules map[string]interface{
+} // interface{
+}
+
+is used now to support the legacy AppModule as well as new core appmodule.AppModule.
+ OrderInitGenesis []string
+ OrderExportGenesis []string
+ OrderBeginBlockers []string
+ OrderEndBlockers []string
+ OrderPrepareCheckStaters []string
+ OrderPrecommiters []string
+ OrderMigrations []string
+}
+
+// NewManager creates a new Manager object.
+func NewManager(modules ...AppModule) *Manager {
+ moduleMap := make(map[string]interface{
+})
+ modulesStr := make([]string, 0, len(modules))
+ for _, module := range modules {
+ moduleMap[module.Name()] = module
+ modulesStr = append(modulesStr, module.Name())
+}
+
+return &Manager{
+ Modules: moduleMap,
+ OrderInitGenesis: modulesStr,
+ OrderExportGenesis: modulesStr,
+ OrderBeginBlockers: modulesStr,
+ OrderPrepareCheckStaters: modulesStr,
+ OrderPrecommiters: modulesStr,
+ OrderEndBlockers: modulesStr,
+}
+}
+
+// NewManagerFromMap creates a new Manager object from a map of module names to module implementations.
+// This method should be used for apps and modules which have migrated to the cosmossdk.io/core.appmodule.AppModule API.
+func NewManagerFromMap(moduleMap map[string]appmodule.AppModule) *Manager {
+ simpleModuleMap := make(map[string]interface{
+})
+ modulesStr := make([]string, 0, len(simpleModuleMap))
+ for name, module := range moduleMap {
+ simpleModuleMap[name] = module
+ modulesStr = append(modulesStr, name)
+}
+
+ // Sort the modules by name. Given that we are using a map above we can't guarantee the order.
+ sort.Strings(modulesStr)
+
+return &Manager{
+ Modules: simpleModuleMap,
+ OrderInitGenesis: modulesStr,
+ OrderExportGenesis: modulesStr,
+ OrderBeginBlockers: modulesStr,
+ OrderEndBlockers: modulesStr,
+ OrderPrecommiters: modulesStr,
+ OrderPrepareCheckStaters: modulesStr,
+}
+}
+
+// SetOrderInitGenesis sets the order of init genesis calls
+func (m *Manager)
+
+SetOrderInitGenesis(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderInitGenesis", moduleNames, func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+ return !hasGenesis
+}
+ if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+ return !hasABCIGenesis
+}
+
+ _, hasGenesis := module.(HasGenesis)
+
+return !hasGenesis
+})
+
+m.OrderInitGenesis = moduleNames
+}
+
+// SetOrderExportGenesis sets the order of export genesis calls
+func (m *Manager)
+
+SetOrderExportGenesis(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderExportGenesis", moduleNames, func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+ return !hasGenesis
+}
+ if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+ return !hasABCIGenesis
+}
+
+ _, hasGenesis := module.(HasGenesis)
+
+return !hasGenesis
+})
+
+m.OrderExportGenesis = moduleNames
+}
+
+// SetOrderBeginBlockers sets the order of set begin-blocker calls
+func (m *Manager)
+
+SetOrderBeginBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderBeginBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasBeginBlock := module.(appmodule.HasBeginBlocker)
+
+return !hasBeginBlock
+})
+
+m.OrderBeginBlockers = moduleNames
+}
+
+// SetOrderEndBlockers sets the order of set end-blocker calls
+func (m *Manager)
+
+SetOrderEndBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderEndBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasEndBlock := module.(appmodule.HasEndBlocker); hasEndBlock {
+ return !hasEndBlock
+}
+
+ _, hasABCIEndBlock := module.(HasABCIEndblock)
+
+return !hasABCIEndBlock
+})
+
+m.OrderEndBlockers = moduleNames
+}
+
+// SetOrderPrepareCheckStaters sets the order of set prepare-check-stater calls
+func (m *Manager)
+
+SetOrderPrepareCheckStaters(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPrepareCheckStaters", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasPrepareCheckState := module.(appmodule.HasPrepareCheckState)
+
+return !hasPrepareCheckState
+})
+
+m.OrderPrepareCheckStaters = moduleNames
+}
+
+// SetOrderPrecommiters sets the order of set precommiter calls
+func (m *Manager)
+
+SetOrderPrecommiters(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPrecommiters", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasPrecommit := module.(appmodule.HasPrecommit)
+
+return !hasPrecommit
+})
+
+m.OrderPrecommiters = moduleNames
+}
+
+// SetOrderMigrations sets the order of migrations to be run. If not set
+// then migrations will be run with an order defined in `DefaultMigrationsOrder`.
+func (m *Manager)
+
+SetOrderMigrations(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderMigrations", moduleNames, nil)
+
+m.OrderMigrations = moduleNames
+}
+
+// RegisterInvariants registers all module invariants
+func (m *Manager)
+
+RegisterInvariants(ir sdk.InvariantRegistry) {
+ for _, module := range m.Modules {
+ if module, ok := module.(HasInvariants); ok {
+ module.RegisterInvariants(ir)
+}
+
+}
+}
+
+// RegisterServices registers all module services
+func (m *Manager)
+
+RegisterServices(cfg Configurator)
+
+error {
+ for _, module := range m.Modules {
+ if module, ok := module.(HasServices); ok {
+ module.RegisterServices(cfg)
+}
+ if module, ok := module.(appmodule.HasServices); ok {
+ err := module.RegisterServices(cfg)
+ if err != nil {
+ return err
+}
+
+}
+ if cfg.Error() != nil {
+ return cfg.Error()
+}
+
+}
+
+return nil
+}
+
+// InitGenesis performs init genesis functionality for modules. Exactly one
+// module must return a non-empty validator set update to correctly initialize
+// the chain.
+func (m *Manager)
+
+InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, genesisData map[string]json.RawMessage) (*abci.ResponseInitChain, error) {
+ var validatorUpdates []abci.ValidatorUpdate
+ ctx.Logger().Info("initializing blockchain state from genesis.json")
+ for _, moduleName := range m.OrderInitGenesis {
+ if genesisData[moduleName] == nil {
+ continue
+}
+ mod := m.Modules[moduleName]
+ // we might get an adapted module, a native core API module or a legacy module
+ if module, ok := mod.(appmodule.HasGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+ // core API genesis
+ source, err := genesis.SourceFromRawJSON(genesisData[moduleName])
+ if err != nil {
+ return &abci.ResponseInitChain{
+}, err
+}
+
+err = module.InitGenesis(ctx, source)
+ if err != nil {
+ return &abci.ResponseInitChain{
+}, err
+}
+
+}
+
+else if module, ok := mod.(HasGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+
+module.InitGenesis(ctx, cdc, genesisData[moduleName])
+}
+
+else if module, ok := mod.(HasABCIGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+ moduleValUpdates := module.InitGenesis(ctx, cdc, genesisData[moduleName])
+
+ // use these validator updates if provided, the module manager assumes
+ // only one module will update the validator set
+ if len(moduleValUpdates) > 0 {
+ if len(validatorUpdates) > 0 {
+ return &abci.ResponseInitChain{
+}, errors.New("validator InitGenesis updates already set by a previous module")
+}
+
+validatorUpdates = moduleValUpdates
+}
+
+}
+
+}
+
+ // a chain must initialize with a non-empty validator set
+ if len(validatorUpdates) == 0 {
+ return &abci.ResponseInitChain{
+}, fmt.Errorf("validator set is empty after InitGenesis, please ensure at least one validator is initialized with a delegation greater than or equal to the DefaultPowerReduction (%d)", sdk.DefaultPowerReduction)
+}
+
+return &abci.ResponseInitChain{
+ Validators: validatorUpdates,
+}, nil
+}
+
+// ExportGenesis performs export genesis functionality for modules
+func (m *Manager)
+
+ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) (map[string]json.RawMessage, error) {
+ return m.ExportGenesisForModules(ctx, cdc, []string{
+})
+}
+
+// ExportGenesisForModules performs export genesis functionality for modules
+func (m *Manager)
+
+ExportGenesisForModules(ctx sdk.Context, cdc codec.JSONCodec, modulesToExport []string) (map[string]json.RawMessage, error) {
+ if len(modulesToExport) == 0 {
+ modulesToExport = m.OrderExportGenesis
+}
+ // verify modules exists in app, so that we don't panic in the middle of an export
+ if err := m.checkModulesExists(modulesToExport); err != nil {
+ return nil, err
+}
+
+type genesisResult struct {
+ bz json.RawMessage
+ err error
+}
+ channels := make(map[string]chan genesisResult)
+ for _, moduleName := range modulesToExport {
+ mod := m.Modules[moduleName]
+ if module, ok := mod.(appmodule.HasGenesis); ok {
+ // core API genesis
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module appmodule.HasGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ target := genesis.RawJSONTarget{
+}
+ err := module.ExportGenesis(ctx, target.Target())
+ if err != nil {
+ ch <- genesisResult{
+ nil, err
+}
+
+return
+}
+
+rawJSON, err := target.JSON()
+ if err != nil {
+ ch <- genesisResult{
+ nil, err
+}
+
+return
+}
+
+ch <- genesisResult{
+ rawJSON, nil
+}
+
+}(module, channels[moduleName])
+}
+
+else if module, ok := mod.(HasGenesis); ok {
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module HasGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ ch <- genesisResult{
+ module.ExportGenesis(ctx, cdc), nil
+}
+
+}(module, channels[moduleName])
+}
+
+else if module, ok := mod.(HasABCIGenesis); ok {
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module HasABCIGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ ch <- genesisResult{
+ module.ExportGenesis(ctx, cdc), nil
+}
+
+}(module, channels[moduleName])
+}
+
+}
+ genesisData := make(map[string]json.RawMessage)
+ for moduleName := range channels {
+ res := <-channels[moduleName]
+ if res.err != nil {
+ return nil, res.err
+}
+
+genesisData[moduleName] = res.bz
+}
+
+return genesisData, nil
+}
+
+// checkModulesExists verifies that all modules in the list exist in the app
+func (m *Manager)
+
+checkModulesExists(moduleName []string)
+
+error {
+ for _, name := range moduleName {
+ if _, ok := m.Modules[name]; !ok {
+ return fmt.Errorf("module %s does not exist", name)
+}
+
+}
+
+return nil
+}
+
+// assertNoForgottenModules checks that we didn't forget any modules in the SetOrder* functions.
+// `pass` is a closure which allows one to omit modules from `moduleNames`.
+// If you provide non-nil `pass` and it returns true, the module would not be subject of the assertion.
+func (m *Manager)
+
+assertNoForgottenModules(setOrderFnName string, moduleNames []string, pass func(moduleName string)
+
+bool) {
+ ms := make(map[string]bool)
+ for _, m := range moduleNames {
+ ms[m] = true
+}
+
+var missing []string
+ for m := range m.Modules {
+ m := m
+ if pass != nil && pass(m) {
+ continue
+}
+ if !ms[m] {
+ missing = append(missing, m)
+}
+
+}
+ if len(missing) != 0 {
+ sort.Strings(missing)
+
+panic(fmt.Sprintf(
+ "all modules must be defined when setting %s, missing: %v", setOrderFnName, missing))
+}
+}
+
+// MigrationHandler is the migration function that each module registers.
+type MigrationHandler func(sdk.Context)
+
+error
+
+// VersionMap is a map of moduleName -> version
+type VersionMap map[string]uint64
+
+// RunMigrations performs in-place store migrations for all modules. This
+// function MUST be called insde an x/upgrade UpgradeHandler.
+//
+// Recall that in an upgrade handler, the `fromVM` VersionMap is retrieved from
+// x/upgrade's store, and the function needs to return the target VersionMap
+// that will in turn be persisted to the x/upgrade's store. In general,
+// returning RunMigrations should be enough:
+//
+// Example:
+//
+// cfg := module.NewConfigurator(...)
+// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+// return app.mm.RunMigrations(ctx, cfg, fromVM)
+//
+})
+//
+// Internally, RunMigrations will perform the following steps:
+// - create an `updatedVM` VersionMap of module with their latest ConsensusVersion
+// - make a diff of `fromVM` and `udpatedVM`, and for each module:
+// - if the module's `fromVM` version is less than its `updatedVM` version,
+// then run in-place store migrations for that module between those versions.
+// - if the module does not exist in the `fromVM` (which means that it's a new module,
+// because it was not in the previous x/upgrade's store), then run
+// `InitGenesis` on that module.
+//
+// - return the `updatedVM` to be persisted in the x/upgrade's store.
+//
+// Migrations are run in an order defined by `Manager.OrderMigrations` or (if not set)
+
+defined by
+// `DefaultMigrationsOrder` function.
+//
+// As an app developer, if you wish to skip running InitGenesis for your new
+// module "foo", you need to manually pass a `fromVM` argument to this function
+// foo's module version set to its latest ConsensusVersion. That way, the diff
+// between the function's `fromVM` and `udpatedVM` will be empty, hence not
+// running anything for foo.
+//
+// Example:
+//
+// cfg := module.NewConfigurator(...)
+// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+// // Assume "foo" is a new module.
+// // `fromVM` is fetched from existing x/upgrade store. Since foo didn't exist
+// // before this upgrade, `v, exists := fromVM["foo"]; exists == false`, and RunMigration will by default
+// // run InitGenesis on foo.
+// // To skip running foo's InitGenesis, you need set `fromVM`'s foo to its latest
+// // consensus version:
+// fromVM["foo"] = foo.AppModule{
+}.ConsensusVersion()
+//
+// return app.mm.RunMigrations(ctx, cfg, fromVM)
+//
+})
+//
+// Please also refer to /sdk/v0.53/build/migrations/upgrading for more information.
+func (m Manager)
+
+RunMigrations(ctx context.Context, cfg Configurator, fromVM VersionMap) (VersionMap, error) {
+ c, ok := cfg.(*configurator)
+ if !ok {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidType, "expected %T, got %T", &configurator{
+}, cfg)
+}
+ modules := m.OrderMigrations
+ if modules == nil {
+ modules = DefaultMigrationsOrder(m.ModuleNames())
+}
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ updatedVM := VersionMap{
+}
+ for _, moduleName := range modules {
+ module := m.Modules[moduleName]
+ fromVersion, exists := fromVM[moduleName]
+ toVersion := uint64(0)
+ if module, ok := module.(HasConsensusVersion); ok {
+ toVersion = module.ConsensusVersion()
+}
+
+ // We run migration if the module is specified in `fromVM`.
+ // Otherwise we run InitGenesis.
+ //
+ // The module won't exist in the fromVM in two cases:
+ // 1. A new module is added. In this case we run InitGenesis with an
+ // empty genesis state.
+ // 2. An existing chain is upgrading from version < 0.43 to v0.43+ for the first time.
+ // In this case, all modules have yet to be added to x/upgrade's VersionMap store.
+ if exists {
+ err := c.runModuleMigrations(sdkCtx, moduleName, fromVersion, toVersion)
+ if err != nil {
+ return nil, err
+}
+
+}
+
+else {
+ sdkCtx.Logger().Info(fmt.Sprintf("adding a new module: %s", moduleName))
+
+module1, ok := m.Modules[moduleName].(HasGenesis)
+ if ok {
+ module1.InitGenesis(sdkCtx, c.cdc, module1.DefaultGenesis(c.cdc))
+}
+ if module2, ok := m.Modules[moduleName].(HasABCIGenesis); ok {
+ moduleValUpdates := module2.InitGenesis(sdkCtx, c.cdc, module1.DefaultGenesis(c.cdc))
+ // The module manager assumes only one module will update the
+ // validator set, and it can't be a new module.
+ if len(moduleValUpdates) > 0 {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrLogic, "validator InitGenesis update is already set by another module")
+}
+
+}
+
+}
+
+updatedVM[moduleName] = toVersion
+}
+
+return updatedVM, nil
+}
+
+// RunMigrationBeginBlock performs begin block functionality for upgrade module.
+// It takes the current context as a parameter and returns a boolean value
+// indicating whether the migration was executed or not and an error if fails.
+func (m *Manager)
+
+RunMigrationBeginBlock(ctx sdk.Context) (bool, error) {
+ for _, moduleName := range m.OrderBeginBlockers {
+ if mod, ok := m.Modules[moduleName].(appmodule.HasBeginBlocker); ok {
+ if _, ok := mod.(appmodule.UpgradeModule); ok {
+ err := mod.BeginBlock(ctx)
+
+return err == nil, err
+}
+
+}
+
+}
+
+return false, nil
+}
+
+// BeginBlock performs begin block functionality for non-upgrade modules. It creates a
+// child context with an event manager to aggregate events emitted from non-upgrade
+// modules.
+func (m *Manager)
+
+BeginBlock(ctx sdk.Context) (sdk.BeginBlock, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+ for _, moduleName := range m.OrderBeginBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasBeginBlocker); ok {
+ if _, ok := module.(appmodule.UpgradeModule); !ok {
+ if err := module.BeginBlock(ctx); err != nil {
+ return sdk.BeginBlock{
+}, err
+}
+
+}
+
+}
+
+}
+
+return sdk.BeginBlock{
+ Events: ctx.EventManager().ABCIEvents(),
+}, nil
+}
+
+// EndBlock performs end block functionality for all modules. It creates a
+// child context with an event manager to aggregate events emitted from all
+// modules.
+func (m *Manager)
+
+EndBlock(ctx sdk.Context) (sdk.EndBlock, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+ validatorUpdates := []abci.ValidatorUpdate{
+}
+ for _, moduleName := range m.OrderEndBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasEndBlocker); ok {
+ err := module.EndBlock(ctx)
+ if err != nil {
+ return sdk.EndBlock{
+}, err
+}
+
+}
+
+else if module, ok := m.Modules[moduleName].(HasABCIEndblock); ok {
+ moduleValUpdates, err := module.EndBlock(ctx)
+ if err != nil {
+ return sdk.EndBlock{
+}, err
+}
+ // use these validator updates if provided, the module manager assumes
+ // only one module will update the validator set
+ if len(moduleValUpdates) > 0 {
+ if len(validatorUpdates) > 0 {
+ return sdk.EndBlock{
+}, errors.New("validator EndBlock updates already set by a previous module")
+}
+ for _, updates := range moduleValUpdates {
+ validatorUpdates = append(validatorUpdates, abci.ValidatorUpdate{
+ PubKey: updates.PubKey,
+ Power: updates.Power
+})
+}
+
+}
+
+}
+
+else {
+ continue
+}
+
+}
+
+return sdk.EndBlock{
+ ValidatorUpdates: validatorUpdates,
+ Events: ctx.EventManager().ABCIEvents(),
+}, nil
+}
+
+// Precommit performs precommit functionality for all modules.
+func (m *Manager)
+
+Precommit(ctx sdk.Context)
+
+error {
+ for _, moduleName := range m.OrderPrecommiters {
+ module, ok := m.Modules[moduleName].(appmodule.HasPrecommit)
+ if !ok {
+ continue
+}
+ if err := module.Precommit(ctx); err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+// PrepareCheckState performs functionality for preparing the check state for all modules.
+func (m *Manager)
+
+PrepareCheckState(ctx sdk.Context)
+
+error {
+ for _, moduleName := range m.OrderPrepareCheckStaters {
+ module, ok := m.Modules[moduleName].(appmodule.HasPrepareCheckState)
+ if !ok {
+ continue
+}
+ if err := module.PrepareCheckState(ctx); err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+// GetVersionMap gets consensus version from all modules
+func (m *Manager)
+
+GetVersionMap()
+
+VersionMap {
+ vermap := make(VersionMap)
+ for name, v := range m.Modules {
+ version := uint64(0)
+ if v, ok := v.(HasConsensusVersion); ok {
+ version = v.ConsensusVersion()
+}
+ name := name
+ vermap[name] = version
+}
+
+return vermap
+}
+
+// ModuleNames returns list of all module names, without any particular order.
+func (m *Manager)
+
+ModuleNames() []string {
+ return maps.Keys(m.Modules)
+}
+
// DefaultMigrationsOrder returns a default migrations order: ascending alphabetical by module name,
// except x/auth which will run last, see:
// https://github.com/cosmos/cosmos-sdk/issues/10591
func DefaultMigrationsOrder(modules []string) []string {
	const authName = "auth"
	out := make([]string, 0, len(modules))
	hasAuth := false
	for _, m := range modules {
		if m == authName {
			hasAuth = true
		} else {
			out = append(out, m)
		}
	}
	sort.Strings(out)
	if hasAuth {
		out = append(out, authName)
	}
	return out
}
+```
+
+#### `module.HasABCIGenesis`
+
`HasABCIGenesis` is an extension interface that allows modules to implement genesis functionality and return validator set updates.
+
+```go expandable
+/*
+Package module contains application module patterns and associated "manager" functionality.
+The module pattern has been broken down by:
+ - independent module functionality (AppModuleBasic)
+ - inter-dependent module simulation functionality (AppModuleSimulation)
+ - inter-dependent module full functionality (AppModule)
+
+inter-dependent module functionality is module functionality which somehow
+depends on other modules, typically through the module keeper. Many of the
+module keepers are dependent on each other, thus in order to access the full
+set of module functionality we need to define all the keepers/params-store/keys
+etc. This full set of advanced functionality is defined by the AppModule interface.
+
+Independent module functions are separated to allow for the construction of the
+basic application structures required early on in the application definition
+and used to enable the definition of full module functionality later in the
+process. This separation is necessary, however we still want to allow for a
+high level pattern for modules to follow - for instance, such that we don't
+have to manually register all of the codecs for all the modules. This basic
+procedure as well as other basic patterns are handled through the use of
+BasicManager.
+
Lastly the interface for genesis functionality (HasGenesis & HasABCIGenesis)
has been separated out from full module functionality (AppModule) so that
modules which are only used for genesis can take advantage of the Module
patterns without needlessly defining many placeholder functions
*/
+package module
+
+import (
+
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "sort"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+ "golang.org/x/exp/maps"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/core/genesis"
+ errorsmod "cosmossdk.io/errors"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// AppModuleBasic is the standard form for basic non-dependant elements of an application module.
+type AppModuleBasic interface {
+ HasName
+ RegisterLegacyAminoCodec(*codec.LegacyAmino)
+
+RegisterInterfaces(types.InterfaceRegistry)
+
+RegisterGRPCGatewayRoutes(client.Context, *runtime.ServeMux)
+}
+
+// HasName allows the module to provide its own name for legacy purposes.
+// Newer apps should specify the name for their modules using a map
+// using NewManagerFromMap.
+type HasName interface {
+ Name()
+
+string
+}
+
+// HasGenesisBasics is the legacy interface for stateless genesis methods.
+type HasGenesisBasics interface {
+ DefaultGenesis(codec.JSONCodec)
+
+json.RawMessage
+ ValidateGenesis(codec.JSONCodec, client.TxEncodingConfig, json.RawMessage)
+
+error
+}
+
+// BasicManager is a collection of AppModuleBasic
+type BasicManager map[string]AppModuleBasic
+
+// NewBasicManager creates a new BasicManager object
+func NewBasicManager(modules ...AppModuleBasic)
+
+BasicManager {
+ moduleMap := make(map[string]AppModuleBasic)
+ for _, module := range modules {
+ moduleMap[module.Name()] = module
+}
+
+return moduleMap
+}
+
+// NewBasicManagerFromManager creates a new BasicManager from a Manager
+// The BasicManager will contain all AppModuleBasic from the AppModule Manager
+// Module's AppModuleBasic can be overridden by passing a custom AppModuleBasic map
+func NewBasicManagerFromManager(manager *Manager, customModuleBasics map[string]AppModuleBasic)
+
+BasicManager {
+ moduleMap := make(map[string]AppModuleBasic)
+ for name, module := range manager.Modules {
+ if customBasicMod, ok := customModuleBasics[name]; ok {
+ moduleMap[name] = customBasicMod
+ continue
+}
+ if appModule, ok := module.(appmodule.AppModule); ok {
+ moduleMap[name] = CoreAppModuleBasicAdaptor(name, appModule)
+
+continue
+}
+ if basicMod, ok := module.(AppModuleBasic); ok {
+ moduleMap[name] = basicMod
+}
+
+}
+
+return moduleMap
+}
+
+// RegisterLegacyAminoCodec registers all module codecs
+func (bm BasicManager)
+
+RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ for _, b := range bm {
+ b.RegisterLegacyAminoCodec(cdc)
+}
+}
+
+// RegisterInterfaces registers all module interface types
+func (bm BasicManager)
+
+RegisterInterfaces(registry types.InterfaceRegistry) {
+ for _, m := range bm {
+ m.RegisterInterfaces(registry)
+}
+}
+
+// DefaultGenesis provides default genesis information for all modules
+func (bm BasicManager)
+
+DefaultGenesis(cdc codec.JSONCodec)
+
+map[string]json.RawMessage {
+ genesisData := make(map[string]json.RawMessage)
+ for _, b := range bm {
+ if mod, ok := b.(HasGenesisBasics); ok {
+ genesisData[b.Name()] = mod.DefaultGenesis(cdc)
+}
+
+}
+
+return genesisData
+}
+
+// ValidateGenesis performs genesis state validation for all modules
+func (bm BasicManager)
+
+ValidateGenesis(cdc codec.JSONCodec, txEncCfg client.TxEncodingConfig, genesisData map[string]json.RawMessage)
+
+error {
+ for _, b := range bm {
+ // first check if the module is an adapted Core API Module
+ if mod, ok := b.(HasGenesisBasics); ok {
+ if err := mod.ValidateGenesis(cdc, txEncCfg, genesisData[b.Name()]); err != nil {
+ return err
+}
+
+}
+
+}
+
+return nil
+}
+
+// RegisterGRPCGatewayRoutes registers all module rest routes
+func (bm BasicManager)
+
+RegisterGRPCGatewayRoutes(clientCtx client.Context, rtr *runtime.ServeMux) {
+ for _, b := range bm {
+ b.RegisterGRPCGatewayRoutes(clientCtx, rtr)
+}
+}
+
+// AddTxCommands adds all tx commands to the rootTxCmd.
+func (bm BasicManager)
+
+AddTxCommands(rootTxCmd *cobra.Command) {
+ for _, b := range bm {
+ if mod, ok := b.(interface {
+ GetTxCmd() *cobra.Command
+}); ok {
+ if cmd := mod.GetTxCmd(); cmd != nil {
+ rootTxCmd.AddCommand(cmd)
+}
+
+}
+
+}
+}
+
+// AddQueryCommands adds all query commands to the rootQueryCmd.
+func (bm BasicManager)
+
+AddQueryCommands(rootQueryCmd *cobra.Command) {
+ for _, b := range bm {
+ if mod, ok := b.(interface {
+ GetQueryCmd() *cobra.Command
+}); ok {
+ if cmd := mod.GetQueryCmd(); cmd != nil {
+ rootQueryCmd.AddCommand(cmd)
+}
+
+}
+
+}
+}
+
+// HasGenesis is the extension interface for stateful genesis methods.
+type HasGenesis interface {
+ HasGenesisBasics
+ InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage)
+
+ExportGenesis(sdk.Context, codec.JSONCodec)
+
+json.RawMessage
+}
+
+// HasABCIGenesis is the extension interface for stateful genesis methods which returns validator updates.
+type HasABCIGenesis interface {
+ HasGenesisBasics
+ InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage) []abci.ValidatorUpdate
+ ExportGenesis(sdk.Context, codec.JSONCodec)
+
+json.RawMessage
+}
+
+// AppModule is the form for an application module. Most of
+// its functionality has been moved to extension interfaces.
+type AppModule interface {
+ AppModuleBasic
+}
+
+// HasInvariants is the interface for registering invariants.
+type HasInvariants interface {
+ // RegisterInvariants registers module invariants.
+ RegisterInvariants(sdk.InvariantRegistry)
+}
+
+// HasServices is the interface for modules to register services.
+type HasServices interface {
+ // RegisterServices allows a module to register services.
+ RegisterServices(Configurator)
+}
+
+// HasConsensusVersion is the interface for declaring a module consensus version.
+type HasConsensusVersion interface {
+ // ConsensusVersion is a sequence number for state-breaking change of the
+ // module. It should be incremented on each consensus-breaking change
+ // introduced by the module. To avoid wrong/empty versions, the initial version
+ // should be set to 1.
+ ConsensusVersion()
+
+uint64
+}
+
+type HasABCIEndblock interface {
+ AppModule
+ EndBlock(context.Context) ([]abci.ValidatorUpdate, error)
+}
+
+var (
+ _ appmodule.AppModule = (*GenesisOnlyAppModule)(nil)
+ _ AppModuleBasic = (*GenesisOnlyAppModule)(nil)
+)
+
+// genesisOnlyModule is an interface need to return GenesisOnlyAppModule struct in order to wrap two interfaces
+type genesisOnlyModule interface {
+ AppModuleBasic
+ HasABCIGenesis
+}
+
+// GenesisOnlyAppModule is an AppModule that only has import/export functionality
+type GenesisOnlyAppModule struct {
+ genesisOnlyModule
+}
+
+// NewGenesisOnlyAppModule creates a new GenesisOnlyAppModule object
+func NewGenesisOnlyAppModule(amg genesisOnlyModule)
+
+GenesisOnlyAppModule {
+ return GenesisOnlyAppModule{
+ genesisOnlyModule: amg,
+}
+}
+
+// IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+func (GenesisOnlyAppModule)
+
+IsOnePerModuleType() {
+}
+
+// IsAppModule implements the appmodule.AppModule interface.
+func (GenesisOnlyAppModule)
+
+IsAppModule() {
+}
+
+// RegisterInvariants is a placeholder function register no invariants
+func (GenesisOnlyAppModule)
+
+RegisterInvariants(_ sdk.InvariantRegistry) {
+}
+
+// ConsensusVersion implements AppModule/ConsensusVersion.
+func (gam GenesisOnlyAppModule)
+
+ConsensusVersion()
+
+uint64 {
+ return 1
+}
+
// Manager defines a module manager that provides the high level utility for managing and executing
// operations for a group of modules
type Manager struct {
	// Modules holds interface{} values to support the legacy AppModule as well
	// as the new core appmodule.AppModule.
	Modules                  map[string]interface{}
	OrderInitGenesis         []string
	OrderExportGenesis       []string
	OrderBeginBlockers       []string
	OrderEndBlockers         []string
	OrderPrepareCheckStaters []string
	OrderPrecommiters        []string
	OrderMigrations          []string
}
+
+// NewManager creates a new Manager object.
+func NewManager(modules ...AppModule) *Manager {
+ moduleMap := make(map[string]interface{
+})
+ modulesStr := make([]string, 0, len(modules))
+ for _, module := range modules {
+ moduleMap[module.Name()] = module
+ modulesStr = append(modulesStr, module.Name())
+}
+
+return &Manager{
+ Modules: moduleMap,
+ OrderInitGenesis: modulesStr,
+ OrderExportGenesis: modulesStr,
+ OrderBeginBlockers: modulesStr,
+ OrderPrepareCheckStaters: modulesStr,
+ OrderPrecommiters: modulesStr,
+ OrderEndBlockers: modulesStr,
+}
+}
+
+// NewManagerFromMap creates a new Manager object from a map of module names to module implementations.
+// This method should be used for apps and modules which have migrated to the cosmossdk.io/core.appmodule.AppModule API.
+func NewManagerFromMap(moduleMap map[string]appmodule.AppModule) *Manager {
+ simpleModuleMap := make(map[string]interface{
+})
+ modulesStr := make([]string, 0, len(simpleModuleMap))
+ for name, module := range moduleMap {
+ simpleModuleMap[name] = module
+ modulesStr = append(modulesStr, name)
+}
+
+ // Sort the modules by name. Given that we are using a map above we can't guarantee the order.
+ sort.Strings(modulesStr)
+
+return &Manager{
+ Modules: simpleModuleMap,
+ OrderInitGenesis: modulesStr,
+ OrderExportGenesis: modulesStr,
+ OrderBeginBlockers: modulesStr,
+ OrderEndBlockers: modulesStr,
+ OrderPrecommiters: modulesStr,
+ OrderPrepareCheckStaters: modulesStr,
+}
+}
+
+// SetOrderInitGenesis sets the order of init genesis calls
+func (m *Manager)
+
+SetOrderInitGenesis(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderInitGenesis", moduleNames, func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+ return !hasGenesis
+}
+ if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+ return !hasABCIGenesis
+}
+
+ _, hasGenesis := module.(HasGenesis)
+
+return !hasGenesis
+})
+
+m.OrderInitGenesis = moduleNames
+}
+
+// SetOrderExportGenesis sets the order of export genesis calls
+func (m *Manager)
+
+SetOrderExportGenesis(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderExportGenesis", moduleNames, func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+ return !hasGenesis
+}
+ if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+ return !hasABCIGenesis
+}
+
+ _, hasGenesis := module.(HasGenesis)
+
+return !hasGenesis
+})
+
+m.OrderExportGenesis = moduleNames
+}
+
+// SetOrderBeginBlockers sets the order of set begin-blocker calls
+func (m *Manager)
+
+SetOrderBeginBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderBeginBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasBeginBlock := module.(appmodule.HasBeginBlocker)
+
+return !hasBeginBlock
+})
+
+m.OrderBeginBlockers = moduleNames
+}
+
+// SetOrderEndBlockers sets the order of set end-blocker calls
+func (m *Manager)
+
+SetOrderEndBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderEndBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasEndBlock := module.(appmodule.HasEndBlocker); hasEndBlock {
+ return !hasEndBlock
+}
+
+ _, hasABCIEndBlock := module.(HasABCIEndblock)
+
+return !hasABCIEndBlock
+})
+
+m.OrderEndBlockers = moduleNames
+}
+
+// SetOrderPrepareCheckStaters sets the order of set prepare-check-stater calls
+func (m *Manager)
+
+SetOrderPrepareCheckStaters(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPrepareCheckStaters", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasPrepareCheckState := module.(appmodule.HasPrepareCheckState)
+
+return !hasPrepareCheckState
+})
+
+m.OrderPrepareCheckStaters = moduleNames
+}
+
+// SetOrderPrecommiters sets the order of set precommiter calls
+func (m *Manager)
+
+SetOrderPrecommiters(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPrecommiters", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasPrecommit := module.(appmodule.HasPrecommit)
+
+return !hasPrecommit
+})
+
+m.OrderPrecommiters = moduleNames
+}
+
+// SetOrderMigrations sets the order of migrations to be run. If not set
+// then migrations will be run with an order defined in `DefaultMigrationsOrder`.
+func (m *Manager)
+
+SetOrderMigrations(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderMigrations", moduleNames, nil)
+
+m.OrderMigrations = moduleNames
+}
+
+// RegisterInvariants registers all module invariants
+func (m *Manager)
+
+RegisterInvariants(ir sdk.InvariantRegistry) {
+ for _, module := range m.Modules {
+ if module, ok := module.(HasInvariants); ok {
+ module.RegisterInvariants(ir)
+}
+
+}
+}
+
+// RegisterServices registers all module services
+func (m *Manager)
+
+RegisterServices(cfg Configurator)
+
+error {
+ for _, module := range m.Modules {
+ if module, ok := module.(HasServices); ok {
+ module.RegisterServices(cfg)
+}
+ if module, ok := module.(appmodule.HasServices); ok {
+ err := module.RegisterServices(cfg)
+ if err != nil {
+ return err
+}
+
+}
+ if cfg.Error() != nil {
+ return cfg.Error()
+}
+
+}
+
+return nil
+}
+
+// InitGenesis performs init genesis functionality for modules. Exactly one
+// module must return a non-empty validator set update to correctly initialize
+// the chain.
+func (m *Manager)
+
+InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, genesisData map[string]json.RawMessage) (*abci.ResponseInitChain, error) {
+ var validatorUpdates []abci.ValidatorUpdate
+ ctx.Logger().Info("initializing blockchain state from genesis.json")
+ for _, moduleName := range m.OrderInitGenesis {
+ if genesisData[moduleName] == nil {
+ continue
+}
+ mod := m.Modules[moduleName]
+ // we might get an adapted module, a native core API module or a legacy module
+ if module, ok := mod.(appmodule.HasGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+ // core API genesis
+ source, err := genesis.SourceFromRawJSON(genesisData[moduleName])
+ if err != nil {
+ return &abci.ResponseInitChain{
+}, err
+}
+
+err = module.InitGenesis(ctx, source)
+ if err != nil {
+ return &abci.ResponseInitChain{
+}, err
+}
+
+}
+
+else if module, ok := mod.(HasGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+
+module.InitGenesis(ctx, cdc, genesisData[moduleName])
+}
+
+else if module, ok := mod.(HasABCIGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+ moduleValUpdates := module.InitGenesis(ctx, cdc, genesisData[moduleName])
+
+ // use these validator updates if provided, the module manager assumes
+ // only one module will update the validator set
+ if len(moduleValUpdates) > 0 {
+ if len(validatorUpdates) > 0 {
+ return &abci.ResponseInitChain{
+}, errors.New("validator InitGenesis updates already set by a previous module")
+}
+
+validatorUpdates = moduleValUpdates
+}
+
+}
+
+}
+
+ // a chain must initialize with a non-empty validator set
+ if len(validatorUpdates) == 0 {
+ return &abci.ResponseInitChain{
+}, fmt.Errorf("validator set is empty after InitGenesis, please ensure at least one validator is initialized with a delegation greater than or equal to the DefaultPowerReduction (%d)", sdk.DefaultPowerReduction)
+}
+
+return &abci.ResponseInitChain{
+ Validators: validatorUpdates,
+}, nil
+}
+
+// ExportGenesis performs export genesis functionality for modules
+func (m *Manager)
+
+ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) (map[string]json.RawMessage, error) {
+ return m.ExportGenesisForModules(ctx, cdc, []string{
+})
+}
+
+// ExportGenesisForModules performs export genesis functionality for modules
+func (m *Manager)
+
+ExportGenesisForModules(ctx sdk.Context, cdc codec.JSONCodec, modulesToExport []string) (map[string]json.RawMessage, error) {
+ if len(modulesToExport) == 0 {
+ modulesToExport = m.OrderExportGenesis
+}
+ // verify modules exists in app, so that we don't panic in the middle of an export
+ if err := m.checkModulesExists(modulesToExport); err != nil {
+ return nil, err
+}
+
+type genesisResult struct {
+ bz json.RawMessage
+ err error
+}
+ channels := make(map[string]chan genesisResult)
+ for _, moduleName := range modulesToExport {
+ mod := m.Modules[moduleName]
+ if module, ok := mod.(appmodule.HasGenesis); ok {
+ // core API genesis
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module appmodule.HasGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ target := genesis.RawJSONTarget{
+}
+ err := module.ExportGenesis(ctx, target.Target())
+ if err != nil {
+ ch <- genesisResult{
+ nil, err
+}
+
+return
+}
+
+rawJSON, err := target.JSON()
+ if err != nil {
+ ch <- genesisResult{
+ nil, err
+}
+
+return
+}
+
+ch <- genesisResult{
+ rawJSON, nil
+}
+
+}(module, channels[moduleName])
+}
+
+else if module, ok := mod.(HasGenesis); ok {
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module HasGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ ch <- genesisResult{
+ module.ExportGenesis(ctx, cdc), nil
+}
+
+}(module, channels[moduleName])
+}
+
+else if module, ok := mod.(HasABCIGenesis); ok {
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module HasABCIGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ ch <- genesisResult{
+ module.ExportGenesis(ctx, cdc), nil
+}
+
+}(module, channels[moduleName])
+}
+
+}
+ genesisData := make(map[string]json.RawMessage)
+ for moduleName := range channels {
+ res := <-channels[moduleName]
+ if res.err != nil {
+ return nil, res.err
+}
+
+genesisData[moduleName] = res.bz
+}
+
+return genesisData, nil
+}
+
+// checkModulesExists verifies that all modules in the list exist in the app
+func (m *Manager)
+
+checkModulesExists(moduleName []string)
+
+error {
+ for _, name := range moduleName {
+ if _, ok := m.Modules[name]; !ok {
+ return fmt.Errorf("module %s does not exist", name)
+}
+
+}
+
+return nil
+}
+
+// assertNoForgottenModules checks that we didn't forget any modules in the SetOrder* functions.
+// `pass` is a closure which allows one to omit modules from `moduleNames`.
+// If you provide non-nil `pass` and it returns true, the module would not be subject of the assertion.
+func (m *Manager)
+
+assertNoForgottenModules(setOrderFnName string, moduleNames []string, pass func(moduleName string)
+
+bool) {
+ ms := make(map[string]bool)
+ for _, m := range moduleNames {
+ ms[m] = true
+}
+
+var missing []string
+ for m := range m.Modules {
+ m := m
+ if pass != nil && pass(m) {
+ continue
+}
+ if !ms[m] {
+ missing = append(missing, m)
+}
+
+}
+ if len(missing) != 0 {
+ sort.Strings(missing)
+
+panic(fmt.Sprintf(
+ "all modules must be defined when setting %s, missing: %v", setOrderFnName, missing))
+}
+}
+
+// MigrationHandler is the migration function that each module registers.
+type MigrationHandler func(sdk.Context)
+
+error
+
+// VersionMap is a map of moduleName -> version
+type VersionMap map[string]uint64
+
+// RunMigrations performs in-place store migrations for all modules. This
+// function MUST be called inside an x/upgrade UpgradeHandler.
+//
+// Recall that in an upgrade handler, the `fromVM` VersionMap is retrieved from
+// x/upgrade's store, and the function needs to return the target VersionMap
+// that will in turn be persisted to the x/upgrade's store. In general,
+// returning RunMigrations should be enough:
+//
+// Example:
+//
+// cfg := module.NewConfigurator(...)
+// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+// return app.mm.RunMigrations(ctx, cfg, fromVM)
+// })
+//
+// Internally, RunMigrations will perform the following steps:
+// - create an `updatedVM` VersionMap of module with their latest ConsensusVersion
+// - make a diff of `fromVM` and `updatedVM`, and for each module:
+// - if the module's `fromVM` version is less than its `updatedVM` version,
+// then run in-place store migrations for that module between those versions.
+// - if the module does not exist in the `fromVM` (which means that it's a new module,
+// because it was not in the previous x/upgrade's store), then run
+// `InitGenesis` on that module.
+//
+// - return the `updatedVM` to be persisted in the x/upgrade's store.
+//
+// Migrations are run in an order defined by `Manager.OrderMigrations` or (if not set)
+// defined by the `DefaultMigrationsOrder` function.
+//
+// As an app developer, if you wish to skip running InitGenesis for your new
+// module "foo", you need to manually pass a `fromVM` argument to this function
+// with foo's module version set to its latest ConsensusVersion. That way, the diff
+// between the function's `fromVM` and `updatedVM` will be empty, hence not
+// running anything for foo.
+//
+// Example:
+//
+// cfg := module.NewConfigurator(...)
+// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+// // Assume "foo" is a new module.
+// // `fromVM` is fetched from existing x/upgrade store. Since foo didn't exist
+// // before this upgrade, `v, exists := fromVM["foo"]; exists == false`, and RunMigration will by default
+// // run InitGenesis on foo.
+// // To skip running foo's InitGenesis, you need set `fromVM`'s foo to its latest
+// // consensus version:
+// fromVM["foo"] = foo.AppModule{}.ConsensusVersion()
+//
+// return app.mm.RunMigrations(ctx, cfg, fromVM)
+// })
+//
+// Please also refer to /sdk/v0.53/build/migrations/upgrading for more information.
+func (m Manager)
+
+RunMigrations(ctx context.Context, cfg Configurator, fromVM VersionMap) (VersionMap, error) {
+ c, ok := cfg.(*configurator)
+ if !ok {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidType, "expected %T, got %T", &configurator{
+}, cfg)
+}
+ modules := m.OrderMigrations
+ if modules == nil {
+ modules = DefaultMigrationsOrder(m.ModuleNames())
+}
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ updatedVM := VersionMap{
+}
+ for _, moduleName := range modules {
+ module := m.Modules[moduleName]
+ fromVersion, exists := fromVM[moduleName]
+ toVersion := uint64(0)
+ if module, ok := module.(HasConsensusVersion); ok {
+ toVersion = module.ConsensusVersion()
+}
+
+ // We run migration if the module is specified in `fromVM`.
+ // Otherwise we run InitGenesis.
+ //
+ // The module won't exist in the fromVM in two cases:
+ // 1. A new module is added. In this case we run InitGenesis with an
+ // empty genesis state.
+ // 2. An existing chain is upgrading from version < 0.43 to v0.43+ for the first time.
+ // In this case, all modules have yet to be added to x/upgrade's VersionMap store.
+ if exists {
+ err := c.runModuleMigrations(sdkCtx, moduleName, fromVersion, toVersion)
+ if err != nil {
+ return nil, err
+}
+
+}
+
+else {
+ sdkCtx.Logger().Info(fmt.Sprintf("adding a new module: %s", moduleName))
+
+module1, ok := m.Modules[moduleName].(HasGenesis)
+ if ok {
+ module1.InitGenesis(sdkCtx, c.cdc, module1.DefaultGenesis(c.cdc))
+}
+ if module2, ok := m.Modules[moduleName].(HasABCIGenesis); ok {
+ moduleValUpdates := module2.InitGenesis(sdkCtx, c.cdc, module1.DefaultGenesis(c.cdc))
+ // The module manager assumes only one module will update the
+ // validator set, and it can't be a new module.
+ if len(moduleValUpdates) > 0 {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrLogic, "validator InitGenesis update is already set by another module")
+}
+
+}
+
+}
+
+updatedVM[moduleName] = toVersion
+}
+
+return updatedVM, nil
+}
+
+// RunMigrationBeginBlock performs begin block functionality for upgrade module.
+// It takes the current context as a parameter and returns a boolean value
+// indicating whether the migration was executed or not and an error if fails.
+func (m *Manager)
+
+RunMigrationBeginBlock(ctx sdk.Context) (bool, error) {
+ for _, moduleName := range m.OrderBeginBlockers {
+ if mod, ok := m.Modules[moduleName].(appmodule.HasBeginBlocker); ok {
+ if _, ok := mod.(appmodule.UpgradeModule); ok {
+ err := mod.BeginBlock(ctx)
+
+return err == nil, err
+}
+
+}
+
+}
+
+return false, nil
+}
+
+// BeginBlock performs begin block functionality for non-upgrade modules. It creates a
+// child context with an event manager to aggregate events emitted from non-upgrade
+// modules.
+func (m *Manager)
+
+BeginBlock(ctx sdk.Context) (sdk.BeginBlock, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+ for _, moduleName := range m.OrderBeginBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasBeginBlocker); ok {
+ if _, ok := module.(appmodule.UpgradeModule); !ok {
+ if err := module.BeginBlock(ctx); err != nil {
+ return sdk.BeginBlock{
+}, err
+}
+
+}
+
+}
+
+}
+
+return sdk.BeginBlock{
+ Events: ctx.EventManager().ABCIEvents(),
+}, nil
+}
+
+// EndBlock performs end block functionality for all modules. It creates a
+// child context with an event manager to aggregate events emitted from all
+// modules.
+func (m *Manager)
+
+EndBlock(ctx sdk.Context) (sdk.EndBlock, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+ validatorUpdates := []abci.ValidatorUpdate{
+}
+ for _, moduleName := range m.OrderEndBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasEndBlocker); ok {
+ err := module.EndBlock(ctx)
+ if err != nil {
+ return sdk.EndBlock{
+}, err
+}
+
+}
+
+else if module, ok := m.Modules[moduleName].(HasABCIEndblock); ok {
+ moduleValUpdates, err := module.EndBlock(ctx)
+ if err != nil {
+ return sdk.EndBlock{
+}, err
+}
+ // use these validator updates if provided, the module manager assumes
+ // only one module will update the validator set
+ if len(moduleValUpdates) > 0 {
+ if len(validatorUpdates) > 0 {
+ return sdk.EndBlock{
+}, errors.New("validator EndBlock updates already set by a previous module")
+}
+ for _, updates := range moduleValUpdates {
+ validatorUpdates = append(validatorUpdates, abci.ValidatorUpdate{
+ PubKey: updates.PubKey,
+ Power: updates.Power
+})
+}
+
+}
+
+}
+
+else {
+ continue
+}
+
+}
+
+return sdk.EndBlock{
+ ValidatorUpdates: validatorUpdates,
+ Events: ctx.EventManager().ABCIEvents(),
+}, nil
+}
+
+// Precommit performs precommit functionality for all modules.
+func (m *Manager)
+
+Precommit(ctx sdk.Context)
+
+error {
+ for _, moduleName := range m.OrderPrecommiters {
+ module, ok := m.Modules[moduleName].(appmodule.HasPrecommit)
+ if !ok {
+ continue
+}
+ if err := module.Precommit(ctx); err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+// PrepareCheckState performs functionality for preparing the check state for all modules.
+func (m *Manager)
+
+PrepareCheckState(ctx sdk.Context)
+
+error {
+ for _, moduleName := range m.OrderPrepareCheckStaters {
+ module, ok := m.Modules[moduleName].(appmodule.HasPrepareCheckState)
+ if !ok {
+ continue
+}
+ if err := module.PrepareCheckState(ctx); err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+// GetVersionMap gets consensus version from all modules
+func (m *Manager)
+
+GetVersionMap()
+
+VersionMap {
+ vermap := make(VersionMap)
+ for name, v := range m.Modules {
+ version := uint64(0)
+ if v, ok := v.(HasConsensusVersion); ok {
+ version = v.ConsensusVersion()
+}
+ name := name
+ vermap[name] = version
+}
+
+return vermap
+}
+
+// ModuleNames returns list of all module names, without any particular order.
+func (m *Manager)
+
+ModuleNames() []string {
+ return maps.Keys(m.Modules)
+}
+
+// DefaultMigrationsOrder returns a default migrations order: ascending alphabetical by module name,
+// except x/auth which will run last, see:
+// https://github.com/cosmos/cosmos-sdk/issues/10591
+func DefaultMigrationsOrder(modules []string) []string {
+ const authName = "auth"
+ out := make([]string, 0, len(modules))
+ hasAuth := false
+ for _, m := range modules {
+ if m == authName {
+ hasAuth = true
+}
+
+else {
+ out = append(out, m)
+}
+
+}
+
+sort.Strings(out)
+ if hasAuth {
+ out = append(out, authName)
+}
+
+return out
+}
+```
+
+#### `appmodule.HasGenesis`
+
+
+`appmodule.HasGenesis` is experimental and should be considered unstable, it is recommended to not use this interface at this time.
+
+
+```go expandable
+package appmodule
+
+import (
+
+ "context"
+ "io"
+)
+
+// HasGenesis is the extension interface that modules should implement to handle
+// genesis data and state initialization.
+// WARNING: This interface is experimental and may change at any time.
+type HasGenesis interface {
+ AppModule
+
+ // DefaultGenesis writes the default genesis for this module to the target.
+ DefaultGenesis(GenesisTarget)
+
+error
+
+ // ValidateGenesis validates the genesis data read from the source.
+ ValidateGenesis(GenesisSource)
+
+error
+
+ // InitGenesis initializes module state from the genesis source.
+ InitGenesis(context.Context, GenesisSource)
+
+error
+
+ // ExportGenesis exports module state to the genesis target.
+ ExportGenesis(context.Context, GenesisTarget)
+
+error
+}
+
+// GenesisSource is a source for genesis data in JSON format. It may abstract over a
+// single JSON object or separate files for each field in a JSON object that can
+// be streamed over. Modules should open a separate io.ReadCloser for each field that
+// is required. When fields represent arrays they can efficiently be streamed
+// over. If there is no data for a field, this function should return nil, nil. It is
+// important that the caller closes the reader when done with it.
+type GenesisSource = func(field string) (io.ReadCloser, error)
+
+// GenesisTarget is a target for writing genesis data in JSON format. It may
+// abstract over a single JSON object or JSON in separate files that can be
+// streamed over. Modules should open a separate io.WriteCloser for each field
+// and should prefer writing fields as arrays when possible to support efficient
+// iteration. It is important that the caller closes the writer AND checks the error
+// when done with it. It is expected that a stream of JSON data is written
+// to the writer.
+type GenesisTarget = func(field string) (io.WriteCloser, error)
+```
+
+### `AppModule`
+
+The `AppModule` interface defines a module. Modules can declare their functionality by implementing extension interfaces.
+`AppModule`s are managed by the [module manager](#manager), which checks which extension interfaces are implemented by the module.
+
+#### `appmodule.AppModule`
+
+```go expandable
+package appmodule
+
+import (
+
+ "context"
+ "google.golang.org/grpc"
+ "cosmossdk.io/depinject"
+)
+
+// AppModule is a tag interface for app module implementations to use as a basis
+// for extension interfaces. It provides no functionality itself, but is the
+// type that all valid app modules should provide so that they can be identified
+// by other modules (usually via depinject) as app modules.
+type AppModule interface {
+ depinject.OnePerModuleType
+
+ // IsAppModule is a dummy method to tag a struct as implementing an AppModule.
+ IsAppModule()
+}
+
+// HasServices is the extension interface that modules should implement to register
+// implementations of services defined in .proto files.
+type HasServices interface {
+ AppModule
+
+ // RegisterServices registers the module's services with the app's service
+ // registrar.
+ //
+ // Two types of services are currently supported:
+ // - read-only gRPC query services, which are the default.
+ // - transaction message services, which must have the protobuf service
+ // option "cosmos.msg.v1.service" (defined in "cosmos/msg/v1/service.proto")
+ // set to true.
+ //
+ // The service registrar will figure out which type of service you are
+ // implementing based on the presence (or absence) of protobuf options. You
+ // do not need to specify this in golang code.
+ RegisterServices(grpc.ServiceRegistrar)
+
+error
+}
+
+// HasPrepareCheckState is an extension interface that contains information about the AppModule
+// and PrepareCheckState.
+type HasPrepareCheckState interface {
+ AppModule
+ PrepareCheckState(context.Context)
+
+error
+}
+
+// HasPrecommit is an extension interface that contains information about the AppModule and Precommit.
+type HasPrecommit interface {
+ AppModule
+ Precommit(context.Context)
+
+error
+}
+
+// HasBeginBlocker is the extension interface that modules should implement to run
+// custom logic before transaction processing in a block.
+type HasBeginBlocker interface {
+ AppModule
+
+ // BeginBlock is a method that will be run before transactions are processed in
+ // a block.
+ BeginBlock(context.Context)
+
+error
+}
+
+// HasEndBlocker is the extension interface that modules should implement to run
+// custom logic after transaction processing in a block.
+type HasEndBlocker interface {
+ AppModule
+
+ // EndBlock is a method that will be run after transactions are processed in
+ // a block.
+ EndBlock(context.Context)
+
+error
+}
+```
+
+#### `module.AppModule`
+
+
+Previously the `module.AppModule` interface contained all the methods defined in the extension interfaces. This led to significant boilerplate for modules that did not need all of the functionality.
+
+
+```go expandable
+/*
+Package module contains application module patterns and associated "manager" functionality.
+The module pattern has been broken down by:
+ - independent module functionality (AppModuleBasic)
+ - inter-dependent module simulation functionality (AppModuleSimulation)
+ - inter-dependent module full functionality (AppModule)
+
+inter-dependent module functionality is module functionality which somehow
+depends on other modules, typically through the module keeper. Many of the
+module keepers are dependent on each other, thus in order to access the full
+set of module functionality we need to define all the keepers/params-store/keys
+etc. This full set of advanced functionality is defined by the AppModule interface.
+
+Independent module functions are separated to allow for the construction of the
+basic application structures required early on in the application definition
+and used to enable the definition of full module functionality later in the
+process. This separation is necessary, however we still want to allow for a
+high level pattern for modules to follow - for instance, such that we don't
+have to manually register all of the codecs for all the modules. This basic
+procedure as well as other basic patterns are handled through the use of
+BasicManager.
+
+Lastly the interface for genesis functionality (HasGenesis & HasABCIGenesis) has been
+separated out from full module functionality (AppModule) so that modules which
+are only used for genesis can take advantage of the Module patterns without
+needlessly defining many placeholder functions
+*/
+package module
+
+import (
+
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "maps"
+ "slices"
+ "sort"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/core/genesis"
+ errorsmod "cosmossdk.io/errors"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// AppModuleBasic is the standard form for basic non-dependant elements of an application module.
+type AppModuleBasic interface {
+ HasName
+ RegisterLegacyAminoCodec(*codec.LegacyAmino)
+
+RegisterInterfaces(types.InterfaceRegistry)
+
+RegisterGRPCGatewayRoutes(client.Context, *runtime.ServeMux)
+}
+
+// HasName allows the module to provide its own name for legacy purposes.
+// Newer apps should specify the name for their modules using a map
+// using NewManagerFromMap.
+type HasName interface {
+ Name()
+
+string
+}
+
+// HasGenesisBasics is the legacy interface for stateless genesis methods.
+type HasGenesisBasics interface {
+ DefaultGenesis(codec.JSONCodec)
+
+json.RawMessage
+ ValidateGenesis(codec.JSONCodec, client.TxEncodingConfig, json.RawMessage)
+
+error
+}
+
+// BasicManager is a collection of AppModuleBasic
+type BasicManager map[string]AppModuleBasic
+
+// NewBasicManager creates a new BasicManager object
+func NewBasicManager(modules ...AppModuleBasic)
+
+BasicManager {
+ moduleMap := make(map[string]AppModuleBasic)
+ for _, module := range modules {
+ moduleMap[module.Name()] = module
+}
+
+return moduleMap
+}
+
+// NewBasicManagerFromManager creates a new BasicManager from a Manager
+// The BasicManager will contain all AppModuleBasic from the AppModule Manager
+// Module's AppModuleBasic can be overridden by passing a custom AppModuleBasic map
+func NewBasicManagerFromManager(manager *Manager, customModuleBasics map[string]AppModuleBasic)
+
+BasicManager {
+ moduleMap := make(map[string]AppModuleBasic)
+ for name, module := range manager.Modules {
+ if customBasicMod, ok := customModuleBasics[name]; ok {
+ moduleMap[name] = customBasicMod
+ continue
+}
+ if appModule, ok := module.(appmodule.AppModule); ok {
+ moduleMap[name] = CoreAppModuleBasicAdaptor(name, appModule)
+
+continue
+}
+ if basicMod, ok := module.(AppModuleBasic); ok {
+ moduleMap[name] = basicMod
+}
+
+}
+
+return moduleMap
+}
+
+// RegisterLegacyAminoCodec registers all module codecs
+func (bm BasicManager)
+
+RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ for _, b := range bm {
+ b.RegisterLegacyAminoCodec(cdc)
+}
+}
+
+// RegisterInterfaces registers all module interface types
+func (bm BasicManager)
+
+RegisterInterfaces(registry types.InterfaceRegistry) {
+ for _, m := range bm {
+ m.RegisterInterfaces(registry)
+}
+}
+
+// DefaultGenesis provides default genesis information for all modules
+func (bm BasicManager)
+
+DefaultGenesis(cdc codec.JSONCodec)
+
+map[string]json.RawMessage {
+ genesisData := make(map[string]json.RawMessage)
+ for _, b := range bm {
+ if mod, ok := b.(HasGenesisBasics); ok {
+ genesisData[b.Name()] = mod.DefaultGenesis(cdc)
+}
+
+}
+
+return genesisData
+}
+
+// ValidateGenesis performs genesis state validation for all modules
+func (bm BasicManager)
+
+ValidateGenesis(cdc codec.JSONCodec, txEncCfg client.TxEncodingConfig, genesisData map[string]json.RawMessage)
+
+error {
+ for _, b := range bm {
+ // first check if the module is an adapted Core API Module
+ if mod, ok := b.(HasGenesisBasics); ok {
+ if err := mod.ValidateGenesis(cdc, txEncCfg, genesisData[b.Name()]); err != nil {
+ return err
+}
+
+}
+
+}
+
+return nil
+}
+
+// RegisterGRPCGatewayRoutes registers all module rest routes
+func (bm BasicManager)
+
+RegisterGRPCGatewayRoutes(clientCtx client.Context, rtr *runtime.ServeMux) {
+ for _, b := range bm {
+ b.RegisterGRPCGatewayRoutes(clientCtx, rtr)
+}
+}
+
+// AddTxCommands adds all tx commands to the rootTxCmd.
+func (bm BasicManager)
+
+AddTxCommands(rootTxCmd *cobra.Command) {
+ for _, b := range bm {
+ if mod, ok := b.(interface {
+ GetTxCmd() *cobra.Command
+}); ok {
+ if cmd := mod.GetTxCmd(); cmd != nil {
+ rootTxCmd.AddCommand(cmd)
+}
+
+}
+
+}
+}
+
+// AddQueryCommands adds all query commands to the rootQueryCmd.
+func (bm BasicManager)
+
+AddQueryCommands(rootQueryCmd *cobra.Command) {
+ for _, b := range bm {
+ if mod, ok := b.(interface {
+ GetQueryCmd() *cobra.Command
+}); ok {
+ if cmd := mod.GetQueryCmd(); cmd != nil {
+ rootQueryCmd.AddCommand(cmd)
+}
+
+}
+
+}
+}
+
+// HasGenesis is the extension interface for stateful genesis methods.
+type HasGenesis interface {
+ HasGenesisBasics
+ InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage)
+
+ExportGenesis(sdk.Context, codec.JSONCodec)
+
+json.RawMessage
+}
+
+// HasABCIGenesis is the extension interface for stateful genesis methods which returns validator updates.
+type HasABCIGenesis interface {
+ HasGenesisBasics
+ InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage) []abci.ValidatorUpdate
+ ExportGenesis(sdk.Context, codec.JSONCodec)
+
+json.RawMessage
+}
+
+// AppModule is the form for an application module. Most of
+// its functionality has been moved to extension interfaces.
+// Deprecated: use appmodule.AppModule with a combination of extension interfaces instead.
+type AppModule interface {
+ appmodule.AppModule
+
+ AppModuleBasic
+}
+
+// HasInvariants is the interface for registering invariants.
+//
+// Deprecated: this will be removed in the next Cosmos SDK release.
+type HasInvariants interface {
+ // RegisterInvariants registers module invariants.
+ RegisterInvariants(sdk.InvariantRegistry)
+}
+
+// HasServices is the interface for modules to register services.
+type HasServices interface {
+ // RegisterServices allows a module to register services.
+ RegisterServices(Configurator)
+}
+
+// HasConsensusVersion is the interface for declaring a module consensus version.
+type HasConsensusVersion interface {
+ // ConsensusVersion is a sequence number for state-breaking change of the
+ // module. It should be incremented on each consensus-breaking change
+ // introduced by the module. To avoid wrong/empty versions, the initial version
+ // should be set to 1.
+ ConsensusVersion()
+
+uint64
+}
+
+// HasABCIEndblock is a released typo of HasABCIEndBlock.
+// Deprecated: use HasABCIEndBlock instead.
+type HasABCIEndblock HasABCIEndBlock
+
+// HasABCIEndBlock is the interface for modules that need to run code at the end of the block.
+type HasABCIEndBlock interface {
+ AppModule
+ EndBlock(context.Context) ([]abci.ValidatorUpdate, error)
+}
+
+var (
+ _ appmodule.AppModule = (*GenesisOnlyAppModule)(nil)
+ _ AppModuleBasic = (*GenesisOnlyAppModule)(nil)
+)
+
+// AppModuleGenesis is the standard form for an application module genesis functions
+type AppModuleGenesis interface {
+ AppModuleBasic
+ HasABCIGenesis
+}
+
+// GenesisOnlyAppModule is an AppModule that only has import/export functionality
+type GenesisOnlyAppModule struct {
+ AppModuleGenesis
+}
+
+// NewGenesisOnlyAppModule creates a new GenesisOnlyAppModule object
+func NewGenesisOnlyAppModule(amg AppModuleGenesis)
+
+GenesisOnlyAppModule {
+ return GenesisOnlyAppModule{
+ AppModuleGenesis: amg,
+}
+}
+
+// IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+func (GenesisOnlyAppModule)
+
+IsOnePerModuleType() {
+}
+
+// IsAppModule implements the appmodule.AppModule interface.
+func (GenesisOnlyAppModule)
+
+IsAppModule() {
+}
+
+// RegisterInvariants is a placeholder function that registers no invariants.
+func (GenesisOnlyAppModule)
+
+RegisterInvariants(_ sdk.InvariantRegistry) {
+}
+
+// ConsensusVersion implements AppModule/ConsensusVersion.
+func (gam GenesisOnlyAppModule)
+
+ConsensusVersion()
+
+uint64 {
+ return 1
+}
+
+// Manager defines a module manager that provides the high level utility for managing and executing
+// operations for a group of modules
+type Manager struct {
+ Modules map[string]any // interface{} is used now to support the legacy AppModule as well as new core appmodule.AppModule.
+ OrderInitGenesis []string
+ OrderExportGenesis []string
+ OrderPreBlockers []string
+ OrderBeginBlockers []string
+ OrderEndBlockers []string
+ OrderPrepareCheckStaters []string
+ OrderPrecommiters []string
+ OrderMigrations []string
+}
+
+// NewManager creates a new Manager object.
+func NewManager(modules ...AppModule) *Manager {
+ moduleMap := make(map[string]any)
+ modulesStr := make([]string, 0, len(modules))
+ preBlockModulesStr := make([]string, 0)
+ for _, module := range modules {
+ if _, ok := module.(appmodule.AppModule); !ok {
+ panic(fmt.Sprintf("module %s does not implement appmodule.AppModule", module.Name()))
+}
+
+moduleMap[module.Name()] = module
+ modulesStr = append(modulesStr, module.Name())
+ if _, ok := module.(appmodule.HasPreBlocker); ok {
+ preBlockModulesStr = append(preBlockModulesStr, module.Name())
+}
+
+}
+
+return &Manager{
+ Modules: moduleMap,
+ OrderInitGenesis: modulesStr,
+ OrderExportGenesis: modulesStr,
+ OrderPreBlockers: preBlockModulesStr,
+ OrderBeginBlockers: modulesStr,
+ OrderPrepareCheckStaters: modulesStr,
+ OrderPrecommiters: modulesStr,
+ OrderEndBlockers: modulesStr,
+}
+}
+
+// NewManagerFromMap creates a new Manager object from a map of module names to module implementations.
+// This method should be used for apps and modules which have migrated to the cosmossdk.io/core.appmodule.AppModule API.
+func NewManagerFromMap(moduleMap map[string]appmodule.AppModule) *Manager {
+ simpleModuleMap := make(map[string]any)
+ modulesStr := make([]string, 0, len(simpleModuleMap))
+ preBlockModulesStr := make([]string, 0)
+ for name, module := range moduleMap {
+ simpleModuleMap[name] = module
+ modulesStr = append(modulesStr, name)
+ if _, ok := module.(appmodule.HasPreBlocker); ok {
+ preBlockModulesStr = append(preBlockModulesStr, name)
+}
+
+}
+
+ // Sort the modules by name. Given that we are using a map above we can't guarantee the order.
+ sort.Strings(modulesStr)
+
+return &Manager{
+ Modules: simpleModuleMap,
+ OrderInitGenesis: modulesStr,
+ OrderExportGenesis: modulesStr,
+ OrderPreBlockers: preBlockModulesStr,
+ OrderBeginBlockers: modulesStr,
+ OrderEndBlockers: modulesStr,
+ OrderPrecommiters: modulesStr,
+ OrderPrepareCheckStaters: modulesStr,
+}
+}
+
+// SetOrderInitGenesis sets the order of init genesis calls
+func (m *Manager)
+
+SetOrderInitGenesis(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderInitGenesis", moduleNames, func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+ return !hasGenesis
+}
+ if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+ return !hasABCIGenesis
+}
+
+ _, hasGenesis := module.(HasGenesis)
+
+return !hasGenesis
+})
+
+m.OrderInitGenesis = moduleNames
+}
+
+// SetOrderExportGenesis sets the order of export genesis calls
+func (m *Manager)
+
+SetOrderExportGenesis(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderExportGenesis", moduleNames, func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+ return !hasGenesis
+}
+ if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+ return !hasABCIGenesis
+}
+
+ _, hasGenesis := module.(HasGenesis)
+
+return !hasGenesis
+})
+
+m.OrderExportGenesis = moduleNames
+}
+
+// SetOrderPreBlockers sets the order of set pre-blocker calls
+func (m *Manager)
+
+SetOrderPreBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPreBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasBlock := module.(appmodule.HasPreBlocker)
+
+return !hasBlock
+})
+
+m.OrderPreBlockers = moduleNames
+}
+
+// SetOrderBeginBlockers sets the order of set begin-blocker calls
+func (m *Manager)
+
+SetOrderBeginBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderBeginBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasBeginBlock := module.(appmodule.HasBeginBlocker)
+
+return !hasBeginBlock
+})
+
+m.OrderBeginBlockers = moduleNames
+}
+
+// SetOrderEndBlockers sets the order of set end-blocker calls
+func (m *Manager)
+
+SetOrderEndBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderEndBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasEndBlock := module.(appmodule.HasEndBlocker); hasEndBlock {
+ return !hasEndBlock
+}
+
+ _, hasABCIEndBlock := module.(HasABCIEndBlock)
+
+return !hasABCIEndBlock
+})
+
+m.OrderEndBlockers = moduleNames
+}
+
+// SetOrderPrepareCheckStaters sets the order of set prepare-check-stater calls
+func (m *Manager)
+
+SetOrderPrepareCheckStaters(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPrepareCheckStaters", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasPrepareCheckState := module.(appmodule.HasPrepareCheckState)
+
+return !hasPrepareCheckState
+})
+
+m.OrderPrepareCheckStaters = moduleNames
+}
+
+// SetOrderPrecommiters sets the order of set precommiter calls
+func (m *Manager)
+
+SetOrderPrecommiters(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPrecommiters", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasPrecommit := module.(appmodule.HasPrecommit)
+
+return !hasPrecommit
+})
+
+m.OrderPrecommiters = moduleNames
+}
+
+// SetOrderMigrations sets the order of migrations to be run. If not set
+// then migrations will be run with an order defined in `DefaultMigrationsOrder`.
+func (m *Manager)
+
+SetOrderMigrations(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderMigrations", moduleNames, nil)
+
+m.OrderMigrations = moduleNames
+}
+
+// RegisterInvariants registers all module invariants
+//
+// Deprecated: this function is a no-op and will be removed in the next release of the Cosmos SDK.
+func (m *Manager)
+
+RegisterInvariants(_ sdk.InvariantRegistry) {
+}
+
+// RegisterServices registers all module services
+func (m *Manager)
+
+RegisterServices(cfg Configurator)
+
+error {
+ for _, module := range m.Modules {
+ if module, ok := module.(HasServices); ok {
+ module.RegisterServices(cfg)
+}
+ if module, ok := module.(appmodule.HasServices); ok {
+ err := module.RegisterServices(cfg)
+ if err != nil {
+ return err
+}
+
+}
+ if cfg.Error() != nil {
+ return cfg.Error()
+}
+
+}
+
+return nil
+}
+
+// InitGenesis performs init genesis functionality for modules. Exactly one
+// module must return a non-empty validator set update to correctly initialize
+// the chain.
+func (m *Manager)
+
+InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, genesisData map[string]json.RawMessage) (*abci.ResponseInitChain, error) {
+ var validatorUpdates []abci.ValidatorUpdate
+ ctx.Logger().Info("initializing blockchain state from genesis.json")
+ for _, moduleName := range m.OrderInitGenesis {
+ if genesisData[moduleName] == nil {
+ continue
+}
+ mod := m.Modules[moduleName]
+ // we might get an adapted module, a native core API module or a legacy module
+ if module, ok := mod.(appmodule.HasGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+ // core API genesis
+ source, err := genesis.SourceFromRawJSON(genesisData[moduleName])
+ if err != nil {
+ return &abci.ResponseInitChain{
+}, err
+}
+
+err = module.InitGenesis(ctx, source)
+ if err != nil {
+ return &abci.ResponseInitChain{
+}, err
+}
+
+}
+
+else if module, ok := mod.(HasGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+
+module.InitGenesis(ctx, cdc, genesisData[moduleName])
+}
+
+else if module, ok := mod.(HasABCIGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+ moduleValUpdates := module.InitGenesis(ctx, cdc, genesisData[moduleName])
+
+ // use these validator updates if provided, the module manager assumes
+ // only one module will update the validator set
+ if len(moduleValUpdates) > 0 {
+ if len(validatorUpdates) > 0 {
+ return &abci.ResponseInitChain{
+}, errors.New("validator InitGenesis updates already set by a previous module")
+}
+
+validatorUpdates = moduleValUpdates
+}
+
+}
+
+}
+
+ // a chain must initialize with a non-empty validator set
+ if len(validatorUpdates) == 0 {
+ return &abci.ResponseInitChain{
+}, fmt.Errorf("validator set is empty after InitGenesis, please ensure at least one validator is initialized with a delegation greater than or equal to the DefaultPowerReduction (%d)", sdk.DefaultPowerReduction)
+}
+
+return &abci.ResponseInitChain{
+ Validators: validatorUpdates,
+}, nil
+}
+
+// ExportGenesis performs export genesis functionality for modules
+func (m *Manager)
+
+ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) (map[string]json.RawMessage, error) {
+ return m.ExportGenesisForModules(ctx, cdc, []string{
+})
+}
+
+// ExportGenesisForModules performs export genesis functionality for modules
+func (m *Manager)
+
+ExportGenesisForModules(ctx sdk.Context, cdc codec.JSONCodec, modulesToExport []string) (map[string]json.RawMessage, error) {
+ if len(modulesToExport) == 0 {
+ modulesToExport = m.OrderExportGenesis
+}
+ // verify modules exists in app, so that we don't panic in the middle of an export
+ if err := m.checkModulesExists(modulesToExport); err != nil {
+ return nil, err
+}
+
+type genesisResult struct {
+ bz json.RawMessage
+ err error
+}
+ channels := make(map[string]chan genesisResult)
+ for _, moduleName := range modulesToExport {
+ mod := m.Modules[moduleName]
+ if module, ok := mod.(appmodule.HasGenesis); ok {
+ // core API genesis
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module appmodule.HasGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ target := genesis.RawJSONTarget{
+}
+ err := module.ExportGenesis(ctx, target.Target())
+ if err != nil {
+ ch <- genesisResult{
+ nil, err
+}
+
+return
+}
+
+rawJSON, err := target.JSON()
+ if err != nil {
+ ch <- genesisResult{
+ nil, err
+}
+
+return
+}
+
+ch <- genesisResult{
+ rawJSON, nil
+}
+
+}(module, channels[moduleName])
+}
+
+else if module, ok := mod.(HasGenesis); ok {
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module HasGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ ch <- genesisResult{
+ module.ExportGenesis(ctx, cdc), nil
+}
+
+}(module, channels[moduleName])
+}
+
+else if module, ok := mod.(HasABCIGenesis); ok {
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module HasABCIGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ ch <- genesisResult{
+ module.ExportGenesis(ctx, cdc), nil
+}
+
+}(module, channels[moduleName])
+}
+
+}
+ genesisData := make(map[string]json.RawMessage)
+ for moduleName := range channels {
+ res := <-channels[moduleName]
+ if res.err != nil {
+ return nil, fmt.Errorf("genesis export error in %s: %w", moduleName, res.err)
+}
+
+genesisData[moduleName] = res.bz
+}
+
+return genesisData, nil
+}
+
+// checkModulesExists verifies that all modules in the list exist in the app
+func (m *Manager)
+
+checkModulesExists(moduleName []string)
+
+error {
+ for _, name := range moduleName {
+ if _, ok := m.Modules[name]; !ok {
+ return fmt.Errorf("module %s does not exist", name)
+}
+
+}
+
+return nil
+}
+
+// assertNoForgottenModules checks that we didn't forget any modules in the SetOrder* functions.
+// `pass` is a closure which allows one to omit modules from `moduleNames`.
+// If you provide non-nil `pass` and it returns true, the module would not be subject of the assertion.
+func (m *Manager)
+
+assertNoForgottenModules(setOrderFnName string, moduleNames []string, pass func(moduleName string)
+
+bool) {
+ ms := make(map[string]bool)
+ for _, m := range moduleNames {
+ ms[m] = true
+}
+
+var missing []string
+ for m := range m.Modules {
+ if pass != nil && pass(m) {
+ continue
+}
+ if !ms[m] {
+ missing = append(missing, m)
+}
+
+}
+ if len(missing) != 0 {
+ sort.Strings(missing)
+
+panic(fmt.Sprintf(
+ "all modules must be defined when setting %s, missing: %v", setOrderFnName, missing))
+}
+}
+
+// MigrationHandler is the migration function that each module registers.
+type MigrationHandler func(sdk.Context)
+
+error
+
+// VersionMap is a map of moduleName -> version
+type VersionMap map[string]uint64
+
+// RunMigrations performs in-place store migrations for all modules. This
+// function MUST be called insde an x/upgrade UpgradeHandler.
+//
+// Recall that in an upgrade handler, the `fromVM` VersionMap is retrieved from
+// x/upgrade's store, and the function needs to return the target VersionMap
+// that will in turn be persisted to the x/upgrade's store. In general,
+// returning RunMigrations should be enough:
+//
+// Example:
+//
+// cfg := module.NewConfigurator(...)
+// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+// return app.mm.RunMigrations(ctx, cfg, fromVM)
+//
+})
+//
+// Internally, RunMigrations will perform the following steps:
+// - create an `updatedVM` VersionMap of module with their latest ConsensusVersion
+// - make a diff of `fromVM` and `udpatedVM`, and for each module:
+// - if the module's `fromVM` version is less than its `updatedVM` version,
+// then run in-place store migrations for that module between those versions.
+// - if the module does not exist in the `fromVM` (which means that it's a new module,
+// because it was not in the previous x/upgrade's store), then run
+// `InitGenesis` on that module.
+//
+// - return the `updatedVM` to be persisted in the x/upgrade's store.
+//
+// Migrations are run in an order defined by `Manager.OrderMigrations` or (if not set)
+
+defined by
+// `DefaultMigrationsOrder` function.
+//
+// As an app developer, if you wish to skip running InitGenesis for your new
+// module "foo", you need to manually pass a `fromVM` argument to this function
+// foo's module version set to its latest ConsensusVersion. That way, the diff
+// between the function's `fromVM` and `udpatedVM` will be empty, hence not
+// running anything for foo.
+//
+// Example:
+//
+// cfg := module.NewConfigurator(...)
+// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+// // Assume "foo" is a new module.
+// // `fromVM` is fetched from existing x/upgrade store. Since foo didn't exist
+// // before this upgrade, `v, exists := fromVM["foo"]; exists == false`, and RunMigration will by default
+// // run InitGenesis on foo.
+// // To skip running foo's InitGenesis, you need set `fromVM`'s foo to its latest
+// // consensus version:
+// fromVM["foo"] = foo.AppModule{
+}.ConsensusVersion()
+//
+// return app.mm.RunMigrations(ctx, cfg, fromVM)
+//
+})
+//
+// Please also refer to /sdk/v0.53/build/migrations/upgrading for more information.
+func (m Manager)
+
+RunMigrations(ctx context.Context, cfg Configurator, fromVM VersionMap) (VersionMap, error) {
+ c, ok := cfg.(*configurator)
+ if !ok {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidType, "expected %T, got %T", &configurator{
+}, cfg)
+}
+ modules := m.OrderMigrations
+ if modules == nil {
+ modules = DefaultMigrationsOrder(m.ModuleNames())
+}
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ updatedVM := VersionMap{
+}
+ for _, moduleName := range modules {
+ module := m.Modules[moduleName]
+ fromVersion, exists := fromVM[moduleName]
+ toVersion := uint64(0)
+ if module, ok := module.(HasConsensusVersion); ok {
+ toVersion = module.ConsensusVersion()
+}
+
+ // We run migration if the module is specified in `fromVM`.
+ // Otherwise we run InitGenesis.
+ //
+ // The module won't exist in the fromVM in two cases:
+ // 1. A new module is added. In this case we run InitGenesis with an
+ // empty genesis state.
+ // 2. An existing chain is upgrading from version < 0.43 to v0.43+ for the first time.
+ // In this case, all modules have yet to be added to x/upgrade's VersionMap store.
+ if exists {
+ err := c.runModuleMigrations(sdkCtx, moduleName, fromVersion, toVersion)
+ if err != nil {
+ return nil, err
+}
+
+}
+
+else {
+ sdkCtx.Logger().Info(fmt.Sprintf("adding a new module: %s", moduleName))
+ if module, ok := m.Modules[moduleName].(HasGenesis); ok {
+ module.InitGenesis(sdkCtx, c.cdc, module.DefaultGenesis(c.cdc))
+}
+ if module, ok := m.Modules[moduleName].(HasABCIGenesis); ok {
+ moduleValUpdates := module.InitGenesis(sdkCtx, c.cdc, module.DefaultGenesis(c.cdc))
+ // The module manager assumes only one module will update the
+ // validator set, and it can't be a new module.
+ if len(moduleValUpdates) > 0 {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrLogic, "validator InitGenesis update is already set by another module")
+}
+
+}
+
+}
+
+updatedVM[moduleName] = toVersion
+}
+
+return updatedVM, nil
+}
+
+// PreBlock performs begin block functionality for upgrade module.
+// It takes the current context as a parameter and returns a boolean value
+// indicating whether the migration was successfully executed or not.
+func (m *Manager)
+
+PreBlock(ctx sdk.Context) (*sdk.ResponsePreBlock, error) {
+ paramsChanged := false
+ for _, moduleName := range m.OrderPreBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasPreBlocker); ok {
+ rsp, err := module.PreBlock(ctx)
+ if err != nil {
+ return nil, err
+}
+ if rsp.IsConsensusParamsChanged() {
+ paramsChanged = true
+}
+
+}
+
+}
+
+return &sdk.ResponsePreBlock{
+ ConsensusParamsChanged: paramsChanged,
+}, nil
+}
+
+// BeginBlock performs begin block functionality for all modules. It creates a
+// child context with an event manager to aggregate events emitted from all
+// modules.
+func (m *Manager)
+
+BeginBlock(ctx sdk.Context) (sdk.BeginBlock, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+ for _, moduleName := range m.OrderBeginBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasBeginBlocker); ok {
+ if err := module.BeginBlock(ctx); err != nil {
+ return sdk.BeginBlock{
+}, err
+}
+
+}
+
+}
+
+return sdk.BeginBlock{
+ Events: ctx.EventManager().ABCIEvents(),
+}, nil
+}
+
+// EndBlock performs end block functionality for all modules. It creates a
+// child context with an event manager to aggregate events emitted from all
+// modules.
+func (m *Manager)
+
+EndBlock(ctx sdk.Context) (sdk.EndBlock, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+ validatorUpdates := []abci.ValidatorUpdate{
+}
+ for _, moduleName := range m.OrderEndBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasEndBlocker); ok {
+ err := module.EndBlock(ctx)
+ if err != nil {
+ return sdk.EndBlock{
+}, err
+}
+
+}
+
+else if module, ok := m.Modules[moduleName].(HasABCIEndBlock); ok {
+ moduleValUpdates, err := module.EndBlock(ctx)
+ if err != nil {
+ return sdk.EndBlock{
+}, err
+}
+ // use these validator updates if provided, the module manager assumes
+ // only one module will update the validator set
+ if len(moduleValUpdates) > 0 {
+ if len(validatorUpdates) > 0 {
+ return sdk.EndBlock{
+}, errors.New("validator EndBlock updates already set by a previous module")
+}
+ for _, updates := range moduleValUpdates {
+ validatorUpdates = append(validatorUpdates, abci.ValidatorUpdate{
+ PubKey: updates.PubKey,
+ Power: updates.Power
+})
+}
+
+}
+
+}
+
+else {
+ continue
+}
+
+}
+
+return sdk.EndBlock{
+ ValidatorUpdates: validatorUpdates,
+ Events: ctx.EventManager().ABCIEvents(),
+}, nil
+}
+
+// Precommit performs precommit functionality for all modules.
+func (m *Manager)
+
+Precommit(ctx sdk.Context)
+
+error {
+ for _, moduleName := range m.OrderPrecommiters {
+ module, ok := m.Modules[moduleName].(appmodule.HasPrecommit)
+ if !ok {
+ continue
+}
+ if err := module.Precommit(ctx); err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+// PrepareCheckState performs functionality for preparing the check state for all modules.
+func (m *Manager)
+
+PrepareCheckState(ctx sdk.Context)
+
+error {
+ for _, moduleName := range m.OrderPrepareCheckStaters {
+ module, ok := m.Modules[moduleName].(appmodule.HasPrepareCheckState)
+ if !ok {
+ continue
+}
+ if err := module.PrepareCheckState(ctx); err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+// GetVersionMap gets consensus version from all modules
+func (m *Manager)
+
+GetVersionMap()
+
+VersionMap {
+ vermap := make(VersionMap)
+ for name, v := range m.Modules {
+ version := uint64(0)
+ if v, ok := v.(HasConsensusVersion); ok {
+ version = v.ConsensusVersion()
+}
+
+vermap[name] = version
+}
+
+return vermap
+}
+
+// ModuleNames returns list of all module names, without any particular order.
+func (m *Manager)
+
+ModuleNames() []string {
+ return slices.Collect(maps.Keys(m.Modules))
+}
+
// DefaultMigrationsOrder returns a default migrations order: ascending alphabetical by module name,
// except x/auth which will run last, see:
// https://github.com/cosmos/cosmos-sdk/issues/10591
func DefaultMigrationsOrder(modules []string) []string {
    const authName = "auth"
    out := make([]string, 0, len(modules))
    hasAuth := false
    for _, m := range modules {
        if m == authName {
            hasAuth = true
        } else {
            out = append(out, m)
        }
    }

    sort.Strings(out)
    if hasAuth {
        // auth always migrates last because other modules depend on its state
        out = append(out, authName)
    }

    return out
}
+```
+
### `HasInvariants`

This interface defines one method. It allows checking whether a module registers invariants.
+
+```go expandable
/*
Package module contains application module patterns and associated "manager" functionality.
The module pattern has been broken down by:
  - independent module functionality (AppModuleBasic)
  - inter-dependent module simulation functionality (AppModuleSimulation)
  - inter-dependent module full functionality (AppModule)

Inter-dependent module functionality is module functionality which somehow
depends on other modules, typically through the module keeper. Many of the
module keepers are dependent on each other, thus in order to access the full
set of module functionality we need to define all the keepers/params-store/keys
etc. This full set of advanced functionality is defined by the AppModule interface.

Independent module functions are separated to allow for the construction of the
basic application structures required early on in the application definition
and used to enable the definition of full module functionality later in the
process. This separation is necessary, however we still want to allow for a
high level pattern for modules to follow - for instance, such that we don't
have to manually register all of the codecs for all the modules. This basic
procedure as well as other basic patterns are handled through the use of
BasicManager.

Lastly the interface for genesis functionality (HasGenesis & HasABCIGenesis)
has been separated out from full module functionality (AppModule) so that
modules which are only used for genesis can take advantage of the Module
patterns without needlessly defining many placeholder functions.
*/
+package module
+
+import (
+
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "maps"
+ "slices"
+ "sort"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/core/genesis"
+ errorsmod "cosmossdk.io/errors"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// AppModuleBasic is the standard form for basic non-dependant elements of an application module.
+type AppModuleBasic interface {
+ HasName
+ RegisterLegacyAminoCodec(*codec.LegacyAmino)
+
+RegisterInterfaces(types.InterfaceRegistry)
+
+RegisterGRPCGatewayRoutes(client.Context, *runtime.ServeMux)
+}
+
// HasName allows the module to provide its own name for legacy purposes.
// Newer apps should specify the name for their modules using a map
// using NewManagerFromMap.
type HasName interface {
    Name() string
}
+
+// HasGenesisBasics is the legacy interface for stateless genesis methods.
+type HasGenesisBasics interface {
+ DefaultGenesis(codec.JSONCodec)
+
+json.RawMessage
+ ValidateGenesis(codec.JSONCodec, client.TxEncodingConfig, json.RawMessage)
+
+error
+}
+
+// BasicManager is a collection of AppModuleBasic
+type BasicManager map[string]AppModuleBasic
+
+// NewBasicManager creates a new BasicManager object
+func NewBasicManager(modules ...AppModuleBasic)
+
+BasicManager {
+ moduleMap := make(map[string]AppModuleBasic)
+ for _, module := range modules {
+ moduleMap[module.Name()] = module
+}
+
+return moduleMap
+}
+
+// NewBasicManagerFromManager creates a new BasicManager from a Manager
+// The BasicManager will contain all AppModuleBasic from the AppModule Manager
+// Module's AppModuleBasic can be overridden by passing a custom AppModuleBasic map
+func NewBasicManagerFromManager(manager *Manager, customModuleBasics map[string]AppModuleBasic)
+
+BasicManager {
+ moduleMap := make(map[string]AppModuleBasic)
+ for name, module := range manager.Modules {
+ if customBasicMod, ok := customModuleBasics[name]; ok {
+ moduleMap[name] = customBasicMod
+ continue
+}
+ if appModule, ok := module.(appmodule.AppModule); ok {
+ moduleMap[name] = CoreAppModuleBasicAdaptor(name, appModule)
+
+continue
+}
+ if basicMod, ok := module.(AppModuleBasic); ok {
+ moduleMap[name] = basicMod
+}
+
+}
+
+return moduleMap
+}
+
+// RegisterLegacyAminoCodec registers all module codecs
+func (bm BasicManager)
+
+RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ for _, b := range bm {
+ b.RegisterLegacyAminoCodec(cdc)
+}
+}
+
+// RegisterInterfaces registers all module interface types
+func (bm BasicManager)
+
+RegisterInterfaces(registry types.InterfaceRegistry) {
+ for _, m := range bm {
+ m.RegisterInterfaces(registry)
+}
+}
+
+// DefaultGenesis provides default genesis information for all modules
+func (bm BasicManager)
+
+DefaultGenesis(cdc codec.JSONCodec)
+
+map[string]json.RawMessage {
+ genesisData := make(map[string]json.RawMessage)
+ for _, b := range bm {
+ if mod, ok := b.(HasGenesisBasics); ok {
+ genesisData[b.Name()] = mod.DefaultGenesis(cdc)
+}
+
+}
+
+return genesisData
+}
+
+// ValidateGenesis performs genesis state validation for all modules
+func (bm BasicManager)
+
+ValidateGenesis(cdc codec.JSONCodec, txEncCfg client.TxEncodingConfig, genesisData map[string]json.RawMessage)
+
+error {
+ for _, b := range bm {
+ // first check if the module is an adapted Core API Module
+ if mod, ok := b.(HasGenesisBasics); ok {
+ if err := mod.ValidateGenesis(cdc, txEncCfg, genesisData[b.Name()]); err != nil {
+ return err
+}
+
+}
+
+}
+
+return nil
+}
+
+// RegisterGRPCGatewayRoutes registers all module rest routes
+func (bm BasicManager)
+
+RegisterGRPCGatewayRoutes(clientCtx client.Context, rtr *runtime.ServeMux) {
+ for _, b := range bm {
+ b.RegisterGRPCGatewayRoutes(clientCtx, rtr)
+}
+}
+
+// AddTxCommands adds all tx commands to the rootTxCmd.
+func (bm BasicManager)
+
+AddTxCommands(rootTxCmd *cobra.Command) {
+ for _, b := range bm {
+ if mod, ok := b.(interface {
+ GetTxCmd() *cobra.Command
+}); ok {
+ if cmd := mod.GetTxCmd(); cmd != nil {
+ rootTxCmd.AddCommand(cmd)
+}
+
+}
+
+}
+}
+
+// AddQueryCommands adds all query commands to the rootQueryCmd.
+func (bm BasicManager)
+
+AddQueryCommands(rootQueryCmd *cobra.Command) {
+ for _, b := range bm {
+ if mod, ok := b.(interface {
+ GetQueryCmd() *cobra.Command
+}); ok {
+ if cmd := mod.GetQueryCmd(); cmd != nil {
+ rootQueryCmd.AddCommand(cmd)
+}
+
+}
+
+}
+}
+
+// HasGenesis is the extension interface for stateful genesis methods.
+type HasGenesis interface {
+ HasGenesisBasics
+ InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage)
+
+ExportGenesis(sdk.Context, codec.JSONCodec)
+
+json.RawMessage
+}
+
+// HasABCIGenesis is the extension interface for stateful genesis methods which returns validator updates.
+type HasABCIGenesis interface {
+ HasGenesisBasics
+ InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage) []abci.ValidatorUpdate
+ ExportGenesis(sdk.Context, codec.JSONCodec)
+
+json.RawMessage
+}
+
+// AppModule is the form for an application module. Most of
+// its functionality has been moved to extension interfaces.
+// Deprecated: use appmodule.AppModule with a combination of extension interfaes interfaces instead.
+type AppModule interface {
+ appmodule.AppModule
+
+ AppModuleBasic
+}
+
// HasInvariants is the interface for registering invariants.
//
// Deprecated: this will be removed in the next Cosmos SDK release.
type HasInvariants interface {
    // RegisterInvariants registers module invariants.
    RegisterInvariants(sdk.InvariantRegistry)
}
+
// HasServices is the legacy interface for modules to register services.
// See appmodule.HasServices for the core API equivalent.
type HasServices interface {
    // RegisterServices allows a module to register services.
    RegisterServices(Configurator)
}
+
// HasConsensusVersion is the interface for declaring a module consensus version.
type HasConsensusVersion interface {
    // ConsensusVersion is a sequence number for state-breaking change of the
    // module. It should be incremented on each consensus-breaking change
    // introduced by the module. To avoid wrong/empty versions, the initial version
    // should be set to 1.
    ConsensusVersion() uint64
}
+
// HasABCIEndblock is a released typo of HasABCIEndBlock.
// Deprecated: use HasABCIEndBlock instead.
type HasABCIEndblock HasABCIEndBlock

// HasABCIEndBlock is the interface for modules that need to run code at the end of the block
// and may return validator set updates.
type HasABCIEndBlock interface {
    AppModule
    EndBlock(context.Context) ([]abci.ValidatorUpdate, error)
}
+
// Compile-time assertions that GenesisOnlyAppModule satisfies the expected interfaces.
var (
    _ appmodule.AppModule = (*GenesisOnlyAppModule)(nil)
    _ AppModuleBasic      = (*GenesisOnlyAppModule)(nil)
)
+
// AppModuleGenesis is the standard form for an application module genesis functions.
type AppModuleGenesis interface {
    AppModuleBasic
    HasABCIGenesis
}
+
+// GenesisOnlyAppModule is an AppModule that only has import/export functionality
+type GenesisOnlyAppModule struct {
+ AppModuleGenesis
+}
+
+// NewGenesisOnlyAppModule creates a new GenesisOnlyAppModule object
+func NewGenesisOnlyAppModule(amg AppModuleGenesis)
+
+GenesisOnlyAppModule {
+ return GenesisOnlyAppModule{
+ AppModuleGenesis: amg,
+}
+}
+
+// IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+func (GenesisOnlyAppModule)
+
+IsOnePerModuleType() {
+}
+
+// IsAppModule implements the appmodule.AppModule interface.
+func (GenesisOnlyAppModule)
+
+IsAppModule() {
+}
+
+// RegisterInvariants is a placeholder function register no invariants
+func (GenesisOnlyAppModule)
+
+RegisterInvariants(_ sdk.InvariantRegistry) {
+}
+
+// ConsensusVersion implements AppModule/ConsensusVersion.
+func (gam GenesisOnlyAppModule)
+
+ConsensusVersion()
+
+uint64 {
+ return 1
+}
+
// Manager defines a module manager that provides the high level utility for managing and executing
// operations for a group of modules.
type Manager struct {
    // Modules uses `any` to support the legacy AppModule as well as the new core appmodule.AppModule.
    Modules                  map[string]any
    OrderInitGenesis         []string
    OrderExportGenesis       []string
    OrderPreBlockers         []string
    OrderBeginBlockers       []string
    OrderEndBlockers         []string
    OrderPrepareCheckStaters []string
    OrderPrecommiters        []string
    OrderMigrations          []string
}
+
+// NewManager creates a new Manager object.
+func NewManager(modules ...AppModule) *Manager {
+ moduleMap := make(map[string]any)
+ modulesStr := make([]string, 0, len(modules))
+ preBlockModulesStr := make([]string, 0)
+ for _, module := range modules {
+ if _, ok := module.(appmodule.AppModule); !ok {
+ panic(fmt.Sprintf("module %s does not implement appmodule.AppModule", module.Name()))
+}
+
+moduleMap[module.Name()] = module
+ modulesStr = append(modulesStr, module.Name())
+ if _, ok := module.(appmodule.HasPreBlocker); ok {
+ preBlockModulesStr = append(preBlockModulesStr, module.Name())
+}
+
+}
+
+return &Manager{
+ Modules: moduleMap,
+ OrderInitGenesis: modulesStr,
+ OrderExportGenesis: modulesStr,
+ OrderPreBlockers: preBlockModulesStr,
+ OrderBeginBlockers: modulesStr,
+ OrderPrepareCheckStaters: modulesStr,
+ OrderPrecommiters: modulesStr,
+ OrderEndBlockers: modulesStr,
+}
+}
+
+// NewManagerFromMap creates a new Manager object from a map of module names to module implementations.
+// This method should be used for apps and modules which have migrated to the cosmossdk.io/core.appmodule.AppModule API.
+func NewManagerFromMap(moduleMap map[string]appmodule.AppModule) *Manager {
+ simpleModuleMap := make(map[string]any)
+ modulesStr := make([]string, 0, len(simpleModuleMap))
+ preBlockModulesStr := make([]string, 0)
+ for name, module := range moduleMap {
+ simpleModuleMap[name] = module
+ modulesStr = append(modulesStr, name)
+ if _, ok := module.(appmodule.HasPreBlocker); ok {
+ preBlockModulesStr = append(preBlockModulesStr, name)
+}
+
+}
+
+ // Sort the modules by name. Given that we are using a map above we can't guarantee the order.
+ sort.Strings(modulesStr)
+
+return &Manager{
+ Modules: simpleModuleMap,
+ OrderInitGenesis: modulesStr,
+ OrderExportGenesis: modulesStr,
+ OrderPreBlockers: preBlockModulesStr,
+ OrderBeginBlockers: modulesStr,
+ OrderEndBlockers: modulesStr,
+ OrderPrecommiters: modulesStr,
+ OrderPrepareCheckStaters: modulesStr,
+}
+}
+
+// SetOrderInitGenesis sets the order of init genesis calls
+func (m *Manager)
+
+SetOrderInitGenesis(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderInitGenesis", moduleNames, func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+ return !hasGenesis
+}
+ if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+ return !hasABCIGenesis
+}
+
+ _, hasGenesis := module.(HasGenesis)
+
+return !hasGenesis
+})
+
+m.OrderInitGenesis = moduleNames
+}
+
+// SetOrderExportGenesis sets the order of export genesis calls
+func (m *Manager)
+
+SetOrderExportGenesis(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderExportGenesis", moduleNames, func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+ return !hasGenesis
+}
+ if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+ return !hasABCIGenesis
+}
+
+ _, hasGenesis := module.(HasGenesis)
+
+return !hasGenesis
+})
+
+m.OrderExportGenesis = moduleNames
+}
+
+// SetOrderPreBlockers sets the order of set pre-blocker calls
+func (m *Manager)
+
+SetOrderPreBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPreBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasBlock := module.(appmodule.HasPreBlocker)
+
+return !hasBlock
+})
+
+m.OrderPreBlockers = moduleNames
+}
+
+// SetOrderBeginBlockers sets the order of set begin-blocker calls
+func (m *Manager)
+
+SetOrderBeginBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderBeginBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasBeginBlock := module.(appmodule.HasBeginBlocker)
+
+return !hasBeginBlock
+})
+
+m.OrderBeginBlockers = moduleNames
+}
+
+// SetOrderEndBlockers sets the order of set end-blocker calls
+func (m *Manager)
+
+SetOrderEndBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderEndBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasEndBlock := module.(appmodule.HasEndBlocker); hasEndBlock {
+ return !hasEndBlock
+}
+
+ _, hasABCIEndBlock := module.(HasABCIEndBlock)
+
+return !hasABCIEndBlock
+})
+
+m.OrderEndBlockers = moduleNames
+}
+
+// SetOrderPrepareCheckStaters sets the order of set prepare-check-stater calls
+func (m *Manager)
+
+SetOrderPrepareCheckStaters(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPrepareCheckStaters", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasPrepareCheckState := module.(appmodule.HasPrepareCheckState)
+
+return !hasPrepareCheckState
+})
+
+m.OrderPrepareCheckStaters = moduleNames
+}
+
+// SetOrderPrecommiters sets the order of set precommiter calls
+func (m *Manager)
+
+SetOrderPrecommiters(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPrecommiters", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasPrecommit := module.(appmodule.HasPrecommit)
+
+return !hasPrecommit
+})
+
+m.OrderPrecommiters = moduleNames
+}
+
+// SetOrderMigrations sets the order of migrations to be run. If not set
+// then migrations will be run with an order defined in `DefaultMigrationsOrder`.
+func (m *Manager)
+
+SetOrderMigrations(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderMigrations", moduleNames, nil)
+
+m.OrderMigrations = moduleNames
+}
+
+// RegisterInvariants registers all module invariants
+//
+// Deprecated: this function is a no-op and will be removed in the next release of the Cosmos SDK.
+func (m *Manager)
+
+RegisterInvariants(_ sdk.InvariantRegistry) {
+}
+
+// RegisterServices registers all module services
+func (m *Manager)
+
+RegisterServices(cfg Configurator)
+
+error {
+ for _, module := range m.Modules {
+ if module, ok := module.(HasServices); ok {
+ module.RegisterServices(cfg)
+}
+ if module, ok := module.(appmodule.HasServices); ok {
+ err := module.RegisterServices(cfg)
+ if err != nil {
+ return err
+}
+
+}
+ if cfg.Error() != nil {
+ return cfg.Error()
+}
+
+}
+
+return nil
+}
+
+// InitGenesis performs init genesis functionality for modules. Exactly one
+// module must return a non-empty validator set update to correctly initialize
+// the chain.
+func (m *Manager)
+
+InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, genesisData map[string]json.RawMessage) (*abci.ResponseInitChain, error) {
+ var validatorUpdates []abci.ValidatorUpdate
+ ctx.Logger().Info("initializing blockchain state from genesis.json")
+ for _, moduleName := range m.OrderInitGenesis {
+ if genesisData[moduleName] == nil {
+ continue
+}
+ mod := m.Modules[moduleName]
+ // we might get an adapted module, a native core API module or a legacy module
+ if module, ok := mod.(appmodule.HasGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+ // core API genesis
+ source, err := genesis.SourceFromRawJSON(genesisData[moduleName])
+ if err != nil {
+ return &abci.ResponseInitChain{
+}, err
+}
+
+err = module.InitGenesis(ctx, source)
+ if err != nil {
+ return &abci.ResponseInitChain{
+}, err
+}
+
+}
+
+else if module, ok := mod.(HasGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+
+module.InitGenesis(ctx, cdc, genesisData[moduleName])
+}
+
+else if module, ok := mod.(HasABCIGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+ moduleValUpdates := module.InitGenesis(ctx, cdc, genesisData[moduleName])
+
+ // use these validator updates if provided, the module manager assumes
+ // only one module will update the validator set
+ if len(moduleValUpdates) > 0 {
+ if len(validatorUpdates) > 0 {
+ return &abci.ResponseInitChain{
+}, errors.New("validator InitGenesis updates already set by a previous module")
+}
+
+validatorUpdates = moduleValUpdates
+}
+
+}
+
+}
+
+ // a chain must initialize with a non-empty validator set
+ if len(validatorUpdates) == 0 {
+ return &abci.ResponseInitChain{
+}, fmt.Errorf("validator set is empty after InitGenesis, please ensure at least one validator is initialized with a delegation greater than or equal to the DefaultPowerReduction (%d)", sdk.DefaultPowerReduction)
+}
+
+return &abci.ResponseInitChain{
+ Validators: validatorUpdates,
+}, nil
+}
+
+// ExportGenesis performs export genesis functionality for modules
+func (m *Manager)
+
+ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) (map[string]json.RawMessage, error) {
+ return m.ExportGenesisForModules(ctx, cdc, []string{
+})
+}
+
+// ExportGenesisForModules performs export genesis functionality for modules
+func (m *Manager)
+
+ExportGenesisForModules(ctx sdk.Context, cdc codec.JSONCodec, modulesToExport []string) (map[string]json.RawMessage, error) {
+ if len(modulesToExport) == 0 {
+ modulesToExport = m.OrderExportGenesis
+}
+ // verify modules exist in the app, so that we don't panic in the middle of an export
+ if err := m.checkModulesExists(modulesToExport); err != nil {
+ return nil, err
+}
+
+type genesisResult struct {
+ bz json.RawMessage
+ err error
+}
+ channels := make(map[string]chan genesisResult)
+ for _, moduleName := range modulesToExport {
+ mod := m.Modules[moduleName]
+ if module, ok := mod.(appmodule.HasGenesis); ok {
+ // core API genesis
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module appmodule.HasGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ target := genesis.RawJSONTarget{
+}
+ err := module.ExportGenesis(ctx, target.Target())
+ if err != nil {
+ ch <- genesisResult{
+ nil, err
+}
+
+return
+}
+
+rawJSON, err := target.JSON()
+ if err != nil {
+ ch <- genesisResult{
+ nil, err
+}
+
+return
+}
+
+ch <- genesisResult{
+ rawJSON, nil
+}
+
+}(module, channels[moduleName])
+}
+
+else if module, ok := mod.(HasGenesis); ok {
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module HasGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ ch <- genesisResult{
+ module.ExportGenesis(ctx, cdc), nil
+}
+
+}(module, channels[moduleName])
+}
+
+else if module, ok := mod.(HasABCIGenesis); ok {
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module HasABCIGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ ch <- genesisResult{
+ module.ExportGenesis(ctx, cdc), nil
+}
+
+}(module, channels[moduleName])
+}
+
+}
+ genesisData := make(map[string]json.RawMessage)
+ for moduleName := range channels {
+ res := <-channels[moduleName]
+ if res.err != nil {
+ return nil, fmt.Errorf("genesis export error in %s: %w", moduleName, res.err)
+}
+
+genesisData[moduleName] = res.bz
+}
+
+return genesisData, nil
+}
+
+// checkModulesExists verifies that all modules in the list exist in the app
+func (m *Manager)
+
+checkModulesExists(moduleName []string)
+
+error {
+ for _, name := range moduleName {
+ if _, ok := m.Modules[name]; !ok {
+ return fmt.Errorf("module %s does not exist", name)
+}
+
+}
+
+return nil
+}
+
+// assertNoForgottenModules checks that we didn't forget any modules in the SetOrder* functions.
+// `pass` is a closure which allows one to omit modules from `moduleNames`.
+// If you provide non-nil `pass` and it returns true, the module would not be subject of the assertion.
+func (m *Manager)
+
+assertNoForgottenModules(setOrderFnName string, moduleNames []string, pass func(moduleName string)
+
+bool) {
+ ms := make(map[string]bool)
+ for _, m := range moduleNames {
+ ms[m] = true
+}
+
+var missing []string
+ for m := range m.Modules {
+ if pass != nil && pass(m) {
+ continue
+}
+ if !ms[m] {
+ missing = append(missing, m)
+}
+
+}
+ if len(missing) != 0 {
+ sort.Strings(missing)
+
+panic(fmt.Sprintf(
+ "all modules must be defined when setting %s, missing: %v", setOrderFnName, missing))
+}
+}
+
+// MigrationHandler is the migration function that each module registers.
+type MigrationHandler func(sdk.Context)
+
+error
+
+// VersionMap is a map of moduleName -> version
+type VersionMap map[string]uint64
+
+// RunMigrations performs in-place store migrations for all modules. This
+// function MUST be called inside an x/upgrade UpgradeHandler.
+//
+// Recall that in an upgrade handler, the `fromVM` VersionMap is retrieved from
+// x/upgrade's store, and the function needs to return the target VersionMap
+// that will in turn be persisted to the x/upgrade's store. In general,
+// returning RunMigrations should be enough:
+//
+// Example:
+//
+// cfg := module.NewConfigurator(...)
+// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+// return app.mm.RunMigrations(ctx, cfg, fromVM)
+//
+})
+//
+// Internally, RunMigrations will perform the following steps:
+// - create an `updatedVM` VersionMap of module with their latest ConsensusVersion
+// - make a diff of `fromVM` and `updatedVM`, and for each module:
+// - if the module's `fromVM` version is less than its `updatedVM` version,
+// then run in-place store migrations for that module between those versions.
+// - if the module does not exist in the `fromVM` (which means that it's a new module,
+// because it was not in the previous x/upgrade's store), then run
+// `InitGenesis` on that module.
+//
+// - return the `updatedVM` to be persisted in the x/upgrade's store.
+//
+// Migrations are run in an order defined by `Manager.OrderMigrations` or (if not set)
+// defined by the `DefaultMigrationsOrder` function.
+//
+// As an app developer, if you wish to skip running InitGenesis for your new
+// module "foo", you need to manually pass a `fromVM` argument to this function
+// with foo's module version set to its latest ConsensusVersion. That way, the diff
+// between the function's `fromVM` and `updatedVM` will be empty, hence not
+// running anything for foo.
+//
+// Example:
+//
+// cfg := module.NewConfigurator(...)
+// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+// // Assume "foo" is a new module.
+// // `fromVM` is fetched from existing x/upgrade store. Since foo didn't exist
+// // before this upgrade, `v, exists := fromVM["foo"]; exists == false`, and RunMigration will by default
+// // run InitGenesis on foo.
+// // To skip running foo's InitGenesis, you need to set `fromVM`'s foo to its latest
+// // consensus version:
+// fromVM["foo"] = foo.AppModule{
+}.ConsensusVersion()
+//
+// return app.mm.RunMigrations(ctx, cfg, fromVM)
+//
+})
+//
+// Please also refer to /sdk/v0.53/build/migrations/upgrading for more information.
+func (m Manager)
+
+RunMigrations(ctx context.Context, cfg Configurator, fromVM VersionMap) (VersionMap, error) {
+ c, ok := cfg.(*configurator)
+ if !ok {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidType, "expected %T, got %T", &configurator{
+}, cfg)
+}
+ modules := m.OrderMigrations
+ if modules == nil {
+ modules = DefaultMigrationsOrder(m.ModuleNames())
+}
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ updatedVM := VersionMap{
+}
+ for _, moduleName := range modules {
+ module := m.Modules[moduleName]
+ fromVersion, exists := fromVM[moduleName]
+ toVersion := uint64(0)
+ if module, ok := module.(HasConsensusVersion); ok {
+ toVersion = module.ConsensusVersion()
+}
+
+ // We run migration if the module is specified in `fromVM`.
+ // Otherwise we run InitGenesis.
+ //
+ // The module won't exist in the fromVM in two cases:
+ // 1. A new module is added. In this case we run InitGenesis with an
+ // empty genesis state.
+ // 2. An existing chain is upgrading from version < 0.43 to v0.43+ for the first time.
+ // In this case, all modules have yet to be added to x/upgrade's VersionMap store.
+ if exists {
+ err := c.runModuleMigrations(sdkCtx, moduleName, fromVersion, toVersion)
+ if err != nil {
+ return nil, err
+}
+
+}
+
+else {
+ sdkCtx.Logger().Info(fmt.Sprintf("adding a new module: %s", moduleName))
+ if module, ok := m.Modules[moduleName].(HasGenesis); ok {
+ module.InitGenesis(sdkCtx, c.cdc, module.DefaultGenesis(c.cdc))
+}
+ if module, ok := m.Modules[moduleName].(HasABCIGenesis); ok {
+ moduleValUpdates := module.InitGenesis(sdkCtx, c.cdc, module.DefaultGenesis(c.cdc))
+ // The module manager assumes only one module will update the
+ // validator set, and it can't be a new module.
+ if len(moduleValUpdates) > 0 {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrLogic, "validator InitGenesis update is already set by another module")
+}
+
+}
+
+}
+
+updatedVM[moduleName] = toVersion
+}
+
+return updatedVM, nil
+}
+
+// PreBlock performs begin block functionality for upgrade module.
+// It takes the current context as a parameter and returns a boolean value
+// indicating whether the migration was successfully executed or not.
+func (m *Manager)
+
+PreBlock(ctx sdk.Context) (*sdk.ResponsePreBlock, error) {
+ paramsChanged := false
+ for _, moduleName := range m.OrderPreBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasPreBlocker); ok {
+ rsp, err := module.PreBlock(ctx)
+ if err != nil {
+ return nil, err
+}
+ if rsp.IsConsensusParamsChanged() {
+ paramsChanged = true
+}
+
+}
+
+}
+
+return &sdk.ResponsePreBlock{
+ ConsensusParamsChanged: paramsChanged,
+}, nil
+}
+
+// BeginBlock performs begin block functionality for all modules. It creates a
+// child context with an event manager to aggregate events emitted from all
+// modules.
+func (m *Manager)
+
+BeginBlock(ctx sdk.Context) (sdk.BeginBlock, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+ for _, moduleName := range m.OrderBeginBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasBeginBlocker); ok {
+ if err := module.BeginBlock(ctx); err != nil {
+ return sdk.BeginBlock{
+}, err
+}
+
+}
+
+}
+
+return sdk.BeginBlock{
+ Events: ctx.EventManager().ABCIEvents(),
+}, nil
+}
+
+// EndBlock performs end block functionality for all modules. It creates a
+// child context with an event manager to aggregate events emitted from all
+// modules.
+func (m *Manager)
+
+EndBlock(ctx sdk.Context) (sdk.EndBlock, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+ validatorUpdates := []abci.ValidatorUpdate{
+}
+ for _, moduleName := range m.OrderEndBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasEndBlocker); ok {
+ err := module.EndBlock(ctx)
+ if err != nil {
+ return sdk.EndBlock{
+}, err
+}
+
+}
+
+else if module, ok := m.Modules[moduleName].(HasABCIEndBlock); ok {
+ moduleValUpdates, err := module.EndBlock(ctx)
+ if err != nil {
+ return sdk.EndBlock{
+}, err
+}
+ // use these validator updates if provided, the module manager assumes
+ // only one module will update the validator set
+ if len(moduleValUpdates) > 0 {
+ if len(validatorUpdates) > 0 {
+ return sdk.EndBlock{
+}, errors.New("validator EndBlock updates already set by a previous module")
+}
+ for _, updates := range moduleValUpdates {
+ validatorUpdates = append(validatorUpdates, abci.ValidatorUpdate{
+ PubKey: updates.PubKey,
+ Power: updates.Power
+})
+}
+
+}
+
+}
+
+else {
+ continue
+}
+
+}
+
+return sdk.EndBlock{
+ ValidatorUpdates: validatorUpdates,
+ Events: ctx.EventManager().ABCIEvents(),
+}, nil
+}
+
+// Precommit performs precommit functionality for all modules.
+func (m *Manager)
+
+Precommit(ctx sdk.Context)
+
+error {
+ for _, moduleName := range m.OrderPrecommiters {
+ module, ok := m.Modules[moduleName].(appmodule.HasPrecommit)
+ if !ok {
+ continue
+}
+ if err := module.Precommit(ctx); err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+// PrepareCheckState performs functionality for preparing the check state for all modules.
+func (m *Manager)
+
+PrepareCheckState(ctx sdk.Context)
+
+error {
+ for _, moduleName := range m.OrderPrepareCheckStaters {
+ module, ok := m.Modules[moduleName].(appmodule.HasPrepareCheckState)
+ if !ok {
+ continue
+}
+ if err := module.PrepareCheckState(ctx); err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+// GetVersionMap gets consensus version from all modules
+func (m *Manager)
+
+GetVersionMap()
+
+VersionMap {
+ vermap := make(VersionMap)
+ for name, v := range m.Modules {
+ version := uint64(0)
+ if v, ok := v.(HasConsensusVersion); ok {
+ version = v.ConsensusVersion()
+}
+
+vermap[name] = version
+}
+
+return vermap
+}
+
+// ModuleNames returns list of all module names, without any particular order.
+func (m *Manager)
+
+ModuleNames() []string {
+ return slices.Collect(maps.Keys(m.Modules))
+}
+
+// DefaultMigrationsOrder returns a default migrations order: ascending alphabetical by module name,
+// except x/auth which will run last, see:
+// https://github.com/cosmos/cosmos-sdk/issues/10591
+func DefaultMigrationsOrder(modules []string) []string {
+ const authName = "auth"
+ out := make([]string, 0, len(modules))
+ hasAuth := false
+ for _, m := range modules {
+ if m == authName {
+ hasAuth = true
+}
+
+else {
+ out = append(out, m)
+}
+
+}
+
+sort.Strings(out)
+ if hasAuth {
+ out = append(out, authName)
+}
+
+return out
+}
+```
+
+* `RegisterInvariants(sdk.InvariantRegistry)`: Registers the [`invariants`](/sdk/v0.53/build/building-modules/invariants) of the module. If an invariant deviates from its predicted value, the [`InvariantRegistry`](/sdk/v0.53/build/building-modules/invariants#invariant-registry) triggers appropriate logic (most often the chain will be halted).
+
+### `HasServices`
+
+This interface defines one method. It allows checking whether a module can register services.
+
+#### `appmodule.HasService`
+
+```go expandable
+package appmodule
+
+import (
+
+ "context"
+ "google.golang.org/grpc"
+ "cosmossdk.io/depinject"
+)
+
+// AppModule is a tag interface for app module implementations to use as a basis
+// for extension interfaces. It provides no functionality itself, but is the
+// type that all valid app modules should provide so that they can be identified
+// by other modules (usually via depinject)
+
+as app modules.
+type AppModule interface {
+ depinject.OnePerModuleType
+
+ // IsAppModule is a dummy method to tag a struct as implementing an AppModule.
+ IsAppModule()
+}
+
+// HasServices is the extension interface that modules should implement to register
+// implementations of services defined in .proto files.
+type HasServices interface {
+ AppModule
+
+ // RegisterServices registers the module's services with the app's service
+ // registrar.
+ //
+ // Two types of services are currently supported:
+ // - read-only gRPC query services, which are the default.
+ // - transaction message services, which must have the protobuf service
+ // option "cosmos.msg.v1.service" (defined in "cosmos/msg/v1/service.proto")
+ // set to true.
+ //
+ // The service registrar will figure out which type of service you are
+ // implementing based on the presence (or absence)
+
+of protobuf options. You
+ // do not need to specify this in golang code.
+ RegisterServices(grpc.ServiceRegistrar)
+
+error
+}
+
+// HasPrepareCheckState is an extension interface that contains information about the AppModule
+// and PrepareCheckState.
+type HasPrepareCheckState interface {
+ AppModule
+ PrepareCheckState(context.Context)
+
+error
+}
+
+// HasPrecommit is an extension interface that contains information about the AppModule and Precommit.
+type HasPrecommit interface {
+ AppModule
+ Precommit(context.Context)
+
+error
+}
+
+// HasBeginBlocker is the extension interface that modules should implement to run
+// custom logic before transaction processing in a block.
+type HasBeginBlocker interface {
+ AppModule
+
+ // BeginBlock is a method that will be run before transactions are processed in
+ // a block.
+ BeginBlock(context.Context)
+
+error
+}
+
+// HasEndBlocker is the extension interface that modules should implement to run
+// custom logic after transaction processing in a block.
+type HasEndBlocker interface {
+ AppModule
+
+ // EndBlock is a method that will be run after transactions are processed in
+ // a block.
+ EndBlock(context.Context)
+
+error
+}
+```
+
+#### `module.HasServices`
+
+```go expandable
+/*
+Package module contains application module patterns and associated "manager" functionality.
+The module pattern has been broken down by:
+ - independent module functionality (AppModuleBasic)
+ - inter-dependent module simulation functionality (AppModuleSimulation)
+ - inter-dependent module full functionality (AppModule)
+
+inter-dependent module functionality is module functionality which somehow
+depends on other modules, typically through the module keeper. Many of the
+module keepers are dependent on each other, thus in order to access the full
+set of module functionality we need to define all the keepers/params-store/keys
+etc. This full set of advanced functionality is defined by the AppModule interface.
+
+Independent module functions are separated to allow for the construction of the
+basic application structures required early on in the application definition
+and used to enable the definition of full module functionality later in the
+process. This separation is necessary, however we still want to allow for a
+high level pattern for modules to follow - for instance, such that we don't
+have to manually register all of the codecs for all the modules. This basic
+procedure as well as other basic patterns are handled through the use of
+BasicManager.
+
+Lastly the interface for genesis functionality (HasGenesis & HasABCIGenesis)
+
+has been
+separated out from full module functionality (AppModule)
+
+so that modules which
+are only used for genesis can take advantage of the Module patterns without
+needlessly defining many placeholder functions
+*/
+package module
+
+import (
+
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "maps"
+ "slices"
+ "sort"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/core/genesis"
+ errorsmod "cosmossdk.io/errors"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// AppModuleBasic is the standard form for basic non-dependent elements of an application module.
+type AppModuleBasic interface {
+ HasName
+ RegisterLegacyAminoCodec(*codec.LegacyAmino)
+
+RegisterInterfaces(types.InterfaceRegistry)
+
+RegisterGRPCGatewayRoutes(client.Context, *runtime.ServeMux)
+}
+
+// HasName allows the module to provide its own name for legacy purposes.
+// Newer apps should specify the name for their modules using a map
+// using NewManagerFromMap.
+type HasName interface {
+ Name()
+
+string
+}
+
+// HasGenesisBasics is the legacy interface for stateless genesis methods.
+type HasGenesisBasics interface {
+ DefaultGenesis(codec.JSONCodec)
+
+json.RawMessage
+ ValidateGenesis(codec.JSONCodec, client.TxEncodingConfig, json.RawMessage)
+
+error
+}
+
+// BasicManager is a collection of AppModuleBasic
+type BasicManager map[string]AppModuleBasic
+
+// NewBasicManager creates a new BasicManager object
+func NewBasicManager(modules ...AppModuleBasic)
+
+BasicManager {
+ moduleMap := make(map[string]AppModuleBasic)
+ for _, module := range modules {
+ moduleMap[module.Name()] = module
+}
+
+return moduleMap
+}
+
+// NewBasicManagerFromManager creates a new BasicManager from a Manager
+// The BasicManager will contain all AppModuleBasic from the AppModule Manager
+// Module's AppModuleBasic can be overridden by passing a custom AppModuleBasic map
+func NewBasicManagerFromManager(manager *Manager, customModuleBasics map[string]AppModuleBasic)
+
+BasicManager {
+ moduleMap := make(map[string]AppModuleBasic)
+ for name, module := range manager.Modules {
+ if customBasicMod, ok := customModuleBasics[name]; ok {
+ moduleMap[name] = customBasicMod
+ continue
+}
+ if appModule, ok := module.(appmodule.AppModule); ok {
+ moduleMap[name] = CoreAppModuleBasicAdaptor(name, appModule)
+
+continue
+}
+ if basicMod, ok := module.(AppModuleBasic); ok {
+ moduleMap[name] = basicMod
+}
+
+}
+
+return moduleMap
+}
+
+// RegisterLegacyAminoCodec registers all module codecs
+func (bm BasicManager)
+
+RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ for _, b := range bm {
+ b.RegisterLegacyAminoCodec(cdc)
+}
+}
+
+// RegisterInterfaces registers all module interface types
+func (bm BasicManager)
+
+RegisterInterfaces(registry types.InterfaceRegistry) {
+ for _, m := range bm {
+ m.RegisterInterfaces(registry)
+}
+}
+
+// DefaultGenesis provides default genesis information for all modules
+func (bm BasicManager)
+
+DefaultGenesis(cdc codec.JSONCodec)
+
+map[string]json.RawMessage {
+ genesisData := make(map[string]json.RawMessage)
+ for _, b := range bm {
+ if mod, ok := b.(HasGenesisBasics); ok {
+ genesisData[b.Name()] = mod.DefaultGenesis(cdc)
+}
+
+}
+
+return genesisData
+}
+
+// ValidateGenesis performs genesis state validation for all modules
+func (bm BasicManager)
+
+ValidateGenesis(cdc codec.JSONCodec, txEncCfg client.TxEncodingConfig, genesisData map[string]json.RawMessage)
+
+error {
+ for _, b := range bm {
+ // first check if the module is an adapted Core API Module
+ if mod, ok := b.(HasGenesisBasics); ok {
+ if err := mod.ValidateGenesis(cdc, txEncCfg, genesisData[b.Name()]); err != nil {
+ return err
+}
+
+}
+
+}
+
+return nil
+}
+
+// RegisterGRPCGatewayRoutes registers all module rest routes
+func (bm BasicManager)
+
+RegisterGRPCGatewayRoutes(clientCtx client.Context, rtr *runtime.ServeMux) {
+ for _, b := range bm {
+ b.RegisterGRPCGatewayRoutes(clientCtx, rtr)
+}
+}
+
+// AddTxCommands adds all tx commands to the rootTxCmd.
+func (bm BasicManager)
+
+AddTxCommands(rootTxCmd *cobra.Command) {
+ for _, b := range bm {
+ if mod, ok := b.(interface {
+ GetTxCmd() *cobra.Command
+}); ok {
+ if cmd := mod.GetTxCmd(); cmd != nil {
+ rootTxCmd.AddCommand(cmd)
+}
+
+}
+
+}
+}
+
+// AddQueryCommands adds all query commands to the rootQueryCmd.
+func (bm BasicManager)
+
+AddQueryCommands(rootQueryCmd *cobra.Command) {
+ for _, b := range bm {
+ if mod, ok := b.(interface {
+ GetQueryCmd() *cobra.Command
+}); ok {
+ if cmd := mod.GetQueryCmd(); cmd != nil {
+ rootQueryCmd.AddCommand(cmd)
+}
+
+}
+
+}
+}
+
+// HasGenesis is the extension interface for stateful genesis methods.
+type HasGenesis interface {
+ HasGenesisBasics
+ InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage)
+
+ExportGenesis(sdk.Context, codec.JSONCodec)
+
+json.RawMessage
+}
+
+// HasABCIGenesis is the extension interface for stateful genesis methods which returns validator updates.
+type HasABCIGenesis interface {
+ HasGenesisBasics
+ InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage) []abci.ValidatorUpdate
+ ExportGenesis(sdk.Context, codec.JSONCodec)
+
+json.RawMessage
+}
+
+// AppModule is the form for an application module. Most of
+// its functionality has been moved to extension interfaces.
+// Deprecated: use appmodule.AppModule with a combination of extension interfaces instead.
+type AppModule interface {
+ appmodule.AppModule
+
+ AppModuleBasic
+}
+
+// HasInvariants is the interface for registering invariants.
+//
+// Deprecated: this will be removed in the next Cosmos SDK release.
+type HasInvariants interface {
+ // RegisterInvariants registers module invariants.
+ RegisterInvariants(sdk.InvariantRegistry)
+}
+
+// HasServices is the interface for modules to register services.
+type HasServices interface {
+ // RegisterServices allows a module to register services.
+ RegisterServices(Configurator)
+}
+
+// HasConsensusVersion is the interface for declaring a module consensus version.
+type HasConsensusVersion interface {
+ // ConsensusVersion is a sequence number for state-breaking change of the
+ // module. It should be incremented on each consensus-breaking change
+ // introduced by the module. To avoid wrong/empty versions, the initial version
+ // should be set to 1.
+ ConsensusVersion()
+
+uint64
+}
+
+// HasABCIEndblock is a released typo of HasABCIEndBlock.
+// Deprecated: use HasABCIEndBlock instead.
+type HasABCIEndblock HasABCIEndBlock
+
+// HasABCIEndBlock is the interface for modules that need to run code at the end of the block.
+type HasABCIEndBlock interface {
+ AppModule
+ EndBlock(context.Context) ([]abci.ValidatorUpdate, error)
+}
+
+var (
+ _ appmodule.AppModule = (*GenesisOnlyAppModule)(nil)
+ _ AppModuleBasic = (*GenesisOnlyAppModule)(nil)
+)
+
+// AppModuleGenesis is the standard form for an application module genesis functions
+type AppModuleGenesis interface {
+ AppModuleBasic
+ HasABCIGenesis
+}
+
+// GenesisOnlyAppModule is an AppModule that only has import/export functionality
+type GenesisOnlyAppModule struct {
+ AppModuleGenesis
+}
+
+// NewGenesisOnlyAppModule creates a new GenesisOnlyAppModule object
+func NewGenesisOnlyAppModule(amg AppModuleGenesis)
+
+GenesisOnlyAppModule {
+ return GenesisOnlyAppModule{
+ AppModuleGenesis: amg,
+}
+}
+
+// IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+func (GenesisOnlyAppModule)
+
+IsOnePerModuleType() {
+}
+
+// IsAppModule implements the appmodule.AppModule interface.
+func (GenesisOnlyAppModule)
+
+IsAppModule() {
+}
+
+// RegisterInvariants is a placeholder function register no invariants
+func (GenesisOnlyAppModule)
+
+RegisterInvariants(_ sdk.InvariantRegistry) {
+}
+
+// ConsensusVersion implements AppModule/ConsensusVersion.
+func (gam GenesisOnlyAppModule)
+
+ConsensusVersion()
+
+uint64 {
+ return 1
+}
+
+// Manager defines a module manager that provides the high level utility for managing and executing
+// operations for a group of modules
+type Manager struct {
+ Modules map[string]any // interface{
+}
+
+is used now to support the legacy AppModule as well as new core appmodule.AppModule.
+ OrderInitGenesis []string
+ OrderExportGenesis []string
+ OrderPreBlockers []string
+ OrderBeginBlockers []string
+ OrderEndBlockers []string
+ OrderPrepareCheckStaters []string
+ OrderPrecommiters []string
+ OrderMigrations []string
+}
+
+// NewManager creates a new Manager object.
+func NewManager(modules ...AppModule) *Manager {
+ moduleMap := make(map[string]any)
+ modulesStr := make([]string, 0, len(modules))
+ preBlockModulesStr := make([]string, 0)
+ for _, module := range modules {
+ if _, ok := module.(appmodule.AppModule); !ok {
+ panic(fmt.Sprintf("module %s does not implement appmodule.AppModule", module.Name()))
+}
+
+moduleMap[module.Name()] = module
+ modulesStr = append(modulesStr, module.Name())
+ if _, ok := module.(appmodule.HasPreBlocker); ok {
+ preBlockModulesStr = append(preBlockModulesStr, module.Name())
+}
+
+}
+
+return &Manager{
+ Modules: moduleMap,
+ OrderInitGenesis: modulesStr,
+ OrderExportGenesis: modulesStr,
+ OrderPreBlockers: preBlockModulesStr,
+ OrderBeginBlockers: modulesStr,
+ OrderPrepareCheckStaters: modulesStr,
+ OrderPrecommiters: modulesStr,
+ OrderEndBlockers: modulesStr,
+}
+}
+
+// NewManagerFromMap creates a new Manager object from a map of module names to module implementations.
+// This method should be used for apps and modules which have migrated to the cosmossdk.io/core.appmodule.AppModule API.
+func NewManagerFromMap(moduleMap map[string]appmodule.AppModule) *Manager {
+ simpleModuleMap := make(map[string]any)
+ modulesStr := make([]string, 0, len(simpleModuleMap))
+ preBlockModulesStr := make([]string, 0)
+ for name, module := range moduleMap {
+ simpleModuleMap[name] = module
+ modulesStr = append(modulesStr, name)
+ if _, ok := module.(appmodule.HasPreBlocker); ok {
+ preBlockModulesStr = append(preBlockModulesStr, name)
+}
+
+}
+
+ // Sort the modules by name. Given that we are using a map above we can't guarantee the order.
+ sort.Strings(modulesStr)
+
+return &Manager{
+ Modules: simpleModuleMap,
+ OrderInitGenesis: modulesStr,
+ OrderExportGenesis: modulesStr,
+ OrderPreBlockers: preBlockModulesStr,
+ OrderBeginBlockers: modulesStr,
+ OrderEndBlockers: modulesStr,
+ OrderPrecommiters: modulesStr,
+ OrderPrepareCheckStaters: modulesStr,
+}
+}
+
+// SetOrderInitGenesis sets the order of init genesis calls
+func (m *Manager)
+
+SetOrderInitGenesis(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderInitGenesis", moduleNames, func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+ return !hasGenesis
+}
+ if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+ return !hasABCIGenesis
+}
+
+ _, hasGenesis := module.(HasGenesis)
+
+return !hasGenesis
+})
+
+m.OrderInitGenesis = moduleNames
+}
+
+// SetOrderExportGenesis sets the order of export genesis calls
+func (m *Manager)
+
+SetOrderExportGenesis(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderExportGenesis", moduleNames, func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+ return !hasGenesis
+}
+ if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+ return !hasABCIGenesis
+}
+
+ _, hasGenesis := module.(HasGenesis)
+
+return !hasGenesis
+})
+
+m.OrderExportGenesis = moduleNames
+}
+
+// SetOrderPreBlockers sets the order of set pre-blocker calls
+func (m *Manager)
+
+SetOrderPreBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPreBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasBlock := module.(appmodule.HasPreBlocker)
+
+return !hasBlock
+})
+
+m.OrderPreBlockers = moduleNames
+}
+
+// SetOrderBeginBlockers sets the order of set begin-blocker calls
+func (m *Manager)
+
+SetOrderBeginBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderBeginBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasBeginBlock := module.(appmodule.HasBeginBlocker)
+
+return !hasBeginBlock
+})
+
+m.OrderBeginBlockers = moduleNames
+}
+
+// SetOrderEndBlockers sets the order of set end-blocker calls
+func (m *Manager)
+
+SetOrderEndBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderEndBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasEndBlock := module.(appmodule.HasEndBlocker); hasEndBlock {
+ return !hasEndBlock
+}
+
+ _, hasABCIEndBlock := module.(HasABCIEndBlock)
+
+return !hasABCIEndBlock
+})
+
+m.OrderEndBlockers = moduleNames
+}
+
+// SetOrderPrepareCheckStaters sets the order of set prepare-check-stater calls
+func (m *Manager)
+
+SetOrderPrepareCheckStaters(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPrepareCheckStaters", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasPrepareCheckState := module.(appmodule.HasPrepareCheckState)
+
+return !hasPrepareCheckState
+})
+
+m.OrderPrepareCheckStaters = moduleNames
+}
+
+// SetOrderPrecommiters sets the order of set precommiter calls
+func (m *Manager)
+
+SetOrderPrecommiters(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPrecommiters", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasPrecommit := module.(appmodule.HasPrecommit)
+
+return !hasPrecommit
+})
+
+m.OrderPrecommiters = moduleNames
+}
+
+// SetOrderMigrations sets the order of migrations to be run. If not set
+// then migrations will be run with an order defined in `DefaultMigrationsOrder`.
+func (m *Manager)
+
+SetOrderMigrations(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderMigrations", moduleNames, nil)
+
+m.OrderMigrations = moduleNames
+}
+
+// RegisterInvariants registers all module invariants
+//
+// Deprecated: this function is a no-op and will be removed in the next release of the Cosmos SDK.
+func (m *Manager)
+
+RegisterInvariants(_ sdk.InvariantRegistry) {
+}
+
+// RegisterServices registers all module services
+func (m *Manager)
+
+RegisterServices(cfg Configurator)
+
+error {
+ for _, module := range m.Modules {
+ if module, ok := module.(HasServices); ok {
+ module.RegisterServices(cfg)
+}
+ if module, ok := module.(appmodule.HasServices); ok {
+ err := module.RegisterServices(cfg)
+ if err != nil {
+ return err
+}
+
+}
+ if cfg.Error() != nil {
+ return cfg.Error()
+}
+
+}
+
+return nil
+}
+
+// InitGenesis performs init genesis functionality for modules. Exactly one
+// module must return a non-empty validator set update to correctly initialize
+// the chain.
+func (m *Manager)
+
+InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, genesisData map[string]json.RawMessage) (*abci.ResponseInitChain, error) {
+ var validatorUpdates []abci.ValidatorUpdate
+ ctx.Logger().Info("initializing blockchain state from genesis.json")
+ for _, moduleName := range m.OrderInitGenesis {
+ if genesisData[moduleName] == nil {
+ continue
+}
+ mod := m.Modules[moduleName]
+ // we might get an adapted module, a native core API module or a legacy module
+ if module, ok := mod.(appmodule.HasGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+ // core API genesis
+ source, err := genesis.SourceFromRawJSON(genesisData[moduleName])
+ if err != nil {
+ return &abci.ResponseInitChain{
+}, err
+}
+
+err = module.InitGenesis(ctx, source)
+ if err != nil {
+ return &abci.ResponseInitChain{
+}, err
+}
+
+}
+
+else if module, ok := mod.(HasGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+
+module.InitGenesis(ctx, cdc, genesisData[moduleName])
+}
+
+else if module, ok := mod.(HasABCIGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+ moduleValUpdates := module.InitGenesis(ctx, cdc, genesisData[moduleName])
+
+ // use these validator updates if provided, the module manager assumes
+ // only one module will update the validator set
+ if len(moduleValUpdates) > 0 {
+ if len(validatorUpdates) > 0 {
+ return &abci.ResponseInitChain{
+}, errors.New("validator InitGenesis updates already set by a previous module")
+}
+
+validatorUpdates = moduleValUpdates
+}
+
+}
+
+}
+
+ // a chain must initialize with a non-empty validator set
+ if len(validatorUpdates) == 0 {
+ return &abci.ResponseInitChain{
+}, fmt.Errorf("validator set is empty after InitGenesis, please ensure at least one validator is initialized with a delegation greater than or equal to the DefaultPowerReduction (%d)", sdk.DefaultPowerReduction)
+}
+
+return &abci.ResponseInitChain{
+ Validators: validatorUpdates,
+}, nil
+}
+
+// ExportGenesis performs export genesis functionality for modules
+func (m *Manager)
+
+ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) (map[string]json.RawMessage, error) {
+ return m.ExportGenesisForModules(ctx, cdc, []string{
+})
+}
+
+// ExportGenesisForModules performs export genesis functionality for modules
+func (m *Manager)
+
+ExportGenesisForModules(ctx sdk.Context, cdc codec.JSONCodec, modulesToExport []string) (map[string]json.RawMessage, error) {
+ if len(modulesToExport) == 0 {
+ modulesToExport = m.OrderExportGenesis
+}
+ // verify modules exists in app, so that we don't panic in the middle of an export
+ if err := m.checkModulesExists(modulesToExport); err != nil {
+ return nil, err
+}
+
+type genesisResult struct {
+ bz json.RawMessage
+ err error
+}
+ channels := make(map[string]chan genesisResult)
+ for _, moduleName := range modulesToExport {
+ mod := m.Modules[moduleName]
+ if module, ok := mod.(appmodule.HasGenesis); ok {
+ // core API genesis
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module appmodule.HasGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ target := genesis.RawJSONTarget{
+}
+ err := module.ExportGenesis(ctx, target.Target())
+ if err != nil {
+ ch <- genesisResult{
+ nil, err
+}
+
+return
+}
+
+rawJSON, err := target.JSON()
+ if err != nil {
+ ch <- genesisResult{
+ nil, err
+}
+
+return
+}
+
+ch <- genesisResult{
+ rawJSON, nil
+}
+
+}(module, channels[moduleName])
+}
+
+else if module, ok := mod.(HasGenesis); ok {
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module HasGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ ch <- genesisResult{
+ module.ExportGenesis(ctx, cdc), nil
+}
+
+}(module, channels[moduleName])
+}
+
+else if module, ok := mod.(HasABCIGenesis); ok {
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module HasABCIGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ ch <- genesisResult{
+ module.ExportGenesis(ctx, cdc), nil
+}
+
+}(module, channels[moduleName])
+}
+
+}
+ genesisData := make(map[string]json.RawMessage)
+ for moduleName := range channels {
+ res := <-channels[moduleName]
+ if res.err != nil {
+ return nil, fmt.Errorf("genesis export error in %s: %w", moduleName, res.err)
+}
+
+genesisData[moduleName] = res.bz
+}
+
+return genesisData, nil
+}
+
+// checkModulesExists verifies that all modules in the list exist in the app
+func (m *Manager)
+
+checkModulesExists(moduleName []string)
+
+error {
+ for _, name := range moduleName {
+ if _, ok := m.Modules[name]; !ok {
+ return fmt.Errorf("module %s does not exist", name)
+}
+
+}
+
+return nil
+}
+
+// assertNoForgottenModules checks that we didn't forget any modules in the SetOrder* functions.
+// `pass` is a closure which allows one to omit modules from `moduleNames`.
+// If you provide non-nil `pass` and it returns true, the module would not be subject of the assertion.
+func (m *Manager)
+
+assertNoForgottenModules(setOrderFnName string, moduleNames []string, pass func(moduleName string)
+
+bool) {
+ ms := make(map[string]bool)
+ for _, m := range moduleNames {
+ ms[m] = true
+}
+
+var missing []string
+ for m := range m.Modules {
+ if pass != nil && pass(m) {
+ continue
+}
+ if !ms[m] {
+ missing = append(missing, m)
+}
+
+}
+ if len(missing) != 0 {
+ sort.Strings(missing)
+
+panic(fmt.Sprintf(
+ "all modules must be defined when setting %s, missing: %v", setOrderFnName, missing))
+}
+}
+
+// MigrationHandler is the migration function that each module registers.
+type MigrationHandler func(sdk.Context)
+
+error
+
+// VersionMap is a map of moduleName -> version
+type VersionMap map[string]uint64
+
+// RunMigrations performs in-place store migrations for all modules. This
+// function MUST be called insde an x/upgrade UpgradeHandler.
+//
+// Recall that in an upgrade handler, the `fromVM` VersionMap is retrieved from
+// x/upgrade's store, and the function needs to return the target VersionMap
+// that will in turn be persisted to the x/upgrade's store. In general,
+// returning RunMigrations should be enough:
+//
+// Example:
+//
+// cfg := module.NewConfigurator(...)
+// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+// return app.mm.RunMigrations(ctx, cfg, fromVM)
+//
+})
+//
+// Internally, RunMigrations will perform the following steps:
+// - create an `updatedVM` VersionMap of module with their latest ConsensusVersion
+// - make a diff of `fromVM` and `udpatedVM`, and for each module:
+// - if the module's `fromVM` version is less than its `updatedVM` version,
+// then run in-place store migrations for that module between those versions.
+// - if the module does not exist in the `fromVM` (which means that it's a new module,
+// because it was not in the previous x/upgrade's store), then run
+// `InitGenesis` on that module.
+//
+// - return the `updatedVM` to be persisted in the x/upgrade's store.
+//
+// Migrations are run in an order defined by `Manager.OrderMigrations` or (if not set)
+
+defined by
+// `DefaultMigrationsOrder` function.
+//
+// As an app developer, if you wish to skip running InitGenesis for your new
+// module "foo", you need to manually pass a `fromVM` argument to this function
+// foo's module version set to its latest ConsensusVersion. That way, the diff
+// between the function's `fromVM` and `udpatedVM` will be empty, hence not
+// running anything for foo.
+//
+// Example:
+//
+// cfg := module.NewConfigurator(...)
+// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+// // Assume "foo" is a new module.
+// // `fromVM` is fetched from existing x/upgrade store. Since foo didn't exist
+// // before this upgrade, `v, exists := fromVM["foo"]; exists == false`, and RunMigration will by default
+// // run InitGenesis on foo.
+// // To skip running foo's InitGenesis, you need set `fromVM`'s foo to its latest
+// // consensus version:
+// fromVM["foo"] = foo.AppModule{
+}.ConsensusVersion()
+//
+// return app.mm.RunMigrations(ctx, cfg, fromVM)
+//
+})
+//
+// Please also refer to /sdk/v0.53/build/migrations/upgrading for more information.
+func (m Manager)
+
+RunMigrations(ctx context.Context, cfg Configurator, fromVM VersionMap) (VersionMap, error) {
+ c, ok := cfg.(*configurator)
+ if !ok {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidType, "expected %T, got %T", &configurator{
+}, cfg)
+}
+ modules := m.OrderMigrations
+ if modules == nil {
+ modules = DefaultMigrationsOrder(m.ModuleNames())
+}
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ updatedVM := VersionMap{
+}
+ for _, moduleName := range modules {
+ module := m.Modules[moduleName]
+ fromVersion, exists := fromVM[moduleName]
+ toVersion := uint64(0)
+ if module, ok := module.(HasConsensusVersion); ok {
+ toVersion = module.ConsensusVersion()
+}
+
+ // We run migration if the module is specified in `fromVM`.
+ // Otherwise we run InitGenesis.
+ //
+ // The module won't exist in the fromVM in two cases:
+ // 1. A new module is added. In this case we run InitGenesis with an
+ // empty genesis state.
+ // 2. An existing chain is upgrading from version < 0.43 to v0.43+ for the first time.
+ // In this case, all modules have yet to be added to x/upgrade's VersionMap store.
+ if exists {
+ err := c.runModuleMigrations(sdkCtx, moduleName, fromVersion, toVersion)
+ if err != nil {
+ return nil, err
+}
+
+}
+
+else {
+ sdkCtx.Logger().Info(fmt.Sprintf("adding a new module: %s", moduleName))
+ if module, ok := m.Modules[moduleName].(HasGenesis); ok {
+ module.InitGenesis(sdkCtx, c.cdc, module.DefaultGenesis(c.cdc))
+}
+ if module, ok := m.Modules[moduleName].(HasABCIGenesis); ok {
+ moduleValUpdates := module.InitGenesis(sdkCtx, c.cdc, module.DefaultGenesis(c.cdc))
+ // The module manager assumes only one module will update the
+ // validator set, and it can't be a new module.
+ if len(moduleValUpdates) > 0 {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrLogic, "validator InitGenesis update is already set by another module")
+}
+
+}
+
+}
+
+updatedVM[moduleName] = toVersion
+}
+
+return updatedVM, nil
+}
+
+// PreBlock performs begin block functionality for upgrade module.
+// It takes the current context as a parameter and returns a boolean value
+// indicating whether the migration was successfully executed or not.
+func (m *Manager)
+
+PreBlock(ctx sdk.Context) (*sdk.ResponsePreBlock, error) {
+ paramsChanged := false
+ for _, moduleName := range m.OrderPreBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasPreBlocker); ok {
+ rsp, err := module.PreBlock(ctx)
+ if err != nil {
+ return nil, err
+}
+ if rsp.IsConsensusParamsChanged() {
+ paramsChanged = true
+}
+
+}
+
+}
+
+return &sdk.ResponsePreBlock{
+ ConsensusParamsChanged: paramsChanged,
+}, nil
+}
+
+// BeginBlock performs begin block functionality for all modules. It creates a
+// child context with an event manager to aggregate events emitted from all
+// modules.
+func (m *Manager)
+
+BeginBlock(ctx sdk.Context) (sdk.BeginBlock, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+ for _, moduleName := range m.OrderBeginBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasBeginBlocker); ok {
+ if err := module.BeginBlock(ctx); err != nil {
+ return sdk.BeginBlock{
+}, err
+}
+
+}
+
+}
+
+return sdk.BeginBlock{
+ Events: ctx.EventManager().ABCIEvents(),
+}, nil
+}
+
+// EndBlock performs end block functionality for all modules. It creates a
+// child context with an event manager to aggregate events emitted from all
+// modules.
+func (m *Manager)
+
+EndBlock(ctx sdk.Context) (sdk.EndBlock, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+ validatorUpdates := []abci.ValidatorUpdate{
+}
+ for _, moduleName := range m.OrderEndBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasEndBlocker); ok {
+ err := module.EndBlock(ctx)
+ if err != nil {
+ return sdk.EndBlock{
+}, err
+}
+
+}
+
+else if module, ok := m.Modules[moduleName].(HasABCIEndBlock); ok {
+ moduleValUpdates, err := module.EndBlock(ctx)
+ if err != nil {
+ return sdk.EndBlock{
+}, err
+}
+ // use these validator updates if provided, the module manager assumes
+ // only one module will update the validator set
+ if len(moduleValUpdates) > 0 {
+ if len(validatorUpdates) > 0 {
+ return sdk.EndBlock{
+}, errors.New("validator EndBlock updates already set by a previous module")
+}
+ for _, updates := range moduleValUpdates {
+ validatorUpdates = append(validatorUpdates, abci.ValidatorUpdate{
+ PubKey: updates.PubKey,
+ Power: updates.Power
+})
+}
+
+}
+
+}
+
+else {
+ continue
+}
+
+}
+
+return sdk.EndBlock{
+ ValidatorUpdates: validatorUpdates,
+ Events: ctx.EventManager().ABCIEvents(),
+}, nil
+}
+
+// Precommit performs precommit functionality for all modules.
+func (m *Manager)
+
+Precommit(ctx sdk.Context)
+
+error {
+ for _, moduleName := range m.OrderPrecommiters {
+ module, ok := m.Modules[moduleName].(appmodule.HasPrecommit)
+ if !ok {
+ continue
+}
+ if err := module.Precommit(ctx); err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+// PrepareCheckState performs functionality for preparing the check state for all modules.
+func (m *Manager)
+
+PrepareCheckState(ctx sdk.Context)
+
+error {
+ for _, moduleName := range m.OrderPrepareCheckStaters {
+ module, ok := m.Modules[moduleName].(appmodule.HasPrepareCheckState)
+ if !ok {
+ continue
+}
+ if err := module.PrepareCheckState(ctx); err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+// GetVersionMap gets consensus version from all modules
+func (m *Manager)
+
+GetVersionMap()
+
+VersionMap {
+ vermap := make(VersionMap)
+ for name, v := range m.Modules {
+ version := uint64(0)
+ if v, ok := v.(HasConsensusVersion); ok {
+ version = v.ConsensusVersion()
+}
+
+vermap[name] = version
+}
+
+return vermap
+}
+
+// ModuleNames returns list of all module names, without any particular order.
+func (m *Manager)
+
+ModuleNames() []string {
+ return slices.Collect(maps.Keys(m.Modules))
+}
+
// DefaultMigrationsOrder returns a default migrations order: ascending alphabetical by module name,
// except x/auth which will run last, see:
// https://github.com/cosmos/cosmos-sdk/issues/10591
func DefaultMigrationsOrder(modules []string) []string {
	const authName = "auth"
	out := make([]string, 0, len(modules))
	hasAuth := false
	for _, m := range modules {
		if m == authName {
			hasAuth = true
		} else {
			out = append(out, m)
		}
	}

	sort.Strings(out)
	if hasAuth {
		out = append(out, authName)
	}

	return out
}
+```
+
+* `RegisterServices(Configurator)`: Allows a module to register services.
+
+### `HasConsensusVersion`
+
This interface defines one method for checking a module's consensus version.
+
+```go expandable
+/*
+Package module contains application module patterns and associated "manager" functionality.
+The module pattern has been broken down by:
+ - independent module functionality (AppModuleBasic)
+ - inter-dependent module simulation functionality (AppModuleSimulation)
+ - inter-dependent module full functionality (AppModule)
+
+inter-dependent module functionality is module functionality which somehow
+depends on other modules, typically through the module keeper. Many of the
+module keepers are dependent on each other, thus in order to access the full
+set of module functionality we need to define all the keepers/params-store/keys
+etc. This full set of advanced functionality is defined by the AppModule interface.
+
+Independent module functions are separated to allow for the construction of the
+basic application structures required early on in the application definition
+and used to enable the definition of full module functionality later in the
+process. This separation is necessary, however we still want to allow for a
+high level pattern for modules to follow - for instance, such that we don't
+have to manually register all of the codecs for all the modules. This basic
+procedure as well as other basic patterns are handled through the use of
+BasicManager.
+
Lastly the interface for genesis functionality (HasGenesis & HasABCIGenesis)
has been separated out from full module functionality (AppModule) so that
modules which are only used for genesis can take advantage of the Module
patterns without needlessly defining many placeholder functions.
+*/
+package module
+
+import (
+
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "maps"
+ "slices"
+ "sort"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/core/genesis"
+ errorsmod "cosmossdk.io/errors"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// AppModuleBasic is the standard form for basic non-dependant elements of an application module.
+type AppModuleBasic interface {
+ HasName
+ RegisterLegacyAminoCodec(*codec.LegacyAmino)
+
+RegisterInterfaces(types.InterfaceRegistry)
+
+RegisterGRPCGatewayRoutes(client.Context, *runtime.ServeMux)
+}
+
+// HasName allows the module to provide its own name for legacy purposes.
+// Newer apps should specify the name for their modules using a map
+// using NewManagerFromMap.
+type HasName interface {
+ Name()
+
+string
+}
+
+// HasGenesisBasics is the legacy interface for stateless genesis methods.
+type HasGenesisBasics interface {
+ DefaultGenesis(codec.JSONCodec)
+
+json.RawMessage
+ ValidateGenesis(codec.JSONCodec, client.TxEncodingConfig, json.RawMessage)
+
+error
+}
+
+// BasicManager is a collection of AppModuleBasic
+type BasicManager map[string]AppModuleBasic
+
+// NewBasicManager creates a new BasicManager object
+func NewBasicManager(modules ...AppModuleBasic)
+
+BasicManager {
+ moduleMap := make(map[string]AppModuleBasic)
+ for _, module := range modules {
+ moduleMap[module.Name()] = module
+}
+
+return moduleMap
+}
+
+// NewBasicManagerFromManager creates a new BasicManager from a Manager
+// The BasicManager will contain all AppModuleBasic from the AppModule Manager
+// Module's AppModuleBasic can be overridden by passing a custom AppModuleBasic map
+func NewBasicManagerFromManager(manager *Manager, customModuleBasics map[string]AppModuleBasic)
+
+BasicManager {
+ moduleMap := make(map[string]AppModuleBasic)
+ for name, module := range manager.Modules {
+ if customBasicMod, ok := customModuleBasics[name]; ok {
+ moduleMap[name] = customBasicMod
+ continue
+}
+ if appModule, ok := module.(appmodule.AppModule); ok {
+ moduleMap[name] = CoreAppModuleBasicAdaptor(name, appModule)
+
+continue
+}
+ if basicMod, ok := module.(AppModuleBasic); ok {
+ moduleMap[name] = basicMod
+}
+
+}
+
+return moduleMap
+}
+
+// RegisterLegacyAminoCodec registers all module codecs
+func (bm BasicManager)
+
+RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ for _, b := range bm {
+ b.RegisterLegacyAminoCodec(cdc)
+}
+}
+
+// RegisterInterfaces registers all module interface types
+func (bm BasicManager)
+
+RegisterInterfaces(registry types.InterfaceRegistry) {
+ for _, m := range bm {
+ m.RegisterInterfaces(registry)
+}
+}
+
+// DefaultGenesis provides default genesis information for all modules
+func (bm BasicManager)
+
+DefaultGenesis(cdc codec.JSONCodec)
+
+map[string]json.RawMessage {
+ genesisData := make(map[string]json.RawMessage)
+ for _, b := range bm {
+ if mod, ok := b.(HasGenesisBasics); ok {
+ genesisData[b.Name()] = mod.DefaultGenesis(cdc)
+}
+
+}
+
+return genesisData
+}
+
+// ValidateGenesis performs genesis state validation for all modules
+func (bm BasicManager)
+
+ValidateGenesis(cdc codec.JSONCodec, txEncCfg client.TxEncodingConfig, genesisData map[string]json.RawMessage)
+
+error {
+ for _, b := range bm {
+ // first check if the module is an adapted Core API Module
+ if mod, ok := b.(HasGenesisBasics); ok {
+ if err := mod.ValidateGenesis(cdc, txEncCfg, genesisData[b.Name()]); err != nil {
+ return err
+}
+
+}
+
+}
+
+return nil
+}
+
+// RegisterGRPCGatewayRoutes registers all module rest routes
+func (bm BasicManager)
+
+RegisterGRPCGatewayRoutes(clientCtx client.Context, rtr *runtime.ServeMux) {
+ for _, b := range bm {
+ b.RegisterGRPCGatewayRoutes(clientCtx, rtr)
+}
+}
+
+// AddTxCommands adds all tx commands to the rootTxCmd.
+func (bm BasicManager)
+
+AddTxCommands(rootTxCmd *cobra.Command) {
+ for _, b := range bm {
+ if mod, ok := b.(interface {
+ GetTxCmd() *cobra.Command
+}); ok {
+ if cmd := mod.GetTxCmd(); cmd != nil {
+ rootTxCmd.AddCommand(cmd)
+}
+
+}
+
+}
+}
+
+// AddQueryCommands adds all query commands to the rootQueryCmd.
+func (bm BasicManager)
+
+AddQueryCommands(rootQueryCmd *cobra.Command) {
+ for _, b := range bm {
+ if mod, ok := b.(interface {
+ GetQueryCmd() *cobra.Command
+}); ok {
+ if cmd := mod.GetQueryCmd(); cmd != nil {
+ rootQueryCmd.AddCommand(cmd)
+}
+
+}
+
+}
+}
+
+// HasGenesis is the extension interface for stateful genesis methods.
+type HasGenesis interface {
+ HasGenesisBasics
+ InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage)
+
+ExportGenesis(sdk.Context, codec.JSONCodec)
+
+json.RawMessage
+}
+
+// HasABCIGenesis is the extension interface for stateful genesis methods which returns validator updates.
+type HasABCIGenesis interface {
+ HasGenesisBasics
+ InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage) []abci.ValidatorUpdate
+ ExportGenesis(sdk.Context, codec.JSONCodec)
+
+json.RawMessage
+}
+
+// AppModule is the form for an application module. Most of
+// its functionality has been moved to extension interfaces.
+// Deprecated: use appmodule.AppModule with a combination of extension interfaes interfaces instead.
+type AppModule interface {
+ appmodule.AppModule
+
+ AppModuleBasic
+}
+
+// HasInvariants is the interface for registering invariants.
+//
+// Deprecated: this will be removed in the next Cosmos SDK release.
+type HasInvariants interface {
+ // RegisterInvariants registers module invariants.
+ RegisterInvariants(sdk.InvariantRegistry)
+}
+
+// HasServices is the interface for modules to register services.
+type HasServices interface {
+ // RegisterServices allows a module to register services.
+ RegisterServices(Configurator)
+}
+
+// HasConsensusVersion is the interface for declaring a module consensus version.
+type HasConsensusVersion interface {
+ // ConsensusVersion is a sequence number for state-breaking change of the
+ // module. It should be incremented on each consensus-breaking change
+ // introduced by the module. To avoid wrong/empty versions, the initial version
+ // should be set to 1.
+ ConsensusVersion()
+
+uint64
+}
+
+// HasABCIEndblock is a released typo of HasABCIEndBlock.
+// Deprecated: use HasABCIEndBlock instead.
+type HasABCIEndblock HasABCIEndBlock
+
+// HasABCIEndBlock is the interface for modules that need to run code at the end of the block.
+type HasABCIEndBlock interface {
+ AppModule
+ EndBlock(context.Context) ([]abci.ValidatorUpdate, error)
+}
+
+var (
+ _ appmodule.AppModule = (*GenesisOnlyAppModule)(nil)
+ _ AppModuleBasic = (*GenesisOnlyAppModule)(nil)
+)
+
+// AppModuleGenesis is the standard form for an application module genesis functions
+type AppModuleGenesis interface {
+ AppModuleBasic
+ HasABCIGenesis
+}
+
+// GenesisOnlyAppModule is an AppModule that only has import/export functionality
+type GenesisOnlyAppModule struct {
+ AppModuleGenesis
+}
+
+// NewGenesisOnlyAppModule creates a new GenesisOnlyAppModule object
+func NewGenesisOnlyAppModule(amg AppModuleGenesis)
+
+GenesisOnlyAppModule {
+ return GenesisOnlyAppModule{
+ AppModuleGenesis: amg,
+}
+}
+
+// IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+func (GenesisOnlyAppModule)
+
+IsOnePerModuleType() {
+}
+
+// IsAppModule implements the appmodule.AppModule interface.
+func (GenesisOnlyAppModule)
+
+IsAppModule() {
+}
+
+// RegisterInvariants is a placeholder function register no invariants
+func (GenesisOnlyAppModule)
+
+RegisterInvariants(_ sdk.InvariantRegistry) {
+}
+
+// ConsensusVersion implements AppModule/ConsensusVersion.
+func (gam GenesisOnlyAppModule)
+
+ConsensusVersion()
+
+uint64 {
+ return 1
+}
+
+// Manager defines a module manager that provides the high level utility for managing and executing
+// operations for a group of modules
+type Manager struct {
+ Modules map[string]any // interface{
+}
+
+is used now to support the legacy AppModule as well as new core appmodule.AppModule.
+ OrderInitGenesis []string
+ OrderExportGenesis []string
+ OrderPreBlockers []string
+ OrderBeginBlockers []string
+ OrderEndBlockers []string
+ OrderPrepareCheckStaters []string
+ OrderPrecommiters []string
+ OrderMigrations []string
+}
+
+// NewManager creates a new Manager object.
+func NewManager(modules ...AppModule) *Manager {
+ moduleMap := make(map[string]any)
+ modulesStr := make([]string, 0, len(modules))
+ preBlockModulesStr := make([]string, 0)
+ for _, module := range modules {
+ if _, ok := module.(appmodule.AppModule); !ok {
+ panic(fmt.Sprintf("module %s does not implement appmodule.AppModule", module.Name()))
+}
+
+moduleMap[module.Name()] = module
+ modulesStr = append(modulesStr, module.Name())
+ if _, ok := module.(appmodule.HasPreBlocker); ok {
+ preBlockModulesStr = append(preBlockModulesStr, module.Name())
+}
+
+}
+
+return &Manager{
+ Modules: moduleMap,
+ OrderInitGenesis: modulesStr,
+ OrderExportGenesis: modulesStr,
+ OrderPreBlockers: preBlockModulesStr,
+ OrderBeginBlockers: modulesStr,
+ OrderPrepareCheckStaters: modulesStr,
+ OrderPrecommiters: modulesStr,
+ OrderEndBlockers: modulesStr,
+}
+}
+
+// NewManagerFromMap creates a new Manager object from a map of module names to module implementations.
+// This method should be used for apps and modules which have migrated to the cosmossdk.io/core.appmodule.AppModule API.
+func NewManagerFromMap(moduleMap map[string]appmodule.AppModule) *Manager {
+ simpleModuleMap := make(map[string]any)
+ modulesStr := make([]string, 0, len(simpleModuleMap))
+ preBlockModulesStr := make([]string, 0)
+ for name, module := range moduleMap {
+ simpleModuleMap[name] = module
+ modulesStr = append(modulesStr, name)
+ if _, ok := module.(appmodule.HasPreBlocker); ok {
+ preBlockModulesStr = append(preBlockModulesStr, name)
+}
+
+}
+
+ // Sort the modules by name. Given that we are using a map above we can't guarantee the order.
+ sort.Strings(modulesStr)
+
+return &Manager{
+ Modules: simpleModuleMap,
+ OrderInitGenesis: modulesStr,
+ OrderExportGenesis: modulesStr,
+ OrderPreBlockers: preBlockModulesStr,
+ OrderBeginBlockers: modulesStr,
+ OrderEndBlockers: modulesStr,
+ OrderPrecommiters: modulesStr,
+ OrderPrepareCheckStaters: modulesStr,
+}
+}
+
+// SetOrderInitGenesis sets the order of init genesis calls
+func (m *Manager)
+
+SetOrderInitGenesis(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderInitGenesis", moduleNames, func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+ return !hasGenesis
+}
+ if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+ return !hasABCIGenesis
+}
+
+ _, hasGenesis := module.(HasGenesis)
+
+return !hasGenesis
+})
+
+m.OrderInitGenesis = moduleNames
+}
+
+// SetOrderExportGenesis sets the order of export genesis calls
+func (m *Manager)
+
+SetOrderExportGenesis(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderExportGenesis", moduleNames, func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+ return !hasGenesis
+}
+ if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+ return !hasABCIGenesis
+}
+
+ _, hasGenesis := module.(HasGenesis)
+
+return !hasGenesis
+})
+
+m.OrderExportGenesis = moduleNames
+}
+
+// SetOrderPreBlockers sets the order of set pre-blocker calls
+func (m *Manager)
+
+SetOrderPreBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPreBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasBlock := module.(appmodule.HasPreBlocker)
+
+return !hasBlock
+})
+
+m.OrderPreBlockers = moduleNames
+}
+
+// SetOrderBeginBlockers sets the order of set begin-blocker calls
+func (m *Manager)
+
+SetOrderBeginBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderBeginBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasBeginBlock := module.(appmodule.HasBeginBlocker)
+
+return !hasBeginBlock
+})
+
+m.OrderBeginBlockers = moduleNames
+}
+
+// SetOrderEndBlockers sets the order of set end-blocker calls
+func (m *Manager)
+
+SetOrderEndBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderEndBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasEndBlock := module.(appmodule.HasEndBlocker); hasEndBlock {
+ return !hasEndBlock
+}
+
+ _, hasABCIEndBlock := module.(HasABCIEndBlock)
+
+return !hasABCIEndBlock
+})
+
+m.OrderEndBlockers = moduleNames
+}
+
+// SetOrderPrepareCheckStaters sets the order of set prepare-check-stater calls
+func (m *Manager)
+
+SetOrderPrepareCheckStaters(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPrepareCheckStaters", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasPrepareCheckState := module.(appmodule.HasPrepareCheckState)
+
+return !hasPrepareCheckState
+})
+
+m.OrderPrepareCheckStaters = moduleNames
+}
+
+// SetOrderPrecommiters sets the order of set precommiter calls
+func (m *Manager)
+
+SetOrderPrecommiters(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPrecommiters", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasPrecommit := module.(appmodule.HasPrecommit)
+
+return !hasPrecommit
+})
+
+m.OrderPrecommiters = moduleNames
+}
+
+// SetOrderMigrations sets the order of migrations to be run. If not set
+// then migrations will be run with an order defined in `DefaultMigrationsOrder`.
+func (m *Manager)
+
+SetOrderMigrations(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderMigrations", moduleNames, nil)
+
+m.OrderMigrations = moduleNames
+}
+
+// RegisterInvariants registers all module invariants
+//
+// Deprecated: this function is a no-op and will be removed in the next release of the Cosmos SDK.
+func (m *Manager)
+
+RegisterInvariants(_ sdk.InvariantRegistry) {
+}
+
+// RegisterServices registers all module services
+func (m *Manager)
+
+RegisterServices(cfg Configurator)
+
+error {
+ for _, module := range m.Modules {
+ if module, ok := module.(HasServices); ok {
+ module.RegisterServices(cfg)
+}
+ if module, ok := module.(appmodule.HasServices); ok {
+ err := module.RegisterServices(cfg)
+ if err != nil {
+ return err
+}
+
+}
+ if cfg.Error() != nil {
+ return cfg.Error()
+}
+
+}
+
+return nil
+}
+
+// InitGenesis performs init genesis functionality for modules. Exactly one
+// module must return a non-empty validator set update to correctly initialize
+// the chain.
+func (m *Manager)
+
+InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, genesisData map[string]json.RawMessage) (*abci.ResponseInitChain, error) {
+ var validatorUpdates []abci.ValidatorUpdate
+ ctx.Logger().Info("initializing blockchain state from genesis.json")
+ for _, moduleName := range m.OrderInitGenesis {
+ if genesisData[moduleName] == nil {
+ continue
+}
+ mod := m.Modules[moduleName]
+ // we might get an adapted module, a native core API module or a legacy module
+ if module, ok := mod.(appmodule.HasGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+ // core API genesis
+ source, err := genesis.SourceFromRawJSON(genesisData[moduleName])
+ if err != nil {
+ return &abci.ResponseInitChain{
+}, err
+}
+
+err = module.InitGenesis(ctx, source)
+ if err != nil {
+ return &abci.ResponseInitChain{
+}, err
+}
+
+}
+
+else if module, ok := mod.(HasGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+
+module.InitGenesis(ctx, cdc, genesisData[moduleName])
+}
+
+else if module, ok := mod.(HasABCIGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+ moduleValUpdates := module.InitGenesis(ctx, cdc, genesisData[moduleName])
+
+ // use these validator updates if provided, the module manager assumes
+ // only one module will update the validator set
+ if len(moduleValUpdates) > 0 {
+ if len(validatorUpdates) > 0 {
+ return &abci.ResponseInitChain{
+}, errors.New("validator InitGenesis updates already set by a previous module")
+}
+
+validatorUpdates = moduleValUpdates
+}
+
+}
+
+}
+
+ // a chain must initialize with a non-empty validator set
+ if len(validatorUpdates) == 0 {
+ return &abci.ResponseInitChain{
+}, fmt.Errorf("validator set is empty after InitGenesis, please ensure at least one validator is initialized with a delegation greater than or equal to the DefaultPowerReduction (%d)", sdk.DefaultPowerReduction)
+}
+
+return &abci.ResponseInitChain{
+ Validators: validatorUpdates,
+}, nil
+}
+
+// ExportGenesis performs export genesis functionality for modules
+func (m *Manager)
+
+ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) (map[string]json.RawMessage, error) {
+ return m.ExportGenesisForModules(ctx, cdc, []string{
+})
+}
+
+// ExportGenesisForModules performs export genesis functionality for modules
+func (m *Manager)
+
+ExportGenesisForModules(ctx sdk.Context, cdc codec.JSONCodec, modulesToExport []string) (map[string]json.RawMessage, error) {
+ if len(modulesToExport) == 0 {
+ modulesToExport = m.OrderExportGenesis
+}
+ // verify modules exists in app, so that we don't panic in the middle of an export
+ if err := m.checkModulesExists(modulesToExport); err != nil {
+ return nil, err
+}
+
+type genesisResult struct {
+ bz json.RawMessage
+ err error
+}
+ channels := make(map[string]chan genesisResult)
+ for _, moduleName := range modulesToExport {
+ mod := m.Modules[moduleName]
+ if module, ok := mod.(appmodule.HasGenesis); ok {
+ // core API genesis
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module appmodule.HasGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ target := genesis.RawJSONTarget{
+}
+ err := module.ExportGenesis(ctx, target.Target())
+ if err != nil {
+ ch <- genesisResult{
+ nil, err
+}
+
+return
+}
+
+rawJSON, err := target.JSON()
+ if err != nil {
+ ch <- genesisResult{
+ nil, err
+}
+
+return
+}
+
+ch <- genesisResult{
+ rawJSON, nil
+}
+
+}(module, channels[moduleName])
+}
+
+else if module, ok := mod.(HasGenesis); ok {
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module HasGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ ch <- genesisResult{
+ module.ExportGenesis(ctx, cdc), nil
+}
+
+}(module, channels[moduleName])
+}
+
+else if module, ok := mod.(HasABCIGenesis); ok {
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module HasABCIGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ ch <- genesisResult{
+ module.ExportGenesis(ctx, cdc), nil
+}
+
+}(module, channels[moduleName])
+}
+
+}
+ genesisData := make(map[string]json.RawMessage)
+ for moduleName := range channels {
+ res := <-channels[moduleName]
+ if res.err != nil {
+ return nil, fmt.Errorf("genesis export error in %s: %w", moduleName, res.err)
+}
+
+genesisData[moduleName] = res.bz
+}
+
+return genesisData, nil
+}
+
+// checkModulesExists verifies that all modules in the list exist in the app
+func (m *Manager)
+
+checkModulesExists(moduleName []string)
+
+error {
+ for _, name := range moduleName {
+ if _, ok := m.Modules[name]; !ok {
+ return fmt.Errorf("module %s does not exist", name)
+}
+
+}
+
+return nil
+}
+
+// assertNoForgottenModules checks that we didn't forget any modules in the SetOrder* functions.
+// `pass` is a closure which allows one to omit modules from `moduleNames`.
+// If you provide non-nil `pass` and it returns true, the module would not be subject of the assertion.
+func (m *Manager)
+
+assertNoForgottenModules(setOrderFnName string, moduleNames []string, pass func(moduleName string)
+
+bool) {
+ ms := make(map[string]bool)
+ for _, m := range moduleNames {
+ ms[m] = true
+}
+
+var missing []string
+ for m := range m.Modules {
+ if pass != nil && pass(m) {
+ continue
+}
+ if !ms[m] {
+ missing = append(missing, m)
+}
+
+}
+ if len(missing) != 0 {
+ sort.Strings(missing)
+
+panic(fmt.Sprintf(
+ "all modules must be defined when setting %s, missing: %v", setOrderFnName, missing))
+}
+}
+
+// MigrationHandler is the migration function that each module registers.
+type MigrationHandler func(sdk.Context)
+
+error
+
+// VersionMap is a map of moduleName -> version
+type VersionMap map[string]uint64
+
+// RunMigrations performs in-place store migrations for all modules. This
+// function MUST be called inside an x/upgrade UpgradeHandler.
+//
+// Recall that in an upgrade handler, the `fromVM` VersionMap is retrieved from
+// x/upgrade's store, and the function needs to return the target VersionMap
+// that will in turn be persisted to the x/upgrade's store. In general,
+// returning RunMigrations should be enough:
+//
+// Example:
+//
+// cfg := module.NewConfigurator(...)
+// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+// return app.mm.RunMigrations(ctx, cfg, fromVM)
+//
+// })
+//
+// Internally, RunMigrations will perform the following steps:
+// - create an `updatedVM` VersionMap of module with their latest ConsensusVersion
+// - make a diff of `fromVM` and `updatedVM`, and for each module:
+// - if the module's `fromVM` version is less than its `updatedVM` version,
+// then run in-place store migrations for that module between those versions.
+// - if the module does not exist in the `fromVM` (which means that it's a new module,
+// because it was not in the previous x/upgrade's store), then run
+// `InitGenesis` on that module.
+//
+// - return the `updatedVM` to be persisted in the x/upgrade's store.
+//
+// Migrations are run in an order defined by `Manager.OrderMigrations` or (if not set) defined by
+// the `DefaultMigrationsOrder` function.
+//
+// As an app developer, if you wish to skip running InitGenesis for your new
+// module "foo", you need to manually pass a `fromVM` argument to this function
+// foo's module version set to its latest ConsensusVersion. That way, the diff
+// between the function's `fromVM` and `updatedVM` will be empty, hence not
+// running anything for foo.
+//
+// Example:
+//
+// cfg := module.NewConfigurator(...)
+// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+// // Assume "foo" is a new module.
+// // `fromVM` is fetched from existing x/upgrade store. Since foo didn't exist
+// // before this upgrade, `v, exists := fromVM["foo"]; exists == false`, and RunMigration will by default
+// // run InitGenesis on foo.
+// // To skip running foo's InitGenesis, you need set `fromVM`'s foo to its latest
+// // consensus version:
+// fromVM["foo"] = foo.AppModule{}.ConsensusVersion()
+//
+// return app.mm.RunMigrations(ctx, cfg, fromVM)
+//
+// })
+//
+// Please also refer to /sdk/v0.53/build/migrations/upgrading for more information.
+func (m Manager)
+
+RunMigrations(ctx context.Context, cfg Configurator, fromVM VersionMap) (VersionMap, error) {
+ c, ok := cfg.(*configurator)
+ if !ok {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidType, "expected %T, got %T", &configurator{
+}, cfg)
+}
+ modules := m.OrderMigrations
+ if modules == nil {
+ modules = DefaultMigrationsOrder(m.ModuleNames())
+}
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ updatedVM := VersionMap{
+}
+ for _, moduleName := range modules {
+ module := m.Modules[moduleName]
+ fromVersion, exists := fromVM[moduleName]
+ toVersion := uint64(0)
+ if module, ok := module.(HasConsensusVersion); ok {
+ toVersion = module.ConsensusVersion()
+}
+
+ // We run migration if the module is specified in `fromVM`.
+ // Otherwise we run InitGenesis.
+ //
+ // The module won't exist in the fromVM in two cases:
+ // 1. A new module is added. In this case we run InitGenesis with an
+ // empty genesis state.
+ // 2. An existing chain is upgrading from version < 0.43 to v0.43+ for the first time.
+ // In this case, all modules have yet to be added to x/upgrade's VersionMap store.
+ if exists {
+ err := c.runModuleMigrations(sdkCtx, moduleName, fromVersion, toVersion)
+ if err != nil {
+ return nil, err
+}
+
+}
+
+else {
+ sdkCtx.Logger().Info(fmt.Sprintf("adding a new module: %s", moduleName))
+ if module, ok := m.Modules[moduleName].(HasGenesis); ok {
+ module.InitGenesis(sdkCtx, c.cdc, module.DefaultGenesis(c.cdc))
+}
+ if module, ok := m.Modules[moduleName].(HasABCIGenesis); ok {
+ moduleValUpdates := module.InitGenesis(sdkCtx, c.cdc, module.DefaultGenesis(c.cdc))
+ // The module manager assumes only one module will update the
+ // validator set, and it can't be a new module.
+ if len(moduleValUpdates) > 0 {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrLogic, "validator InitGenesis update is already set by another module")
+}
+
+}
+
+}
+
+updatedVM[moduleName] = toVersion
+}
+
+return updatedVM, nil
+}
+
+// PreBlock performs begin block functionality for upgrade module.
+// It takes the current context as a parameter and returns a boolean value
+// indicating whether the migration was successfully executed or not.
+func (m *Manager)
+
+PreBlock(ctx sdk.Context) (*sdk.ResponsePreBlock, error) {
+ paramsChanged := false
+ for _, moduleName := range m.OrderPreBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasPreBlocker); ok {
+ rsp, err := module.PreBlock(ctx)
+ if err != nil {
+ return nil, err
+}
+ if rsp.IsConsensusParamsChanged() {
+ paramsChanged = true
+}
+
+}
+
+}
+
+return &sdk.ResponsePreBlock{
+ ConsensusParamsChanged: paramsChanged,
+}, nil
+}
+
+// BeginBlock performs begin block functionality for all modules. It creates a
+// child context with an event manager to aggregate events emitted from all
+// modules.
+func (m *Manager)
+
+BeginBlock(ctx sdk.Context) (sdk.BeginBlock, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+ for _, moduleName := range m.OrderBeginBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasBeginBlocker); ok {
+ if err := module.BeginBlock(ctx); err != nil {
+ return sdk.BeginBlock{
+}, err
+}
+
+}
+
+}
+
+return sdk.BeginBlock{
+ Events: ctx.EventManager().ABCIEvents(),
+}, nil
+}
+
+// EndBlock performs end block functionality for all modules. It creates a
+// child context with an event manager to aggregate events emitted from all
+// modules.
+func (m *Manager)
+
+EndBlock(ctx sdk.Context) (sdk.EndBlock, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+ validatorUpdates := []abci.ValidatorUpdate{
+}
+ for _, moduleName := range m.OrderEndBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasEndBlocker); ok {
+ err := module.EndBlock(ctx)
+ if err != nil {
+ return sdk.EndBlock{
+}, err
+}
+
+}
+
+else if module, ok := m.Modules[moduleName].(HasABCIEndBlock); ok {
+ moduleValUpdates, err := module.EndBlock(ctx)
+ if err != nil {
+ return sdk.EndBlock{
+}, err
+}
+ // use these validator updates if provided, the module manager assumes
+ // only one module will update the validator set
+ if len(moduleValUpdates) > 0 {
+ if len(validatorUpdates) > 0 {
+ return sdk.EndBlock{
+}, errors.New("validator EndBlock updates already set by a previous module")
+}
+ for _, updates := range moduleValUpdates {
+ validatorUpdates = append(validatorUpdates, abci.ValidatorUpdate{
+ PubKey: updates.PubKey,
+ Power: updates.Power
+})
+}
+
+}
+
+}
+
+else {
+ continue
+}
+
+}
+
+return sdk.EndBlock{
+ ValidatorUpdates: validatorUpdates,
+ Events: ctx.EventManager().ABCIEvents(),
+}, nil
+}
+
+// Precommit performs precommit functionality for all modules.
+func (m *Manager)
+
+Precommit(ctx sdk.Context)
+
+error {
+ for _, moduleName := range m.OrderPrecommiters {
+ module, ok := m.Modules[moduleName].(appmodule.HasPrecommit)
+ if !ok {
+ continue
+}
+ if err := module.Precommit(ctx); err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+// PrepareCheckState performs functionality for preparing the check state for all modules.
+func (m *Manager)
+
+PrepareCheckState(ctx sdk.Context)
+
+error {
+ for _, moduleName := range m.OrderPrepareCheckStaters {
+ module, ok := m.Modules[moduleName].(appmodule.HasPrepareCheckState)
+ if !ok {
+ continue
+}
+ if err := module.PrepareCheckState(ctx); err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+// GetVersionMap gets consensus version from all modules
+func (m *Manager)
+
+GetVersionMap()
+
+VersionMap {
+ vermap := make(VersionMap)
+ for name, v := range m.Modules {
+ version := uint64(0)
+ if v, ok := v.(HasConsensusVersion); ok {
+ version = v.ConsensusVersion()
+}
+
+vermap[name] = version
+}
+
+return vermap
+}
+
+// ModuleNames returns list of all module names, without any particular order.
+func (m *Manager)
+
+ModuleNames() []string {
+ return slices.Collect(maps.Keys(m.Modules))
+}
+
+// DefaultMigrationsOrder returns a default migrations order: ascending alphabetical by module name,
+// except x/auth which will run last, see:
+// https://github.com/cosmos/cosmos-sdk/issues/10591
+func DefaultMigrationsOrder(modules []string) []string {
+ const authName = "auth"
+ out := make([]string, 0, len(modules))
+ hasAuth := false
+ for _, m := range modules {
+ if m == authName {
+ hasAuth = true
+}
+
+else {
+ out = append(out, m)
+}
+
+}
+
+sort.Strings(out)
+ if hasAuth {
+ out = append(out, authName)
+}
+
+return out
+}
+```
+
+* `ConsensusVersion() uint64`: Returns the consensus version of the module.
+
+### `HasPreBlocker`
+
+The `HasPreBlocker` is an extension interface from `appmodule.AppModule`. All modules that have a `PreBlock` method implement this interface.
+
+### `HasBeginBlocker`
+
+The `HasBeginBlocker` is an extension interface from `appmodule.AppModule`. All modules that have a `BeginBlock` method implement this interface.
+
+```go expandable
+package appmodule
+
+import (
+
+ "context"
+ "google.golang.org/grpc"
+ "cosmossdk.io/depinject"
+)
+
+// AppModule is a tag interface for app module implementations to use as a basis
+// for extension interfaces. It provides no functionality itself, but is the
+// type that all valid app modules should provide so that they can be identified
+// by other modules (usually via depinject)
+
+as app modules.
+type AppModule interface {
+ depinject.OnePerModuleType
+
+ // IsAppModule is a dummy method to tag a struct as implementing an AppModule.
+ IsAppModule()
+}
+
+// HasServices is the extension interface that modules should implement to register
+// implementations of services defined in .proto files.
+type HasServices interface {
+ AppModule
+
+ // RegisterServices registers the module's services with the app's service
+ // registrar.
+ //
+ // Two types of services are currently supported:
+ // - read-only gRPC query services, which are the default.
+ // - transaction message services, which must have the protobuf service
+ // option "cosmos.msg.v1.service" (defined in "cosmos/msg/v1/service.proto")
+ // set to true.
+ //
+ // The service registrar will figure out which type of service you are
+ // implementing based on the presence (or absence)
+
+of protobuf options. You
+ // do not need to specify this in golang code.
+ RegisterServices(grpc.ServiceRegistrar)
+
+error
+}
+
+// HasPrepareCheckState is an extension interface that contains information about the AppModule
+// and PrepareCheckState.
+type HasPrepareCheckState interface {
+ AppModule
+ PrepareCheckState(context.Context)
+
+error
+}
+
+// HasPrecommit is an extension interface that contains information about the AppModule and Precommit.
+type HasPrecommit interface {
+ AppModule
+ Precommit(context.Context)
+
+error
+}
+
+// ResponsePreBlock represents the response from the PreBlock method.
+// It can modify consensus parameters in storage and signal the caller through the return value.
+// When it returns ConsensusParamsChanged=true, the caller must refresh the consensus parameter in the finalize context.
+// The new context (ctx)
+
+must be passed to all the other lifecycle methods.
+type ResponsePreBlock interface {
+ IsConsensusParamsChanged()
+
+bool
+}
+
+// HasPreBlocker is the extension interface that modules should implement to run
+// custom logic before BeginBlock.
+type HasPreBlocker interface {
+ AppModule
+ // PreBlock is method that will be run before BeginBlock.
+ PreBlock(context.Context) (ResponsePreBlock, error)
+}
+
+// HasBeginBlocker is the extension interface that modules should implement to run
+// custom logic before transaction processing in a block.
+type HasBeginBlocker interface {
+ AppModule
+
+ // BeginBlock is a method that will be run before transactions are processed in
+ // a block.
+ BeginBlock(context.Context)
+
+error
+}
+
+// HasEndBlocker is the extension interface that modules should implement to run
+// custom logic after transaction processing in a block.
+type HasEndBlocker interface {
+ AppModule
+
+ // EndBlock is a method that will be run after transactions are processed in
+ // a block.
+ EndBlock(context.Context)
+
+error
+}
+
+// UpgradeModule is the extension interface that upgrade module should implement to differentiate
+// it from other modules; the migration handler needs to ensure the upgrade module's migration is executed
+// before the rest of the modules.
+type UpgradeModule interface {
+ IsUpgradeModule()
+}
+```
+
+* `BeginBlock(context.Context) error`: This method gives module developers the option to implement logic that is automatically triggered at the beginning of each block.
+
+### `HasEndBlocker`
+
+The `HasEndBlocker` is an extension interface from `appmodule.AppModule`. All modules that have an `EndBlock` method implement this interface. If a module needs to return validator set updates (staking), it can use `HasABCIEndBlock`
+
+```go expandable
+package appmodule
+
+import (
+
+ "context"
+ "google.golang.org/grpc"
+ "cosmossdk.io/depinject"
+)
+
+// AppModule is a tag interface for app module implementations to use as a basis
+// for extension interfaces. It provides no functionality itself, but is the
+// type that all valid app modules should provide so that they can be identified
+// by other modules (usually via depinject)
+
+as app modules.
+type AppModule interface {
+ depinject.OnePerModuleType
+
+ // IsAppModule is a dummy method to tag a struct as implementing an AppModule.
+ IsAppModule()
+}
+
+// HasServices is the extension interface that modules should implement to register
+// implementations of services defined in .proto files.
+type HasServices interface {
+ AppModule
+
+ // RegisterServices registers the module's services with the app's service
+ // registrar.
+ //
+ // Two types of services are currently supported:
+ // - read-only gRPC query services, which are the default.
+ // - transaction message services, which must have the protobuf service
+ // option "cosmos.msg.v1.service" (defined in "cosmos/msg/v1/service.proto")
+ // set to true.
+ //
+ // The service registrar will figure out which type of service you are
+ // implementing based on the presence (or absence)
+
+of protobuf options. You
+ // do not need to specify this in golang code.
+ RegisterServices(grpc.ServiceRegistrar)
+
+error
+}
+
+// HasPrepareCheckState is an extension interface that contains information about the AppModule
+// and PrepareCheckState.
+type HasPrepareCheckState interface {
+ AppModule
+ PrepareCheckState(context.Context)
+
+error
+}
+
+// HasPrecommit is an extension interface that contains information about the AppModule and Precommit.
+type HasPrecommit interface {
+ AppModule
+ Precommit(context.Context)
+
+error
+}
+
+// ResponsePreBlock represents the response from the PreBlock method.
+// It can modify consensus parameters in storage and signal the caller through the return value.
+// When it returns ConsensusParamsChanged=true, the caller must refresh the consensus parameter in the finalize context.
+// The new context (ctx)
+
+must be passed to all the other lifecycle methods.
+type ResponsePreBlock interface {
+ IsConsensusParamsChanged()
+
+bool
+}
+
+// HasPreBlocker is the extension interface that modules should implement to run
+// custom logic before BeginBlock.
+type HasPreBlocker interface {
+ AppModule
+ // PreBlock is method that will be run before BeginBlock.
+ PreBlock(context.Context) (ResponsePreBlock, error)
+}
+
+// HasBeginBlocker is the extension interface that modules should implement to run
+// custom logic before transaction processing in a block.
+type HasBeginBlocker interface {
+ AppModule
+
+ // BeginBlock is a method that will be run before transactions are processed in
+ // a block.
+ BeginBlock(context.Context)
+
+error
+}
+
+// HasEndBlocker is the extension interface that modules should implement to run
+// custom logic after transaction processing in a block.
+type HasEndBlocker interface {
+ AppModule
+
+ // EndBlock is a method that will be run after transactions are processed in
+ // a block.
+ EndBlock(context.Context)
+
+error
+}
+
+// UpgradeModule is the extension interface that upgrade module should implement to differentiate
+// it from other modules; the migration handler needs to ensure the upgrade module's migration is executed
+// before the rest of the modules.
+type UpgradeModule interface {
+ IsUpgradeModule()
+}
+```
+
+* `EndBlock(context.Context) error`: This method gives module developers the option to implement logic that is automatically triggered at the end of each block.
+
+### `HasABCIEndBlock`
+
+The `HasABCIEndBlock` is an extension interface from `module.AppModule`. All modules that have an `EndBlock` method which returns validator set updates implement this interface.
+
+```go expandable
+/*
+Package module contains application module patterns and associated "manager" functionality.
+The module pattern has been broken down by:
+ - independent module functionality (AppModuleBasic)
+ - inter-dependent module simulation functionality (AppModuleSimulation)
+ - inter-dependent module full functionality (AppModule)
+
+inter-dependent module functionality is module functionality which somehow
+depends on other modules, typically through the module keeper. Many of the
+module keepers are dependent on each other, thus in order to access the full
+set of module functionality we need to define all the keepers/params-store/keys
+etc. This full set of advanced functionality is defined by the AppModule interface.
+
+Independent module functions are separated to allow for the construction of the
+basic application structures required early on in the application definition
+and used to enable the definition of full module functionality later in the
+process. This separation is necessary, however we still want to allow for a
+high level pattern for modules to follow - for instance, such that we don't
+have to manually register all of the codecs for all the modules. This basic
+procedure as well as other basic patterns are handled through the use of
+BasicManager.
+
+Lastly the interface for genesis functionality (HasGenesis & HasABCIGenesis)
+
+has been
+separated out from full module functionality (AppModule)
+
+so that modules which
+are only used for genesis can take advantage of the Module patterns without
+needlessly defining many placeholder functions
+*/
+package module
+
+import (
+
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "maps"
+ "slices"
+ "sort"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/core/genesis"
+ errorsmod "cosmossdk.io/errors"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// AppModuleBasic is the standard form for basic non-dependant elements of an application module.
+type AppModuleBasic interface {
+ HasName
+ RegisterLegacyAminoCodec(*codec.LegacyAmino)
+
+RegisterInterfaces(types.InterfaceRegistry)
+
+RegisterGRPCGatewayRoutes(client.Context, *runtime.ServeMux)
+}
+
+// HasName allows the module to provide its own name for legacy purposes.
+// Newer apps should specify the name for their modules using a map
+// using NewManagerFromMap.
+type HasName interface {
+ Name()
+
+string
+}
+
+// HasGenesisBasics is the legacy interface for stateless genesis methods.
+type HasGenesisBasics interface {
+ DefaultGenesis(codec.JSONCodec)
+
+json.RawMessage
+ ValidateGenesis(codec.JSONCodec, client.TxEncodingConfig, json.RawMessage)
+
+error
+}
+
+// BasicManager is a collection of AppModuleBasic
+type BasicManager map[string]AppModuleBasic
+
+// NewBasicManager creates a new BasicManager object
+func NewBasicManager(modules ...AppModuleBasic)
+
+BasicManager {
+ moduleMap := make(map[string]AppModuleBasic)
+ for _, module := range modules {
+ moduleMap[module.Name()] = module
+}
+
+return moduleMap
+}
+
+// NewBasicManagerFromManager creates a new BasicManager from a Manager
+// The BasicManager will contain all AppModuleBasic from the AppModule Manager
+// Module's AppModuleBasic can be overridden by passing a custom AppModuleBasic map
+func NewBasicManagerFromManager(manager *Manager, customModuleBasics map[string]AppModuleBasic)
+
+BasicManager {
+ moduleMap := make(map[string]AppModuleBasic)
+ for name, module := range manager.Modules {
+ if customBasicMod, ok := customModuleBasics[name]; ok {
+ moduleMap[name] = customBasicMod
+ continue
+}
+ if appModule, ok := module.(appmodule.AppModule); ok {
+ moduleMap[name] = CoreAppModuleBasicAdaptor(name, appModule)
+
+continue
+}
+ if basicMod, ok := module.(AppModuleBasic); ok {
+ moduleMap[name] = basicMod
+}
+
+}
+
+return moduleMap
+}
+
+// RegisterLegacyAminoCodec registers all module codecs
+func (bm BasicManager)
+
+RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ for _, b := range bm {
+ b.RegisterLegacyAminoCodec(cdc)
+}
+}
+
+// RegisterInterfaces registers all module interface types
+func (bm BasicManager)
+
+RegisterInterfaces(registry types.InterfaceRegistry) {
+ for _, m := range bm {
+ m.RegisterInterfaces(registry)
+}
+}
+
+// DefaultGenesis provides default genesis information for all modules
+func (bm BasicManager)
+
+DefaultGenesis(cdc codec.JSONCodec)
+
+map[string]json.RawMessage {
+ genesisData := make(map[string]json.RawMessage)
+ for _, b := range bm {
+ if mod, ok := b.(HasGenesisBasics); ok {
+ genesisData[b.Name()] = mod.DefaultGenesis(cdc)
+}
+
+}
+
+return genesisData
+}
+
+// ValidateGenesis performs genesis state validation for all modules
+func (bm BasicManager)
+
+ValidateGenesis(cdc codec.JSONCodec, txEncCfg client.TxEncodingConfig, genesisData map[string]json.RawMessage)
+
+error {
+ for _, b := range bm {
+ // first check if the module is an adapted Core API Module
+ if mod, ok := b.(HasGenesisBasics); ok {
+ if err := mod.ValidateGenesis(cdc, txEncCfg, genesisData[b.Name()]); err != nil {
+ return err
+}
+
+}
+
+}
+
+return nil
+}
+
+// RegisterGRPCGatewayRoutes registers all module rest routes
+func (bm BasicManager)
+
+RegisterGRPCGatewayRoutes(clientCtx client.Context, rtr *runtime.ServeMux) {
+ for _, b := range bm {
+ b.RegisterGRPCGatewayRoutes(clientCtx, rtr)
+}
+}
+
+// AddTxCommands adds all tx commands to the rootTxCmd.
+func (bm BasicManager)
+
+AddTxCommands(rootTxCmd *cobra.Command) {
+ for _, b := range bm {
+ if mod, ok := b.(interface {
+ GetTxCmd() *cobra.Command
+}); ok {
+ if cmd := mod.GetTxCmd(); cmd != nil {
+ rootTxCmd.AddCommand(cmd)
+}
+
+}
+
+}
+}
+
+// AddQueryCommands adds all query commands to the rootQueryCmd.
+func (bm BasicManager)
+
+AddQueryCommands(rootQueryCmd *cobra.Command) {
+ for _, b := range bm {
+ if mod, ok := b.(interface {
+ GetQueryCmd() *cobra.Command
+}); ok {
+ if cmd := mod.GetQueryCmd(); cmd != nil {
+ rootQueryCmd.AddCommand(cmd)
+}
+
+}
+
+}
+}
+
+// HasGenesis is the extension interface for stateful genesis methods.
+type HasGenesis interface {
+	HasGenesisBasics
+	InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage)
+	ExportGenesis(sdk.Context, codec.JSONCodec) json.RawMessage
+}
+
+// HasABCIGenesis is the extension interface for stateful genesis methods which returns validator updates.
+type HasABCIGenesis interface {
+	HasGenesisBasics
+	InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage) []abci.ValidatorUpdate
+	ExportGenesis(sdk.Context, codec.JSONCodec) json.RawMessage
+}
+
+// AppModule is the form for an application module. Most of
+// its functionality has been moved to extension interfaces.
+// Deprecated: use appmodule.AppModule with a combination of extension interfaces instead.
+type AppModule interface {
+ appmodule.AppModule
+
+ AppModuleBasic
+}
+
+// HasInvariants is the interface for registering invariants.
+//
+// Deprecated: this will be removed in the next Cosmos SDK release.
+type HasInvariants interface {
+ // RegisterInvariants registers module invariants.
+ RegisterInvariants(sdk.InvariantRegistry)
+}
+
+// HasServices is the interface for modules to register services.
+type HasServices interface {
+ // RegisterServices allows a module to register services.
+ RegisterServices(Configurator)
+}
+
+// HasConsensusVersion is the interface for declaring a module consensus version.
+type HasConsensusVersion interface {
+ // ConsensusVersion is a sequence number for state-breaking change of the
+ // module. It should be incremented on each consensus-breaking change
+ // introduced by the module. To avoid wrong/empty versions, the initial version
+ // should be set to 1.
+	ConsensusVersion() uint64
+}
+
+// HasABCIEndblock is a released typo of HasABCIEndBlock.
+// Deprecated: use HasABCIEndBlock instead.
+type HasABCIEndblock HasABCIEndBlock
+
+// HasABCIEndBlock is the interface for modules that need to run code at the end of the block.
+type HasABCIEndBlock interface {
+ AppModule
+ EndBlock(context.Context) ([]abci.ValidatorUpdate, error)
+}
+
+var (
+ _ appmodule.AppModule = (*GenesisOnlyAppModule)(nil)
+ _ AppModuleBasic = (*GenesisOnlyAppModule)(nil)
+)
+
+// AppModuleGenesis is the standard form for an application module genesis functions
+type AppModuleGenesis interface {
+ AppModuleBasic
+ HasABCIGenesis
+}
+
+// GenesisOnlyAppModule is an AppModule that only has import/export functionality
+type GenesisOnlyAppModule struct {
+ AppModuleGenesis
+}
+
+// NewGenesisOnlyAppModule creates a new GenesisOnlyAppModule object
+func NewGenesisOnlyAppModule(amg AppModuleGenesis)
+
+GenesisOnlyAppModule {
+ return GenesisOnlyAppModule{
+ AppModuleGenesis: amg,
+}
+}
+
+// IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+func (GenesisOnlyAppModule)
+
+IsOnePerModuleType() {
+}
+
+// IsAppModule implements the appmodule.AppModule interface.
+func (GenesisOnlyAppModule)
+
+IsAppModule() {
+}
+
+// RegisterInvariants is a placeholder function register no invariants
+func (GenesisOnlyAppModule)
+
+RegisterInvariants(_ sdk.InvariantRegistry) {
+}
+
+// ConsensusVersion implements AppModule/ConsensusVersion.
+func (gam GenesisOnlyAppModule)
+
+ConsensusVersion()
+
+uint64 {
+ return 1
+}
+
+// Manager defines a module manager that provides the high level utility for managing and executing
+// operations for a group of modules
+type Manager struct {
+	Modules map[string]any // interface{} is used now to support the legacy AppModule as well as new core appmodule.AppModule.
+ OrderInitGenesis []string
+ OrderExportGenesis []string
+ OrderPreBlockers []string
+ OrderBeginBlockers []string
+ OrderEndBlockers []string
+ OrderPrepareCheckStaters []string
+ OrderPrecommiters []string
+ OrderMigrations []string
+}
+
+// NewManager creates a new Manager object.
+func NewManager(modules ...AppModule) *Manager {
+ moduleMap := make(map[string]any)
+ modulesStr := make([]string, 0, len(modules))
+ preBlockModulesStr := make([]string, 0)
+ for _, module := range modules {
+ if _, ok := module.(appmodule.AppModule); !ok {
+ panic(fmt.Sprintf("module %s does not implement appmodule.AppModule", module.Name()))
+}
+
+moduleMap[module.Name()] = module
+ modulesStr = append(modulesStr, module.Name())
+ if _, ok := module.(appmodule.HasPreBlocker); ok {
+ preBlockModulesStr = append(preBlockModulesStr, module.Name())
+}
+
+}
+
+return &Manager{
+ Modules: moduleMap,
+ OrderInitGenesis: modulesStr,
+ OrderExportGenesis: modulesStr,
+ OrderPreBlockers: preBlockModulesStr,
+ OrderBeginBlockers: modulesStr,
+ OrderPrepareCheckStaters: modulesStr,
+ OrderPrecommiters: modulesStr,
+ OrderEndBlockers: modulesStr,
+}
+}
+
+// NewManagerFromMap creates a new Manager object from a map of module names to module implementations.
+// This method should be used for apps and modules which have migrated to the cosmossdk.io/core.appmodule.AppModule API.
+func NewManagerFromMap(moduleMap map[string]appmodule.AppModule) *Manager {
+ simpleModuleMap := make(map[string]any)
+ modulesStr := make([]string, 0, len(simpleModuleMap))
+ preBlockModulesStr := make([]string, 0)
+ for name, module := range moduleMap {
+ simpleModuleMap[name] = module
+ modulesStr = append(modulesStr, name)
+ if _, ok := module.(appmodule.HasPreBlocker); ok {
+ preBlockModulesStr = append(preBlockModulesStr, name)
+}
+
+}
+
+ // Sort the modules by name. Given that we are using a map above we can't guarantee the order.
+ sort.Strings(modulesStr)
+
+return &Manager{
+ Modules: simpleModuleMap,
+ OrderInitGenesis: modulesStr,
+ OrderExportGenesis: modulesStr,
+ OrderPreBlockers: preBlockModulesStr,
+ OrderBeginBlockers: modulesStr,
+ OrderEndBlockers: modulesStr,
+ OrderPrecommiters: modulesStr,
+ OrderPrepareCheckStaters: modulesStr,
+}
+}
+
+// SetOrderInitGenesis sets the order of init genesis calls
+func (m *Manager)
+
+SetOrderInitGenesis(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderInitGenesis", moduleNames, func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+ return !hasGenesis
+}
+ if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+ return !hasABCIGenesis
+}
+
+ _, hasGenesis := module.(HasGenesis)
+
+return !hasGenesis
+})
+
+m.OrderInitGenesis = moduleNames
+}
+
+// SetOrderExportGenesis sets the order of export genesis calls
+func (m *Manager)
+
+SetOrderExportGenesis(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderExportGenesis", moduleNames, func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+ return !hasGenesis
+}
+ if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+ return !hasABCIGenesis
+}
+
+ _, hasGenesis := module.(HasGenesis)
+
+return !hasGenesis
+})
+
+m.OrderExportGenesis = moduleNames
+}
+
+// SetOrderPreBlockers sets the order of set pre-blocker calls
+func (m *Manager)
+
+SetOrderPreBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPreBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasBlock := module.(appmodule.HasPreBlocker)
+
+return !hasBlock
+})
+
+m.OrderPreBlockers = moduleNames
+}
+
+// SetOrderBeginBlockers sets the order of set begin-blocker calls
+func (m *Manager)
+
+SetOrderBeginBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderBeginBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasBeginBlock := module.(appmodule.HasBeginBlocker)
+
+return !hasBeginBlock
+})
+
+m.OrderBeginBlockers = moduleNames
+}
+
+// SetOrderEndBlockers sets the order of set end-blocker calls
+func (m *Manager)
+
+SetOrderEndBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderEndBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasEndBlock := module.(appmodule.HasEndBlocker); hasEndBlock {
+ return !hasEndBlock
+}
+
+ _, hasABCIEndBlock := module.(HasABCIEndBlock)
+
+return !hasABCIEndBlock
+})
+
+m.OrderEndBlockers = moduleNames
+}
+
+// SetOrderPrepareCheckStaters sets the order of set prepare-check-stater calls
+func (m *Manager)
+
+SetOrderPrepareCheckStaters(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPrepareCheckStaters", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasPrepareCheckState := module.(appmodule.HasPrepareCheckState)
+
+return !hasPrepareCheckState
+})
+
+m.OrderPrepareCheckStaters = moduleNames
+}
+
+// SetOrderPrecommiters sets the order of set precommiter calls
+func (m *Manager)
+
+SetOrderPrecommiters(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPrecommiters", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasPrecommit := module.(appmodule.HasPrecommit)
+
+return !hasPrecommit
+})
+
+m.OrderPrecommiters = moduleNames
+}
+
+// SetOrderMigrations sets the order of migrations to be run. If not set
+// then migrations will be run with an order defined in `DefaultMigrationsOrder`.
+func (m *Manager)
+
+SetOrderMigrations(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderMigrations", moduleNames, nil)
+
+m.OrderMigrations = moduleNames
+}
+
+// RegisterInvariants registers all module invariants
+//
+// Deprecated: this function is a no-op and will be removed in the next release of the Cosmos SDK.
+func (m *Manager)
+
+RegisterInvariants(_ sdk.InvariantRegistry) {
+}
+
+// RegisterServices registers all module services
+func (m *Manager)
+
+RegisterServices(cfg Configurator)
+
+error {
+ for _, module := range m.Modules {
+ if module, ok := module.(HasServices); ok {
+ module.RegisterServices(cfg)
+}
+ if module, ok := module.(appmodule.HasServices); ok {
+ err := module.RegisterServices(cfg)
+ if err != nil {
+ return err
+}
+
+}
+ if cfg.Error() != nil {
+ return cfg.Error()
+}
+
+}
+
+return nil
+}
+
+// InitGenesis performs init genesis functionality for modules. Exactly one
+// module must return a non-empty validator set update to correctly initialize
+// the chain.
+func (m *Manager)
+
+InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, genesisData map[string]json.RawMessage) (*abci.ResponseInitChain, error) {
+ var validatorUpdates []abci.ValidatorUpdate
+ ctx.Logger().Info("initializing blockchain state from genesis.json")
+ for _, moduleName := range m.OrderInitGenesis {
+ if genesisData[moduleName] == nil {
+ continue
+}
+ mod := m.Modules[moduleName]
+ // we might get an adapted module, a native core API module or a legacy module
+ if module, ok := mod.(appmodule.HasGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+ // core API genesis
+ source, err := genesis.SourceFromRawJSON(genesisData[moduleName])
+ if err != nil {
+ return &abci.ResponseInitChain{
+}, err
+}
+
+err = module.InitGenesis(ctx, source)
+ if err != nil {
+ return &abci.ResponseInitChain{
+}, err
+}
+
+}
+
+else if module, ok := mod.(HasGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+
+module.InitGenesis(ctx, cdc, genesisData[moduleName])
+}
+
+else if module, ok := mod.(HasABCIGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+ moduleValUpdates := module.InitGenesis(ctx, cdc, genesisData[moduleName])
+
+ // use these validator updates if provided, the module manager assumes
+ // only one module will update the validator set
+ if len(moduleValUpdates) > 0 {
+ if len(validatorUpdates) > 0 {
+ return &abci.ResponseInitChain{
+}, errors.New("validator InitGenesis updates already set by a previous module")
+}
+
+validatorUpdates = moduleValUpdates
+}
+
+}
+
+}
+
+ // a chain must initialize with a non-empty validator set
+ if len(validatorUpdates) == 0 {
+ return &abci.ResponseInitChain{
+}, fmt.Errorf("validator set is empty after InitGenesis, please ensure at least one validator is initialized with a delegation greater than or equal to the DefaultPowerReduction (%d)", sdk.DefaultPowerReduction)
+}
+
+return &abci.ResponseInitChain{
+ Validators: validatorUpdates,
+}, nil
+}
+
+// ExportGenesis performs export genesis functionality for modules
+func (m *Manager)
+
+ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) (map[string]json.RawMessage, error) {
+ return m.ExportGenesisForModules(ctx, cdc, []string{
+})
+}
+
+// ExportGenesisForModules performs export genesis functionality for modules
+func (m *Manager)
+
+ExportGenesisForModules(ctx sdk.Context, cdc codec.JSONCodec, modulesToExport []string) (map[string]json.RawMessage, error) {
+ if len(modulesToExport) == 0 {
+ modulesToExport = m.OrderExportGenesis
+}
+ // verify modules exists in app, so that we don't panic in the middle of an export
+ if err := m.checkModulesExists(modulesToExport); err != nil {
+ return nil, err
+}
+
+type genesisResult struct {
+ bz json.RawMessage
+ err error
+}
+ channels := make(map[string]chan genesisResult)
+ for _, moduleName := range modulesToExport {
+ mod := m.Modules[moduleName]
+ if module, ok := mod.(appmodule.HasGenesis); ok {
+ // core API genesis
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module appmodule.HasGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ target := genesis.RawJSONTarget{
+}
+ err := module.ExportGenesis(ctx, target.Target())
+ if err != nil {
+ ch <- genesisResult{
+ nil, err
+}
+
+return
+}
+
+rawJSON, err := target.JSON()
+ if err != nil {
+ ch <- genesisResult{
+ nil, err
+}
+
+return
+}
+
+ch <- genesisResult{
+ rawJSON, nil
+}
+
+}(module, channels[moduleName])
+}
+
+else if module, ok := mod.(HasGenesis); ok {
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module HasGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ ch <- genesisResult{
+ module.ExportGenesis(ctx, cdc), nil
+}
+
+}(module, channels[moduleName])
+}
+
+else if module, ok := mod.(HasABCIGenesis); ok {
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module HasABCIGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ ch <- genesisResult{
+ module.ExportGenesis(ctx, cdc), nil
+}
+
+}(module, channels[moduleName])
+}
+
+}
+ genesisData := make(map[string]json.RawMessage)
+ for moduleName := range channels {
+ res := <-channels[moduleName]
+ if res.err != nil {
+ return nil, fmt.Errorf("genesis export error in %s: %w", moduleName, res.err)
+}
+
+genesisData[moduleName] = res.bz
+}
+
+return genesisData, nil
+}
+
+// checkModulesExists verifies that all modules in the list exist in the app
+func (m *Manager)
+
+checkModulesExists(moduleName []string)
+
+error {
+ for _, name := range moduleName {
+ if _, ok := m.Modules[name]; !ok {
+ return fmt.Errorf("module %s does not exist", name)
+}
+
+}
+
+return nil
+}
+
+// assertNoForgottenModules checks that we didn't forget any modules in the SetOrder* functions.
+// `pass` is a closure which allows one to omit modules from `moduleNames`.
+// If you provide non-nil `pass` and it returns true, the module would not be subject of the assertion.
+func (m *Manager)
+
+assertNoForgottenModules(setOrderFnName string, moduleNames []string, pass func(moduleName string)
+
+bool) {
+ ms := make(map[string]bool)
+ for _, m := range moduleNames {
+ ms[m] = true
+}
+
+var missing []string
+ for m := range m.Modules {
+ if pass != nil && pass(m) {
+ continue
+}
+ if !ms[m] {
+ missing = append(missing, m)
+}
+
+}
+ if len(missing) != 0 {
+ sort.Strings(missing)
+
+panic(fmt.Sprintf(
+ "all modules must be defined when setting %s, missing: %v", setOrderFnName, missing))
+}
+}
+
+// MigrationHandler is the migration function that each module registers.
+type MigrationHandler func(sdk.Context)
+
+error
+
+// VersionMap is a map of moduleName -> version
+type VersionMap map[string]uint64
+
+// RunMigrations performs in-place store migrations for all modules. This
+// function MUST be called inside an x/upgrade UpgradeHandler.
+//
+// Recall that in an upgrade handler, the `fromVM` VersionMap is retrieved from
+// x/upgrade's store, and the function needs to return the target VersionMap
+// that will in turn be persisted to the x/upgrade's store. In general,
+// returning RunMigrations should be enough:
+//
+// Example:
+//
+// cfg := module.NewConfigurator(...)
+// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+// return app.mm.RunMigrations(ctx, cfg, fromVM)
+//
+})
+//
+// Internally, RunMigrations will perform the following steps:
+// - create an `updatedVM` VersionMap of module with their latest ConsensusVersion
+// - make a diff of `fromVM` and `updatedVM`, and for each module:
+// - if the module's `fromVM` version is less than its `updatedVM` version,
+// then run in-place store migrations for that module between those versions.
+// - if the module does not exist in the `fromVM` (which means that it's a new module,
+// because it was not in the previous x/upgrade's store), then run
+// `InitGenesis` on that module.
+//
+// - return the `updatedVM` to be persisted in the x/upgrade's store.
+//
+// Migrations are run in an order defined by `Manager.OrderMigrations` or (if not set)
+
+defined by
+// `DefaultMigrationsOrder` function.
+//
+// As an app developer, if you wish to skip running InitGenesis for your new
+// module "foo", you need to manually pass a `fromVM` argument to this function
+// foo's module version set to its latest ConsensusVersion. That way, the diff
+// between the function's `fromVM` and `updatedVM` will be empty, hence not
+// running anything for foo.
+//
+// Example:
+//
+// cfg := module.NewConfigurator(...)
+// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+// // Assume "foo" is a new module.
+// // `fromVM` is fetched from existing x/upgrade store. Since foo didn't exist
+// // before this upgrade, `v, exists := fromVM["foo"]; exists == false`, and RunMigration will by default
+// // run InitGenesis on foo.
+// // To skip running foo's InitGenesis, you need set `fromVM`'s foo to its latest
+// // consensus version:
+// fromVM["foo"] = foo.AppModule{
+}.ConsensusVersion()
+//
+// return app.mm.RunMigrations(ctx, cfg, fromVM)
+//
+})
+//
+// Please also refer to /sdk/v0.53/build/migrations/upgrading for more information.
+func (m Manager)
+
+RunMigrations(ctx context.Context, cfg Configurator, fromVM VersionMap) (VersionMap, error) {
+ c, ok := cfg.(*configurator)
+ if !ok {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidType, "expected %T, got %T", &configurator{
+}, cfg)
+}
+ modules := m.OrderMigrations
+ if modules == nil {
+ modules = DefaultMigrationsOrder(m.ModuleNames())
+}
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ updatedVM := VersionMap{
+}
+ for _, moduleName := range modules {
+ module := m.Modules[moduleName]
+ fromVersion, exists := fromVM[moduleName]
+ toVersion := uint64(0)
+ if module, ok := module.(HasConsensusVersion); ok {
+ toVersion = module.ConsensusVersion()
+}
+
+ // We run migration if the module is specified in `fromVM`.
+ // Otherwise we run InitGenesis.
+ //
+ // The module won't exist in the fromVM in two cases:
+ // 1. A new module is added. In this case we run InitGenesis with an
+ // empty genesis state.
+ // 2. An existing chain is upgrading from version < 0.43 to v0.43+ for the first time.
+ // In this case, all modules have yet to be added to x/upgrade's VersionMap store.
+ if exists {
+ err := c.runModuleMigrations(sdkCtx, moduleName, fromVersion, toVersion)
+ if err != nil {
+ return nil, err
+}
+
+}
+
+else {
+ sdkCtx.Logger().Info(fmt.Sprintf("adding a new module: %s", moduleName))
+ if module, ok := m.Modules[moduleName].(HasGenesis); ok {
+ module.InitGenesis(sdkCtx, c.cdc, module.DefaultGenesis(c.cdc))
+}
+ if module, ok := m.Modules[moduleName].(HasABCIGenesis); ok {
+ moduleValUpdates := module.InitGenesis(sdkCtx, c.cdc, module.DefaultGenesis(c.cdc))
+ // The module manager assumes only one module will update the
+ // validator set, and it can't be a new module.
+ if len(moduleValUpdates) > 0 {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrLogic, "validator InitGenesis update is already set by another module")
+}
+
+}
+
+}
+
+updatedVM[moduleName] = toVersion
+}
+
+return updatedVM, nil
+}
+
+// PreBlock performs begin block functionality for upgrade module.
+// It takes the current context as a parameter and returns a boolean value
+// indicating whether the migration was successfully executed or not.
+func (m *Manager)
+
+PreBlock(ctx sdk.Context) (*sdk.ResponsePreBlock, error) {
+ paramsChanged := false
+ for _, moduleName := range m.OrderPreBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasPreBlocker); ok {
+ rsp, err := module.PreBlock(ctx)
+ if err != nil {
+ return nil, err
+}
+ if rsp.IsConsensusParamsChanged() {
+ paramsChanged = true
+}
+
+}
+
+}
+
+return &sdk.ResponsePreBlock{
+ ConsensusParamsChanged: paramsChanged,
+}, nil
+}
+
+// BeginBlock performs begin block functionality for all modules. It creates a
+// child context with an event manager to aggregate events emitted from all
+// modules.
+func (m *Manager)
+
+BeginBlock(ctx sdk.Context) (sdk.BeginBlock, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+ for _, moduleName := range m.OrderBeginBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasBeginBlocker); ok {
+ if err := module.BeginBlock(ctx); err != nil {
+ return sdk.BeginBlock{
+}, err
+}
+
+}
+
+}
+
+return sdk.BeginBlock{
+ Events: ctx.EventManager().ABCIEvents(),
+}, nil
+}
+
+// EndBlock performs end block functionality for all modules. It creates a
+// child context with an event manager to aggregate events emitted from all
+// modules.
+func (m *Manager)
+
+EndBlock(ctx sdk.Context) (sdk.EndBlock, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+ validatorUpdates := []abci.ValidatorUpdate{
+}
+ for _, moduleName := range m.OrderEndBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasEndBlocker); ok {
+ err := module.EndBlock(ctx)
+ if err != nil {
+ return sdk.EndBlock{
+}, err
+}
+
+}
+
+else if module, ok := m.Modules[moduleName].(HasABCIEndBlock); ok {
+ moduleValUpdates, err := module.EndBlock(ctx)
+ if err != nil {
+ return sdk.EndBlock{
+}, err
+}
+ // use these validator updates if provided, the module manager assumes
+ // only one module will update the validator set
+ if len(moduleValUpdates) > 0 {
+ if len(validatorUpdates) > 0 {
+ return sdk.EndBlock{
+}, errors.New("validator EndBlock updates already set by a previous module")
+}
+ for _, updates := range moduleValUpdates {
+ validatorUpdates = append(validatorUpdates, abci.ValidatorUpdate{
+ PubKey: updates.PubKey,
+ Power: updates.Power
+})
+}
+
+}
+
+}
+
+else {
+ continue
+}
+
+}
+
+return sdk.EndBlock{
+ ValidatorUpdates: validatorUpdates,
+ Events: ctx.EventManager().ABCIEvents(),
+}, nil
+}
+
+// Precommit performs precommit functionality for all modules.
+func (m *Manager)
+
+Precommit(ctx sdk.Context)
+
+error {
+ for _, moduleName := range m.OrderPrecommiters {
+ module, ok := m.Modules[moduleName].(appmodule.HasPrecommit)
+ if !ok {
+ continue
+}
+ if err := module.Precommit(ctx); err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+// PrepareCheckState performs functionality for preparing the check state for all modules.
+func (m *Manager)
+
+PrepareCheckState(ctx sdk.Context)
+
+error {
+ for _, moduleName := range m.OrderPrepareCheckStaters {
+ module, ok := m.Modules[moduleName].(appmodule.HasPrepareCheckState)
+ if !ok {
+ continue
+}
+ if err := module.PrepareCheckState(ctx); err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+// GetVersionMap gets consensus version from all modules
+func (m *Manager)
+
+GetVersionMap()
+
+VersionMap {
+ vermap := make(VersionMap)
+ for name, v := range m.Modules {
+ version := uint64(0)
+ if v, ok := v.(HasConsensusVersion); ok {
+ version = v.ConsensusVersion()
+}
+
+vermap[name] = version
+}
+
+return vermap
+}
+
+// ModuleNames returns list of all module names, without any particular order.
+func (m *Manager)
+
+ModuleNames() []string {
+ return slices.Collect(maps.Keys(m.Modules))
+}
+
+// DefaultMigrationsOrder returns a default migrations order: ascending alphabetical by module name,
+// except x/auth which will run last, see:
+// https://github.com/cosmos/cosmos-sdk/issues/10591
+func DefaultMigrationsOrder(modules []string) []string {
+ const authName = "auth"
+ out := make([]string, 0, len(modules))
+ hasAuth := false
+ for _, m := range modules {
+ if m == authName {
+ hasAuth = true
+}
+
+else {
+ out = append(out, m)
+}
+
+}
+
+sort.Strings(out)
+ if hasAuth {
+ out = append(out, authName)
+}
+
+return out
+}
+```
+
+* `EndBlock(context.Context) ([]abci.ValidatorUpdate, error)`: This method gives module developers the option to inform the underlying consensus engine of validator set changes (e.g. the `staking` module).
+
+### `HasPrecommit`
+
+`HasPrecommit` is an extension interface from `appmodule.AppModule`. All modules that have a `Precommit` method implement this interface.
+
+```go expandable
+package appmodule
+
+import (
+
+ "context"
+ "google.golang.org/grpc"
+ "cosmossdk.io/depinject"
+)
+
+// AppModule is a tag interface for app module implementations to use as a basis
+// for extension interfaces. It provides no functionality itself, but is the
+// type that all valid app modules should provide so that they can be identified
+// by other modules (usually via depinject) as app modules.
+type AppModule interface {
+ depinject.OnePerModuleType
+
+ // IsAppModule is a dummy method to tag a struct as implementing an AppModule.
+ IsAppModule()
+}
+
+// HasServices is the extension interface that modules should implement to register
+// implementations of services defined in .proto files.
+type HasServices interface {
+ AppModule
+
+ // RegisterServices registers the module's services with the app's service
+ // registrar.
+ //
+ // Two types of services are currently supported:
+ // - read-only gRPC query services, which are the default.
+ // - transaction message services, which must have the protobuf service
+ // option "cosmos.msg.v1.service" (defined in "cosmos/msg/v1/service.proto")
+ // set to true.
+ //
+ // The service registrar will figure out which type of service you are
+ // implementing based on the presence (or absence)
+
+of protobuf options. You
+ // do not need to specify this in golang code.
+ RegisterServices(grpc.ServiceRegistrar)
+
+error
+}
+
+// HasPrepareCheckState is an extension interface that contains information about the AppModule
+// and PrepareCheckState.
+type HasPrepareCheckState interface {
+ AppModule
+ PrepareCheckState(context.Context)
+
+error
+}
+
+// HasPrecommit is an extension interface that contains information about the AppModule and Precommit.
+type HasPrecommit interface {
+ AppModule
+ Precommit(context.Context)
+
+error
+}
+
+// ResponsePreBlock represents the response from the PreBlock method.
+// It can modify consensus parameters in storage and signal the caller through the return value.
+// When it returns ConsensusParamsChanged=true, the caller must refresh the consensus parameter in the finalize context.
+// The new context (ctx)
+
+must be passed to all the other lifecycle methods.
+type ResponsePreBlock interface {
+ IsConsensusParamsChanged()
+
+bool
+}
+
+// HasPreBlocker is the extension interface that modules should implement to run
+// custom logic before BeginBlock.
+type HasPreBlocker interface {
+ AppModule
+ // PreBlock is method that will be run before BeginBlock.
+ PreBlock(context.Context) (ResponsePreBlock, error)
+}
+
+// HasBeginBlocker is the extension interface that modules should implement to run
+// custom logic before transaction processing in a block.
+type HasBeginBlocker interface {
+ AppModule
+
+ // BeginBlock is a method that will be run before transactions are processed in
+ // a block.
+ BeginBlock(context.Context)
+
+error
+}
+
+// HasEndBlocker is the extension interface that modules should implement to run
+// custom logic after transaction processing in a block.
+type HasEndBlocker interface {
+ AppModule
+
+ // EndBlock is a method that will be run after transactions are processed in
+ // a block.
+ EndBlock(context.Context)
+
+error
+}
+
+// UpgradeModule is the extension interface that upgrade module should implement to differentiate
+// it from other modules, migration handler need ensure the upgrade module's migration is executed
+// before the rest of the modules.
+type UpgradeModule interface {
+ IsUpgradeModule()
+}
+```
+
+* `Precommit(context.Context)`: This method gives module developers the option to implement logic that is automatically triggered during [`Commit`](../../learn/advanced/00-baseapp.md#commit) of each block using the [`finalizeblockstate`](../../learn/advanced/00-baseapp.md#state-updates) of the block to be committed. Implement empty if no logic needs to be triggered during `Commit` of each block for this module.
+
+### `HasPrepareCheckState`
+
+`HasPrepareCheckState` is an extension interface from `appmodule.AppModule`. All modules that have a `PrepareCheckState` method implement this interface.
+
+```go expandable
+package appmodule
+
+import (
+
+ "context"
+ "google.golang.org/grpc"
+ "cosmossdk.io/depinject"
+)
+
+// AppModule is a tag interface for app module implementations to use as a basis
+// for extension interfaces. It provides no functionality itself, but is the
+// type that all valid app modules should provide so that they can be identified
+// by other modules (usually via depinject)
+
+as app modules.
+type AppModule interface {
+ depinject.OnePerModuleType
+
+ // IsAppModule is a dummy method to tag a struct as implementing an AppModule.
+ IsAppModule()
+}
+
+// HasServices is the extension interface that modules should implement to register
+// implementations of services defined in .proto files.
+type HasServices interface {
+ AppModule
+
+ // RegisterServices registers the module's services with the app's service
+ // registrar.
+ //
+ // Two types of services are currently supported:
+ // - read-only gRPC query services, which are the default.
+ // - transaction message services, which must have the protobuf service
+ // option "cosmos.msg.v1.service" (defined in "cosmos/msg/v1/service.proto")
+ // set to true.
+ //
+ // The service registrar will figure out which type of service you are
+ // implementing based on the presence (or absence)
+
+of protobuf options. You
+ // do not need to specify this in golang code.
+ RegisterServices(grpc.ServiceRegistrar)
+
+error
+}
+
+// HasPrepareCheckState is an extension interface that contains information about the AppModule
+// and PrepareCheckState.
+type HasPrepareCheckState interface {
+ AppModule
+ PrepareCheckState(context.Context)
+
+error
+}
+
+// HasPrecommit is an extension interface that contains information about the AppModule and Precommit.
+type HasPrecommit interface {
+ AppModule
+ Precommit(context.Context)
+
+error
+}
+
+// ResponsePreBlock represents the response from the PreBlock method.
+// It can modify consensus parameters in storage and signal the caller through the return value.
+// When it returns ConsensusParamsChanged=true, the caller must refresh the consensus parameter in the finalize context.
+// The new context (ctx)
+
+must be passed to all the other lifecycle methods.
+type ResponsePreBlock interface {
+ IsConsensusParamsChanged()
+
+bool
+}
+
+// HasPreBlocker is the extension interface that modules should implement to run
+// custom logic before BeginBlock.
+type HasPreBlocker interface {
+ AppModule
+ // PreBlock is method that will be run before BeginBlock.
+ PreBlock(context.Context) (ResponsePreBlock, error)
+}
+
+// HasBeginBlocker is the extension interface that modules should implement to run
+// custom logic before transaction processing in a block.
+type HasBeginBlocker interface {
+ AppModule
+
+ // BeginBlock is a method that will be run before transactions are processed in
+ // a block.
+ BeginBlock(context.Context)
+
+error
+}
+
+// HasEndBlocker is the extension interface that modules should implement to run
+// custom logic after transaction processing in a block.
+type HasEndBlocker interface {
+ AppModule
+
+ // EndBlock is a method that will be run after transactions are processed in
+ // a block.
+ EndBlock(context.Context)
+
+error
+}
+
+// UpgradeModule is the extension interface that upgrade module should implement to differentiate
+// it from other modules, migration handler need ensure the upgrade module's migration is executed
+// before the rest of the modules.
+type UpgradeModule interface {
+ IsUpgradeModule()
+}
+```
+
+* `PrepareCheckState(context.Context)`: This method gives module developers the option to implement logic that is automatically triggered during [`Commit`](../../learn/advanced/00-baseapp.md#commit) of each block using the [`checkState`](../../learn/advanced/00-baseapp.md#state-updates) of the next block. Implement empty if no logic needs to be triggered during `Commit` of each block for this module.
+
+### Implementing the Application Module Interfaces
+
+Typically, the various application module interfaces are implemented in a file called `module.go`, located in the module's folder (e.g. `./x/module/module.go`).
+
+Almost every module needs to implement the `AppModuleBasic` and `AppModule` interfaces. If the module is only used for genesis, it will implement `AppModuleGenesis` instead of `AppModule`. The concrete type that implements the interface can add parameters that are required for the implementation of the various methods of the interface. For example, the `Route()` function often calls a `NewMsgServerImpl(k keeper)` function defined in `keeper/msg_server.go` and therefore needs to pass the module's [`keeper`](/sdk/v0.53/build/building-modules/keeper) as a parameter.
+
+```go
+// example
+type AppModule struct {
+ AppModuleBasic
+ keeper Keeper
+}
+```
+
+In the example above, you can see that the `AppModule` concrete type references an `AppModuleBasic`, and not an `AppModuleGenesis`. That is because `AppModuleGenesis` only needs to be implemented in modules that focus on genesis-related functionalities. In most modules, the concrete `AppModule` type will have a reference to an `AppModuleBasic` and implement the two added methods of `AppModuleGenesis` directly in the `AppModule` type.
+
+If no parameter is required (which is often the case for `AppModuleBasic`), just declare an empty concrete type like so:
+
+```go
+type AppModuleBasic struct{
+}
+```
+
+## Module Managers
+
+Module managers are used to manage collections of `AppModuleBasic` and `AppModule`.
+
+### `BasicManager`
+
+The `BasicManager` is a structure that lists all the `AppModuleBasic` of an application:
+
+```go expandable
+/*
+Package module contains application module patterns and associated "manager" functionality.
+The module pattern has been broken down by:
+ - independent module functionality (AppModuleBasic)
+ - inter-dependent module simulation functionality (AppModuleSimulation)
+ - inter-dependent module full functionality (AppModule)
+
+inter-dependent module functionality is module functionality which somehow
+depends on other modules, typically through the module keeper. Many of the
+module keepers are dependent on each other, thus in order to access the full
+set of module functionality we need to define all the keepers/params-store/keys
+etc. This full set of advanced functionality is defined by the AppModule interface.
+
+Independent module functions are separated to allow for the construction of the
+basic application structures required early on in the application definition
+and used to enable the definition of full module functionality later in the
+process. This separation is necessary, however we still want to allow for a
+high level pattern for modules to follow - for instance, such that we don't
+have to manually register all of the codecs for all the modules. This basic
+procedure as well as other basic patterns are handled through the use of
+BasicManager.
+
+Lastly the interface for genesis functionality (HasGenesis & HasABCIGenesis)
+
+has been
+separated out from full module functionality (AppModule)
+
+so that modules which
+are only used for genesis can take advantage of the Module patterns without
+needlessly defining many placeholder functions
+*/
+package module
+
+import (
+
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "maps"
+ "slices"
+ "sort"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/core/genesis"
+ errorsmod "cosmossdk.io/errors"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// AppModuleBasic is the standard form for basic non-dependant elements of an application module.
+type AppModuleBasic interface {
+ HasName
+ RegisterLegacyAminoCodec(*codec.LegacyAmino)
+
+RegisterInterfaces(types.InterfaceRegistry)
+
+RegisterGRPCGatewayRoutes(client.Context, *runtime.ServeMux)
+}
+
+// HasName allows the module to provide its own name for legacy purposes.
+// Newer apps should specify the name for their modules using a map
+// using NewManagerFromMap.
+type HasName interface {
+ Name()
+
+string
+}
+
+// HasGenesisBasics is the legacy interface for stateless genesis methods.
+type HasGenesisBasics interface {
+ DefaultGenesis(codec.JSONCodec)
+
+json.RawMessage
+ ValidateGenesis(codec.JSONCodec, client.TxEncodingConfig, json.RawMessage)
+
+error
+}
+
+// BasicManager is a collection of AppModuleBasic
+type BasicManager map[string]AppModuleBasic
+
+// NewBasicManager creates a new BasicManager object
+func NewBasicManager(modules ...AppModuleBasic)
+
+BasicManager {
+ moduleMap := make(map[string]AppModuleBasic)
+ for _, module := range modules {
+ moduleMap[module.Name()] = module
+}
+
+return moduleMap
+}
+
+// NewBasicManagerFromManager creates a new BasicManager from a Manager
+// The BasicManager will contain all AppModuleBasic from the AppModule Manager
+// Module's AppModuleBasic can be overridden by passing a custom AppModuleBasic map
+func NewBasicManagerFromManager(manager *Manager, customModuleBasics map[string]AppModuleBasic)
+
+BasicManager {
+ moduleMap := make(map[string]AppModuleBasic)
+ for name, module := range manager.Modules {
+ if customBasicMod, ok := customModuleBasics[name]; ok {
+ moduleMap[name] = customBasicMod
+ continue
+}
+ if appModule, ok := module.(appmodule.AppModule); ok {
+ moduleMap[name] = CoreAppModuleBasicAdaptor(name, appModule)
+
+continue
+}
+ if basicMod, ok := module.(AppModuleBasic); ok {
+ moduleMap[name] = basicMod
+}
+
+}
+
+return moduleMap
+}
+
+// RegisterLegacyAminoCodec registers all module codecs
+func (bm BasicManager)
+
+RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ for _, b := range bm {
+ b.RegisterLegacyAminoCodec(cdc)
+}
+}
+
+// RegisterInterfaces registers all module interface types
+func (bm BasicManager)
+
+RegisterInterfaces(registry types.InterfaceRegistry) {
+ for _, m := range bm {
+ m.RegisterInterfaces(registry)
+}
+}
+
+// DefaultGenesis provides default genesis information for all modules
+func (bm BasicManager)
+
+DefaultGenesis(cdc codec.JSONCodec)
+
+map[string]json.RawMessage {
+ genesisData := make(map[string]json.RawMessage)
+ for _, b := range bm {
+ if mod, ok := b.(HasGenesisBasics); ok {
+ genesisData[b.Name()] = mod.DefaultGenesis(cdc)
+}
+
+}
+
+return genesisData
+}
+
+// ValidateGenesis performs genesis state validation for all modules
+func (bm BasicManager)
+
+ValidateGenesis(cdc codec.JSONCodec, txEncCfg client.TxEncodingConfig, genesisData map[string]json.RawMessage)
+
+error {
+ for _, b := range bm {
+ // first check if the module is an adapted Core API Module
+ if mod, ok := b.(HasGenesisBasics); ok {
+ if err := mod.ValidateGenesis(cdc, txEncCfg, genesisData[b.Name()]); err != nil {
+ return err
+}
+
+}
+
+}
+
+return nil
+}
+
+// RegisterGRPCGatewayRoutes registers all module rest routes
+func (bm BasicManager)
+
+RegisterGRPCGatewayRoutes(clientCtx client.Context, rtr *runtime.ServeMux) {
+ for _, b := range bm {
+ b.RegisterGRPCGatewayRoutes(clientCtx, rtr)
+}
+}
+
+// AddTxCommands adds all tx commands to the rootTxCmd.
+func (bm BasicManager)
+
+AddTxCommands(rootTxCmd *cobra.Command) {
+ for _, b := range bm {
+ if mod, ok := b.(interface {
+ GetTxCmd() *cobra.Command
+}); ok {
+ if cmd := mod.GetTxCmd(); cmd != nil {
+ rootTxCmd.AddCommand(cmd)
+}
+
+}
+
+}
+}
+
+// AddQueryCommands adds all query commands to the rootQueryCmd.
+func (bm BasicManager)
+
+AddQueryCommands(rootQueryCmd *cobra.Command) {
+ for _, b := range bm {
+ if mod, ok := b.(interface {
+ GetQueryCmd() *cobra.Command
+}); ok {
+ if cmd := mod.GetQueryCmd(); cmd != nil {
+ rootQueryCmd.AddCommand(cmd)
+}
+
+}
+
+}
+}
+
+// HasGenesis is the extension interface for stateful genesis methods.
+type HasGenesis interface {
+ HasGenesisBasics
+ InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage)
+
+ExportGenesis(sdk.Context, codec.JSONCodec)
+
+json.RawMessage
+}
+
+// HasABCIGenesis is the extension interface for stateful genesis methods which returns validator updates.
+type HasABCIGenesis interface {
+ HasGenesisBasics
+ InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage) []abci.ValidatorUpdate
+ ExportGenesis(sdk.Context, codec.JSONCodec)
+
+json.RawMessage
+}
+
+// AppModule is the form for an application module. Most of
+// its functionality has been moved to extension interfaces.
+// Deprecated: use appmodule.AppModule with a combination of extension interfaces instead.
+type AppModule interface {
+ appmodule.AppModule
+
+ AppModuleBasic
+}
+
+// HasInvariants is the interface for registering invariants.
+//
+// Deprecated: this will be removed in the next Cosmos SDK release.
+type HasInvariants interface {
+ // RegisterInvariants registers module invariants.
+ RegisterInvariants(sdk.InvariantRegistry)
+}
+
+// HasServices is the interface for modules to register services.
+type HasServices interface {
+ // RegisterServices allows a module to register services.
+ RegisterServices(Configurator)
+}
+
+// HasConsensusVersion is the interface for declaring a module consensus version.
+type HasConsensusVersion interface {
+ // ConsensusVersion is a sequence number for state-breaking change of the
+ // module. It should be incremented on each consensus-breaking change
+ // introduced by the module. To avoid wrong/empty versions, the initial version
+ // should be set to 1.
+ ConsensusVersion()
+
+uint64
+}
+
+// HasABCIEndblock is a released typo of HasABCIEndBlock.
+// Deprecated: use HasABCIEndBlock instead.
+type HasABCIEndblock HasABCIEndBlock
+
+// HasABCIEndBlock is the interface for modules that need to run code at the end of the block.
+type HasABCIEndBlock interface {
+ AppModule
+ EndBlock(context.Context) ([]abci.ValidatorUpdate, error)
+}
+
+var (
+ _ appmodule.AppModule = (*GenesisOnlyAppModule)(nil)
+ _ AppModuleBasic = (*GenesisOnlyAppModule)(nil)
+)
+
+// AppModuleGenesis is the standard form for an application module genesis functions
+type AppModuleGenesis interface {
+ AppModuleBasic
+ HasABCIGenesis
+}
+
+// GenesisOnlyAppModule is an AppModule that only has import/export functionality
+type GenesisOnlyAppModule struct {
+ AppModuleGenesis
+}
+
+// NewGenesisOnlyAppModule creates a new GenesisOnlyAppModule object
+func NewGenesisOnlyAppModule(amg AppModuleGenesis)
+
+GenesisOnlyAppModule {
+ return GenesisOnlyAppModule{
+ AppModuleGenesis: amg,
+}
+}
+
+// IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+func (GenesisOnlyAppModule)
+
+IsOnePerModuleType() {
+}
+
+// IsAppModule implements the appmodule.AppModule interface.
+func (GenesisOnlyAppModule)
+
+IsAppModule() {
+}
+
+// RegisterInvariants is a placeholder function that registers no invariants
+func (GenesisOnlyAppModule)
+
+RegisterInvariants(_ sdk.InvariantRegistry) {
+}
+
+// ConsensusVersion implements AppModule/ConsensusVersion.
+func (gam GenesisOnlyAppModule)
+
+ConsensusVersion()
+
+uint64 {
+ return 1
+}
+
+// Manager defines a module manager that provides the high level utility for managing and executing
+// operations for a group of modules
+type Manager struct {
+ Modules map[string]any // interface{
+}
+
+is used now to support the legacy AppModule as well as new core appmodule.AppModule.
+ OrderInitGenesis []string
+ OrderExportGenesis []string
+ OrderPreBlockers []string
+ OrderBeginBlockers []string
+ OrderEndBlockers []string
+ OrderPrepareCheckStaters []string
+ OrderPrecommiters []string
+ OrderMigrations []string
+}
+
+// NewManager creates a new Manager object.
+func NewManager(modules ...AppModule) *Manager {
+ moduleMap := make(map[string]any)
+ modulesStr := make([]string, 0, len(modules))
+ preBlockModulesStr := make([]string, 0)
+ for _, module := range modules {
+ if _, ok := module.(appmodule.AppModule); !ok {
+ panic(fmt.Sprintf("module %s does not implement appmodule.AppModule", module.Name()))
+}
+
+moduleMap[module.Name()] = module
+ modulesStr = append(modulesStr, module.Name())
+ if _, ok := module.(appmodule.HasPreBlocker); ok {
+ preBlockModulesStr = append(preBlockModulesStr, module.Name())
+}
+
+}
+
+return &Manager{
+ Modules: moduleMap,
+ OrderInitGenesis: modulesStr,
+ OrderExportGenesis: modulesStr,
+ OrderPreBlockers: preBlockModulesStr,
+ OrderBeginBlockers: modulesStr,
+ OrderPrepareCheckStaters: modulesStr,
+ OrderPrecommiters: modulesStr,
+ OrderEndBlockers: modulesStr,
+}
+}
+
+// NewManagerFromMap creates a new Manager object from a map of module names to module implementations.
+// This method should be used for apps and modules which have migrated to the cosmossdk.io/core.appmodule.AppModule API.
+func NewManagerFromMap(moduleMap map[string]appmodule.AppModule) *Manager {
+ simpleModuleMap := make(map[string]any)
+ modulesStr := make([]string, 0, len(simpleModuleMap))
+ preBlockModulesStr := make([]string, 0)
+ for name, module := range moduleMap {
+ simpleModuleMap[name] = module
+ modulesStr = append(modulesStr, name)
+ if _, ok := module.(appmodule.HasPreBlocker); ok {
+ preBlockModulesStr = append(preBlockModulesStr, name)
+}
+
+}
+
+ // Sort the modules by name. Given that we are using a map above we can't guarantee the order.
+ sort.Strings(modulesStr)
+
+return &Manager{
+ Modules: simpleModuleMap,
+ OrderInitGenesis: modulesStr,
+ OrderExportGenesis: modulesStr,
+ OrderPreBlockers: preBlockModulesStr,
+ OrderBeginBlockers: modulesStr,
+ OrderEndBlockers: modulesStr,
+ OrderPrecommiters: modulesStr,
+ OrderPrepareCheckStaters: modulesStr,
+}
+}
+
+// SetOrderInitGenesis sets the order of init genesis calls
+func (m *Manager)
+
+SetOrderInitGenesis(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderInitGenesis", moduleNames, func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+ return !hasGenesis
+}
+ if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+ return !hasABCIGenesis
+}
+
+ _, hasGenesis := module.(HasGenesis)
+
+return !hasGenesis
+})
+
+m.OrderInitGenesis = moduleNames
+}
+
+// SetOrderExportGenesis sets the order of export genesis calls
+func (m *Manager)
+
+SetOrderExportGenesis(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderExportGenesis", moduleNames, func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+ return !hasGenesis
+}
+ if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+ return !hasABCIGenesis
+}
+
+ _, hasGenesis := module.(HasGenesis)
+
+return !hasGenesis
+})
+
+m.OrderExportGenesis = moduleNames
+}
+
+// SetOrderPreBlockers sets the order of set pre-blocker calls
+func (m *Manager)
+
+SetOrderPreBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPreBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasBlock := module.(appmodule.HasPreBlocker)
+
+return !hasBlock
+})
+
+m.OrderPreBlockers = moduleNames
+}
+
+// SetOrderBeginBlockers sets the order of set begin-blocker calls
+func (m *Manager)
+
+SetOrderBeginBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderBeginBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasBeginBlock := module.(appmodule.HasBeginBlocker)
+
+return !hasBeginBlock
+})
+
+m.OrderBeginBlockers = moduleNames
+}
+
+// SetOrderEndBlockers sets the order of set end-blocker calls
+func (m *Manager)
+
+SetOrderEndBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderEndBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasEndBlock := module.(appmodule.HasEndBlocker); hasEndBlock {
+ return !hasEndBlock
+}
+
+ _, hasABCIEndBlock := module.(HasABCIEndBlock)
+
+return !hasABCIEndBlock
+})
+
+m.OrderEndBlockers = moduleNames
+}
+
+// SetOrderPrepareCheckStaters sets the order of set prepare-check-stater calls
+func (m *Manager)
+
+SetOrderPrepareCheckStaters(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPrepareCheckStaters", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasPrepareCheckState := module.(appmodule.HasPrepareCheckState)
+
+return !hasPrepareCheckState
+})
+
+m.OrderPrepareCheckStaters = moduleNames
+}
+
+// SetOrderPrecommiters sets the order of set precommiter calls
+func (m *Manager)
+
+SetOrderPrecommiters(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPrecommiters", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasPrecommit := module.(appmodule.HasPrecommit)
+
+return !hasPrecommit
+})
+
+m.OrderPrecommiters = moduleNames
+}
+
+// SetOrderMigrations sets the order of migrations to be run. If not set
+// then migrations will be run with an order defined in `DefaultMigrationsOrder`.
+func (m *Manager)
+
+SetOrderMigrations(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderMigrations", moduleNames, nil)
+
+m.OrderMigrations = moduleNames
+}
+
+// RegisterInvariants registers all module invariants
+//
+// Deprecated: this function is a no-op and will be removed in the next release of the Cosmos SDK.
+func (m *Manager)
+
+RegisterInvariants(_ sdk.InvariantRegistry) {
+}
+
+// RegisterServices registers all module services
+func (m *Manager)
+
+RegisterServices(cfg Configurator)
+
+error {
+ for _, module := range m.Modules {
+ if module, ok := module.(HasServices); ok {
+ module.RegisterServices(cfg)
+}
+ if module, ok := module.(appmodule.HasServices); ok {
+ err := module.RegisterServices(cfg)
+ if err != nil {
+ return err
+}
+
+}
+ if cfg.Error() != nil {
+ return cfg.Error()
+}
+
+}
+
+return nil
+}
+
+// InitGenesis performs init genesis functionality for modules. Exactly one
+// module must return a non-empty validator set update to correctly initialize
+// the chain.
+func (m *Manager)
+
+InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, genesisData map[string]json.RawMessage) (*abci.ResponseInitChain, error) {
+ var validatorUpdates []abci.ValidatorUpdate
+ ctx.Logger().Info("initializing blockchain state from genesis.json")
+ for _, moduleName := range m.OrderInitGenesis {
+ if genesisData[moduleName] == nil {
+ continue
+}
+ mod := m.Modules[moduleName]
+ // we might get an adapted module, a native core API module or a legacy module
+ if module, ok := mod.(appmodule.HasGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+ // core API genesis
+ source, err := genesis.SourceFromRawJSON(genesisData[moduleName])
+ if err != nil {
+ return &abci.ResponseInitChain{
+}, err
+}
+
+err = module.InitGenesis(ctx, source)
+ if err != nil {
+ return &abci.ResponseInitChain{
+}, err
+}
+
+}
+
+else if module, ok := mod.(HasGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+
+module.InitGenesis(ctx, cdc, genesisData[moduleName])
+}
+
+else if module, ok := mod.(HasABCIGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+ moduleValUpdates := module.InitGenesis(ctx, cdc, genesisData[moduleName])
+
+ // use these validator updates if provided, the module manager assumes
+ // only one module will update the validator set
+ if len(moduleValUpdates) > 0 {
+ if len(validatorUpdates) > 0 {
+ return &abci.ResponseInitChain{
+}, errors.New("validator InitGenesis updates already set by a previous module")
+}
+
+validatorUpdates = moduleValUpdates
+}
+
+}
+
+}
+
+ // a chain must initialize with a non-empty validator set
+ if len(validatorUpdates) == 0 {
+ return &abci.ResponseInitChain{
+}, fmt.Errorf("validator set is empty after InitGenesis, please ensure at least one validator is initialized with a delegation greater than or equal to the DefaultPowerReduction (%d)", sdk.DefaultPowerReduction)
+}
+
+return &abci.ResponseInitChain{
+ Validators: validatorUpdates,
+}, nil
+}
+
+// ExportGenesis performs export genesis functionality for modules
+func (m *Manager)
+
+ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) (map[string]json.RawMessage, error) {
+ return m.ExportGenesisForModules(ctx, cdc, []string{
+})
+}
+
+// ExportGenesisForModules performs export genesis functionality for modules
+func (m *Manager)
+
+ExportGenesisForModules(ctx sdk.Context, cdc codec.JSONCodec, modulesToExport []string) (map[string]json.RawMessage, error) {
+ if len(modulesToExport) == 0 {
+ modulesToExport = m.OrderExportGenesis
+}
+ // verify modules exists in app, so that we don't panic in the middle of an export
+ if err := m.checkModulesExists(modulesToExport); err != nil {
+ return nil, err
+}
+
+type genesisResult struct {
+ bz json.RawMessage
+ err error
+}
+ channels := make(map[string]chan genesisResult)
+ for _, moduleName := range modulesToExport {
+ mod := m.Modules[moduleName]
+ if module, ok := mod.(appmodule.HasGenesis); ok {
+ // core API genesis
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module appmodule.HasGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ target := genesis.RawJSONTarget{
+}
+ err := module.ExportGenesis(ctx, target.Target())
+ if err != nil {
+ ch <- genesisResult{
+ nil, err
+}
+
+return
+}
+
+rawJSON, err := target.JSON()
+ if err != nil {
+ ch <- genesisResult{
+ nil, err
+}
+
+return
+}
+
+ch <- genesisResult{
+ rawJSON, nil
+}
+
+}(module, channels[moduleName])
+}
+
+else if module, ok := mod.(HasGenesis); ok {
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module HasGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ ch <- genesisResult{
+ module.ExportGenesis(ctx, cdc), nil
+}
+
+}(module, channels[moduleName])
+}
+
+else if module, ok := mod.(HasABCIGenesis); ok {
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module HasABCIGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ ch <- genesisResult{
+ module.ExportGenesis(ctx, cdc), nil
+}
+
+}(module, channels[moduleName])
+}
+
+}
+ genesisData := make(map[string]json.RawMessage)
+ for moduleName := range channels {
+ res := <-channels[moduleName]
+ if res.err != nil {
+ return nil, fmt.Errorf("genesis export error in %s: %w", moduleName, res.err)
+}
+
+genesisData[moduleName] = res.bz
+}
+
+return genesisData, nil
+}
+
+// checkModulesExists verifies that all modules in the list exist in the app
+func (m *Manager)
+
+checkModulesExists(moduleName []string)
+
+error {
+ for _, name := range moduleName {
+ if _, ok := m.Modules[name]; !ok {
+ return fmt.Errorf("module %s does not exist", name)
+}
+
+}
+
+return nil
+}
+
+// assertNoForgottenModules checks that we didn't forget any modules in the SetOrder* functions.
+// `pass` is a closure which allows one to omit modules from `moduleNames`.
+// If you provide non-nil `pass` and it returns true, the module would not be subject of the assertion.
+func (m *Manager)
+
+assertNoForgottenModules(setOrderFnName string, moduleNames []string, pass func(moduleName string)
+
+bool) {
+ ms := make(map[string]bool)
+ for _, m := range moduleNames {
+ ms[m] = true
+}
+
+var missing []string
+ for m := range m.Modules {
+ if pass != nil && pass(m) {
+ continue
+}
+ if !ms[m] {
+ missing = append(missing, m)
+}
+
+}
+ if len(missing) != 0 {
+ sort.Strings(missing)
+
+panic(fmt.Sprintf(
+ "all modules must be defined when setting %s, missing: %v", setOrderFnName, missing))
+}
+}
+
+// MigrationHandler is the migration function that each module registers.
+type MigrationHandler func(sdk.Context)
+
+error
+
+// VersionMap is a map of moduleName -> version
+type VersionMap map[string]uint64
+
+// RunMigrations performs in-place store migrations for all modules. This
+// function MUST be called inside an x/upgrade UpgradeHandler.
+//
+// Recall that in an upgrade handler, the `fromVM` VersionMap is retrieved from
+// x/upgrade's store, and the function needs to return the target VersionMap
+// that will in turn be persisted to the x/upgrade's store. In general,
+// returning RunMigrations should be enough:
+//
+// Example:
+//
+// cfg := module.NewConfigurator(...)
+// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+// return app.mm.RunMigrations(ctx, cfg, fromVM)
+//
+})
+//
+// Internally, RunMigrations will perform the following steps:
+// - create an `updatedVM` VersionMap of module with their latest ConsensusVersion
+// - make a diff of `fromVM` and `updatedVM`, and for each module:
+// - if the module's `fromVM` version is less than its `updatedVM` version,
+// then run in-place store migrations for that module between those versions.
+// - if the module does not exist in the `fromVM` (which means that it's a new module,
+// because it was not in the previous x/upgrade's store), then run
+// `InitGenesis` on that module.
+//
+// - return the `updatedVM` to be persisted in the x/upgrade's store.
+//
+// Migrations are run in an order defined by `Manager.OrderMigrations` or (if not set)
+
+defined by
+// `DefaultMigrationsOrder` function.
+//
+// As an app developer, if you wish to skip running InitGenesis for your new
+// module "foo", you need to manually pass a `fromVM` argument to this function
+// foo's module version set to its latest ConsensusVersion. That way, the diff
+// between the function's `fromVM` and `updatedVM` will be empty, hence not
+// running anything for foo.
+//
+// Example:
+//
+// cfg := module.NewConfigurator(...)
+// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+// // Assume "foo" is a new module.
+// // `fromVM` is fetched from existing x/upgrade store. Since foo didn't exist
+// // before this upgrade, `v, exists := fromVM["foo"]; exists == false`, and RunMigration will by default
+// // run InitGenesis on foo.
+// // To skip running foo's InitGenesis, you need set `fromVM`'s foo to its latest
+// // consensus version:
+// fromVM["foo"] = foo.AppModule{
+}.ConsensusVersion()
+//
+// return app.mm.RunMigrations(ctx, cfg, fromVM)
+//
+})
+//
+// Please also refer to /sdk/v0.53/build/migrations/upgrading for more information.
+// RunMigrations performs the in-place store migrations (or InitGenesis for
+// modules absent from fromVM) and returns the updated VersionMap to persist.
+func (m Manager) RunMigrations(ctx context.Context, cfg Configurator, fromVM VersionMap) (VersionMap, error) {
+	c, ok := cfg.(*configurator)
+	if !ok {
+		return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidType, "expected %T, got %T", &configurator{}, cfg)
+	}
+
+	modules := m.OrderMigrations
+	if modules == nil {
+		modules = DefaultMigrationsOrder(m.ModuleNames())
+	}
+
+	sdkCtx := sdk.UnwrapSDKContext(ctx)
+	updatedVM := VersionMap{}
+	for _, moduleName := range modules {
+		module := m.Modules[moduleName]
+		fromVersion, exists := fromVM[moduleName]
+		toVersion := uint64(0)
+		if module, ok := module.(HasConsensusVersion); ok {
+			toVersion = module.ConsensusVersion()
+		}
+
+		// We run migration if the module is specified in `fromVM`.
+		// Otherwise we run InitGenesis.
+		//
+		// The module won't exist in the fromVM in two cases:
+		// 1. A new module is added. In this case we run InitGenesis with an
+		// empty genesis state.
+		// 2. An existing chain is upgrading from version < 0.43 to v0.43+ for the first time.
+		// In this case, all modules have yet to be added to x/upgrade's VersionMap store.
+		if exists {
+			if err := c.runModuleMigrations(sdkCtx, moduleName, fromVersion, toVersion); err != nil {
+				return nil, err
+			}
+		} else {
+			sdkCtx.Logger().Info(fmt.Sprintf("adding a new module: %s", moduleName))
+			if module, ok := m.Modules[moduleName].(HasGenesis); ok {
+				module.InitGenesis(sdkCtx, c.cdc, module.DefaultGenesis(c.cdc))
+			}
+			if module, ok := m.Modules[moduleName].(HasABCIGenesis); ok {
+				moduleValUpdates := module.InitGenesis(sdkCtx, c.cdc, module.DefaultGenesis(c.cdc))
+				// The module manager assumes only one module will update the
+				// validator set, and it can't be a new module.
+				if len(moduleValUpdates) > 0 {
+					return nil, errorsmod.Wrapf(sdkerrors.ErrLogic, "validator InitGenesis update is already set by another module")
+				}
+			}
+		}
+
+		updatedVM[moduleName] = toVersion
+	}
+
+	return updatedVM, nil
+}
+
+// PreBlock performs begin block functionality for upgrade module.
+// It takes the current context as a parameter and returns a boolean value
+// indicating whether the migration was successfully executed or not.
+func (m *Manager) PreBlock(ctx sdk.Context) (*sdk.ResponsePreBlock, error) {
+	paramsChanged := false
+	for _, moduleName := range m.OrderPreBlockers {
+		if module, ok := m.Modules[moduleName].(appmodule.HasPreBlocker); ok {
+			rsp, err := module.PreBlock(ctx)
+			if err != nil {
+				return nil, err
+			}
+			if rsp.IsConsensusParamsChanged() {
+				paramsChanged = true
+			}
+		}
+	}
+
+	return &sdk.ResponsePreBlock{
+		ConsensusParamsChanged: paramsChanged,
+	}, nil
+}
+
+// BeginBlock performs begin block functionality for all modules. It creates a
+// child context with an event manager to aggregate events emitted from all
+// modules.
+func (m *Manager) BeginBlock(ctx sdk.Context) (sdk.BeginBlock, error) {
+	ctx = ctx.WithEventManager(sdk.NewEventManager())
+	for _, moduleName := range m.OrderBeginBlockers {
+		if module, ok := m.Modules[moduleName].(appmodule.HasBeginBlocker); ok {
+			if err := module.BeginBlock(ctx); err != nil {
+				return sdk.BeginBlock{}, err
+			}
+		}
+	}
+
+	return sdk.BeginBlock{
+		Events: ctx.EventManager().ABCIEvents(),
+	}, nil
+}
+
+// EndBlock performs end block functionality for all modules. It creates a
+// child context with an event manager to aggregate events emitted from all
+// modules. At most one module may return validator-set updates.
+func (m *Manager) EndBlock(ctx sdk.Context) (sdk.EndBlock, error) {
+	ctx = ctx.WithEventManager(sdk.NewEventManager())
+	validatorUpdates := []abci.ValidatorUpdate{}
+	for _, moduleName := range m.OrderEndBlockers {
+		if module, ok := m.Modules[moduleName].(appmodule.HasEndBlocker); ok {
+			if err := module.EndBlock(ctx); err != nil {
+				return sdk.EndBlock{}, err
+			}
+		} else if module, ok := m.Modules[moduleName].(HasABCIEndBlock); ok {
+			moduleValUpdates, err := module.EndBlock(ctx)
+			if err != nil {
+				return sdk.EndBlock{}, err
+			}
+			// use these validator updates if provided, the module manager assumes
+			// only one module will update the validator set
+			if len(moduleValUpdates) > 0 {
+				if len(validatorUpdates) > 0 {
+					return sdk.EndBlock{}, errors.New("validator EndBlock updates already set by a previous module")
+				}
+				for _, update := range moduleValUpdates {
+					validatorUpdates = append(validatorUpdates, abci.ValidatorUpdate{
+						PubKey: update.PubKey,
+						Power:  update.Power,
+					})
+				}
+			}
+		} else {
+			continue
+		}
+	}
+
+	return sdk.EndBlock{
+		ValidatorUpdates: validatorUpdates,
+		Events:           ctx.EventManager().ABCIEvents(),
+	}, nil
+}
+
+// Precommit performs precommit functionality for all modules.
+func (m *Manager) Precommit(ctx sdk.Context) error {
+	for _, moduleName := range m.OrderPrecommiters {
+		module, ok := m.Modules[moduleName].(appmodule.HasPrecommit)
+		if !ok {
+			continue
+		}
+		if err := module.Precommit(ctx); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// PrepareCheckState performs functionality for preparing the check state for all modules.
+func (m *Manager) PrepareCheckState(ctx sdk.Context) error {
+	for _, moduleName := range m.OrderPrepareCheckStaters {
+		module, ok := m.Modules[moduleName].(appmodule.HasPrepareCheckState)
+		if !ok {
+			continue
+		}
+		if err := module.PrepareCheckState(ctx); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// GetVersionMap gets consensus version from all modules. Modules that do not
+// implement HasConsensusVersion are reported as version 0.
+func (m *Manager) GetVersionMap() VersionMap {
+	vermap := make(VersionMap)
+	for name, v := range m.Modules {
+		version := uint64(0)
+		if v, ok := v.(HasConsensusVersion); ok {
+			version = v.ConsensusVersion()
+		}
+		vermap[name] = version
+	}
+
+	return vermap
+}
+
+// ModuleNames returns list of all module names, without any particular order.
+func (m *Manager) ModuleNames() []string {
+	return slices.Collect(maps.Keys(m.Modules))
+}
+
+// DefaultMigrationsOrder returns a default migrations order: ascending alphabetical by module name,
+// except x/auth which will run last, see:
+// https://github.com/cosmos/cosmos-sdk/issues/10591
+func DefaultMigrationsOrder(modules []string) []string {
+	const authName = "auth"
+	out := make([]string, 0, len(modules))
+	hasAuth := false
+	for _, m := range modules {
+		if m == authName {
+			hasAuth = true
+		} else {
+			out = append(out, m)
+		}
+	}
+
+	sort.Strings(out)
+	// auth is appended after sorting so it always migrates last.
+	if hasAuth {
+		out = append(out, authName)
+	}
+
+	return out
+}
+```
+
+It implements the following methods:
+
+* `NewBasicManager(modules ...AppModuleBasic)`: Constructor function. It takes a list of the application's `AppModuleBasic` and builds a new `BasicManager`. This function is generally called in the `init()` function of [`app.go`](/sdk/v0.53/learn/beginner/app-anatomy#core-application-file) to quickly initialize the independent elements of the application's modules (click [here](https://github.com/cosmos/gaia/blob/main/app/app.go#L59-L74) to see an example).
+* `NewBasicManagerFromManager(manager *Manager, customModuleBasics map[string]AppModuleBasic)`: Constructor function. It creates a new `BasicManager` from a `Manager`. The `BasicManager` will contain all `AppModuleBasic` from the `AppModule` manager using `CoreAppModuleBasicAdaptor` whenever possible. Module's `AppModuleBasic` can be overridden by passing a custom AppModuleBasic map.
+* `RegisterLegacyAminoCodec(cdc *codec.LegacyAmino)`: Registers the [`codec.LegacyAmino`s](/sdk/v0.53/learn/advanced/encoding#amino) of each of the application's `AppModuleBasic`. This function is usually called early on in the [application's construction](/sdk/v0.53/learn/beginner/app-anatomy#constructor).
+* `RegisterInterfaces(registry codectypes.InterfaceRegistry)`: Registers interface types and implementations of each of the application's `AppModuleBasic`.
+* `DefaultGenesis(cdc codec.JSONCodec)`: Provides default genesis information for modules in the application by calling the [`DefaultGenesis(cdc codec.JSONCodec)`](/sdk/v0.53/build/building-modules/genesis#defaultgenesis) function of each module. It only calls the modules that implement the `HasGenesisBasics` interface.
+* `ValidateGenesis(cdc codec.JSONCodec, txEncCfg client.TxEncodingConfig, genesis map[string]json.RawMessage)`: Validates the genesis information modules by calling the [`ValidateGenesis(codec.JSONCodec, client.TxEncodingConfig, json.RawMessage)`](/sdk/v0.53/build/building-modules/genesis#validategenesis) function of modules implementing the `HasGenesisBasics` interface.
+* `RegisterGRPCGatewayRoutes(clientCtx client.Context, rtr *runtime.ServeMux)`: Registers gRPC routes for modules.
+* `AddTxCommands(rootTxCmd *cobra.Command)`: Adds modules' transaction commands (defined as `GetTxCmd() *cobra.Command`) to the application's [`rootTxCommand`](/sdk/v0.53/learn/advanced/cli#transaction-commands). This function is usually called from the `main.go` function of the [application's command-line interface](/sdk/v0.53/learn/advanced/cli).
+* `AddQueryCommands(rootQueryCmd *cobra.Command)`: Adds modules' query commands (defined as `GetQueryCmd() *cobra.Command`) to the application's [`rootQueryCommand`](/sdk/v0.53/learn/advanced/cli#query-commands). This function is usually called from the `main.go` function of the [application's command-line interface](/sdk/v0.53/learn/advanced/cli).
+
+### `Manager`
+
+The `Manager` is a structure that holds all the `AppModule` of an application, and defines the order of execution between several key components of these modules:
+
+```go expandable
+/*
+Package module contains application module patterns and associated "manager" functionality.
+The module pattern has been broken down by:
+ - independent module functionality (AppModuleBasic)
+ - inter-dependent module simulation functionality (AppModuleSimulation)
+ - inter-dependent module full functionality (AppModule)
+
+inter-dependent module functionality is module functionality which somehow
+depends on other modules, typically through the module keeper. Many of the
+module keepers are dependent on each other, thus in order to access the full
+set of module functionality we need to define all the keepers/params-store/keys
+etc. This full set of advanced functionality is defined by the AppModule interface.
+
+Independent module functions are separated to allow for the construction of the
+basic application structures required early on in the application definition
+and used to enable the definition of full module functionality later in the
+process. This separation is necessary, however we still want to allow for a
+high level pattern for modules to follow - for instance, such that we don't
+have to manually register all of the codecs for all the modules. This basic
+procedure as well as other basic patterns are handled through the use of
+BasicManager.
+
+Lastly the interface for genesis functionality (HasGenesis & HasABCIGenesis)
+
+has been
+separated out from full module functionality (AppModule)
+
+so that modules which
+are only used for genesis can take advantage of the Module patterns without
+needlessly defining many placeholder functions
+*/
+package module
+
+import (
+
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "maps"
+ "slices"
+ "sort"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/core/genesis"
+ errorsmod "cosmossdk.io/errors"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// AppModuleBasic is the standard form for basic non-dependent elements of an application module.
+type AppModuleBasic interface {
+ HasName
+ RegisterLegacyAminoCodec(*codec.LegacyAmino)
+
+RegisterInterfaces(types.InterfaceRegistry)
+
+RegisterGRPCGatewayRoutes(client.Context, *runtime.ServeMux)
+}
+
+// HasName allows the module to provide its own name for legacy purposes.
+// Newer apps should specify the name for their modules using a map
+// using NewManagerFromMap.
+type HasName interface {
+ Name()
+
+string
+}
+
+// HasGenesisBasics is the legacy interface for stateless genesis methods.
+type HasGenesisBasics interface {
+ DefaultGenesis(codec.JSONCodec)
+
+json.RawMessage
+ ValidateGenesis(codec.JSONCodec, client.TxEncodingConfig, json.RawMessage)
+
+error
+}
+
+// BasicManager is a collection of AppModuleBasic
+type BasicManager map[string]AppModuleBasic
+
+// NewBasicManager creates a new BasicManager object
+func NewBasicManager(modules ...AppModuleBasic)
+
+BasicManager {
+ moduleMap := make(map[string]AppModuleBasic)
+ for _, module := range modules {
+ moduleMap[module.Name()] = module
+}
+
+return moduleMap
+}
+
+// NewBasicManagerFromManager creates a new BasicManager from a Manager
+// The BasicManager will contain all AppModuleBasic from the AppModule Manager
+// Module's AppModuleBasic can be overridden by passing a custom AppModuleBasic map
+func NewBasicManagerFromManager(manager *Manager, customModuleBasics map[string]AppModuleBasic)
+
+BasicManager {
+ moduleMap := make(map[string]AppModuleBasic)
+ for name, module := range manager.Modules {
+ if customBasicMod, ok := customModuleBasics[name]; ok {
+ moduleMap[name] = customBasicMod
+ continue
+}
+ if appModule, ok := module.(appmodule.AppModule); ok {
+ moduleMap[name] = CoreAppModuleBasicAdaptor(name, appModule)
+
+continue
+}
+ if basicMod, ok := module.(AppModuleBasic); ok {
+ moduleMap[name] = basicMod
+}
+
+}
+
+return moduleMap
+}
+
+// RegisterLegacyAminoCodec registers all module codecs
+func (bm BasicManager)
+
+RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ for _, b := range bm {
+ b.RegisterLegacyAminoCodec(cdc)
+}
+}
+
+// RegisterInterfaces registers all module interface types
+func (bm BasicManager)
+
+RegisterInterfaces(registry types.InterfaceRegistry) {
+ for _, m := range bm {
+ m.RegisterInterfaces(registry)
+}
+}
+
+// DefaultGenesis provides default genesis information for all modules
+func (bm BasicManager)
+
+DefaultGenesis(cdc codec.JSONCodec)
+
+map[string]json.RawMessage {
+ genesisData := make(map[string]json.RawMessage)
+ for _, b := range bm {
+ if mod, ok := b.(HasGenesisBasics); ok {
+ genesisData[b.Name()] = mod.DefaultGenesis(cdc)
+}
+
+}
+
+return genesisData
+}
+
+// ValidateGenesis performs genesis state validation for all modules
+func (bm BasicManager)
+
+ValidateGenesis(cdc codec.JSONCodec, txEncCfg client.TxEncodingConfig, genesisData map[string]json.RawMessage)
+
+error {
+ for _, b := range bm {
+ // first check if the module is an adapted Core API Module
+ if mod, ok := b.(HasGenesisBasics); ok {
+ if err := mod.ValidateGenesis(cdc, txEncCfg, genesisData[b.Name()]); err != nil {
+ return err
+}
+
+}
+
+}
+
+return nil
+}
+
+// RegisterGRPCGatewayRoutes registers all module rest routes
+func (bm BasicManager)
+
+RegisterGRPCGatewayRoutes(clientCtx client.Context, rtr *runtime.ServeMux) {
+ for _, b := range bm {
+ b.RegisterGRPCGatewayRoutes(clientCtx, rtr)
+}
+}
+
+// AddTxCommands adds all tx commands to the rootTxCmd.
+func (bm BasicManager)
+
+AddTxCommands(rootTxCmd *cobra.Command) {
+ for _, b := range bm {
+ if mod, ok := b.(interface {
+ GetTxCmd() *cobra.Command
+}); ok {
+ if cmd := mod.GetTxCmd(); cmd != nil {
+ rootTxCmd.AddCommand(cmd)
+}
+
+}
+
+}
+}
+
+// AddQueryCommands adds all query commands to the rootQueryCmd.
+func (bm BasicManager)
+
+AddQueryCommands(rootQueryCmd *cobra.Command) {
+ for _, b := range bm {
+ if mod, ok := b.(interface {
+ GetQueryCmd() *cobra.Command
+}); ok {
+ if cmd := mod.GetQueryCmd(); cmd != nil {
+ rootQueryCmd.AddCommand(cmd)
+}
+
+}
+
+}
+}
+
+// HasGenesis is the extension interface for stateful genesis methods.
+type HasGenesis interface {
+ HasGenesisBasics
+ InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage)
+
+ExportGenesis(sdk.Context, codec.JSONCodec)
+
+json.RawMessage
+}
+
+// HasABCIGenesis is the extension interface for stateful genesis methods which returns validator updates.
+type HasABCIGenesis interface {
+ HasGenesisBasics
+ InitGenesis(sdk.Context, codec.JSONCodec, json.RawMessage) []abci.ValidatorUpdate
+ ExportGenesis(sdk.Context, codec.JSONCodec)
+
+json.RawMessage
+}
+
+// AppModule is the form for an application module. Most of
+// its functionality has been moved to extension interfaces.
+// Deprecated: use appmodule.AppModule with a combination of extension interfaces instead.
+type AppModule interface {
+ appmodule.AppModule
+
+ AppModuleBasic
+}
+
+// HasInvariants is the interface for registering invariants.
+//
+// Deprecated: this will be removed in the next Cosmos SDK release.
+type HasInvariants interface {
+ // RegisterInvariants registers module invariants.
+ RegisterInvariants(sdk.InvariantRegistry)
+}
+
+// HasServices is the interface for modules to register services.
+type HasServices interface {
+ // RegisterServices allows a module to register services.
+ RegisterServices(Configurator)
+}
+
+// HasConsensusVersion is the interface for declaring a module consensus version.
+type HasConsensusVersion interface {
+ // ConsensusVersion is a sequence number for state-breaking change of the
+ // module. It should be incremented on each consensus-breaking change
+ // introduced by the module. To avoid wrong/empty versions, the initial version
+ // should be set to 1.
+ ConsensusVersion()
+
+uint64
+}
+
+// HasABCIEndblock is a released typo of HasABCIEndBlock.
+// Deprecated: use HasABCIEndBlock instead.
+type HasABCIEndblock HasABCIEndBlock
+
+// HasABCIEndBlock is the interface for modules that need to run code at the end of the block.
+type HasABCIEndBlock interface {
+ AppModule
+ EndBlock(context.Context) ([]abci.ValidatorUpdate, error)
+}
+
+var (
+ _ appmodule.AppModule = (*GenesisOnlyAppModule)(nil)
+ _ AppModuleBasic = (*GenesisOnlyAppModule)(nil)
+)
+
+// AppModuleGenesis is the standard form for an application module genesis functions
+type AppModuleGenesis interface {
+ AppModuleBasic
+ HasABCIGenesis
+}
+
+// GenesisOnlyAppModule is an AppModule that only has import/export functionality
+type GenesisOnlyAppModule struct {
+ AppModuleGenesis
+}
+
+// NewGenesisOnlyAppModule creates a new GenesisOnlyAppModule object
+func NewGenesisOnlyAppModule(amg AppModuleGenesis)
+
+GenesisOnlyAppModule {
+ return GenesisOnlyAppModule{
+ AppModuleGenesis: amg,
+}
+}
+
+// IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+func (GenesisOnlyAppModule)
+
+IsOnePerModuleType() {
+}
+
+// IsAppModule implements the appmodule.AppModule interface.
+func (GenesisOnlyAppModule)
+
+IsAppModule() {
+}
+
+// RegisterInvariants is a placeholder function register no invariants
+func (GenesisOnlyAppModule)
+
+RegisterInvariants(_ sdk.InvariantRegistry) {
+}
+
+// ConsensusVersion implements AppModule/ConsensusVersion.
+func (gam GenesisOnlyAppModule)
+
+ConsensusVersion()
+
+uint64 {
+ return 1
+}
+
+// Manager defines a module manager that provides the high level utility for managing and executing
+// operations for a group of modules
+type Manager struct {
+ Modules map[string]any // interface{
+}
+
+is used now to support the legacy AppModule as well as new core appmodule.AppModule.
+ OrderInitGenesis []string
+ OrderExportGenesis []string
+ OrderPreBlockers []string
+ OrderBeginBlockers []string
+ OrderEndBlockers []string
+ OrderPrepareCheckStaters []string
+ OrderPrecommiters []string
+ OrderMigrations []string
+}
+
+// NewManager creates a new Manager object.
+func NewManager(modules ...AppModule) *Manager {
+ moduleMap := make(map[string]any)
+ modulesStr := make([]string, 0, len(modules))
+ preBlockModulesStr := make([]string, 0)
+ for _, module := range modules {
+ if _, ok := module.(appmodule.AppModule); !ok {
+ panic(fmt.Sprintf("module %s does not implement appmodule.AppModule", module.Name()))
+}
+
+moduleMap[module.Name()] = module
+ modulesStr = append(modulesStr, module.Name())
+ if _, ok := module.(appmodule.HasPreBlocker); ok {
+ preBlockModulesStr = append(preBlockModulesStr, module.Name())
+}
+
+}
+
+return &Manager{
+ Modules: moduleMap,
+ OrderInitGenesis: modulesStr,
+ OrderExportGenesis: modulesStr,
+ OrderPreBlockers: preBlockModulesStr,
+ OrderBeginBlockers: modulesStr,
+ OrderPrepareCheckStaters: modulesStr,
+ OrderPrecommiters: modulesStr,
+ OrderEndBlockers: modulesStr,
+}
+}
+
+// NewManagerFromMap creates a new Manager object from a map of module names to module implementations.
+// This method should be used for apps and modules which have migrated to the cosmossdk.io/core.appmodule.AppModule API.
+func NewManagerFromMap(moduleMap map[string]appmodule.AppModule) *Manager {
+ simpleModuleMap := make(map[string]any)
+ modulesStr := make([]string, 0, len(simpleModuleMap))
+ preBlockModulesStr := make([]string, 0)
+ for name, module := range moduleMap {
+ simpleModuleMap[name] = module
+ modulesStr = append(modulesStr, name)
+ if _, ok := module.(appmodule.HasPreBlocker); ok {
+ preBlockModulesStr = append(preBlockModulesStr, name)
+}
+
+}
+
+ // Sort the modules by name. Given that we are using a map above we can't guarantee the order.
+ sort.Strings(modulesStr)
+
+return &Manager{
+ Modules: simpleModuleMap,
+ OrderInitGenesis: modulesStr,
+ OrderExportGenesis: modulesStr,
+ OrderPreBlockers: preBlockModulesStr,
+ OrderBeginBlockers: modulesStr,
+ OrderEndBlockers: modulesStr,
+ OrderPrecommiters: modulesStr,
+ OrderPrepareCheckStaters: modulesStr,
+}
+}
+
+// SetOrderInitGenesis sets the order of init genesis calls
+func (m *Manager)
+
+SetOrderInitGenesis(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderInitGenesis", moduleNames, func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+ return !hasGenesis
+}
+ if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+ return !hasABCIGenesis
+}
+
+ _, hasGenesis := module.(HasGenesis)
+
+return !hasGenesis
+})
+
+m.OrderInitGenesis = moduleNames
+}
+
+// SetOrderExportGenesis sets the order of export genesis calls
+func (m *Manager)
+
+SetOrderExportGenesis(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderExportGenesis", moduleNames, func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasGenesis := module.(appmodule.HasGenesis); hasGenesis {
+ return !hasGenesis
+}
+ if _, hasABCIGenesis := module.(HasABCIGenesis); hasABCIGenesis {
+ return !hasABCIGenesis
+}
+
+ _, hasGenesis := module.(HasGenesis)
+
+return !hasGenesis
+})
+
+m.OrderExportGenesis = moduleNames
+}
+
+// SetOrderPreBlockers sets the order of set pre-blocker calls
+func (m *Manager)
+
+SetOrderPreBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPreBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasBlock := module.(appmodule.HasPreBlocker)
+
+return !hasBlock
+})
+
+m.OrderPreBlockers = moduleNames
+}
+
+// SetOrderBeginBlockers sets the order of set begin-blocker calls
+func (m *Manager)
+
+SetOrderBeginBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderBeginBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasBeginBlock := module.(appmodule.HasBeginBlocker)
+
+return !hasBeginBlock
+})
+
+m.OrderBeginBlockers = moduleNames
+}
+
+// SetOrderEndBlockers sets the order of set end-blocker calls
+func (m *Manager)
+
+SetOrderEndBlockers(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderEndBlockers", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ if _, hasEndBlock := module.(appmodule.HasEndBlocker); hasEndBlock {
+ return !hasEndBlock
+}
+
+ _, hasABCIEndBlock := module.(HasABCIEndBlock)
+
+return !hasABCIEndBlock
+})
+
+m.OrderEndBlockers = moduleNames
+}
+
+// SetOrderPrepareCheckStaters sets the order of set prepare-check-stater calls
+func (m *Manager)
+
+SetOrderPrepareCheckStaters(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPrepareCheckStaters", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasPrepareCheckState := module.(appmodule.HasPrepareCheckState)
+
+return !hasPrepareCheckState
+})
+
+m.OrderPrepareCheckStaters = moduleNames
+}
+
+// SetOrderPrecommiters sets the order of set precommiter calls
+func (m *Manager)
+
+SetOrderPrecommiters(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderPrecommiters", moduleNames,
+ func(moduleName string)
+
+bool {
+ module := m.Modules[moduleName]
+ _, hasPrecommit := module.(appmodule.HasPrecommit)
+
+return !hasPrecommit
+})
+
+m.OrderPrecommiters = moduleNames
+}
+
+// SetOrderMigrations sets the order of migrations to be run. If not set
+// then migrations will be run with an order defined in `DefaultMigrationsOrder`.
+func (m *Manager)
+
+SetOrderMigrations(moduleNames ...string) {
+ m.assertNoForgottenModules("SetOrderMigrations", moduleNames, nil)
+
+m.OrderMigrations = moduleNames
+}
+
+// RegisterInvariants registers all module invariants
+//
+// Deprecated: this function is a no-op and will be removed in the next release of the Cosmos SDK.
+func (m *Manager)
+
+RegisterInvariants(_ sdk.InvariantRegistry) {
+}
+
+// RegisterServices registers all module services
+func (m *Manager)
+
+RegisterServices(cfg Configurator)
+
+error {
+ for _, module := range m.Modules {
+ if module, ok := module.(HasServices); ok {
+ module.RegisterServices(cfg)
+}
+ if module, ok := module.(appmodule.HasServices); ok {
+ err := module.RegisterServices(cfg)
+ if err != nil {
+ return err
+}
+
+}
+ if cfg.Error() != nil {
+ return cfg.Error()
+}
+
+}
+
+return nil
+}
+
+// InitGenesis performs init genesis functionality for modules. Exactly one
+// module must return a non-empty validator set update to correctly initialize
+// the chain.
+func (m *Manager)
+
+InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, genesisData map[string]json.RawMessage) (*abci.ResponseInitChain, error) {
+ var validatorUpdates []abci.ValidatorUpdate
+ ctx.Logger().Info("initializing blockchain state from genesis.json")
+ for _, moduleName := range m.OrderInitGenesis {
+ if genesisData[moduleName] == nil {
+ continue
+}
+ mod := m.Modules[moduleName]
+ // we might get an adapted module, a native core API module or a legacy module
+ if module, ok := mod.(appmodule.HasGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+ // core API genesis
+ source, err := genesis.SourceFromRawJSON(genesisData[moduleName])
+ if err != nil {
+ return &abci.ResponseInitChain{
+}, err
+}
+
+err = module.InitGenesis(ctx, source)
+ if err != nil {
+ return &abci.ResponseInitChain{
+}, err
+}
+
+}
+
+else if module, ok := mod.(HasGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+
+module.InitGenesis(ctx, cdc, genesisData[moduleName])
+}
+
+else if module, ok := mod.(HasABCIGenesis); ok {
+ ctx.Logger().Debug("running initialization for module", "module", moduleName)
+ moduleValUpdates := module.InitGenesis(ctx, cdc, genesisData[moduleName])
+
+ // use these validator updates if provided, the module manager assumes
+ // only one module will update the validator set
+ if len(moduleValUpdates) > 0 {
+ if len(validatorUpdates) > 0 {
+ return &abci.ResponseInitChain{
+}, errors.New("validator InitGenesis updates already set by a previous module")
+}
+
+validatorUpdates = moduleValUpdates
+}
+
+}
+
+}
+
+ // a chain must initialize with a non-empty validator set
+ if len(validatorUpdates) == 0 {
+ return &abci.ResponseInitChain{
+}, fmt.Errorf("validator set is empty after InitGenesis, please ensure at least one validator is initialized with a delegation greater than or equal to the DefaultPowerReduction (%d)", sdk.DefaultPowerReduction)
+}
+
+return &abci.ResponseInitChain{
+ Validators: validatorUpdates,
+}, nil
+}
+
+// ExportGenesis performs export genesis functionality for modules
+func (m *Manager)
+
+ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) (map[string]json.RawMessage, error) {
+ return m.ExportGenesisForModules(ctx, cdc, []string{
+})
+}
+
+// ExportGenesisForModules performs export genesis functionality for modules
+func (m *Manager)
+
+ExportGenesisForModules(ctx sdk.Context, cdc codec.JSONCodec, modulesToExport []string) (map[string]json.RawMessage, error) {
+ if len(modulesToExport) == 0 {
+ modulesToExport = m.OrderExportGenesis
+}
+ // verify modules exists in app, so that we don't panic in the middle of an export
+ if err := m.checkModulesExists(modulesToExport); err != nil {
+ return nil, err
+}
+
+type genesisResult struct {
+ bz json.RawMessage
+ err error
+}
+ channels := make(map[string]chan genesisResult)
+ for _, moduleName := range modulesToExport {
+ mod := m.Modules[moduleName]
+ if module, ok := mod.(appmodule.HasGenesis); ok {
+ // core API genesis
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module appmodule.HasGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ target := genesis.RawJSONTarget{
+}
+ err := module.ExportGenesis(ctx, target.Target())
+ if err != nil {
+ ch <- genesisResult{
+ nil, err
+}
+
+return
+}
+
+rawJSON, err := target.JSON()
+ if err != nil {
+ ch <- genesisResult{
+ nil, err
+}
+
+return
+}
+
+ch <- genesisResult{
+ rawJSON, nil
+}
+
+}(module, channels[moduleName])
+}
+
+else if module, ok := mod.(HasGenesis); ok {
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module HasGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ ch <- genesisResult{
+ module.ExportGenesis(ctx, cdc), nil
+}
+
+}(module, channels[moduleName])
+}
+
+else if module, ok := mod.(HasABCIGenesis); ok {
+ channels[moduleName] = make(chan genesisResult)
+
+go func(module HasABCIGenesis, ch chan genesisResult) {
+ ctx := ctx.WithGasMeter(storetypes.NewInfiniteGasMeter()) // avoid race conditions
+ ch <- genesisResult{
+ module.ExportGenesis(ctx, cdc), nil
+}
+
+}(module, channels[moduleName])
+}
+
+}
+ genesisData := make(map[string]json.RawMessage)
+ for moduleName := range channels {
+ res := <-channels[moduleName]
+ if res.err != nil {
+ return nil, fmt.Errorf("genesis export error in %s: %w", moduleName, res.err)
+}
+
+genesisData[moduleName] = res.bz
+}
+
+return genesisData, nil
+}
+
+// checkModulesExists verifies that all modules in the list exist in the app
+func (m *Manager)
+
+checkModulesExists(moduleName []string)
+
+error {
+ for _, name := range moduleName {
+ if _, ok := m.Modules[name]; !ok {
+ return fmt.Errorf("module %s does not exist", name)
+}
+
+}
+
+return nil
+}
+
+// assertNoForgottenModules checks that we didn't forget any modules in the SetOrder* functions.
+// `pass` is a closure which allows one to omit modules from `moduleNames`.
+// If you provide non-nil `pass` and it returns true, the module would not be subject of the assertion.
+func (m *Manager)
+
+assertNoForgottenModules(setOrderFnName string, moduleNames []string, pass func(moduleName string)
+
+bool) {
+ ms := make(map[string]bool)
+ for _, m := range moduleNames {
+ ms[m] = true
+}
+
+var missing []string
+ for m := range m.Modules {
+ if pass != nil && pass(m) {
+ continue
+}
+ if !ms[m] {
+ missing = append(missing, m)
+}
+
+}
+ if len(missing) != 0 {
+ sort.Strings(missing)
+
+panic(fmt.Sprintf(
+ "all modules must be defined when setting %s, missing: %v", setOrderFnName, missing))
+}
+}
+
+// MigrationHandler is the migration function that each module registers.
+type MigrationHandler func(sdk.Context)
+
+error
+
+// VersionMap is a map of moduleName -> version
+type VersionMap map[string]uint64
+
+// RunMigrations performs in-place store migrations for all modules. This
+// function MUST be called inside an x/upgrade UpgradeHandler.
+//
+// Recall that in an upgrade handler, the `fromVM` VersionMap is retrieved from
+// x/upgrade's store, and the function needs to return the target VersionMap
+// that will in turn be persisted to the x/upgrade's store. In general,
+// returning RunMigrations should be enough:
+//
+// Example:
+//
+// cfg := module.NewConfigurator(...)
+// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+// return app.mm.RunMigrations(ctx, cfg, fromVM)
+//
+// })
+//
+// Internally, RunMigrations will perform the following steps:
+// - create an `updatedVM` VersionMap of module with their latest ConsensusVersion
+// - make a diff of `fromVM` and `updatedVM`, and for each module:
+// - if the module's `fromVM` version is less than its `updatedVM` version,
+// then run in-place store migrations for that module between those versions.
+// - if the module does not exist in the `fromVM` (which means that it's a new module,
+// because it was not in the previous x/upgrade's store), then run
+// `InitGenesis` on that module.
+//
+// - return the `updatedVM` to be persisted in the x/upgrade's store.
+//
+// Migrations are run in an order defined by `Manager.OrderMigrations` or (if not set)
+
+defined by
+// `DefaultMigrationsOrder` function.
+//
+// As an app developer, if you wish to skip running InitGenesis for your new
+// module "foo", you need to manually pass a `fromVM` argument to this function
+// foo's module version set to its latest ConsensusVersion. That way, the diff
+// between the function's `fromVM` and `udpatedVM` will be empty, hence not
+// running anything for foo.
+//
+// Example:
+//
+// cfg := module.NewConfigurator(...)
+// app.UpgradeKeeper.SetUpgradeHandler("my-plan", func(ctx context.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+// // Assume "foo" is a new module.
+// // `fromVM` is fetched from existing x/upgrade store. Since foo didn't exist
+// // before this upgrade, `v, exists := fromVM["foo"]; exists == false`, and RunMigration will by default
+// // run InitGenesis on foo.
+// // To skip running foo's InitGenesis, you need set `fromVM`'s foo to its latest
+// // consensus version:
+// fromVM["foo"] = foo.AppModule{
+}.ConsensusVersion()
+//
+// return app.mm.RunMigrations(ctx, cfg, fromVM)
+//
+})
+//
+// Please also refer to /sdk/v0.53/build/migrations/upgrading for more information.
+func (m Manager)
+
+RunMigrations(ctx context.Context, cfg Configurator, fromVM VersionMap) (VersionMap, error) {
+ c, ok := cfg.(*configurator)
+ if !ok {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidType, "expected %T, got %T", &configurator{
+}, cfg)
+}
+ modules := m.OrderMigrations
+ if modules == nil {
+ modules = DefaultMigrationsOrder(m.ModuleNames())
+}
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ updatedVM := VersionMap{
+}
+ for _, moduleName := range modules {
+ module := m.Modules[moduleName]
+ fromVersion, exists := fromVM[moduleName]
+ toVersion := uint64(0)
+ if module, ok := module.(HasConsensusVersion); ok {
+ toVersion = module.ConsensusVersion()
+}
+
+ // We run migration if the module is specified in `fromVM`.
+ // Otherwise we run InitGenesis.
+ //
+ // The module won't exist in the fromVM in two cases:
+ // 1. A new module is added. In this case we run InitGenesis with an
+ // empty genesis state.
+ // 2. An existing chain is upgrading from version < 0.43 to v0.43+ for the first time.
+ // In this case, all modules have yet to be added to x/upgrade's VersionMap store.
+ if exists {
+ err := c.runModuleMigrations(sdkCtx, moduleName, fromVersion, toVersion)
+ if err != nil {
+ return nil, err
+}
+
+}
+
+else {
+ sdkCtx.Logger().Info(fmt.Sprintf("adding a new module: %s", moduleName))
+ if module, ok := m.Modules[moduleName].(HasGenesis); ok {
+ module.InitGenesis(sdkCtx, c.cdc, module.DefaultGenesis(c.cdc))
+}
+ if module, ok := m.Modules[moduleName].(HasABCIGenesis); ok {
+ moduleValUpdates := module.InitGenesis(sdkCtx, c.cdc, module.DefaultGenesis(c.cdc))
+ // The module manager assumes only one module will update the
+ // validator set, and it can't be a new module.
+ if len(moduleValUpdates) > 0 {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrLogic, "validator InitGenesis update is already set by another module")
+}
+
+}
+
+}
+
+updatedVM[moduleName] = toVersion
+}
+
+return updatedVM, nil
+}
+
+// PreBlock performs begin block functionality for upgrade module.
+// It takes the current context as a parameter and returns a boolean value
+// indicating whether the migration was successfully executed or not.
+func (m *Manager)
+
+PreBlock(ctx sdk.Context) (*sdk.ResponsePreBlock, error) {
+ paramsChanged := false
+ for _, moduleName := range m.OrderPreBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasPreBlocker); ok {
+ rsp, err := module.PreBlock(ctx)
+ if err != nil {
+ return nil, err
+}
+ if rsp.IsConsensusParamsChanged() {
+ paramsChanged = true
+}
+
+}
+
+}
+
+return &sdk.ResponsePreBlock{
+ ConsensusParamsChanged: paramsChanged,
+}, nil
+}
+
+// BeginBlock performs begin block functionality for all modules. It creates a
+// child context with an event manager to aggregate events emitted from all
+// modules.
+func (m *Manager)
+
+BeginBlock(ctx sdk.Context) (sdk.BeginBlock, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+ for _, moduleName := range m.OrderBeginBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasBeginBlocker); ok {
+ if err := module.BeginBlock(ctx); err != nil {
+ return sdk.BeginBlock{
+}, err
+}
+
+}
+
+}
+
+return sdk.BeginBlock{
+ Events: ctx.EventManager().ABCIEvents(),
+}, nil
+}
+
+// EndBlock performs end block functionality for all modules. It creates a
+// child context with an event manager to aggregate events emitted from all
+// modules.
+func (m *Manager)
+
+EndBlock(ctx sdk.Context) (sdk.EndBlock, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+ validatorUpdates := []abci.ValidatorUpdate{
+}
+ for _, moduleName := range m.OrderEndBlockers {
+ if module, ok := m.Modules[moduleName].(appmodule.HasEndBlocker); ok {
+ err := module.EndBlock(ctx)
+ if err != nil {
+ return sdk.EndBlock{
+}, err
+}
+
+}
+
+else if module, ok := m.Modules[moduleName].(HasABCIEndBlock); ok {
+ moduleValUpdates, err := module.EndBlock(ctx)
+ if err != nil {
+ return sdk.EndBlock{
+}, err
+}
+ // use these validator updates if provided, the module manager assumes
+ // only one module will update the validator set
+ if len(moduleValUpdates) > 0 {
+ if len(validatorUpdates) > 0 {
+ return sdk.EndBlock{
+}, errors.New("validator EndBlock updates already set by a previous module")
+}
+ for _, updates := range moduleValUpdates {
+ validatorUpdates = append(validatorUpdates, abci.ValidatorUpdate{
+ PubKey: updates.PubKey,
+ Power: updates.Power
+})
+}
+
+}
+
+}
+
+else {
+ continue
+}
+
+}
+
+return sdk.EndBlock{
+ ValidatorUpdates: validatorUpdates,
+ Events: ctx.EventManager().ABCIEvents(),
+}, nil
+}
+
+// Precommit performs precommit functionality for all modules.
+func (m *Manager)
+
+Precommit(ctx sdk.Context)
+
+error {
+ for _, moduleName := range m.OrderPrecommiters {
+ module, ok := m.Modules[moduleName].(appmodule.HasPrecommit)
+ if !ok {
+ continue
+}
+ if err := module.Precommit(ctx); err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+// PrepareCheckState performs functionality for preparing the check state for all modules.
+func (m *Manager)
+
+PrepareCheckState(ctx sdk.Context)
+
+error {
+ for _, moduleName := range m.OrderPrepareCheckStaters {
+ module, ok := m.Modules[moduleName].(appmodule.HasPrepareCheckState)
+ if !ok {
+ continue
+}
+ if err := module.PrepareCheckState(ctx); err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+// GetVersionMap gets consensus version from all modules
+func (m *Manager)
+
+GetVersionMap()
+
+VersionMap {
+ vermap := make(VersionMap)
+ for name, v := range m.Modules {
+ version := uint64(0)
+ if v, ok := v.(HasConsensusVersion); ok {
+ version = v.ConsensusVersion()
+}
+
+vermap[name] = version
+}
+
+return vermap
+}
+
+// ModuleNames returns list of all module names, without any particular order.
+func (m *Manager)
+
+ModuleNames() []string {
+ return slices.Collect(maps.Keys(m.Modules))
+}
+
// DefaultMigrationsOrder returns a default migrations order: ascending alphabetical by module name,
// except x/auth which will run last, see:
// https://github.com/cosmos/cosmos-sdk/issues/10591
func DefaultMigrationsOrder(modules []string) []string {
	const authName = "auth"
	out := make([]string, 0, len(modules))
	hasAuth := false
	for _, m := range modules {
		if m == authName {
			hasAuth = true
		} else {
			out = append(out, m)
		}
	}
	sort.Strings(out)
	// auth is deliberately appended after sorting so it always migrates last.
	if hasAuth {
		out = append(out, authName)
	}

	return out
}
+```
+
+The module manager is used throughout the application whenever an action on a collection of modules is required. It implements the following methods:
+
+* `NewManager(modules ...AppModule)`: Constructor function. It takes a list of the application's `AppModule`s and builds a new `Manager`. It is generally called from the application's main [constructor function](/sdk/v0.53/learn/beginner/app-anatomy#constructor-function).
+* `SetOrderInitGenesis(moduleNames ...string)`: Sets the order in which the [`InitGenesis`](/sdk/v0.53/build/building-modules/genesis#initgenesis) function of each module will be called when the application is first started. This function is generally called from the application's main [constructor function](/sdk/v0.53/learn/beginner/app-anatomy#constructor-function).
  To initialize modules successfully, module dependencies should be considered. For example, the `genutil` module must occur after the `staking` module so that the pools are properly initialized with tokens from genesis accounts, the `genutil` module must also occur after `auth` so that it can access the params from auth, and IBC's `capability` module should be initialized before all other modules so that it can initialize any capabilities.
+* `SetOrderExportGenesis(moduleNames ...string)`: Sets the order in which the [`ExportGenesis`](/sdk/v0.53/build/building-modules/genesis#exportgenesis) function of each module will be called in case of an export. This function is generally called from the application's main [constructor function](/sdk/v0.53/learn/beginner/app-anatomy#constructor-function).
+* `SetOrderPreBlockers(moduleNames ...string)`: Sets the order in which the `PreBlock()` function of each module will be called before `BeginBlock()` of all modules. This function is generally called from the application's main [constructor function](/sdk/v0.53/learn/beginner/app-anatomy#constructor-function).
+* `SetOrderBeginBlockers(moduleNames ...string)`: Sets the order in which the `BeginBlock()` function of each module will be called at the beginning of each block. This function is generally called from the application's main [constructor function](/sdk/v0.53/learn/beginner/app-anatomy#constructor-function).
+* `SetOrderEndBlockers(moduleNames ...string)`: Sets the order in which the `EndBlock()` function of each module will be called at the end of each block. This function is generally called from the application's main [constructor function](/sdk/v0.53/learn/beginner/app-anatomy#constructor-function).
+* `SetOrderPrecommiters(moduleNames ...string)`: Sets the order in which the `Precommit()` function of each module will be called during commit of each block. This function is generally called from the application's main [constructor function](/sdk/v0.53/learn/beginner/app-anatomy#constructor-function).
+* `SetOrderPrepareCheckStaters(moduleNames ...string)`: Sets the order in which the `PrepareCheckState()` function of each module will be called during commit of each block. This function is generally called from the application's main [constructor function](/sdk/v0.53/learn/beginner/app-anatomy#constructor-function).
+* `SetOrderMigrations(moduleNames ...string)`: Sets the order of migrations to be run. If not set then migrations will be run with an order defined in `DefaultMigrationsOrder`.
+* `RegisterInvariants(ir sdk.InvariantRegistry)`: Registers the [invariants](/sdk/v0.53/build/building-modules/invariants) of module implementing the `HasInvariants` interface.
+* `RegisterServices(cfg Configurator)`: Registers the services of modules implementing the `HasServices` interface.
+* `InitGenesis(ctx context.Context, cdc codec.JSONCodec, genesisData map[string]json.RawMessage)`: Calls the [`InitGenesis`](/sdk/v0.53/build/building-modules/genesis#initgenesis) function of each module when the application is first started, in the order defined in `OrderInitGenesis`. Returns an `abci.ResponseInitChain` to the underlying consensus engine, which can contain validator updates.
+* `ExportGenesis(ctx context.Context, cdc codec.JSONCodec)`: Calls the [`ExportGenesis`](/sdk/v0.53/build/building-modules/genesis#exportgenesis) function of each module, in the order defined in `OrderExportGenesis`. The export constructs a genesis file from a previously existing state, and is mainly used when a hard-fork upgrade of the chain is required.
+* `ExportGenesisForModules(ctx context.Context, cdc codec.JSONCodec, modulesToExport []string)`: Behaves the same as `ExportGenesis`, except takes a list of modules to export.
* `BeginBlock(ctx context.Context) error`: At the beginning of each block, this function is called from [`BaseApp`](/sdk/v0.53/learn/advanced/baseapp#beginblock) and, in turn, calls the [`BeginBlock`](/sdk/v0.53/build/building-modules/beginblock-endblock) function of each module implementing the `appmodule.HasBeginBlocker` interface, in the order defined in `OrderBeginBlockers`. It creates a child [context](/sdk/v0.53/learn/advanced/context) with an event manager to aggregate [events](/sdk/v0.53/learn/advanced/events) emitted from each module.
* `EndBlock(ctx context.Context) error`: At the end of each block, this function is called from [`BaseApp`](/sdk/v0.53/learn/advanced/baseapp#endblock) and, in turn, calls the [`EndBlock`](/sdk/v0.53/build/building-modules/beginblock-endblock) function of each module implementing the `appmodule.HasEndBlocker` interface, in the order defined in `OrderEndBlockers`. It creates a child [context](/sdk/v0.53/learn/advanced/context) with an event manager to aggregate [events](/sdk/v0.53/learn/advanced/events) emitted from all modules. The function returns an `abci` response which contains the aforementioned events, as well as validator set updates (if any).
* `EndBlock(context.Context) ([]abci.ValidatorUpdate, error)`: At the end of each block, this function is called from [`BaseApp`](/sdk/v0.53/learn/advanced/baseapp#endblock) and, in turn, calls the [`EndBlock`](/sdk/v0.53/build/building-modules/beginblock-endblock) function of each module implementing the `module.HasABCIEndBlock` interface, in the order defined in `OrderEndBlockers`. It creates a child [context](/sdk/v0.53/learn/advanced/context) with an event manager to aggregate [events](/sdk/v0.53/learn/advanced/events) emitted from all modules. The function returns an `abci` response which contains the aforementioned events, as well as validator set updates (if any).
+* `Precommit(ctx context.Context)`: During [`Commit`](/sdk/v0.53/learn/advanced/baseapp#commit), this function is called from `BaseApp` immediately before the [`deliverState`](/sdk/v0.53/learn/advanced/baseapp#state-updates) is written to the underlying [`rootMultiStore`](/sdk/v0.53/learn/advanced/store#commitmultistore) and, in turn calls the `Precommit` function of each modules implementing the `HasPrecommit` interface, in the order defined in `OrderPrecommiters`. It creates a child [context](/sdk/v0.53/learn/advanced/context) where the underlying `CacheMultiStore` is that of the newly committed block's [`finalizeblockstate`](/sdk/v0.53/learn/advanced/baseapp#state-updates).
+* `PrepareCheckState(ctx context.Context)`: During [`Commit`](/sdk/v0.53/learn/advanced/baseapp#commit), this function is called from `BaseApp` immediately after the [`deliverState`](/sdk/v0.53/learn/advanced/baseapp#state-updates) is written to the underlying [`rootMultiStore`](/sdk/v0.53/learn/advanced/store#commitmultistore) and, in turn calls the `PrepareCheckState` function of each module implementing the `HasPrepareCheckState` interface, in the order defined in `OrderPrepareCheckStaters`. It creates a child [context](/sdk/v0.53/learn/advanced/context) where the underlying `CacheMultiStore` is that of the next block's [`checkState`](/sdk/v0.53/learn/advanced/baseapp#state-updates). Writes to this state will be present in the [`checkState`](/sdk/v0.53/learn/advanced/baseapp#state-updates) of the next block, and therefore this method can be used to prepare the `checkState` for the next block.
+
Here's an example of a concrete integration within a `simapp`:
+
+```go expandable
+//go:build app_v1
+
+package simapp
+
+import (
+
+ "encoding/json"
+ "fmt"
+ "io"
+ "maps"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ dbm "github.com/cosmos/cosmos-db"
+ "github.com/cosmos/gogoproto/proto"
+ "github.com/spf13/cast"
+
+ autocliv1 "cosmossdk.io/api/cosmos/autocli/v1"
+ reflectionv1 "cosmossdk.io/api/cosmos/reflection/v1"
+ "cosmossdk.io/client/v2/autocli"
+ clienthelpers "cosmossdk.io/client/v2/helpers"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/log"
+ storetypes "cosmossdk.io/store/types"
+ "cosmossdk.io/x/circuit"
+ circuitkeeper "cosmossdk.io/x/circuit/keeper"
+ circuittypes "cosmossdk.io/x/circuit/types"
+ "cosmossdk.io/x/evidence"
+ evidencekeeper "cosmossdk.io/x/evidence/keeper"
+ evidencetypes "cosmossdk.io/x/evidence/types"
+ "cosmossdk.io/x/feegrant"
+ feegrantkeeper "cosmossdk.io/x/feegrant/keeper"
+ feegrantmodule "cosmossdk.io/x/feegrant/module"
+ "cosmossdk.io/x/nft"
+ nftkeeper "cosmossdk.io/x/nft/keeper"
+ nftmodule "cosmossdk.io/x/nft/module"
+ "cosmossdk.io/x/tx/signing"
+ "cosmossdk.io/x/upgrade"
+ upgradekeeper "cosmossdk.io/x/upgrade/keeper"
+ upgradetypes "cosmossdk.io/x/upgrade/types"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/client/grpc/cmtservice"
+ nodeservice "github.com/cosmos/cosmos-sdk/client/grpc/node"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/codec/address"
+ "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ runtimeservices "github.com/cosmos/cosmos-sdk/runtime/services"
+ "github.com/cosmos/cosmos-sdk/server"
+ "github.com/cosmos/cosmos-sdk/server/api"
+ "github.com/cosmos/cosmos-sdk/server/config"
+ servertypes "github.com/cosmos/cosmos-sdk/server/types"
+ "github.com/cosmos/cosmos-sdk/std"
+ testdata_pulsar "github.com/cosmos/cosmos-sdk/testutil/testdata/testpb"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ sigtypes "github.com/cosmos/cosmos-sdk/types/tx/signing"
+ "github.com/cosmos/cosmos-sdk/version"
+ "github.com/cosmos/cosmos-sdk/x/auth"
+ "github.com/cosmos/cosmos-sdk/x/auth/ante"
+ authcodec "github.com/cosmos/cosmos-sdk/x/auth/codec"
+ authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
+ "github.com/cosmos/cosmos-sdk/x/auth/posthandler"
+ authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation"
+ "github.com/cosmos/cosmos-sdk/x/auth/tx"
+ authtx "github.com/cosmos/cosmos-sdk/x/auth/tx"
+ txmodule "github.com/cosmos/cosmos-sdk/x/auth/tx/config"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ "github.com/cosmos/cosmos-sdk/x/auth/vesting"
+ vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
+ "github.com/cosmos/cosmos-sdk/x/authz"
+ authzkeeper "github.com/cosmos/cosmos-sdk/x/authz/keeper"
+ authzmodule "github.com/cosmos/cosmos-sdk/x/authz/module"
+ "github.com/cosmos/cosmos-sdk/x/bank"
+ bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ consensus "github.com/cosmos/cosmos-sdk/x/consensus"
+ consensusparamkeeper "github.com/cosmos/cosmos-sdk/x/consensus/keeper"
+ consensusparamtypes "github.com/cosmos/cosmos-sdk/x/consensus/types"
+ distr "github.com/cosmos/cosmos-sdk/x/distribution"
+ distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper"
+ distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
+ "github.com/cosmos/cosmos-sdk/x/epochs"
+ epochskeeper "github.com/cosmos/cosmos-sdk/x/epochs/keeper"
+ epochstypes "github.com/cosmos/cosmos-sdk/x/epochs/types"
+ "github.com/cosmos/cosmos-sdk/x/genutil"
+ genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
+ "github.com/cosmos/cosmos-sdk/x/gov"
+ govclient "github.com/cosmos/cosmos-sdk/x/gov/client"
+ govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ govv1beta1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1"
+ "github.com/cosmos/cosmos-sdk/x/group"
+ groupkeeper "github.com/cosmos/cosmos-sdk/x/group/keeper"
+ groupmodule "github.com/cosmos/cosmos-sdk/x/group/module"
+ "github.com/cosmos/cosmos-sdk/x/mint"
+ mintkeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper"
+ minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
+ "github.com/cosmos/cosmos-sdk/x/protocolpool"
+ protocolpoolkeeper "github.com/cosmos/cosmos-sdk/x/protocolpool/keeper"
+ protocolpooltypes "github.com/cosmos/cosmos-sdk/x/protocolpool/types"
+ "github.com/cosmos/cosmos-sdk/x/slashing"
+ slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper"
+ slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types"
+ "github.com/cosmos/cosmos-sdk/x/staking"
+ stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper"
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+)
+
+const appName = "SimApp"
+
+var (
+ // DefaultNodeHome default home directories for the application daemon
+ DefaultNodeHome string
+
+ // module account permissions
+ maccPerms = map[string][]string{
+ authtypes.FeeCollectorName: nil,
+ distrtypes.ModuleName: nil,
+ minttypes.ModuleName: {
+ authtypes.Minter
+},
+ stakingtypes.BondedPoolName: {
+ authtypes.Burner, authtypes.Staking
+},
+ stakingtypes.NotBondedPoolName: {
+ authtypes.Burner, authtypes.Staking
+},
+ govtypes.ModuleName: {
+ authtypes.Burner
+},
+ nft.ModuleName: nil,
+ protocolpooltypes.ModuleName: nil,
+ protocolpooltypes.ProtocolPoolEscrowAccount: nil
+}
+)
+
+var (
+ _ runtime.AppI = (*SimApp)(nil)
+ _ servertypes.Application = (*SimApp)(nil)
+)
+
+// SimApp extends an ABCI application, but with most of its parameters exported.
+// They are exported for convenience in creating helper functions, as object
+// capabilities aren't needed for testing.
+type SimApp struct {
+ *baseapp.BaseApp
+ legacyAmino *codec.LegacyAmino
+ appCodec codec.Codec
+ txConfig client.TxConfig
+ interfaceRegistry types.InterfaceRegistry
+
+ // keys to access the substores
+ keys map[string]*storetypes.KVStoreKey
+
+ // essential keepers
+ AccountKeeper authkeeper.AccountKeeper
+ BankKeeper bankkeeper.BaseKeeper
+ StakingKeeper *stakingkeeper.Keeper
+ SlashingKeeper slashingkeeper.Keeper
+ MintKeeper mintkeeper.Keeper
+ DistrKeeper distrkeeper.Keeper
+ GovKeeper govkeeper.Keeper
+ UpgradeKeeper *upgradekeeper.Keeper
+ EvidenceKeeper evidencekeeper.Keeper
+ ConsensusParamsKeeper consensusparamkeeper.Keeper
+ CircuitKeeper circuitkeeper.Keeper
+
+ // supplementary keepers
+ FeeGrantKeeper feegrantkeeper.Keeper
+ GroupKeeper groupkeeper.Keeper
+ AuthzKeeper authzkeeper.Keeper
+ NFTKeeper nftkeeper.Keeper
+ EpochsKeeper epochskeeper.Keeper
+ ProtocolPoolKeeper protocolpoolkeeper.Keeper
+
+ // the module manager
+ ModuleManager *module.Manager
+ BasicModuleManager module.BasicManager
+
+ // simulation manager
+ sm *module.SimulationManager
+
+ // module configurator
+ configurator module.Configurator
+}
+
+func init() {
+ var err error
+ DefaultNodeHome, err = clienthelpers.GetNodeHomeDirectory(".simapp")
+ if err != nil {
+ panic(err)
+}
+}
+
+// NewSimApp returns a reference to an initialized SimApp.
+func NewSimApp(
+ logger log.Logger,
+ db dbm.DB,
+ traceStore io.Writer,
+ loadLatest bool,
+ appOpts servertypes.AppOptions,
+ baseAppOptions ...func(*baseapp.BaseApp),
+) *SimApp {
+ interfaceRegistry, _ := types.NewInterfaceRegistryWithOptions(types.InterfaceRegistryOptions{
+ ProtoFiles: proto.HybridResolver,
+ SigningOptions: signing.Options{
+ AddressCodec: address.Bech32Codec{
+ Bech32Prefix: sdk.GetConfig().GetBech32AccountAddrPrefix(),
+},
+ ValidatorAddressCodec: address.Bech32Codec{
+ Bech32Prefix: sdk.GetConfig().GetBech32ValidatorAddrPrefix(),
+},
+},
+})
+ appCodec := codec.NewProtoCodec(interfaceRegistry)
+ legacyAmino := codec.NewLegacyAmino()
+ txConfig := tx.NewTxConfig(appCodec, tx.DefaultSignModes)
+ if err := interfaceRegistry.SigningContext().Validate(); err != nil {
+ panic(err)
+}
+
+std.RegisterLegacyAminoCodec(legacyAmino)
+
+std.RegisterInterfaces(interfaceRegistry)
+
+ // Below we could construct and set an application specific mempool and
+ // ABCI 1.0 PrepareProposal and ProcessProposal handlers. These defaults are
+ // already set in the SDK's BaseApp, this shows an example of how to override
+ // them.
+ //
+ // Example:
+ //
+ // bApp := baseapp.NewBaseApp(...)
+ // nonceMempool := mempool.NewSenderNonceMempool()
+ // abciPropHandler := NewDefaultProposalHandler(nonceMempool, bApp)
+ //
+ // bApp.SetMempool(nonceMempool)
+ // bApp.SetPrepareProposal(abciPropHandler.PrepareProposalHandler())
+ // bApp.SetProcessProposal(abciPropHandler.ProcessProposalHandler())
+ //
+ // Alternatively, you can construct BaseApp options, append those to
+ // baseAppOptions and pass them to NewBaseApp.
+ //
+ // Example:
+ //
+ // prepareOpt = func(app *baseapp.BaseApp) {
+ // abciPropHandler := baseapp.NewDefaultProposalHandler(nonceMempool, app)
+ // app.SetPrepareProposal(abciPropHandler.PrepareProposalHandler())
+ //
+}
+ // baseAppOptions = append(baseAppOptions, prepareOpt)
+
+ // create and set dummy vote extension handler
+ voteExtOp := func(bApp *baseapp.BaseApp) {
+ voteExtHandler := NewVoteExtensionHandler()
+
+voteExtHandler.SetHandlers(bApp)
+}
+
+baseAppOptions = append(baseAppOptions, voteExtOp, baseapp.SetOptimisticExecution())
+ bApp := baseapp.NewBaseApp(appName, logger, db, txConfig.TxDecoder(), baseAppOptions...)
+
+bApp.SetCommitMultiStoreTracer(traceStore)
+
+bApp.SetVersion(version.Version)
+
+bApp.SetInterfaceRegistry(interfaceRegistry)
+
+bApp.SetTxEncoder(txConfig.TxEncoder())
+ keys := storetypes.NewKVStoreKeys(
+ authtypes.StoreKey,
+ banktypes.StoreKey,
+ stakingtypes.StoreKey,
+ minttypes.StoreKey,
+ distrtypes.StoreKey,
+ slashingtypes.StoreKey,
+ govtypes.StoreKey,
+ consensusparamtypes.StoreKey,
+ upgradetypes.StoreKey,
+ feegrant.StoreKey,
+ evidencetypes.StoreKey,
+ circuittypes.StoreKey,
+ authzkeeper.StoreKey,
+ nftkeeper.StoreKey,
+ group.StoreKey,
+ epochstypes.StoreKey,
+ protocolpooltypes.StoreKey,
+ )
+
+ // register streaming services
+ if err := bApp.RegisterStreamingServices(appOpts, keys); err != nil {
+ panic(err)
+}
+ app := &SimApp{
+ BaseApp: bApp,
+ legacyAmino: legacyAmino,
+ appCodec: appCodec,
+ txConfig: txConfig,
+ interfaceRegistry: interfaceRegistry,
+ keys: keys,
+}
+
+ // set the BaseApp's parameter store
+ app.ConsensusParamsKeeper = consensusparamkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[consensusparamtypes.StoreKey]),
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ runtime.EventService{
+},
+ )
+
+bApp.SetParamStore(app.ConsensusParamsKeeper.ParamsStore)
+
+ // add keepers
+ app.AccountKeeper = authkeeper.NewAccountKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[authtypes.StoreKey]),
+ authtypes.ProtoBaseAccount,
+ maccPerms,
+ authcodec.NewBech32Codec(sdk.Bech32MainPrefix),
+ sdk.Bech32MainPrefix,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ authkeeper.WithUnorderedTransactions(true),
+ )
+
+app.BankKeeper = bankkeeper.NewBaseKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[banktypes.StoreKey]),
+ app.AccountKeeper,
+ BlockedAddresses(),
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ logger,
+ )
+
+ // optional: enable sign mode textual by overwriting the default tx config (after setting the bank keeper)
+ enabledSignModes := append(tx.DefaultSignModes, sigtypes.SignMode_SIGN_MODE_TEXTUAL)
+ txConfigOpts := tx.ConfigOptions{
+ EnabledSignModes: enabledSignModes,
+ TextualCoinMetadataQueryFn: txmodule.NewBankKeeperCoinMetadataQueryFn(app.BankKeeper),
+}
+
+txConfig, err := tx.NewTxConfigWithOptions(
+ appCodec,
+ txConfigOpts,
+ )
+ if err != nil {
+ panic(err)
+}
+
+app.txConfig = txConfig
+
+ app.StakingKeeper = stakingkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[stakingtypes.StoreKey]),
+ app.AccountKeeper,
+ app.BankKeeper,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ authcodec.NewBech32Codec(sdk.Bech32PrefixValAddr),
+ authcodec.NewBech32Codec(sdk.Bech32PrefixConsAddr),
+ )
+
+app.MintKeeper = mintkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[minttypes.StoreKey]),
+ app.StakingKeeper,
+ app.AccountKeeper,
+ app.BankKeeper,
+ authtypes.FeeCollectorName,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ // mintkeeper.WithMintFn(mintkeeper.DefaultMintFn(minttypes.DefaultInflationCalculationFn)), custom mintFn can be added here
+ )
+
+app.ProtocolPoolKeeper = protocolpoolkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[protocolpooltypes.StoreKey]),
+ app.AccountKeeper,
+ app.BankKeeper,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ )
+
+app.DistrKeeper = distrkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[distrtypes.StoreKey]),
+ app.AccountKeeper,
+ app.BankKeeper,
+ app.StakingKeeper,
+ authtypes.FeeCollectorName,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ distrkeeper.WithExternalCommunityPool(app.ProtocolPoolKeeper),
+ )
+
+app.SlashingKeeper = slashingkeeper.NewKeeper(
+ appCodec,
+ legacyAmino,
+ runtime.NewKVStoreService(keys[slashingtypes.StoreKey]),
+ app.StakingKeeper,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ )
+
+app.FeeGrantKeeper = feegrantkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[feegrant.StoreKey]),
+ app.AccountKeeper,
+ )
+
+ // register the staking hooks
+ // NOTE: stakingKeeper above is passed by reference, so that it will contain these hooks
+ app.StakingKeeper.SetHooks(
+ stakingtypes.NewMultiStakingHooks(
+ app.DistrKeeper.Hooks(),
+ app.SlashingKeeper.Hooks(),
+ ),
+ )
+
+app.CircuitKeeper = circuitkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[circuittypes.StoreKey]),
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ app.AccountKeeper.AddressCodec(),
+ )
+
+app.BaseApp.SetCircuitBreaker(&app.CircuitKeeper)
+
+app.AuthzKeeper = authzkeeper.NewKeeper(
+ runtime.NewKVStoreService(keys[authzkeeper.StoreKey]),
+ appCodec,
+ app.MsgServiceRouter(),
+ app.AccountKeeper,
+ )
+ groupConfig := group.DefaultConfig()
+ /*
+ Example of setting group params:
+ groupConfig.MaxMetadataLen = 1000
+ */
+ app.GroupKeeper = groupkeeper.NewKeeper(
+ keys[group.StoreKey],
+ appCodec,
+ app.MsgServiceRouter(),
+ app.AccountKeeper,
+ groupConfig,
+ )
+
+ // get skipUpgradeHeights from the app options
+ skipUpgradeHeights := map[int64]bool{
+}
+ for _, h := range cast.ToIntSlice(appOpts.Get(server.FlagUnsafeSkipUpgrades)) {
+ skipUpgradeHeights[int64(h)] = true
+}
+ homePath := cast.ToString(appOpts.Get(flags.FlagHome))
+ // set the governance module account as the authority for conducting upgrades
+ app.UpgradeKeeper = upgradekeeper.NewKeeper(
+ skipUpgradeHeights,
+ runtime.NewKVStoreService(keys[upgradetypes.StoreKey]),
+ appCodec,
+ homePath,
+ app.BaseApp,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ )
+
+ // Register the proposal types
+ // Deprecated: Avoid adding new handlers, instead use the new proposal flow
+ // by granting the governance module the right to execute the message.
+ // See: /sdk/v0.53/build/modules/gov#proposal-messages
+ govRouter := govv1beta1.NewRouter()
+
+govRouter.AddRoute(govtypes.RouterKey, govv1beta1.ProposalHandler)
+ govConfig := govtypes.DefaultConfig()
+ /*
+ Example of setting gov params:
+ govConfig.MaxMetadataLen = 10000
+ */
+ govKeeper := govkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[govtypes.StoreKey]),
+ app.AccountKeeper,
+ app.BankKeeper,
+ app.StakingKeeper,
+ app.DistrKeeper,
+ app.MsgServiceRouter(),
+ govConfig,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ // govkeeper.WithCustomCalculateVoteResultsAndVotingPowerFn(...), // Add if you want to use a custom vote calculation function.
+ )
+
+ // Set legacy router for backwards compatibility with gov v1beta1
+ govKeeper.SetLegacyRouter(govRouter)
+
+app.GovKeeper = *govKeeper.SetHooks(
+ govtypes.NewMultiGovHooks(
+ // register the governance hooks
+ ),
+ )
+
+app.NFTKeeper = nftkeeper.NewKeeper(
+ runtime.NewKVStoreService(keys[nftkeeper.StoreKey]),
+ appCodec,
+ app.AccountKeeper,
+ app.BankKeeper,
+ )
+
+ // create evidence keeper with router
+ evidenceKeeper := evidencekeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[evidencetypes.StoreKey]),
+ app.StakingKeeper,
+ app.SlashingKeeper,
+ app.AccountKeeper.AddressCodec(),
+ runtime.ProvideCometInfoService(),
+ )
+ // If evidence needs to be handled for the app, set routes in router here and seal
+ app.EvidenceKeeper = *evidenceKeeper
+
+ app.EpochsKeeper = epochskeeper.NewKeeper(
+ runtime.NewKVStoreService(keys[epochstypes.StoreKey]),
+ appCodec,
+ )
+
+app.EpochsKeeper.SetHooks(
+ epochstypes.NewMultiEpochHooks(
+ // insert epoch hooks receivers here
+ ),
+ )
+
+ /**** Module Options ****/
+
+ // NOTE: Any module instantiated in the module manager that is later modified
+ // must be passed by reference here.
+ app.ModuleManager = module.NewManager(
+ genutil.NewAppModule(
+ app.AccountKeeper, app.StakingKeeper, app,
+ txConfig,
+ ),
+ auth.NewAppModule(appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, nil),
+ vesting.NewAppModule(app.AccountKeeper, app.BankKeeper),
+ bank.NewAppModule(appCodec, app.BankKeeper, app.AccountKeeper, nil),
+ feegrantmodule.NewAppModule(appCodec, app.AccountKeeper, app.BankKeeper, app.FeeGrantKeeper, app.interfaceRegistry),
+ gov.NewAppModule(appCodec, &app.GovKeeper, app.AccountKeeper, app.BankKeeper, nil),
+ mint.NewAppModule(appCodec, app.MintKeeper, app.AccountKeeper, nil, nil),
+ slashing.NewAppModule(appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, nil, app.interfaceRegistry),
+ distr.NewAppModule(appCodec, app.DistrKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, nil),
+ staking.NewAppModule(appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper, nil),
+ upgrade.NewAppModule(app.UpgradeKeeper, app.AccountKeeper.AddressCodec()),
+ evidence.NewAppModule(app.EvidenceKeeper),
+ authzmodule.NewAppModule(appCodec, app.AuthzKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry),
+ groupmodule.NewAppModule(appCodec, app.GroupKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry),
+ nftmodule.NewAppModule(appCodec, app.NFTKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry),
+ consensus.NewAppModule(appCodec, app.ConsensusParamsKeeper),
+ circuit.NewAppModule(appCodec, app.CircuitKeeper),
+ epochs.NewAppModule(app.EpochsKeeper),
+ protocolpool.NewAppModule(app.ProtocolPoolKeeper, app.AccountKeeper, app.BankKeeper),
+ )
+
+ // BasicModuleManager defines the module BasicManager is in charge of setting up basic,
+ // non-dependant module elements, such as codec registration and genesis verification.
+ // By default it is composed of all the module from the module manager.
+ // Additionally, app module basics can be overwritten by passing them as argument.
+ app.BasicModuleManager = module.NewBasicManagerFromManager(
+ app.ModuleManager,
+ map[string]module.AppModuleBasic{
+ genutiltypes.ModuleName: genutil.NewAppModuleBasic(genutiltypes.DefaultMessageValidator),
+ govtypes.ModuleName: gov.NewAppModuleBasic(
+ []govclient.ProposalHandler{
+},
+ ),
+})
+
+app.BasicModuleManager.RegisterLegacyAminoCodec(legacyAmino)
+
+app.BasicModuleManager.RegisterInterfaces(interfaceRegistry)
+
+ // NOTE: upgrade module is required to be prioritized
+ app.ModuleManager.SetOrderPreBlockers(
+ upgradetypes.ModuleName,
+ authtypes.ModuleName,
+ )
+ // During begin block slashing happens after distr.BeginBlocker so that
+ // there is nothing left over in the validator fee pool, so as to keep the
+ // CanWithdrawInvariant invariant.
+ // NOTE: staking module is required if HistoricalEntries param > 0
+ app.ModuleManager.SetOrderBeginBlockers(
+ minttypes.ModuleName,
+ distrtypes.ModuleName,
+ protocolpooltypes.ModuleName,
+ slashingtypes.ModuleName,
+ evidencetypes.ModuleName,
+ stakingtypes.ModuleName,
+ genutiltypes.ModuleName,
+ authz.ModuleName,
+ epochstypes.ModuleName,
+ )
+
+app.ModuleManager.SetOrderEndBlockers(
+ govtypes.ModuleName,
+ stakingtypes.ModuleName,
+ genutiltypes.ModuleName,
+ feegrant.ModuleName,
+ group.ModuleName,
+ protocolpooltypes.ModuleName,
+ )
+
+ // NOTE: The genutils module must occur after staking so that pools are
+ // properly initialized with tokens from genesis accounts.
+ // NOTE: The genutils module must also occur after auth so that it can access the params from auth.
+ genesisModuleOrder := []string{
+ authtypes.ModuleName,
+ banktypes.ModuleName,
+ distrtypes.ModuleName,
+ stakingtypes.ModuleName,
+ slashingtypes.ModuleName,
+ govtypes.ModuleName,
+ minttypes.ModuleName,
+ genutiltypes.ModuleName,
+ evidencetypes.ModuleName,
+ authz.ModuleName,
+ feegrant.ModuleName,
+ nft.ModuleName,
+ group.ModuleName,
+ upgradetypes.ModuleName,
+ vestingtypes.ModuleName,
+ consensusparamtypes.ModuleName,
+ circuittypes.ModuleName,
+ epochstypes.ModuleName,
+ protocolpooltypes.ModuleName,
+}
+ exportModuleOrder := []string{
+ consensusparamtypes.ModuleName,
+ authtypes.ModuleName,
+ protocolpooltypes.ModuleName, // Must be exported before bank
+ banktypes.ModuleName,
+ distrtypes.ModuleName,
+ stakingtypes.ModuleName,
+ slashingtypes.ModuleName,
+ govtypes.ModuleName,
+ minttypes.ModuleName,
+ genutiltypes.ModuleName,
+ evidencetypes.ModuleName,
+ authz.ModuleName,
+ feegrant.ModuleName,
+ nft.ModuleName,
+ group.ModuleName,
+ upgradetypes.ModuleName,
+ vestingtypes.ModuleName,
+ circuittypes.ModuleName,
+ epochstypes.ModuleName,
+}
+
+app.ModuleManager.SetOrderInitGenesis(genesisModuleOrder...)
+
+app.ModuleManager.SetOrderExportGenesis(exportModuleOrder...)
+
+ // Uncomment if you want to set a custom migration order here.
+ // app.ModuleManager.SetOrderMigrations(custom order)
+
+app.configurator = module.NewConfigurator(app.appCodec, app.MsgServiceRouter(), app.GRPCQueryRouter())
+
+err = app.ModuleManager.RegisterServices(app.configurator)
+ if err != nil {
+ panic(err)
+}
+
+ // RegisterUpgradeHandlers is used for registering any on-chain upgrades.
+ // Make sure it's called after `app.ModuleManager` and `app.configurator` are set.
+ app.RegisterUpgradeHandlers()
+
+autocliv1.RegisterQueryServer(app.GRPCQueryRouter(), runtimeservices.NewAutoCLIQueryService(app.ModuleManager.Modules))
+
+reflectionSvc, err := runtimeservices.NewReflectionService()
+ if err != nil {
+ panic(err)
+}
+
+reflectionv1.RegisterReflectionServiceServer(app.GRPCQueryRouter(), reflectionSvc)
+
+ // add test gRPC service for testing gRPC queries in isolation
+ testdata_pulsar.RegisterQueryServer(app.GRPCQueryRouter(), testdata_pulsar.QueryImpl{
+})
+
+ // create the simulation manager and define the order of the modules for deterministic simulations
+ //
+ // NOTE: this is not required apps that don't use the simulator for fuzz testing
+ // transactions
+ overrideModules := map[string]module.AppModuleSimulation{
+ authtypes.ModuleName: auth.NewAppModule(app.appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, nil),
+}
+
+app.sm = module.NewSimulationManagerFromAppModules(app.ModuleManager.Modules, overrideModules)
+
+app.sm.RegisterStoreDecoders()
+
+ // initialize stores
+ app.MountKVStores(keys)
+
+ // initialize BaseApp
+ app.SetInitChainer(app.InitChainer)
+
+app.SetPreBlocker(app.PreBlocker)
+
+app.SetBeginBlocker(app.BeginBlocker)
+
+app.SetEndBlocker(app.EndBlocker)
+
+app.setAnteHandler(txConfig)
+
+ // In v0.46, the SDK introduces _postHandlers_. PostHandlers are like
+ // antehandlers, but are run _after_ the `runMsgs` execution. They are also
+ // defined as a chain, and have the same signature as antehandlers.
+ //
+ // In baseapp, postHandlers are run in the same store branch as `runMsgs`,
+ // meaning that both `runMsgs` and `postHandler` state will be committed if
+ // both are successful, and both will be reverted if any of the two fails.
+ //
+ // The SDK exposes a default postHandlers chain
+ //
+ // Please note that changing any of the anteHandler or postHandler chain is
+ // likely to be a state-machine breaking change, which needs a coordinated
+ // upgrade.
+ app.setPostHandler()
+ if loadLatest {
+ if err := app.LoadLatestVersion(); err != nil {
+ panic(fmt.Errorf("error loading last version: %w", err))
+}
+
+}
+
+return app
+}
+
+// setAnteHandler wires the app's AnteHandler chain from the standard
+// ante options plus the circuit breaker keeper, panicking on
+// misconfiguration since the app cannot run without it.
+func (app *SimApp) setAnteHandler(txConfig client.TxConfig) {
+	anteHandler, err := NewAnteHandler(
+		HandlerOptions{
+			ante.HandlerOptions{
+				AccountKeeper:   app.AccountKeeper,
+				BankKeeper:      app.BankKeeper,
+				SignModeHandler: txConfig.SignModeHandler(),
+				FeegrantKeeper:  app.FeeGrantKeeper,
+				SigGasConsumer:  ante.DefaultSigVerificationGasConsumer,
+				SigVerifyOptions: []ante.SigVerificationDecoratorOption{
+					// change below as needed.
+					ante.WithUnorderedTxGasCost(ante.DefaultUnorderedTxGasCost),
+					ante.WithMaxUnorderedTxTimeoutDuration(ante.DefaultMaxTimeoutDuration),
+				},
+			},
+			&app.CircuitKeeper,
+		},
+	)
+	if err != nil {
+		panic(err)
+	}
+
+	// Set the AnteHandler for the app
+	app.SetAnteHandler(anteHandler)
+}
+
+// setPostHandler installs the default (empty-options) post-handler chain,
+// which runs after message execution in the same store branch.
+func (app *SimApp) setPostHandler() {
+	postHandler, err := posthandler.NewPostHandler(
+		posthandler.HandlerOptions{},
+	)
+	if err != nil {
+		panic(err)
+	}
+
+	app.SetPostHandler(postHandler)
+}
+
+// Name returns the name of the App
+func (app *SimApp) Name() string {
+	return app.BaseApp.Name()
+}
+
+// PreBlocker application updates every pre block
+func (app *SimApp) PreBlocker(ctx sdk.Context, _ *abci.RequestFinalizeBlock) (*sdk.ResponsePreBlock, error) {
+	return app.ModuleManager.PreBlock(ctx)
+}
+
+// BeginBlocker application updates every begin block
+func (app *SimApp) BeginBlocker(ctx sdk.Context) (sdk.BeginBlock, error) {
+	return app.ModuleManager.BeginBlock(ctx)
+}
+
+// EndBlocker application updates every end block
+func (app *SimApp) EndBlocker(ctx sdk.Context) (sdk.EndBlock, error) {
+	return app.ModuleManager.EndBlock(ctx)
+}
+
+// Configurator returns the app's module configurator, used for registering
+// module services and migrations.
+func (a *SimApp) Configurator() module.Configurator {
+	return a.configurator
+}
+
+// InitChainer application update at chain initialization
+func (app *SimApp) InitChainer(ctx sdk.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) {
+	var genesisState GenesisState
+	if err := json.Unmarshal(req.AppStateBytes, &genesisState); err != nil {
+		panic(err)
+	}
+
+	// Record the current module consensus versions so future upgrades know
+	// the starting point for migrations.
+	app.UpgradeKeeper.SetModuleVersionMap(ctx, app.ModuleManager.GetVersionMap())
+
+	return app.ModuleManager.InitGenesis(ctx, app.appCodec, genesisState)
+}
+
+// LoadHeight loads a particular height
+func (app *SimApp) LoadHeight(height int64) error {
+	return app.LoadVersion(height)
+}
+
+// LegacyAmino returns SimApp's amino codec.
+//
+// NOTE: This is solely to be used for testing purposes as it may be desirable
+// for modules to register their own custom testing types.
+func (app *SimApp) LegacyAmino() *codec.LegacyAmino {
+	return app.legacyAmino
+}
+
+// AppCodec returns SimApp's app codec.
+//
+// NOTE: This is solely to be used for testing purposes as it may be desirable
+// for modules to register their own custom testing types.
+func (app *SimApp) AppCodec() codec.Codec {
+	return app.appCodec
+}
+
+// InterfaceRegistry returns SimApp's InterfaceRegistry
+func (app *SimApp) InterfaceRegistry() types.InterfaceRegistry {
+	return app.interfaceRegistry
+}
+
+// TxConfig returns SimApp's TxConfig
+func (app *SimApp) TxConfig() client.TxConfig {
+	return app.txConfig
+}
+
+// AutoCliOpts returns the autocli options for the app.
+func (app *SimApp) AutoCliOpts() autocli.AppOptions {
+	// Collect every module that exposes both a name and the core AppModule
+	// interface; only those can be driven by autocli.
+	modules := make(map[string]appmodule.AppModule, 0)
+	for _, m := range app.ModuleManager.Modules {
+		if moduleWithName, ok := m.(module.HasName); ok {
+			moduleName := moduleWithName.Name()
+			if appModule, ok := moduleWithName.(appmodule.AppModule); ok {
+				modules[moduleName] = appModule
+			}
+		}
+	}
+
+	return autocli.AppOptions{
+		Modules:               modules,
+		ModuleOptions:         runtimeservices.ExtractAutoCLIOptions(app.ModuleManager.Modules),
+		AddressCodec:          authcodec.NewBech32Codec(sdk.GetConfig().GetBech32AccountAddrPrefix()),
+		ValidatorAddressCodec: authcodec.NewBech32Codec(sdk.GetConfig().GetBech32ValidatorAddrPrefix()),
+		ConsensusAddressCodec: authcodec.NewBech32Codec(sdk.GetConfig().GetBech32ConsensusAddrPrefix()),
+	}
+}
+
+// DefaultGenesis returns a default genesis from the registered AppModuleBasic's.
+func (a *SimApp) DefaultGenesis() map[string]json.RawMessage {
+	return a.BasicModuleManager.DefaultGenesis(a.appCodec)
+}
+
+// GetKey returns the KVStoreKey for the provided store key.
+//
+// NOTE: This is solely to be used for testing purposes.
+func (app *SimApp) GetKey(storeKey string) *storetypes.KVStoreKey {
+	return app.keys[storeKey]
+}
+
+// GetStoreKeys returns all the stored store keys.
+func (app *SimApp) GetStoreKeys() []storetypes.StoreKey {
+	keys := make([]storetypes.StoreKey, 0, len(app.keys))
+	for _, key := range app.keys {
+		keys = append(keys, key)
+	}
+
+	return keys
+}
+
+// SimulationManager implements the SimulationApp interface
+func (app *SimApp) SimulationManager() *module.SimulationManager {
+	return app.sm
+}
+
+// RegisterAPIRoutes registers all application module routes with the provided
+// API server.
+func (app *SimApp) RegisterAPIRoutes(apiSvr *api.Server, apiConfig config.APIConfig) {
+	clientCtx := apiSvr.ClientCtx
+	// Register new tx routes from grpc-gateway.
+	authtx.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter)
+
+	// Register new CometBFT queries routes from grpc-gateway.
+	cmtservice.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter)
+
+	// Register node gRPC service for grpc-gateway.
+	nodeservice.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter)
+
+	// Register grpc-gateway routes for all modules.
+	app.BasicModuleManager.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter)
+
+	// register swagger API from root so that other applications can override easily
+	if err := server.RegisterSwaggerAPI(apiSvr.ClientCtx, apiSvr.Router, apiConfig.Swagger); err != nil {
+		panic(err)
+	}
+}
+
+// RegisterTxService implements the Application.RegisterTxService method.
+func (app *SimApp) RegisterTxService(clientCtx client.Context) {
+	authtx.RegisterTxService(app.BaseApp.GRPCQueryRouter(), clientCtx, app.BaseApp.Simulate, app.interfaceRegistry)
+}
+
+// RegisterTendermintService implements the Application.RegisterTendermintService method.
+func (app *SimApp) RegisterTendermintService(clientCtx client.Context) {
+	cmtApp := server.NewCometABCIWrapper(app)
+
+	cmtservice.RegisterTendermintService(
+		clientCtx,
+		app.BaseApp.GRPCQueryRouter(),
+		app.interfaceRegistry,
+		cmtApp.Query,
+	)
+}
+
+// RegisterNodeService registers the node gRPC query service.
+func (app *SimApp) RegisterNodeService(clientCtx client.Context, cfg config.Config) {
+	nodeservice.RegisterNodeService(clientCtx, app.GRPCQueryRouter(), cfg)
+}
+
+// GetMaccPerms returns a copy of the module account permissions
+//
+// NOTE: This is solely to be used for testing purposes.
+func GetMaccPerms() map[string][]string {
+	return maps.Clone(maccPerms)
+}
+
+// BlockedAddresses returns all the app's blocked account addresses.
+func BlockedAddresses() map[string]bool {
+	modAccAddrs := make(map[string]bool)
+	for acc := range GetMaccPerms() {
+		modAccAddrs[authtypes.NewModuleAddress(acc).String()] = true
+	}
+
+	// allow the following addresses to receive funds
+	delete(modAccAddrs, authtypes.NewModuleAddress(govtypes.ModuleName).String())
+
+	return modAccAddrs
+}
+```
+
+This is the same example from `runtime` (the package that powers app DI, dependency injection):
+
+```go expandable
+package runtime
+
+import (
+	"fmt"
+	"os"
+	"slices"
+
+	"github.com/cosmos/gogoproto/proto"
+	"google.golang.org/protobuf/reflect/protodesc"
+	"google.golang.org/protobuf/reflect/protoregistry"
+
+	runtimev1alpha1 "cosmossdk.io/api/cosmos/app/runtime/v1alpha1"
+	appv1alpha1 "cosmossdk.io/api/cosmos/app/v1alpha1"
+	authmodulev1 "cosmossdk.io/api/cosmos/auth/module/v1"
+	stakingmodulev1 "cosmossdk.io/api/cosmos/staking/module/v1"
+	"cosmossdk.io/core/address"
+	"cosmossdk.io/core/appmodule"
+	"cosmossdk.io/core/comet"
+	"cosmossdk.io/core/event"
+	"cosmossdk.io/core/genesis"
+	"cosmossdk.io/core/header"
+	"cosmossdk.io/core/store"
+	"cosmossdk.io/depinject"
+	"cosmossdk.io/log"
+	storetypes "cosmossdk.io/store/types"
+	"cosmossdk.io/x/tx/signing"
+
+	"github.com/cosmos/cosmos-sdk/baseapp"
+	"github.com/cosmos/cosmos-sdk/codec"
+	addresscodec "github.com/cosmos/cosmos-sdk/codec/address"
+	codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+	"github.com/cosmos/cosmos-sdk/std"
+	"github.com/cosmos/cosmos-sdk/types/module"
+	"github.com/cosmos/cosmos-sdk/types/msgservice"
+)
+
+// appModule wraps the runtime App so it can participate in the module system.
+type appModule struct {
+	app *App
+}
+
+func (m appModule) RegisterServices(configurator module.Configurator) {
+	err := m.app.registerRuntimeServices(configurator)
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (m appModule) IsOnePerModuleType() {}
+
+func (m appModule) IsAppModule() {}
+
+var (
+	_ appmodule.AppModule = appModule{}
+	_ module.HasServices  = appModule{}
+)
+
+// BaseAppOption is a depinject.AutoGroupType which can be used to pass
+// BaseApp options into the depinject. It should be used carefully.
+type BaseAppOption func(*baseapp.BaseApp)
+
+// IsManyPerContainerType indicates that this is a depinject.ManyPerContainerType.
+func (b BaseAppOption) IsManyPerContainerType() {}
+
+func init() {
+	appmodule.Register(&runtimev1alpha1.Module{},
+		appmodule.Provide(
+			ProvideApp,
+			ProvideInterfaceRegistry,
+			ProvideKVStoreKey,
+			ProvideTransientStoreKey,
+			ProvideMemoryStoreKey,
+			ProvideGenesisTxHandler,
+			ProvideKVStoreService,
+			ProvideMemoryStoreService,
+			ProvideTransientStoreService,
+			ProvideEventService,
+			ProvideHeaderInfoService,
+			ProvideCometInfoService,
+			ProvideBasicManager,
+			ProvideAddressCodec,
+		),
+		appmodule.Invoke(SetupAppBuilder),
+	)
+}
+
+func ProvideApp(interfaceRegistry codectypes.InterfaceRegistry) (
+	codec.Codec,
+	*codec.LegacyAmino,
+	*AppBuilder,
+	*baseapp.MsgServiceRouter,
+	*baseapp.GRPCQueryRouter,
+	appmodule.AppModule,
+	protodesc.Resolver,
+	protoregistry.MessageTypeResolver,
+	error,
+) {
+	protoFiles := proto.HybridResolver
+	protoTypes := protoregistry.GlobalTypes
+
+	// At startup, check that all proto annotations are correct.
+	if err := msgservice.ValidateProtoAnnotations(protoFiles); err != nil {
+		// Once we switch to using protoreflect-based ante handlers, we might
+		// want to panic here instead of logging a warning.
+		_, _ = fmt.Fprintln(os.Stderr, err.Error())
+	}
+	amino := codec.NewLegacyAmino()
+
+	std.RegisterInterfaces(interfaceRegistry)
+	std.RegisterLegacyAminoCodec(amino)
+
+	cdc := codec.NewProtoCodec(interfaceRegistry)
+	msgServiceRouter := baseapp.NewMsgServiceRouter()
+	grpcQueryRouter := baseapp.NewGRPCQueryRouter()
+	app := &App{
+		storeKeys:         nil,
+		interfaceRegistry: interfaceRegistry,
+		cdc:               cdc,
+		amino:             amino,
+		basicManager:      module.BasicManager{},
+		msgServiceRouter:  msgServiceRouter,
+		grpcQueryRouter:   grpcQueryRouter,
+	}
+	appBuilder := &AppBuilder{app}
+
+	return cdc, amino, appBuilder, msgServiceRouter, grpcQueryRouter, appModule{app}, protoFiles, protoTypes, nil
+}
+
+type AppInputs struct {
+	depinject.In
+
+	AppConfig          *appv1alpha1.Config `optional:"true"`
+	Config             *runtimev1alpha1.Module
+	AppBuilder         *AppBuilder
+	Modules            map[string]appmodule.AppModule
+	CustomModuleBasics map[string]module.AppModuleBasic `optional:"true"`
+	BaseAppOptions     []BaseAppOption
+	InterfaceRegistry  codectypes.InterfaceRegistry
+	LegacyAmino        *codec.LegacyAmino
+	Logger             log.Logger
+}
+
+func SetupAppBuilder(inputs AppInputs) {
+	app := inputs.AppBuilder.app
+	app.baseAppOptions = inputs.BaseAppOptions
+	app.config = inputs.Config
+	app.appConfig = inputs.AppConfig
+	app.logger = inputs.Logger
+	app.ModuleManager = module.NewManagerFromMap(inputs.Modules)
+	for name, mod := range inputs.Modules {
+		// A custom AppModuleBasic, when supplied, takes precedence over the
+		// core adaptor for codec/interface registration.
+		if customBasicMod, ok := inputs.CustomModuleBasics[name]; ok {
+			app.basicManager[name] = customBasicMod
+			customBasicMod.RegisterInterfaces(inputs.InterfaceRegistry)
+			customBasicMod.RegisterLegacyAminoCodec(inputs.LegacyAmino)
+			continue
+		}
+		coreAppModuleBasic := module.CoreAppModuleBasicAdaptor(name, mod)
+		app.basicManager[name] = coreAppModuleBasic
+		coreAppModuleBasic.RegisterInterfaces(inputs.InterfaceRegistry)
+		coreAppModuleBasic.RegisterLegacyAminoCodec(inputs.LegacyAmino)
+	}
+}
+
+func ProvideInterfaceRegistry(addressCodec address.Codec, validatorAddressCodec ValidatorAddressCodec, customGetSigners []signing.CustomGetSigner) (codectypes.InterfaceRegistry, error) {
+	signingOptions := signing.Options{
+		AddressCodec:          addressCodec,
+		ValidatorAddressCodec: validatorAddressCodec,
+	}
+	for _, signer := range customGetSigners {
+		signingOptions.DefineCustomGetSigners(signer.MsgType, signer.Fn)
+	}
+
+	interfaceRegistry, err := codectypes.NewInterfaceRegistryWithOptions(codectypes.InterfaceRegistryOptions{
+		ProtoFiles:     proto.HybridResolver,
+		SigningOptions: signingOptions,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if err := interfaceRegistry.SigningContext().Validate(); err != nil {
+		return nil, err
+	}
+
+	return interfaceRegistry, nil
+}
+
+func registerStoreKey(wrapper *AppBuilder, key storetypes.StoreKey) {
+	wrapper.app.storeKeys = append(wrapper.app.storeKeys, key)
+}
+
+func storeKeyOverride(config *runtimev1alpha1.Module, moduleName string) *runtimev1alpha1.StoreKeyConfig {
+	for _, cfg := range config.OverrideStoreKeys {
+		if cfg.ModuleName == moduleName {
+			return cfg
+		}
+	}
+
+	return nil
+}
+
+func ProvideKVStoreKey(config *runtimev1alpha1.Module, key depinject.ModuleKey, app *AppBuilder) *storetypes.KVStoreKey {
+	if slices.Contains(config.SkipStoreKeys, key.Name()) {
+		return nil
+	}
+	override := storeKeyOverride(config, key.Name())
+
+	var storeKeyName string
+	if override != nil {
+		storeKeyName = override.KvStoreKey
+	} else {
+		storeKeyName = key.Name()
+	}
+	storeKey := storetypes.NewKVStoreKey(storeKeyName)
+
+	registerStoreKey(app, storeKey)
+
+	return storeKey
+}
+
+func ProvideTransientStoreKey(config *runtimev1alpha1.Module, key depinject.ModuleKey, app *AppBuilder) *storetypes.TransientStoreKey {
+	if slices.Contains(config.SkipStoreKeys, key.Name()) {
+		return nil
+	}
+	storeKey := storetypes.NewTransientStoreKey(fmt.Sprintf("transient:%s", key.Name()))
+
+	registerStoreKey(app, storeKey)
+
+	return storeKey
+}
+
+func ProvideMemoryStoreKey(config *runtimev1alpha1.Module, key depinject.ModuleKey, app *AppBuilder) *storetypes.MemoryStoreKey {
+	if slices.Contains(config.SkipStoreKeys, key.Name()) {
+		return nil
+	}
+	storeKey := storetypes.NewMemoryStoreKey(fmt.Sprintf("memory:%s", key.Name()))
+
+	registerStoreKey(app, storeKey)
+
+	return storeKey
+}
+
+func ProvideGenesisTxHandler(appBuilder *AppBuilder) genesis.TxHandler {
+	return appBuilder.app
+}
+
+func ProvideKVStoreService(config *runtimev1alpha1.Module, key depinject.ModuleKey, app *AppBuilder) store.KVStoreService {
+	storeKey := ProvideKVStoreKey(config, key, app)
+
+	return kvStoreService{key: storeKey}
+}
+
+func ProvideMemoryStoreService(config *runtimev1alpha1.Module, key depinject.ModuleKey, app *AppBuilder) store.MemoryStoreService {
+	storeKey := ProvideMemoryStoreKey(config, key, app)
+
+	return memStoreService{key: storeKey}
+}
+
+func ProvideTransientStoreService(config *runtimev1alpha1.Module, key depinject.ModuleKey, app *AppBuilder) store.TransientStoreService {
+	storeKey := ProvideTransientStoreKey(config, key, app)
+
+	return transientStoreService{key: storeKey}
+}
+
+func ProvideEventService() event.Service {
+	return EventService{}
+}
+
+func ProvideCometInfoService() comet.BlockInfoService {
+	return cometInfoService{}
+}
+
+func ProvideHeaderInfoService(app *AppBuilder) header.Service {
+	return headerInfoService{}
+}
+
+func ProvideBasicManager(app *AppBuilder) module.BasicManager {
+	return app.app.basicManager
+}
+
+type (
+	// ValidatorAddressCodec is an alias for address.Codec for validator addresses.
+	ValidatorAddressCodec address.Codec
+
+	// ConsensusAddressCodec is an alias for address.Codec for validator consensus addresses.
+	ConsensusAddressCodec address.Codec
+)
+
+type AddressCodecInputs struct {
+	depinject.In
+
+	AuthConfig    *authmodulev1.Module    `optional:"true"`
+	StakingConfig *stakingmodulev1.Module `optional:"true"`
+
+	AddressCodecFactory          func() address.Codec         `optional:"true"`
+	ValidatorAddressCodecFactory func() ValidatorAddressCodec `optional:"true"`
+	ConsensusAddressCodecFactory func() ConsensusAddressCodec `optional:"true"`
+}
+
+// ProvideAddressCodec provides an address.Codec to the container for any
+// modules that want to do address string <> bytes conversion.
+func ProvideAddressCodec(in AddressCodecInputs) (address.Codec, ValidatorAddressCodec, ConsensusAddressCodec) {
+	if in.AddressCodecFactory != nil && in.ValidatorAddressCodecFactory != nil && in.ConsensusAddressCodecFactory != nil {
+		return in.AddressCodecFactory(), in.ValidatorAddressCodecFactory(), in.ConsensusAddressCodecFactory()
+	}
+	if in.AuthConfig == nil || in.AuthConfig.Bech32Prefix == "" {
+		panic("auth config bech32 prefix cannot be empty if no custom address codec is provided")
+	}
+	if in.StakingConfig == nil {
+		in.StakingConfig = &stakingmodulev1.Module{}
+	}
+	if in.StakingConfig.Bech32PrefixValidator == "" {
+		in.StakingConfig.Bech32PrefixValidator = fmt.Sprintf("%svaloper", in.AuthConfig.Bech32Prefix)
+	}
+	if in.StakingConfig.Bech32PrefixConsensus == "" {
+		in.StakingConfig.Bech32PrefixConsensus = fmt.Sprintf("%svalcons", in.AuthConfig.Bech32Prefix)
+	}
+
+	return addresscodec.NewBech32Codec(in.AuthConfig.Bech32Prefix),
+		addresscodec.NewBech32Codec(in.StakingConfig.Bech32PrefixValidator),
+		addresscodec.NewBech32Codec(in.StakingConfig.Bech32PrefixConsensus)
+}
+```
+
+```go expandable
+package runtime
+
+import (
+
+ "fmt"
+ "os"
+ "slices"
+ "github.com/cosmos/gogoproto/proto"
+ "google.golang.org/protobuf/reflect/protodesc"
+ "google.golang.org/protobuf/reflect/protoregistry"
+
+ runtimev1alpha1 "cosmossdk.io/api/cosmos/app/runtime/v1alpha1"
+ appv1alpha1 "cosmossdk.io/api/cosmos/app/v1alpha1"
+ authmodulev1 "cosmossdk.io/api/cosmos/auth/module/v1"
+ stakingmodulev1 "cosmossdk.io/api/cosmos/staking/module/v1"
+ "cosmossdk.io/core/address"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/core/comet"
+ "cosmossdk.io/core/event"
+ "cosmossdk.io/core/genesis"
+ "cosmossdk.io/core/header"
+ "cosmossdk.io/core/store"
+ "cosmossdk.io/depinject"
+ "cosmossdk.io/log"
+ storetypes "cosmossdk.io/store/types"
+ "cosmossdk.io/x/tx/signing"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/codec"
+ addresscodec "github.com/cosmos/cosmos-sdk/codec/address"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/std"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ "github.com/cosmos/cosmos-sdk/types/msgservice"
+)
+
+type appModule struct {
+ app *App
+}
+
+func (m appModule)
+
+RegisterServices(configurator module.Configurator) {
+ err := m.app.registerRuntimeServices(configurator)
+ if err != nil {
+ panic(err)
+}
+}
+
+func (m appModule)
+
+IsOnePerModuleType() {
+}
+
+func (m appModule)
+
+IsAppModule() {
+}
+
+var (
+ _ appmodule.AppModule = appModule{
+}
+ _ module.HasServices = appModule{
+}
+)
+
+// BaseAppOption is a depinject.AutoGroupType which can be used to pass
+// BaseApp options into the depinject. It should be used carefully.
+type BaseAppOption func(*baseapp.BaseApp)
+
+// IsManyPerContainerType indicates that this is a depinject.ManyPerContainerType.
+func (b BaseAppOption)
+
+IsManyPerContainerType() {
+}
+
+func init() {
+ appmodule.Register(&runtimev1alpha1.Module{
+},
+ appmodule.Provide(
+ ProvideApp,
+ ProvideInterfaceRegistry,
+ ProvideKVStoreKey,
+ ProvideTransientStoreKey,
+ ProvideMemoryStoreKey,
+ ProvideGenesisTxHandler,
+ ProvideKVStoreService,
+ ProvideMemoryStoreService,
+ ProvideTransientStoreService,
+ ProvideEventService,
+ ProvideHeaderInfoService,
+ ProvideCometInfoService,
+ ProvideBasicManager,
+ ProvideAddressCodec,
+ ),
+ appmodule.Invoke(SetupAppBuilder),
+ )
+}
+
+func ProvideApp(interfaceRegistry codectypes.InterfaceRegistry) (
+ codec.Codec,
+ *codec.LegacyAmino,
+ *AppBuilder,
+ *baseapp.MsgServiceRouter,
+ *baseapp.GRPCQueryRouter,
+ appmodule.AppModule,
+ protodesc.Resolver,
+ protoregistry.MessageTypeResolver,
+ error,
+) {
+ protoFiles := proto.HybridResolver
+ protoTypes := protoregistry.GlobalTypes
+
+ // At startup, check that all proto annotations are correct.
+ if err := msgservice.ValidateProtoAnnotations(protoFiles); err != nil {
+ // Once we switch to using protoreflect-based ante handlers, we might
+ // want to panic here instead of logging a warning.
+ _, _ = fmt.Fprintln(os.Stderr, err.Error())
+}
+ amino := codec.NewLegacyAmino()
+
+std.RegisterInterfaces(interfaceRegistry)
+
+std.RegisterLegacyAminoCodec(amino)
+ cdc := codec.NewProtoCodec(interfaceRegistry)
+ msgServiceRouter := baseapp.NewMsgServiceRouter()
+ grpcQueryRouter := baseapp.NewGRPCQueryRouter()
+ app := &App{
+ storeKeys: nil,
+ interfaceRegistry: interfaceRegistry,
+ cdc: cdc,
+ amino: amino,
+ basicManager: module.BasicManager{
+},
+ msgServiceRouter: msgServiceRouter,
+ grpcQueryRouter: grpcQueryRouter,
+}
+ appBuilder := &AppBuilder{
+ app
+}
+
+return cdc, amino, appBuilder, msgServiceRouter, grpcQueryRouter, appModule{
+ app
+}, protoFiles, protoTypes, nil
+}
+
+type AppInputs struct {
+ depinject.In
+
+ AppConfig *appv1alpha1.Config `optional:"true"`
+ Config *runtimev1alpha1.Module
+ AppBuilder *AppBuilder
+ Modules map[string]appmodule.AppModule
+ CustomModuleBasics map[string]module.AppModuleBasic `optional:"true"`
+ BaseAppOptions []BaseAppOption
+ InterfaceRegistry codectypes.InterfaceRegistry
+ LegacyAmino *codec.LegacyAmino
+ Logger log.Logger
+}
+
+func SetupAppBuilder(inputs AppInputs) {
+ app := inputs.AppBuilder.app
+ app.baseAppOptions = inputs.BaseAppOptions
+ app.config = inputs.Config
+ app.appConfig = inputs.AppConfig
+ app.logger = inputs.Logger
+ app.ModuleManager = module.NewManagerFromMap(inputs.Modules)
+ for name, mod := range inputs.Modules {
+ if customBasicMod, ok := inputs.CustomModuleBasics[name]; ok {
+ app.basicManager[name] = customBasicMod
+ customBasicMod.RegisterInterfaces(inputs.InterfaceRegistry)
+
+customBasicMod.RegisterLegacyAminoCodec(inputs.LegacyAmino)
+
+continue
+}
+ coreAppModuleBasic := module.CoreAppModuleBasicAdaptor(name, mod)
+
+app.basicManager[name] = coreAppModuleBasic
+ coreAppModuleBasic.RegisterInterfaces(inputs.InterfaceRegistry)
+
+coreAppModuleBasic.RegisterLegacyAminoCodec(inputs.LegacyAmino)
+}
+}
+
+func ProvideInterfaceRegistry(addressCodec address.Codec, validatorAddressCodec ValidatorAddressCodec, customGetSigners []signing.CustomGetSigner) (codectypes.InterfaceRegistry, error) {
+ signingOptions := signing.Options{
+ AddressCodec: addressCodec,
+ ValidatorAddressCodec: validatorAddressCodec,
+}
+ for _, signer := range customGetSigners {
+ signingOptions.DefineCustomGetSigners(signer.MsgType, signer.Fn)
+}
+
+interfaceRegistry, err := codectypes.NewInterfaceRegistryWithOptions(codectypes.InterfaceRegistryOptions{
+ ProtoFiles: proto.HybridResolver,
+ SigningOptions: signingOptions,
+})
+ if err != nil {
+ return nil, err
+}
+ if err := interfaceRegistry.SigningContext().Validate(); err != nil {
+ return nil, err
+}
+
+return interfaceRegistry, nil
+}
+
+func registerStoreKey(wrapper *AppBuilder, key storetypes.StoreKey) {
+ wrapper.app.storeKeys = append(wrapper.app.storeKeys, key)
+}
+
+func storeKeyOverride(config *runtimev1alpha1.Module, moduleName string) *runtimev1alpha1.StoreKeyConfig {
+ for _, cfg := range config.OverrideStoreKeys {
+ if cfg.ModuleName == moduleName {
+ return cfg
+}
+
+}
+
+return nil
+}
+
+func ProvideKVStoreKey(config *runtimev1alpha1.Module, key depinject.ModuleKey, app *AppBuilder) *storetypes.KVStoreKey {
+ if slices.Contains(config.SkipStoreKeys, key.Name()) {
+ return nil
+}
+ override := storeKeyOverride(config, key.Name())
+
+var storeKeyName string
+ if override != nil {
+ storeKeyName = override.KvStoreKey
+}
+
+else {
+ storeKeyName = key.Name()
+}
+ storeKey := storetypes.NewKVStoreKey(storeKeyName)
+
+registerStoreKey(app, storeKey)
+
+return storeKey
+}
+
+func ProvideTransientStoreKey(config *runtimev1alpha1.Module, key depinject.ModuleKey, app *AppBuilder) *storetypes.TransientStoreKey {
+ if slices.Contains(config.SkipStoreKeys, key.Name()) {
+ return nil
+}
+ storeKey := storetypes.NewTransientStoreKey(fmt.Sprintf("transient:%s", key.Name()))
+
+registerStoreKey(app, storeKey)
+
+return storeKey
+}
+
+func ProvideMemoryStoreKey(config *runtimev1alpha1.Module, key depinject.ModuleKey, app *AppBuilder) *storetypes.MemoryStoreKey {
+ if slices.Contains(config.SkipStoreKeys, key.Name()) {
+ return nil
+}
+ storeKey := storetypes.NewMemoryStoreKey(fmt.Sprintf("memory:%s", key.Name()))
+
+registerStoreKey(app, storeKey)
+
+return storeKey
+}
+
+func ProvideGenesisTxHandler(appBuilder *AppBuilder)
+
+genesis.TxHandler {
+ return appBuilder.app
+}
+
+func ProvideKVStoreService(config *runtimev1alpha1.Module, key depinject.ModuleKey, app *AppBuilder)
+
+store.KVStoreService {
+ storeKey := ProvideKVStoreKey(config, key, app)
+
+return kvStoreService{
+ key: storeKey
+}
+}
+
+func ProvideMemoryStoreService(config *runtimev1alpha1.Module, key depinject.ModuleKey, app *AppBuilder)
+
+store.MemoryStoreService {
+ storeKey := ProvideMemoryStoreKey(config, key, app)
+
+return memStoreService{
+ key: storeKey
+}
+}
+
+func ProvideTransientStoreService(config *runtimev1alpha1.Module, key depinject.ModuleKey, app *AppBuilder)
+
+store.TransientStoreService {
+ storeKey := ProvideTransientStoreKey(config, key, app)
+
+return transientStoreService{
+ key: storeKey
+}
+}
+
+func ProvideEventService()
+
+event.Service {
+ return EventService{
+}
+}
+
+func ProvideCometInfoService()
+
+comet.BlockInfoService {
+ return cometInfoService{
+}
+}
+
+func ProvideHeaderInfoService(app *AppBuilder)
+
+header.Service {
+ return headerInfoService{
+}
+}
+
+func ProvideBasicManager(app *AppBuilder)
+
+module.BasicManager {
+ return app.app.basicManager
+}
+
+type (
+ // ValidatorAddressCodec is an alias for address.Codec for validator addresses.
+ ValidatorAddressCodec address.Codec
+
+ // ConsensusAddressCodec is an alias for address.Codec for validator consensus addresses.
+ ConsensusAddressCodec address.Codec
+)
+
+type AddressCodecInputs struct {
+ depinject.In
+
+ AuthConfig *authmodulev1.Module `optional:"true"`
+ StakingConfig *stakingmodulev1.Module `optional:"true"`
+
+ AddressCodecFactory func()
+
+address.Codec `optional:"true"`
+ ValidatorAddressCodecFactory func()
+
+ValidatorAddressCodec `optional:"true"`
+ ConsensusAddressCodecFactory func()
+
+ConsensusAddressCodec `optional:"true"`
+}
+
+// ProvideAddressCodec provides an address.Codec to the container for any
+// modules that want to do address string <> bytes conversion.
+func ProvideAddressCodec(in AddressCodecInputs) (address.Codec, ValidatorAddressCodec, ConsensusAddressCodec) {
+ if in.AddressCodecFactory != nil && in.ValidatorAddressCodecFactory != nil && in.ConsensusAddressCodecFactory != nil {
+ return in.AddressCodecFactory(), in.ValidatorAddressCodecFactory(), in.ConsensusAddressCodecFactory()
+}
+ if in.AuthConfig == nil || in.AuthConfig.Bech32Prefix == "" {
+ panic("auth config bech32 prefix cannot be empty if no custom address codec is provided")
+}
+ if in.StakingConfig == nil {
+ in.StakingConfig = &stakingmodulev1.Module{
+}
+
+}
+ if in.StakingConfig.Bech32PrefixValidator == "" {
+ in.StakingConfig.Bech32PrefixValidator = fmt.Sprintf("%svaloper", in.AuthConfig.Bech32Prefix)
+}
+ if in.StakingConfig.Bech32PrefixConsensus == "" {
+ in.StakingConfig.Bech32PrefixConsensus = fmt.Sprintf("%svalcons", in.AuthConfig.Bech32Prefix)
+}
+
+return addresscodec.NewBech32Codec(in.AuthConfig.Bech32Prefix),
+ addresscodec.NewBech32Codec(in.StakingConfig.Bech32PrefixValidator),
+ addresscodec.NewBech32Codec(in.StakingConfig.Bech32PrefixConsensus)
+}
+```
diff --git a/sdk/next/build/building-modules/msg-services.mdx b/sdk/next/build/building-modules/msg-services.mdx
new file mode 100644
index 000000000..2ff6a3b59
--- /dev/null
+++ b/sdk/next/build/building-modules/msg-services.mdx
@@ -0,0 +1,3600 @@
+---
+title: 'Msg Services'
+---
+{/* force preview rebuild: 2025-09-30 */}
+
+
+**Synopsis**
+A Protobuf `Msg` service processes [messages](/sdk/next/build/building-modules/messages-and-queries). Protobuf `Msg` services are specific to the module in which they are defined, and only process messages defined within that module. They are called from `BaseApp` during [`DeliverTx`](/sdk/next/learn/advanced/baseapp#delivertx).
+
+
+
+**Prerequisite Readings**
+
+* [Module Manager](/sdk/next/build/building-modules/module-manager)
+* [Messages and Queries](/sdk/next/build/building-modules/messages-and-queries)
+
+
+
+## Implementation of a module `Msg` service
+
+Each module should define a Protobuf `Msg` service, which will be responsible for processing requests (implementing `sdk.Msg`) and returning responses.
+
+As further described in [ADR 031](/sdk/v0.53/build/architecture/adr-031-msg-service), this approach has the advantage of clearly specifying return types and generating server and client code.
+
+Protobuf generates a `MsgServer` interface based on the definition of the `Msg` service. The module developer implements this interface by writing the state transition logic that runs upon receipt of each `sdk.Msg`. As an example, here is the generated `MsgServer` interface for `x/bank`, which exposes four `sdk.Msg`s:
+
+```go expandable
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: cosmos/bank/v1beta1/tx.proto
+
+package types
+
+import (
+
+ context "context"
+ fmt "fmt"
+ _ "github.com/cosmos/cosmos-proto"
+ github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types"
+ types "github.com/cosmos/cosmos-sdk/types"
+ _ "github.com/cosmos/cosmos-sdk/types/msgservice"
+ _ "github.com/cosmos/cosmos-sdk/types/tx/amino"
+ _ "github.com/cosmos/gogoproto/gogoproto"
+ grpc1 "github.com/cosmos/gogoproto/grpc"
+ proto "github.com/cosmos/gogoproto/proto"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// MsgSend represents a message to send coins from one account to another.
+type MsgSend struct {
+ FromAddress string `protobuf:"bytes,1,opt,name=from_address,json=fromAddress,proto3" json:"from_address,omitempty"`
+ ToAddress string `protobuf:"bytes,2,opt,name=to_address,json=toAddress,proto3" json:"to_address,omitempty"`
+ Amount github_com_cosmos_cosmos_sdk_types.Coins `protobuf:"bytes,3,rep,name=amount,proto3,castrepeated=github.com/cosmos/cosmos-sdk/types.Coins" json:"amount"`
+}
+
+func (m *MsgSend)
+
+Reset() { *m = MsgSend{
+}
+}
+
+func (m *MsgSend)
+
+String()
+
+string {
+ return proto.CompactTextString(m)
+}
+
+func (*MsgSend)
+
+ProtoMessage() {
+}
+
+func (*MsgSend)
+
+Descriptor() ([]byte, []int) {
+ return fileDescriptor_1d8cb1613481f5b7, []int{0
+}
+}
+
+func (m *MsgSend)
+
+XXX_Unmarshal(b []byte)
+
+error {
+ return m.Unmarshal(b)
+}
+
+func (m *MsgSend)
+
+XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgSend.Marshal(b, m, deterministic)
+}
+
+else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+}
+
+return b[:n], nil
+}
+}
+
+func (m *MsgSend)
+
+XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgSend.Merge(m, src)
+}
+
+func (m *MsgSend)
+
+XXX_Size()
+
+int {
+ return m.Size()
+}
+
+func (m *MsgSend)
+
+XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgSend.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgSend proto.InternalMessageInfo
+
+// MsgSendResponse defines the Msg/Send response type.
+type MsgSendResponse struct {
+}
+
+func (m *MsgSendResponse)
+
+Reset() { *m = MsgSendResponse{
+}
+}
+
+func (m *MsgSendResponse)
+
+String()
+
+string {
+ return proto.CompactTextString(m)
+}
+
+func (*MsgSendResponse)
+
+ProtoMessage() {
+}
+
+func (*MsgSendResponse)
+
+Descriptor() ([]byte, []int) {
+ return fileDescriptor_1d8cb1613481f5b7, []int{1
+}
+}
+
+func (m *MsgSendResponse)
+
+XXX_Unmarshal(b []byte)
+
+error {
+ return m.Unmarshal(b)
+}
+
+func (m *MsgSendResponse)
+
+XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgSendResponse.Marshal(b, m, deterministic)
+}
+
+else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+}
+
+return b[:n], nil
+}
+}
+
+func (m *MsgSendResponse)
+
+XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgSendResponse.Merge(m, src)
+}
+
+func (m *MsgSendResponse)
+
+XXX_Size()
+
+int {
+ return m.Size()
+}
+
+func (m *MsgSendResponse)
+
+XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgSendResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgSendResponse proto.InternalMessageInfo
+
+// MsgMultiSend represents an arbitrary multi-in, multi-out send message.
+type MsgMultiSend struct {
+ // Inputs, despite being `repeated`, only allows one sender input. This is
+ // checked in MsgMultiSend's ValidateBasic.
+ Inputs []Input `protobuf:"bytes,1,rep,name=inputs,proto3" json:"inputs"`
+ Outputs []Output `protobuf:"bytes,2,rep,name=outputs,proto3" json:"outputs"`
+}
+
+func (m *MsgMultiSend)
+
+Reset() { *m = MsgMultiSend{
+}
+}
+
+func (m *MsgMultiSend)
+
+String()
+
+string {
+ return proto.CompactTextString(m)
+}
+
+func (*MsgMultiSend)
+
+ProtoMessage() {
+}
+
+func (*MsgMultiSend)
+
+Descriptor() ([]byte, []int) {
+ return fileDescriptor_1d8cb1613481f5b7, []int{2
+}
+}
+
+func (m *MsgMultiSend)
+
+XXX_Unmarshal(b []byte)
+
+error {
+ return m.Unmarshal(b)
+}
+
+func (m *MsgMultiSend)
+
+XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgMultiSend.Marshal(b, m, deterministic)
+}
+
+else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+}
+
+return b[:n], nil
+}
+}
+
+func (m *MsgMultiSend)
+
+XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgMultiSend.Merge(m, src)
+}
+
+func (m *MsgMultiSend)
+
+XXX_Size()
+
+int {
+ return m.Size()
+}
+
+func (m *MsgMultiSend)
+
+XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgMultiSend.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgMultiSend proto.InternalMessageInfo
+
+func (m *MsgMultiSend)
+
+GetInputs() []Input {
+ if m != nil {
+ return m.Inputs
+}
+
+return nil
+}
+
+func (m *MsgMultiSend)
+
+GetOutputs() []Output {
+ if m != nil {
+ return m.Outputs
+}
+
+return nil
+}
+
+// MsgMultiSendResponse defines the Msg/MultiSend response type.
+type MsgMultiSendResponse struct {
+}
+
+func (m *MsgMultiSendResponse)
+
+Reset() { *m = MsgMultiSendResponse{
+}
+}
+
+func (m *MsgMultiSendResponse)
+
+String()
+
+string {
+ return proto.CompactTextString(m)
+}
+
+func (*MsgMultiSendResponse)
+
+ProtoMessage() {
+}
+
+func (*MsgMultiSendResponse)
+
+Descriptor() ([]byte, []int) {
+ return fileDescriptor_1d8cb1613481f5b7, []int{3
+}
+}
+
+func (m *MsgMultiSendResponse)
+
+XXX_Unmarshal(b []byte)
+
+error {
+ return m.Unmarshal(b)
+}
+
+func (m *MsgMultiSendResponse)
+
+XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgMultiSendResponse.Marshal(b, m, deterministic)
+}
+
+else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+}
+
+return b[:n], nil
+}
+}
+
+func (m *MsgMultiSendResponse)
+
+XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgMultiSendResponse.Merge(m, src)
+}
+
+func (m *MsgMultiSendResponse)
+
+XXX_Size()
+
+int {
+ return m.Size()
+}
+
+func (m *MsgMultiSendResponse)
+
+XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgMultiSendResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgMultiSendResponse proto.InternalMessageInfo
+
+// MsgUpdateParams is the Msg/UpdateParams request type.
+//
+// Since: cosmos-sdk 0.47
+type MsgUpdateParams struct {
+ // authority is the address that controls the module (defaults to x/gov unless overwritten).
+ Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"`
+ // params defines the x/bank parameters to update.
+ //
+ // NOTE: All parameters must be supplied.
+ Params Params `protobuf:"bytes,2,opt,name=params,proto3" json:"params"`
+}
+
+func (m *MsgUpdateParams)
+
+Reset() { *m = MsgUpdateParams{
+}
+}
+
+func (m *MsgUpdateParams)
+
+String()
+
+string {
+ return proto.CompactTextString(m)
+}
+
+func (*MsgUpdateParams)
+
+ProtoMessage() {
+}
+
+func (*MsgUpdateParams)
+
+Descriptor() ([]byte, []int) {
+ return fileDescriptor_1d8cb1613481f5b7, []int{4
+}
+}
+
+func (m *MsgUpdateParams)
+
+XXX_Unmarshal(b []byte)
+
+error {
+ return m.Unmarshal(b)
+}
+
+func (m *MsgUpdateParams)
+
+XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgUpdateParams.Marshal(b, m, deterministic)
+}
+
+else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+}
+
+return b[:n], nil
+}
+}
+
+func (m *MsgUpdateParams)
+
+XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgUpdateParams.Merge(m, src)
+}
+
+func (m *MsgUpdateParams)
+
+XXX_Size()
+
+int {
+ return m.Size()
+}
+
+func (m *MsgUpdateParams)
+
+XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgUpdateParams.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgUpdateParams proto.InternalMessageInfo
+
+func (m *MsgUpdateParams)
+
+GetAuthority()
+
+string {
+ if m != nil {
+ return m.Authority
+}
+
+return ""
+}
+
+func (m *MsgUpdateParams)
+
+GetParams()
+
+Params {
+ if m != nil {
+ return m.Params
+}
+
+return Params{
+}
+}
+
+// MsgUpdateParamsResponse defines the response structure for executing a
+// MsgUpdateParams message.
+//
+// Since: cosmos-sdk 0.47
+type MsgUpdateParamsResponse struct {
+}
+
+func (m *MsgUpdateParamsResponse)
+
+Reset() { *m = MsgUpdateParamsResponse{
+}
+}
+
+func (m *MsgUpdateParamsResponse)
+
+String()
+
+string {
+ return proto.CompactTextString(m)
+}
+
+func (*MsgUpdateParamsResponse)
+
+ProtoMessage() {
+}
+
+func (*MsgUpdateParamsResponse)
+
+Descriptor() ([]byte, []int) {
+ return fileDescriptor_1d8cb1613481f5b7, []int{5
+}
+}
+
+func (m *MsgUpdateParamsResponse)
+
+XXX_Unmarshal(b []byte)
+
+error {
+ return m.Unmarshal(b)
+}
+
+func (m *MsgUpdateParamsResponse)
+
+XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgUpdateParamsResponse.Marshal(b, m, deterministic)
+}
+
+else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+}
+
+return b[:n], nil
+}
+}
+
+func (m *MsgUpdateParamsResponse)
+
+XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgUpdateParamsResponse.Merge(m, src)
+}
+
+func (m *MsgUpdateParamsResponse)
+
+XXX_Size()
+
+int {
+ return m.Size()
+}
+
+func (m *MsgUpdateParamsResponse)
+
+XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgUpdateParamsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgUpdateParamsResponse proto.InternalMessageInfo
+
+// MsgSetSendEnabled is the Msg/SetSendEnabled request type.
+//
+// Only entries to add/update/delete need to be included.
+// Existing SendEnabled entries that are not included in this
+// message are left unchanged.
+//
+// Since: cosmos-sdk 0.47
+type MsgSetSendEnabled struct {
+ Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"`
+ // send_enabled is the list of entries to add or update.
+ SendEnabled []*SendEnabled `protobuf:"bytes,2,rep,name=send_enabled,json=sendEnabled,proto3" json:"send_enabled,omitempty"`
+ // use_default_for is a list of denoms that should use the params.default_send_enabled value.
+ // Denoms listed here will have their SendEnabled entries deleted.
+ // If a denom is included that doesn't have a SendEnabled entry,
+ // it will be ignored.
+ UseDefaultFor []string `protobuf:"bytes,3,rep,name=use_default_for,json=useDefaultFor,proto3" json:"use_default_for,omitempty"`
+}
+
+func (m *MsgSetSendEnabled)
+
+Reset() { *m = MsgSetSendEnabled{
+}
+}
+
+func (m *MsgSetSendEnabled)
+
+String()
+
+string {
+ return proto.CompactTextString(m)
+}
+
+func (*MsgSetSendEnabled)
+
+ProtoMessage() {
+}
+
+func (*MsgSetSendEnabled)
+
+Descriptor() ([]byte, []int) {
+ return fileDescriptor_1d8cb1613481f5b7, []int{6
+}
+}
+
+func (m *MsgSetSendEnabled)
+
+XXX_Unmarshal(b []byte)
+
+error {
+ return m.Unmarshal(b)
+}
+
+func (m *MsgSetSendEnabled)
+
+XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgSetSendEnabled.Marshal(b, m, deterministic)
+}
+
+else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+}
+
+return b[:n], nil
+}
+}
+
+func (m *MsgSetSendEnabled)
+
+XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgSetSendEnabled.Merge(m, src)
+}
+
+func (m *MsgSetSendEnabled)
+
+XXX_Size()
+
+int {
+ return m.Size()
+}
+
+func (m *MsgSetSendEnabled)
+
+XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgSetSendEnabled.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgSetSendEnabled proto.InternalMessageInfo
+
+func (m *MsgSetSendEnabled)
+
+GetAuthority()
+
+string {
+ if m != nil {
+ return m.Authority
+}
+
+return ""
+}
+
+func (m *MsgSetSendEnabled)
+
+GetSendEnabled() []*SendEnabled {
+ if m != nil {
+ return m.SendEnabled
+}
+
+return nil
+}
+
+func (m *MsgSetSendEnabled)
+
+GetUseDefaultFor() []string {
+ if m != nil {
+ return m.UseDefaultFor
+}
+
+return nil
+}
+
+// MsgSetSendEnabledResponse defines the Msg/SetSendEnabled response type.
+//
+// Since: cosmos-sdk 0.47
+type MsgSetSendEnabledResponse struct {
+}
+
+func (m *MsgSetSendEnabledResponse)
+
+Reset() { *m = MsgSetSendEnabledResponse{
+}
+}
+
+func (m *MsgSetSendEnabledResponse)
+
+String()
+
+string {
+ return proto.CompactTextString(m)
+}
+
+func (*MsgSetSendEnabledResponse)
+
+ProtoMessage() {
+}
+
+func (*MsgSetSendEnabledResponse)
+
+Descriptor() ([]byte, []int) {
+ return fileDescriptor_1d8cb1613481f5b7, []int{7
+}
+}
+
+func (m *MsgSetSendEnabledResponse)
+
+XXX_Unmarshal(b []byte)
+
+error {
+ return m.Unmarshal(b)
+}
+
+func (m *MsgSetSendEnabledResponse)
+
+XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgSetSendEnabledResponse.Marshal(b, m, deterministic)
+}
+
+else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+}
+
+return b[:n], nil
+}
+}
+
+func (m *MsgSetSendEnabledResponse)
+
+XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgSetSendEnabledResponse.Merge(m, src)
+}
+
+func (m *MsgSetSendEnabledResponse)
+
+XXX_Size()
+
+int {
+ return m.Size()
+}
+
+func (m *MsgSetSendEnabledResponse)
+
+XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgSetSendEnabledResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgSetSendEnabledResponse proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*MsgSend)(nil), "cosmos.bank.v1beta1.MsgSend")
+
+proto.RegisterType((*MsgSendResponse)(nil), "cosmos.bank.v1beta1.MsgSendResponse")
+
+proto.RegisterType((*MsgMultiSend)(nil), "cosmos.bank.v1beta1.MsgMultiSend")
+
+proto.RegisterType((*MsgMultiSendResponse)(nil), "cosmos.bank.v1beta1.MsgMultiSendResponse")
+
+proto.RegisterType((*MsgUpdateParams)(nil), "cosmos.bank.v1beta1.MsgUpdateParams")
+
+proto.RegisterType((*MsgUpdateParamsResponse)(nil), "cosmos.bank.v1beta1.MsgUpdateParamsResponse")
+
+proto.RegisterType((*MsgSetSendEnabled)(nil), "cosmos.bank.v1beta1.MsgSetSendEnabled")
+
+proto.RegisterType((*MsgSetSendEnabledResponse)(nil), "cosmos.bank.v1beta1.MsgSetSendEnabledResponse")
+}
+
+func init() {
+ proto.RegisterFile("cosmos/bank/v1beta1/tx.proto", fileDescriptor_1d8cb1613481f5b7)
+}
+
+var fileDescriptor_1d8cb1613481f5b7 = []byte{
+ // 700 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0xcf, 0x4f, 0xd3, 0x50,
+ 0x1c, 0x5f, 0x99, 0x8e, 0xec, 0x31, 0x25, 0x54, 0x22, 0xac, 0x90, 0x0e, 0x16, 0x43, 0x00, 0xa5,
+ 0x15, 0x34, 0x9a, 0xcc, 0x68, 0x74, 0x28, 0x89, 0x26, 0x8b, 0x66, 0xc4, 0x83, 0x5e, 0x96, 0xd7,
+ 0xf5, 0x51, 0x1a, 0xd6, 0xbe, 0xa6, 0xef, 0x95, 0xb0, 0x9b, 0x7a, 0x32, 0x9e, 0x3c, 0x7b, 0xe2,
+ 0x68, 0x8c, 0x07, 0x0e, 0x1e, 0x4d, 0xbc, 0x72, 0x24, 0x9e, 0x3c, 0xa9, 0x81, 0x03, 0xfa, 0x5f,
+ 0x98, 0xf7, 0xa3, 0xa5, 0x8c, 0x8d, 0x11, 0x2f, 0x6b, 0xf7, 0x3e, 0x3f, 0xbe, 0xef, 0xf3, 0xed,
+ 0xf7, 0x3d, 0x30, 0xd9, 0xc4, 0xc4, 0xc3, 0xc4, 0xb4, 0xa0, 0xbf, 0x61, 0x6e, 0x2e, 0x5a, 0x88,
+ 0xc2, 0x45, 0x93, 0x6e, 0x19, 0x41, 0x88, 0x29, 0x56, 0x2f, 0x09, 0xd4, 0x60, 0xa8, 0x21, 0x51,
+ 0x6d, 0xd4, 0xc1, 0x0e, 0xe6, 0xb8, 0xc9, 0xde, 0x04, 0x55, 0xd3, 0x13, 0x23, 0x82, 0x12, 0xa3,
+ 0x26, 0x76, 0xfd, 0x13, 0x78, 0xaa, 0x10, 0xf7, 0x15, 0x78, 0x51, 0xe0, 0x0d, 0x61, 0x2c, 0xeb,
+ 0x0a, 0x68, 0x4c, 0x4a, 0x3d, 0xe2, 0x98, 0x9b, 0x8b, 0xec, 0x21, 0x81, 0x11, 0xe8, 0xb9, 0x3e,
+ 0x36, 0xf9, 0xaf, 0x58, 0x2a, 0x7f, 0x1e, 0x00, 0x83, 0x35, 0xe2, 0xac, 0x22, 0xdf, 0x56, 0xef,
+ 0x80, 0xc2, 0x5a, 0x88, 0xbd, 0x06, 0xb4, 0xed, 0x10, 0x11, 0x32, 0xae, 0x4c, 0x29, 0xb3, 0xf9,
+ 0xea, 0xf8, 0xf7, 0x2f, 0x0b, 0xa3, 0xd2, 0xff, 0x81, 0x40, 0x56, 0x69, 0xe8, 0xfa, 0x4e, 0x7d,
+ 0x88, 0xb1, 0xe5, 0x92, 0x7a, 0x1b, 0x00, 0x8a, 0x13, 0xe9, 0x40, 0x1f, 0x69, 0x9e, 0xe2, 0x58,
+ 0xd8, 0x06, 0x39, 0xe8, 0xe1, 0xc8, 0xa7, 0xe3, 0xd9, 0xa9, 0xec, 0xec, 0xd0, 0x52, 0xd1, 0x48,
+ 0x9a, 0x48, 0x50, 0xdc, 0x44, 0x63, 0x19, 0xbb, 0x7e, 0x75, 0x65, 0xf7, 0x67, 0x29, 0xf3, 0xe9,
+ 0x57, 0x69, 0xd6, 0x71, 0xe9, 0x7a, 0x64, 0x19, 0x4d, 0xec, 0xc9, 0xe4, 0xf2, 0xb1, 0x40, 0xec,
+ 0x0d, 0x93, 0xb6, 0x03, 0x44, 0xb8, 0x80, 0x7c, 0x38, 0xdc, 0x99, 0x2f, 0xb4, 0x90, 0x03, 0x9b,
+ 0xed, 0x06, 0xeb, 0x2d, 0xf9, 0x78, 0xb8, 0x33, 0xaf, 0xd4, 0x65, 0xc1, 0xca, 0xf5, 0xb7, 0xdb,
+ 0xa5, 0xcc, 0x9f, 0xed, 0x52, 0xe6, 0x0d, 0xe3, 0xa5, 0xb3, 0xbf, 0x3b, 0xdc, 0x99, 0x57, 0x53,
+ 0x9e, 0xb2, 0x45, 0xe5, 0x11, 0x30, 0x2c, 0x5f, 0xeb, 0x88, 0x04, 0xd8, 0x27, 0xa8, 0xfc, 0x55,
+ 0x01, 0x85, 0x1a, 0x71, 0x6a, 0x51, 0x8b, 0xba, 0xbc, 0x8d, 0x77, 0x41, 0xce, 0xf5, 0x83, 0x88,
+ 0xb2, 0x06, 0xb2, 0x40, 0x9a, 0xd1, 0x65, 0x2a, 0x8c, 0xc7, 0x8c, 0x52, 0xcd, 0xb3, 0x44, 0x72,
+ 0x53, 0x42, 0xa4, 0xde, 0x07, 0x83, 0x38, 0xa2, 0x5c, 0x3f, 0xc0, 0xf5, 0x13, 0x5d, 0xf5, 0x4f,
+ 0x39, 0x27, 0x6d, 0x10, 0xcb, 0x2a, 0x57, 0xe3, 0x48, 0xd2, 0x92, 0x85, 0x19, 0x3b, 0x1e, 0x26,
+ 0xd9, 0x6d, 0xf9, 0x32, 0x18, 0x4d, 0xff, 0x4f, 0x62, 0x7d, 0x53, 0x78, 0xd4, 0xe7, 0x81, 0x0d,
+ 0x29, 0x7a, 0x06, 0x43, 0xe8, 0x11, 0xf5, 0x16, 0xc8, 0xc3, 0x88, 0xae, 0xe3, 0xd0, 0xa5, 0xed,
+ 0xbe, 0xd3, 0x71, 0x44, 0x55, 0xef, 0x81, 0x5c, 0xc0, 0x1d, 0xf8, 0x5c, 0xf4, 0x4a, 0x24, 0x8a,
+ 0x1c, 0x6b, 0x89, 0x50, 0x55, 0x6e, 0xb2, 0x30, 0x47, 0x7e, 0x2c, 0xcf, 0x74, 0x2a, 0xcf, 0x96,
+ 0x38, 0x24, 0x1d, 0xbb, 0x2d, 0x17, 0xc1, 0x58, 0xc7, 0x52, 0x12, 0xee, 0xaf, 0x02, 0x46, 0xf8,
+ 0x77, 0xa4, 0x2c, 0xf3, 0x23, 0x1f, 0x5a, 0x2d, 0x64, 0xff, 0x77, 0xbc, 0x65, 0x50, 0x20, 0xc8,
+ 0xb7, 0x1b, 0x48, 0xf8, 0xc8, 0xcf, 0x36, 0xd5, 0x35, 0x64, 0xaa, 0x5e, 0x7d, 0x88, 0xa4, 0x8a,
+ 0xcf, 0x80, 0xe1, 0x88, 0xa0, 0x86, 0x8d, 0xd6, 0x60, 0xd4, 0xa2, 0x8d, 0x35, 0x1c, 0xf2, 0xf3,
+ 0x90, 0xaf, 0x5f, 0x88, 0x08, 0x7a, 0x28, 0x56, 0x57, 0x70, 0x58, 0x31, 0x4f, 0xf6, 0x62, 0xb2,
+ 0x73, 0x50, 0xd3, 0xa9, 0xca, 0x13, 0xa0, 0x78, 0x62, 0x31, 0x6e, 0xc4, 0xd2, 0xeb, 0x2c, 0xc8,
+ 0xd6, 0x88, 0xa3, 0x3e, 0x01, 0xe7, 0xf8, 0xec, 0x4e, 0x76, 0xdd, 0xb4, 0x1c, 0x79, 0xed, 0xca,
+ 0x69, 0x68, 0xec, 0xa9, 0xbe, 0x00, 0xf9, 0xa3, 0xc3, 0x30, 0xdd, 0x4b, 0x92, 0x50, 0xb4, 0xb9,
+ 0xbe, 0x94, 0xc4, 0xda, 0x02, 0x85, 0x63, 0x03, 0xd9, 0x73, 0x43, 0x69, 0x96, 0x76, 0xed, 0x2c,
+ 0xac, 0xa4, 0xc6, 0x3a, 0xb8, 0xd8, 0x31, 0x17, 0x33, 0xbd, 0x63, 0xa7, 0x79, 0x9a, 0x71, 0x36,
+ 0x5e, 0x5c, 0x49, 0x3b, 0xff, 0x8a, 0x4d, 0x79, 0x75, 0x79, 0x77, 0x5f, 0x57, 0xf6, 0xf6, 0x75,
+ 0xe5, 0xf7, 0xbe, 0xae, 0xbc, 0x3f, 0xd0, 0x33, 0x7b, 0x07, 0x7a, 0xe6, 0xc7, 0x81, 0x9e, 0x79,
+ 0x39, 0x77, 0xea, 0x3d, 0x27, 0xc7, 0x9e, 0x5f, 0x77, 0x56, 0x8e, 0x5f, 0xe7, 0x37, 0xfe, 0x05,
+ 0x00, 0x00, 0xff, 0xff, 0x5b, 0x5b, 0x43, 0xa9, 0xa0, 0x06, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// MsgClient is the client API for Msg service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type MsgClient interface {
+ // Send defines a method for sending coins from one account to another account.
+ Send(ctx context.Context, in *MsgSend, opts ...grpc.CallOption) (*MsgSendResponse, error)
+ // MultiSend defines a method for sending coins from some accounts to other accounts.
+ MultiSend(ctx context.Context, in *MsgMultiSend, opts ...grpc.CallOption) (*MsgMultiSendResponse, error)
+ // UpdateParams defines a governance operation for updating the x/bank module parameters.
+ // The authority is defined in the keeper.
+ //
+ // Since: cosmos-sdk 0.47
+ UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error)
+ // SetSendEnabled is a governance operation for setting the SendEnabled flag
+ // on any number of Denoms. Only the entries to add or update should be
+ // included. Entries that already exist in the store, but that aren't
+ // included in this message, will be left unchanged.
+ //
+ // Since: cosmos-sdk 0.47
+ SetSendEnabled(ctx context.Context, in *MsgSetSendEnabled, opts ...grpc.CallOption) (*MsgSetSendEnabledResponse, error)
+}
+
+type msgClient struct {
+ cc grpc1.ClientConn
+}
+
+func NewMsgClient(cc grpc1.ClientConn)
+
+MsgClient {
+ return &msgClient{
+ cc
+}
+}
+
+func (c *msgClient)
+
+Send(ctx context.Context, in *MsgSend, opts ...grpc.CallOption) (*MsgSendResponse, error) {
+ out := new(MsgSendResponse)
+ err := c.cc.Invoke(ctx, "/cosmos.bank.v1beta1.Msg/Send", in, out, opts...)
+ if err != nil {
+ return nil, err
+}
+
+return out, nil
+}
+
+func (c *msgClient)
+
+MultiSend(ctx context.Context, in *MsgMultiSend, opts ...grpc.CallOption) (*MsgMultiSendResponse, error) {
+ out := new(MsgMultiSendResponse)
+ err := c.cc.Invoke(ctx, "/cosmos.bank.v1beta1.Msg/MultiSend", in, out, opts...)
+ if err != nil {
+ return nil, err
+}
+
+return out, nil
+}
+
+func (c *msgClient)
+
+UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) {
+ out := new(MsgUpdateParamsResponse)
+ err := c.cc.Invoke(ctx, "/cosmos.bank.v1beta1.Msg/UpdateParams", in, out, opts...)
+ if err != nil {
+ return nil, err
+}
+
+return out, nil
+}
+
+func (c *msgClient)
+
+SetSendEnabled(ctx context.Context, in *MsgSetSendEnabled, opts ...grpc.CallOption) (*MsgSetSendEnabledResponse, error) {
+ out := new(MsgSetSendEnabledResponse)
+ err := c.cc.Invoke(ctx, "/cosmos.bank.v1beta1.Msg/SetSendEnabled", in, out, opts...)
+ if err != nil {
+ return nil, err
+}
+
+return out, nil
+}
+
+// MsgServer is the server API for Msg service.
+type MsgServer interface {
+ // Send defines a method for sending coins from one account to another account.
+ Send(context.Context, *MsgSend) (*MsgSendResponse, error)
+ // MultiSend defines a method for sending coins from some accounts to other accounts.
+ MultiSend(context.Context, *MsgMultiSend) (*MsgMultiSendResponse, error)
+ // UpdateParams defines a governance operation for updating the x/bank module parameters.
+ // The authority is defined in the keeper.
+ //
+ // Since: cosmos-sdk 0.47
+ UpdateParams(context.Context, *MsgUpdateParams) (*MsgUpdateParamsResponse, error)
+ // SetSendEnabled is a governance operation for setting the SendEnabled flag
+ // on any number of Denoms. Only the entries to add or update should be
+ // included. Entries that already exist in the store, but that aren't
+ // included in this message, will be left unchanged.
+ //
+ // Since: cosmos-sdk 0.47
+ SetSendEnabled(context.Context, *MsgSetSendEnabled) (*MsgSetSendEnabledResponse, error)
+}
+
+// UnimplementedMsgServer can be embedded to have forward compatible implementations.
+type UnimplementedMsgServer struct {
+}
+
+func (*UnimplementedMsgServer)
+
+Send(ctx context.Context, req *MsgSend) (*MsgSendResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Send not implemented")
+}
+
+func (*UnimplementedMsgServer)
+
+MultiSend(ctx context.Context, req *MsgMultiSend) (*MsgMultiSendResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method MultiSend not implemented")
+}
+
+func (*UnimplementedMsgServer)
+
+UpdateParams(ctx context.Context, req *MsgUpdateParams) (*MsgUpdateParamsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateParams not implemented")
+}
+
+func (*UnimplementedMsgServer)
+
+SetSendEnabled(ctx context.Context, req *MsgSetSendEnabled) (*MsgSetSendEnabledResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SetSendEnabled not implemented")
+}
+
+func RegisterMsgServer(s grpc1.Server, srv MsgServer) {
+ s.RegisterService(&_Msg_serviceDesc, srv)
+}
+
+func _Msg_Send_Handler(srv interface{
+}, ctx context.Context, dec func(interface{
+})
+
+error, interceptor grpc.UnaryServerInterceptor) (interface{
+}, error) {
+ in := new(MsgSend)
+ if err := dec(in); err != nil {
+ return nil, err
+}
+ if interceptor == nil {
+ return srv.(MsgServer).Send(ctx, in)
+}
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/cosmos.bank.v1beta1.Msg/Send",
+}
+ handler := func(ctx context.Context, req interface{
+}) (interface{
+}, error) {
+ return srv.(MsgServer).Send(ctx, req.(*MsgSend))
+}
+
+return interceptor(ctx, in, info, handler)
+}
+
+func _Msg_MultiSend_Handler(srv interface{
+}, ctx context.Context, dec func(interface{
+})
+
+error, interceptor grpc.UnaryServerInterceptor) (interface{
+}, error) {
+ in := new(MsgMultiSend)
+ if err := dec(in); err != nil {
+ return nil, err
+}
+ if interceptor == nil {
+ return srv.(MsgServer).MultiSend(ctx, in)
+}
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/cosmos.bank.v1beta1.Msg/MultiSend",
+}
+ handler := func(ctx context.Context, req interface{
+}) (interface{
+}, error) {
+ return srv.(MsgServer).MultiSend(ctx, req.(*MsgMultiSend))
+}
+
+return interceptor(ctx, in, info, handler)
+}
+
+func _Msg_UpdateParams_Handler(srv interface{
+}, ctx context.Context, dec func(interface{
+})
+
+error, interceptor grpc.UnaryServerInterceptor) (interface{
+}, error) {
+ in := new(MsgUpdateParams)
+ if err := dec(in); err != nil {
+ return nil, err
+}
+ if interceptor == nil {
+ return srv.(MsgServer).UpdateParams(ctx, in)
+}
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/cosmos.bank.v1beta1.Msg/UpdateParams",
+}
+ handler := func(ctx context.Context, req interface{
+}) (interface{
+}, error) {
+ return srv.(MsgServer).UpdateParams(ctx, req.(*MsgUpdateParams))
+}
+
+return interceptor(ctx, in, info, handler)
+}
+
+func _Msg_SetSendEnabled_Handler(srv interface{
+}, ctx context.Context, dec func(interface{
+})
+
+error, interceptor grpc.UnaryServerInterceptor) (interface{
+}, error) {
+ in := new(MsgSetSendEnabled)
+ if err := dec(in); err != nil {
+ return nil, err
+}
+ if interceptor == nil {
+ return srv.(MsgServer).SetSendEnabled(ctx, in)
+}
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/cosmos.bank.v1beta1.Msg/SetSendEnabled",
+}
+ handler := func(ctx context.Context, req interface{
+}) (interface{
+}, error) {
+ return srv.(MsgServer).SetSendEnabled(ctx, req.(*MsgSetSendEnabled))
+}
+
+return interceptor(ctx, in, info, handler)
+}
+
+var _Msg_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "cosmos.bank.v1beta1.Msg",
+ HandlerType: (*MsgServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Send",
+ Handler: _Msg_Send_Handler,
+},
+ {
+ MethodName: "MultiSend",
+ Handler: _Msg_MultiSend_Handler,
+},
+ {
+ MethodName: "UpdateParams",
+ Handler: _Msg_UpdateParams_Handler,
+},
+ {
+ MethodName: "SetSendEnabled",
+ Handler: _Msg_SetSendEnabled_Handler,
+},
+},
+ Streams: []grpc.StreamDesc{
+},
+ Metadata: "cosmos/bank/v1beta1/tx.proto",
+}
+
+func (m *MsgSend)
+
+Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+
+dAtA = make([]byte, size)
+
+n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+}
+
+return dAtA[:n], nil
+}
+
+func (m *MsgSend)
+
+MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+
+return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgSend)
+
+MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Amount) > 0 {
+ for iNdEx := len(m.Amount) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Amount[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+}
+
+i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+}
+
+i--
+ dAtA[i] = 0x1a
+}
+
+}
+ if len(m.ToAddress) > 0 {
+ i -= len(m.ToAddress)
+
+copy(dAtA[i:], m.ToAddress)
+
+i = encodeVarintTx(dAtA, i, uint64(len(m.ToAddress)))
+
+i--
+ dAtA[i] = 0x12
+}
+ if len(m.FromAddress) > 0 {
+ i -= len(m.FromAddress)
+
+copy(dAtA[i:], m.FromAddress)
+
+i = encodeVarintTx(dAtA, i, uint64(len(m.FromAddress)))
+
+i--
+ dAtA[i] = 0xa
+}
+
+return len(dAtA) - i, nil
+}
+
+func (m *MsgSendResponse)
+
+Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+
+dAtA = make([]byte, size)
+
+n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+}
+
+return dAtA[:n], nil
+}
+
+func (m *MsgSendResponse)
+
+MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+
+return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgSendResponse)
+
+MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgMultiSend)
+
+Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+
+dAtA = make([]byte, size)
+
+n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+}
+
+return dAtA[:n], nil
+}
+
+func (m *MsgMultiSend)
+
+MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+
+return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgMultiSend)
+
+MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Outputs) > 0 {
+ for iNdEx := len(m.Outputs) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Outputs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+}
+
+i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+}
+
+i--
+ dAtA[i] = 0x12
+}
+
+}
+ if len(m.Inputs) > 0 {
+ for iNdEx := len(m.Inputs) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Inputs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+}
+
+i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+}
+
+i--
+ dAtA[i] = 0xa
+}
+
+}
+
+return len(dAtA) - i, nil
+}
+
+func (m *MsgMultiSendResponse)
+
+Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+
+dAtA = make([]byte, size)
+
+n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+}
+
+return dAtA[:n], nil
+}
+
+func (m *MsgMultiSendResponse)
+
+MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+
+return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgMultiSendResponse)
+
+MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgUpdateParams)
+
+Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+
+dAtA = make([]byte, size)
+
+n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+}
+
+return dAtA[:n], nil
+}
+
+func (m *MsgUpdateParams)
+
+MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+
+return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgUpdateParams)
+
+MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Params.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+}
+
+i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+}
+
+i--
+ dAtA[i] = 0x12
+ if len(m.Authority) > 0 {
+ i -= len(m.Authority)
+
+copy(dAtA[i:], m.Authority)
+
+i = encodeVarintTx(dAtA, i, uint64(len(m.Authority)))
+
+i--
+ dAtA[i] = 0xa
+}
+
+return len(dAtA) - i, nil
+}
+
+func (m *MsgUpdateParamsResponse)
+
+Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+
+dAtA = make([]byte, size)
+
+n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+}
+
+return dAtA[:n], nil
+}
+
+func (m *MsgUpdateParamsResponse)
+
+MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+
+return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgUpdateParamsResponse)
+
+MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgSetSendEnabled)
+
+Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+
+dAtA = make([]byte, size)
+
+n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+}
+
+return dAtA[:n], nil
+}
+
+func (m *MsgSetSendEnabled)
+
+MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+
+return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgSetSendEnabled)
+
+MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.UseDefaultFor) > 0 {
+ for iNdEx := len(m.UseDefaultFor) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.UseDefaultFor[iNdEx])
+
+copy(dAtA[i:], m.UseDefaultFor[iNdEx])
+
+i = encodeVarintTx(dAtA, i, uint64(len(m.UseDefaultFor[iNdEx])))
+
+i--
+ dAtA[i] = 0x1a
+}
+
+}
+ if len(m.SendEnabled) > 0 {
+ for iNdEx := len(m.SendEnabled) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.SendEnabled[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+}
+
+i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+}
+
+i--
+ dAtA[i] = 0x12
+}
+
+}
+ if len(m.Authority) > 0 {
+ i -= len(m.Authority)
+
+copy(dAtA[i:], m.Authority)
+
+i = encodeVarintTx(dAtA, i, uint64(len(m.Authority)))
+
+i--
+ dAtA[i] = 0xa
+}
+
+return len(dAtA) - i, nil
+}
+
+func (m *MsgSetSendEnabledResponse)
+
+Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+
+dAtA = make([]byte, size)
+
+n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+}
+
+return dAtA[:n], nil
+}
+
+func (m *MsgSetSendEnabledResponse)
+
+MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+
+return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgSetSendEnabledResponse)
+
+MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintTx(dAtA []byte, offset int, v uint64)
+
+int {
+ offset -= sovTx(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+
+v >>= 7
+ offset++
+}
+
+dAtA[offset] = uint8(v)
+
+return base
+}
+
+func (m *MsgSend)
+
+Size() (n int) {
+ if m == nil {
+ return 0
+}
+
+var l int
+ _ = l
+ l = len(m.FromAddress)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+}
+
+l = len(m.ToAddress)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+}
+ if len(m.Amount) > 0 {
+ for _, e := range m.Amount {
+ l = e.Size()
+
+n += 1 + l + sovTx(uint64(l))
+}
+
+}
+
+return n
+}
+
+func (m *MsgSendResponse)
+
+Size() (n int) {
+ if m == nil {
+ return 0
+}
+
+var l int
+ _ = l
+ return n
+}
+
+func (m *MsgMultiSend)
+
+Size() (n int) {
+ if m == nil {
+ return 0
+}
+
+var l int
+ _ = l
+ if len(m.Inputs) > 0 {
+ for _, e := range m.Inputs {
+ l = e.Size()
+
+n += 1 + l + sovTx(uint64(l))
+}
+
+}
+ if len(m.Outputs) > 0 {
+ for _, e := range m.Outputs {
+ l = e.Size()
+
+n += 1 + l + sovTx(uint64(l))
+}
+
+}
+
+return n
+}
+
+func (m *MsgMultiSendResponse)
+
+Size() (n int) {
+ if m == nil {
+ return 0
+}
+
+var l int
+ _ = l
+ return n
+}
+
+func (m *MsgUpdateParams)
+
+Size() (n int) {
+ if m == nil {
+ return 0
+}
+
+var l int
+ _ = l
+ l = len(m.Authority)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+}
+
+l = m.Params.Size()
+
+n += 1 + l + sovTx(uint64(l))
+
+return n
+}
+
+func (m *MsgUpdateParamsResponse)
+
+Size() (n int) {
+ if m == nil {
+ return 0
+}
+
+var l int
+ _ = l
+ return n
+}
+
+func (m *MsgSetSendEnabled)
+
+Size() (n int) {
+ if m == nil {
+ return 0
+}
+
+var l int
+ _ = l
+ l = len(m.Authority)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+}
+ if len(m.SendEnabled) > 0 {
+ for _, e := range m.SendEnabled {
+ l = e.Size()
+
+n += 1 + l + sovTx(uint64(l))
+}
+
+}
+ if len(m.UseDefaultFor) > 0 {
+ for _, s := range m.UseDefaultFor {
+ l = len(s)
+
+n += 1 + l + sovTx(uint64(l))
+}
+
+}
+
+return n
+}
+
+func (m *MsgSetSendEnabledResponse)
+
+Size() (n int) {
+ if m == nil {
+ return 0
+}
+
+var l int
+ _ = l
+ return n
+}
+
+func sovTx(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+
+func sozTx(x uint64) (n int) {
+ return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+func (m *MsgSend)
+
+Unmarshal(dAtA []byte)
+
+error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgSend: wiretype end group for non-group")
+}
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgSend: illegal tag %d (wire type %d)", fieldNum, wire)
+}
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FromAddress", wireType)
+}
+
+var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+}
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+}
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+}
+
+m.FromAddress = string(dAtA[iNdEx:postIndex])
+
+iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ToAddress", wireType)
+}
+
+var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+}
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+}
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+}
+
+m.ToAddress = string(dAtA[iNdEx:postIndex])
+
+iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType)
+}
+
+var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+}
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+}
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+}
+
+m.Amount = append(m.Amount, types.Coin{
+})
+ if err := m.Amount[len(m.Amount)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+}
+
+iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+}
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+}
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+}
+
+iNdEx += skippy
+}
+
+}
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+}
+
+return nil
+}
+
+func (m *MsgSendResponse)
+
+Unmarshal(dAtA []byte)
+
+error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgSendResponse: wiretype end group for non-group")
+}
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgSendResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+}
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+}
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+}
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+}
+
+iNdEx += skippy
+}
+
+}
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+}
+
+return nil
+}
+
+func (m *MsgMultiSend)
+
+Unmarshal(dAtA []byte)
+
+error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgMultiSend: wiretype end group for non-group")
+}
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgMultiSend: illegal tag %d (wire type %d)", fieldNum, wire)
+}
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType)
+}
+
+var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+}
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+}
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+}
+
+m.Inputs = append(m.Inputs, Input{
+})
+ if err := m.Inputs[len(m.Inputs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+}
+
+iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Outputs", wireType)
+}
+
+var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+}
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+}
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+}
+
+m.Outputs = append(m.Outputs, Output{
+})
+ if err := m.Outputs[len(m.Outputs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+}
+
+iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+}
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+}
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+}
+
+iNdEx += skippy
+}
+
+}
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+}
+
+return nil
+}
+
+func (m *MsgMultiSendResponse)
+
+Unmarshal(dAtA []byte)
+
+error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgMultiSendResponse: wiretype end group for non-group")
+}
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgMultiSendResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+}
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+}
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+}
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+}
+
+iNdEx += skippy
+}
+
+}
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+}
+
+return nil
+}
+
+func (m *MsgUpdateParams)
+
+Unmarshal(dAtA []byte)
+
+error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgUpdateParams: wiretype end group for non-group")
+}
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgUpdateParams: illegal tag %d (wire type %d)", fieldNum, wire)
+}
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType)
+}
+
+var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+}
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+}
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+}
+
+m.Authority = string(dAtA[iNdEx:postIndex])
+
+iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType)
+}
+
+var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+}
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+}
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+}
+ if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+}
+
+iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+}
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+}
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+}
+
+iNdEx += skippy
+}
+
+}
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+}
+
+return nil
+}
+
+func (m *MsgUpdateParamsResponse)
+
+Unmarshal(dAtA []byte)
+
+error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgUpdateParamsResponse: wiretype end group for non-group")
+}
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgUpdateParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+}
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+}
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+}
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+}
+
+iNdEx += skippy
+}
+
+}
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+}
+
+return nil
+}
+
+func (m *MsgSetSendEnabled)
+
+Unmarshal(dAtA []byte)
+
+error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgSetSendEnabled: wiretype end group for non-group")
+}
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgSetSendEnabled: illegal tag %d (wire type %d)", fieldNum, wire)
+}
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType)
+}
+
+var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+}
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+}
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+}
+
+m.Authority = string(dAtA[iNdEx:postIndex])
+
+iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SendEnabled", wireType)
+}
+
+var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+}
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+}
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+}
+
+m.SendEnabled = append(m.SendEnabled, &SendEnabled{
+})
+ if err := m.SendEnabled[len(m.SendEnabled)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+}
+
+iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UseDefaultFor", wireType)
+}
+
+var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+}
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+}
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+}
+
+m.UseDefaultFor = append(m.UseDefaultFor, string(dAtA[iNdEx:postIndex]))
+
+iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+}
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+}
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+}
+
+iNdEx += skippy
+}
+
+}
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+}
+
+return nil
+}
+
+func (m *MsgSetSendEnabledResponse)
+
+Unmarshal(dAtA []byte)
+
+error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgSetSendEnabledResponse: wiretype end group for non-group")
+}
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgSetSendEnabledResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+}
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+}
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+}
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+}
+
+iNdEx += skippy
+}
+
+}
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+}
+
+return nil
+}
+
+func skipTx(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTx
+}
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTx
+}
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+}
+
+iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+}
+
+}
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTx
+}
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ if length < 0 {
+ return 0, ErrInvalidLengthTx
+}
+
+iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupTx
+}
+
+depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthTx
+}
+ if depth == 0 {
+ return iNdEx, nil
+}
+
+}
+
+return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling")
+
+ErrIntOverflowTx = fmt.Errorf("proto: integer overflow")
+
+ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group")
+)
+```
+
+When possible, the existing module's [`Keeper`](/sdk/v0.50/build/building-modules/keeper) should implement `MsgServer`, otherwise a `msgServer` struct that embeds the `Keeper` can be created, typically in `./keeper/msg_server.go`:
+
+```go expandable
+package keeper
+
+import (
+
+ "context"
+ "github.com/armon/go-metrics"
+
+ errorsmod "cosmossdk.io/errors"
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/x/bank/types"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+)
+
+type msgServer struct {
+ Keeper
+}
+
+var _ types.MsgServer = msgServer{
+}
+
+// NewMsgServerImpl returns an implementation of the bank MsgServer interface
+// for the provided Keeper.
+func NewMsgServerImpl(keeper Keeper)
+
+types.MsgServer {
+ return &msgServer{
+ Keeper: keeper,
+}
+}
+
+func (k msgServer)
+
+Send(goCtx context.Context, msg *types.MsgSend) (*types.MsgSendResponse, error) {
+ var (
+ from, to []byte
+ err error
+ )
+ if base, ok := k.Keeper.(BaseKeeper); ok {
+ from, err = base.ak.AddressCodec().StringToBytes(msg.FromAddress)
+ if err != nil {
+ return nil, sdkerrors.ErrInvalidAddress.Wrapf("invalid from address: %s", err)
+}
+
+to, err = base.ak.AddressCodec().StringToBytes(msg.ToAddress)
+ if err != nil {
+ return nil, sdkerrors.ErrInvalidAddress.Wrapf("invalid to address: %s", err)
+}
+
+}
+
+else {
+ return nil, sdkerrors.ErrInvalidRequest.Wrapf("invalid keeper type: %T", k.Keeper)
+}
+ if !msg.Amount.IsValid() {
+ return nil, errorsmod.Wrap(sdkerrors.ErrInvalidCoins, msg.Amount.String())
+}
+ if !msg.Amount.IsAllPositive() {
+ return nil, errorsmod.Wrap(sdkerrors.ErrInvalidCoins, msg.Amount.String())
+}
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ if err := k.IsSendEnabledCoins(ctx, msg.Amount...); err != nil {
+ return nil, err
+}
+ if k.BlockedAddr(to) {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrUnauthorized, "%s is not allowed to receive funds", msg.ToAddress)
+}
+
+err = k.SendCoins(ctx, from, to, msg.Amount)
+ if err != nil {
+ return nil, err
+}
+
+defer func() {
+ for _, a := range msg.Amount {
+ if a.Amount.IsInt64() {
+ telemetry.SetGaugeWithLabels(
+ []string{"tx", "msg", "send"
+},
+ float32(a.Amount.Int64()),
+ []metrics.Label{
+ telemetry.NewLabel("denom", a.Denom)
+},
+ )
+}
+
+}
+
+}()
+
+return &types.MsgSendResponse{
+}, nil
+}
+
+func (k msgServer)
+
+MultiSend(goCtx context.Context, msg *types.MsgMultiSend) (*types.MsgMultiSendResponse, error) {
+ if len(msg.Inputs) == 0 {
+ return nil, types.ErrNoInputs
+}
+ if len(msg.Inputs) != 1 {
+ return nil, types.ErrMultipleSenders
+}
+ if len(msg.Outputs) == 0 {
+ return nil, types.ErrNoOutputs
+}
+ if err := types.ValidateInputOutputs(msg.Inputs[0], msg.Outputs); err != nil {
+ return nil, err
+}
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+ // NOTE: totalIn == totalOut should already have been checked
+ for _, in := range msg.Inputs {
+ if err := k.IsSendEnabledCoins(ctx, in.Coins...); err != nil {
+ return nil, err
+}
+
+}
+ for _, out := range msg.Outputs {
+ if base, ok := k.Keeper.(BaseKeeper); ok {
+ accAddr, err := base.ak.AddressCodec().StringToBytes(out.Address)
+ if err != nil {
+ return nil, err
+}
+ if k.BlockedAddr(accAddr) {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrUnauthorized, "%s is not allowed to receive funds", out.Address)
+}
+
+}
+
+else {
+ return nil, sdkerrors.ErrInvalidRequest.Wrapf("invalid keeper type: %T", k.Keeper)
+}
+
+}
+ err := k.InputOutputCoins(ctx, msg.Inputs[0], msg.Outputs)
+ if err != nil {
+ return nil, err
+}
+
+return &types.MsgMultiSendResponse{
+}, nil
+}
+
+func (k msgServer)
+
+UpdateParams(goCtx context.Context, req *types.MsgUpdateParams) (*types.MsgUpdateParamsResponse, error) {
+ if k.GetAuthority() != req.Authority {
+ return nil, errorsmod.Wrapf(govtypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.GetAuthority(), req.Authority)
+}
+ if err := req.Params.Validate(); err != nil {
+ return nil, err
+}
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ if err := k.SetParams(ctx, req.Params); err != nil {
+ return nil, err
+}
+
+return &types.MsgUpdateParamsResponse{
+}, nil
+}
+
+func (k msgServer)
+
+SetSendEnabled(goCtx context.Context, msg *types.MsgSetSendEnabled) (*types.MsgSetSendEnabledResponse, error) {
+ if k.GetAuthority() != msg.Authority {
+ return nil, errorsmod.Wrapf(govtypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.GetAuthority(), msg.Authority)
+}
+ seen := map[string]bool{
+}
+ for _, se := range msg.SendEnabled {
+ if _, alreadySeen := seen[se.Denom]; alreadySeen {
+ return nil, sdkerrors.ErrInvalidRequest.Wrapf("duplicate denom entries found for %q", se.Denom)
+}
+
+seen[se.Denom] = true
+ if err := se.Validate(); err != nil {
+ return nil, sdkerrors.ErrInvalidRequest.Wrapf("invalid SendEnabled denom %q: %s", se.Denom, err)
+}
+
+}
+ for _, denom := range msg.UseDefaultFor {
+ if err := sdk.ValidateDenom(denom); err != nil {
+ return nil, sdkerrors.ErrInvalidRequest.Wrapf("invalid UseDefaultFor denom %q: %s", denom, err)
+}
+
+}
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ if len(msg.SendEnabled) > 0 {
+ k.SetAllSendEnabled(ctx, msg.SendEnabled)
+}
+ if len(msg.UseDefaultFor) > 0 {
+ k.DeleteSendEnabled(ctx, msg.UseDefaultFor...)
+}
+
+return &types.MsgSetSendEnabledResponse{
+}, nil
+}
+```
+
+`msgServer` methods can retrieve the `sdk.Context` from the `context.Context` parameter using `sdk.UnwrapSDKContext`:
+
+```go expandable
+package keeper
+
+import (
+
+ "context"
+ "github.com/armon/go-metrics"
+
+ errorsmod "cosmossdk.io/errors"
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/x/bank/types"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+)
+
+type msgServer struct {
+ Keeper
+}
+
+var _ types.MsgServer = msgServer{
+}
+
+// NewMsgServerImpl returns an implementation of the bank MsgServer interface
+// for the provided Keeper.
+func NewMsgServerImpl(keeper Keeper)
+
+types.MsgServer {
+ return &msgServer{
+ Keeper: keeper
+}
+}
+
+func (k msgServer)
+
+Send(goCtx context.Context, msg *types.MsgSend) (*types.MsgSendResponse, error) {
+ var (
+ from, to []byte
+ err error
+ )
+ if base, ok := k.Keeper.(BaseKeeper); ok {
+ from, err = base.ak.AddressCodec().StringToBytes(msg.FromAddress)
+ if err != nil {
+ return nil, sdkerrors.ErrInvalidAddress.Wrapf("invalid from address: %s", err)
+}
+
+to, err = base.ak.AddressCodec().StringToBytes(msg.ToAddress)
+ if err != nil {
+ return nil, sdkerrors.ErrInvalidAddress.Wrapf("invalid to address: %s", err)
+}
+
+}
+
+else {
+ return nil, sdkerrors.ErrInvalidRequest.Wrapf("invalid keeper type: %T", k.Keeper)
+}
+ if !msg.Amount.IsValid() {
+ return nil, errorsmod.Wrap(sdkerrors.ErrInvalidCoins, msg.Amount.String())
+}
+ if !msg.Amount.IsAllPositive() {
+ return nil, errorsmod.Wrap(sdkerrors.ErrInvalidCoins, msg.Amount.String())
+}
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ if err := k.IsSendEnabledCoins(ctx, msg.Amount...); err != nil {
+ return nil, err
+}
+ if k.BlockedAddr(to) {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrUnauthorized, "%s is not allowed to receive funds", msg.ToAddress)
+}
+
+err = k.SendCoins(ctx, from, to, msg.Amount)
+ if err != nil {
+ return nil, err
+}
+
+defer func() {
+ for _, a := range msg.Amount {
+ if a.Amount.IsInt64() {
+ telemetry.SetGaugeWithLabels(
+ []string{"tx", "msg", "send"
+},
+ float32(a.Amount.Int64()),
+ []metrics.Label{
+ telemetry.NewLabel("denom", a.Denom)
+},
+ )
+}
+
+}
+
+}()
+
+return &types.MsgSendResponse{
+}, nil
+}
+
+func (k msgServer)
+
+MultiSend(goCtx context.Context, msg *types.MsgMultiSend) (*types.MsgMultiSendResponse, error) {
+ if len(msg.Inputs) == 0 {
+ return nil, types.ErrNoInputs
+}
+ if len(msg.Inputs) != 1 {
+ return nil, types.ErrMultipleSenders
+}
+ if len(msg.Outputs) == 0 {
+ return nil, types.ErrNoOutputs
+}
+ if err := types.ValidateInputOutputs(msg.Inputs[0], msg.Outputs); err != nil {
+ return nil, err
+}
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+ // NOTE: totalIn == totalOut should already have been checked
+ for _, in := range msg.Inputs {
+ if err := k.IsSendEnabledCoins(ctx, in.Coins...); err != nil {
+ return nil, err
+}
+
+}
+ for _, out := range msg.Outputs {
+ if base, ok := k.Keeper.(BaseKeeper); ok {
+ accAddr, err := base.ak.AddressCodec().StringToBytes(out.Address)
+ if err != nil {
+ return nil, err
+}
+ if k.BlockedAddr(accAddr) {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrUnauthorized, "%s is not allowed to receive funds", out.Address)
+}
+
+}
+
+else {
+ return nil, sdkerrors.ErrInvalidRequest.Wrapf("invalid keeper type: %T", k.Keeper)
+}
+
+}
+ err := k.InputOutputCoins(ctx, msg.Inputs[0], msg.Outputs)
+ if err != nil {
+ return nil, err
+}
+
+return &types.MsgMultiSendResponse{
+}, nil
+}
+
+func (k msgServer)
+
+UpdateParams(goCtx context.Context, req *types.MsgUpdateParams) (*types.MsgUpdateParamsResponse, error) {
+ if k.GetAuthority() != req.Authority {
+ return nil, errorsmod.Wrapf(govtypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.GetAuthority(), req.Authority)
+}
+ if err := req.Params.Validate(); err != nil {
+ return nil, err
+}
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ if err := k.SetParams(ctx, req.Params); err != nil {
+ return nil, err
+}
+
+return &types.MsgUpdateParamsResponse{
+}, nil
+}
+
+func (k msgServer)
+
+SetSendEnabled(goCtx context.Context, msg *types.MsgSetSendEnabled) (*types.MsgSetSendEnabledResponse, error) {
+ if k.GetAuthority() != msg.Authority {
+ return nil, errorsmod.Wrapf(govtypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.GetAuthority(), msg.Authority)
+}
+ seen := map[string]bool{
+}
+ for _, se := range msg.SendEnabled {
+ if _, alreadySeen := seen[se.Denom]; alreadySeen {
+ return nil, sdkerrors.ErrInvalidRequest.Wrapf("duplicate denom entries found for %q", se.Denom)
+}
+
+seen[se.Denom] = true
+ if err := se.Validate(); err != nil {
+ return nil, sdkerrors.ErrInvalidRequest.Wrapf("invalid SendEnabled denom %q: %s", se.Denom, err)
+}
+
+}
+ for _, denom := range msg.UseDefaultFor {
+ if err := sdk.ValidateDenom(denom); err != nil {
+ return nil, sdkerrors.ErrInvalidRequest.Wrapf("invalid UseDefaultFor denom %q: %s", denom, err)
+}
+
+}
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ if len(msg.SendEnabled) > 0 {
+ k.SetAllSendEnabled(ctx, msg.SendEnabled)
+}
+ if len(msg.UseDefaultFor) > 0 {
+ k.DeleteSendEnabled(ctx, msg.UseDefaultFor...)
+}
+
+return &types.MsgSetSendEnabledResponse{
+}, nil
+}
+```
+
+`sdk.Msg` processing usually follows these 3 steps:
+
+### Validation
+
+The message server must perform all validation required (both stateful and stateless) to make sure the `message` is valid.
+The `signer` is charged for the gas cost of this validation.
+
+For example, a `msgServer` method for a `transfer` message should check that the sending account has enough funds to actually perform the transfer.
+
+It is recommended to implement all validation checks in a separate function that passes state values as arguments. This implementation simplifies testing. As expected, expensive validation functions charge additional gas. Example:
+
+```go
+ValidateMsgA(msg MsgA, now Time, gm GasMeter)
+
+error {
+ if now.Before(msg.Expire) {
+ return sdkerrors.ErrInvalidRequest.Wrap("msg expired")
+}
+
+gm.ConsumeGas(1000, "signature verification")
+
+return signatureVerification(msg.Prover, msg.Data)
+}
+```
+
+
+Previously, the `ValidateBasic` method was used to perform simple and stateless validation checks.
+This way of validating is deprecated, which means the `msgServer` must now perform all validation checks.
+
+
+### State Transition
+
+After the validation is successful, the `msgServer` method uses the [`keeper`](/sdk/v0.50/build/building-modules/keeper) functions to access the state and perform a state transition.
+
+### Events
+
+Before returning, `msgServer` methods generally emit one or more [events](/sdk/v0.50/learn/advanced/events) by using the `EventManager` held in the `ctx`. Use the new `EmitTypedEvent` function that uses protobuf-based event types:
+
+```go
+ctx.EventManager().EmitTypedEvent(
+ &group.EventABC{
+ Key1: Value1, Key2, Value2
+})
+```
+
+or the older `EmitEvent` function:
+
+```go
+ctx.EventManager().EmitEvent(
+ sdk.NewEvent(
+ eventType, // e.g. sdk.EventTypeMessage for a message, types.CustomEventType for a custom event defined in the module
+ sdk.NewAttribute(key1, value1),
+ sdk.NewAttribute(key2, value2),
+ ),
+)
+```
+
+These events are relayed back to the underlying consensus engine and can be used by service providers to implement services around the application. Click [here](/sdk/v0.50/learn/advanced/events) to learn more about events.
+
+The invoked `msgServer` method returns a `proto.Message` response and an `error`. These return values are then wrapped into an `*sdk.Result` or an `error` using `sdk.WrapServiceResult(ctx sdk.Context, res proto.Message, err error)`:
+
+```go expandable
+package baseapp
+
+import (
+
+ "context"
+ "fmt"
+
+ gogogrpc "github.com/cosmos/gogoproto/grpc"
+ "github.com/cosmos/gogoproto/proto"
+ "google.golang.org/grpc"
+
+ errorsmod "cosmossdk.io/errors"
+
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// MessageRouter ADR 031 request type routing
+// https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-031-msg-service.md
+type MessageRouter interface {
+ Handler(msg sdk.Msg)
+
+MsgServiceHandler
+ HandlerByTypeURL(typeURL string)
+
+MsgServiceHandler
+}
+
+// MsgServiceRouter routes fully-qualified Msg service methods to their handler.
+type MsgServiceRouter struct {
+ interfaceRegistry codectypes.InterfaceRegistry
+ routes map[string]MsgServiceHandler
+ circuitBreaker CircuitBreaker
+}
+
+var _ gogogrpc.Server = &MsgServiceRouter{
+}
+
+// NewMsgServiceRouter creates a new MsgServiceRouter.
+func NewMsgServiceRouter() *MsgServiceRouter {
+ return &MsgServiceRouter{
+ routes: map[string]MsgServiceHandler{
+},
+}
+}
+
+func (msr *MsgServiceRouter)
+
+SetCircuit(cb CircuitBreaker) {
+ msr.circuitBreaker = cb
+}
+
+// MsgServiceHandler defines a function type which handles Msg service message.
+type MsgServiceHandler = func(ctx sdk.Context, req sdk.Msg) (*sdk.Result, error)
+
+// Handler returns the MsgServiceHandler for a given msg or nil if not found.
+func (msr *MsgServiceRouter)
+
+Handler(msg sdk.Msg)
+
+MsgServiceHandler {
+ return msr.routes[sdk.MsgTypeURL(msg)]
+}
+
+// HandlerByTypeURL returns the MsgServiceHandler for a given query route path or nil
+// if not found.
+func (msr *MsgServiceRouter)
+
+HandlerByTypeURL(typeURL string)
+
+MsgServiceHandler {
+ return msr.routes[typeURL]
+}
+
+// RegisterService implements the gRPC Server.RegisterService method. sd is a gRPC
+// service description, handler is an object which implements that gRPC service.
+//
+// This function PANICs:
+// - if it is called before the service `Msg`s have been registered using
+// RegisterInterfaces,
+// - or if a service is being registered twice.
+func (msr *MsgServiceRouter)
+
+RegisterService(sd *grpc.ServiceDesc, handler interface{
+}) {
+ // Adds a top-level query handler based on the gRPC service name.
+ for _, method := range sd.Methods {
+ fqMethod := fmt.Sprintf("/%s/%s", sd.ServiceName, method.MethodName)
+ methodHandler := method.Handler
+
+ var requestTypeName string
+
+ // NOTE: This is how we pull the concrete request type for each handler for registering in the InterfaceRegistry.
+ // This approach is maybe a bit hacky, but less hacky than reflecting on the handler object itself.
+ // We use a no-op interceptor to avoid actually calling into the handler itself.
+ _, _ = methodHandler(nil, context.Background(), func(i interface{
+})
+
+error {
+ msg, ok := i.(sdk.Msg)
+ if !ok {
+ // We panic here because there is no other alternative and the app cannot be initialized correctly
+ // this should only happen if there is a problem with code generation in which case the app won't
+ // work correctly anyway.
+ panic(fmt.Errorf("unable to register service method %s: %T does not implement sdk.Msg", fqMethod, i))
+}
+
+requestTypeName = sdk.MsgTypeURL(msg)
+
+return nil
+}, noopInterceptor)
+
+ // Check that the service Msg fully-qualified method name has already
+ // been registered (via RegisterInterfaces). If the user registers a
+ // service without registering according service Msg type, there might be
+ // some unexpected behavior down the road. Since we can't return an error
+ // (`Server.RegisterService` interface restriction)
+
+we panic (at startup).
+ reqType, err := msr.interfaceRegistry.Resolve(requestTypeName)
+ if err != nil || reqType == nil {
+ panic(
+ fmt.Errorf(
+ "type_url %s has not been registered yet. "+
+ "Before calling RegisterService, you must register all interfaces by calling the `RegisterInterfaces` "+
+ "method on module.BasicManager. Each module should call `msgservice.RegisterMsgServiceDesc` inside its "+
+ "`RegisterInterfaces` method with the `_Msg_serviceDesc` generated by proto-gen",
+ requestTypeName,
+ ),
+ )
+}
+
+ // Check that each service is only registered once. If a service is
+ // registered more than once, then we should error. Since we can't
+ // return an error (`Server.RegisterService` interface restriction)
+
+we
+ // panic (at startup).
+ _, found := msr.routes[requestTypeName]
+ if found {
+ panic(
+ fmt.Errorf(
+ "msg service %s has already been registered. Please make sure to only register each service once. "+
+ "This usually means that there are conflicting modules registering the same msg service",
+ fqMethod,
+ ),
+ )
+}
+
+msr.routes[requestTypeName] = func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+ interceptor := func(goCtx context.Context, _ interface{
+}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{
+}, error) {
+ goCtx = context.WithValue(goCtx, sdk.SdkContextKey, ctx)
+
+return handler(goCtx, msg)
+}
+ if m, ok := msg.(sdk.HasValidateBasic); ok {
+ if err := m.ValidateBasic(); err != nil {
+ return nil, err
+}
+
+}
+ if msr.circuitBreaker != nil {
+ msgURL := sdk.MsgTypeURL(msg)
+
+isAllowed, err := msr.circuitBreaker.IsAllowed(ctx, msgURL)
+ if err != nil {
+ return nil, err
+}
+ if !isAllowed {
+ return nil, fmt.Errorf("circuit breaker disables execution of this message: %s", msgURL)
+}
+
+}
+
+ // Call the method handler from the service description with the handler object.
+ // We don't do any decoding here because the decoding was already done.
+ res, err := methodHandler(handler, ctx, noopDecoder, interceptor)
+ if err != nil {
+ return nil, err
+}
+
+resMsg, ok := res.(proto.Message)
+ if !ok {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidType, "Expecting proto.Message, got %T", resMsg)
+}
+
+return sdk.WrapServiceResult(ctx, resMsg, err)
+}
+
+}
+}
+
+// SetInterfaceRegistry sets the interface registry for the router.
+func (msr *MsgServiceRouter)
+
+SetInterfaceRegistry(interfaceRegistry codectypes.InterfaceRegistry) {
+ msr.interfaceRegistry = interfaceRegistry
+}
+
+func noopDecoder(_ interface{
+})
+
+error {
+ return nil
+}
+
+func noopInterceptor(_ context.Context, _ interface{
+}, _ *grpc.UnaryServerInfo, _ grpc.UnaryHandler) (interface{
+}, error) {
+ return nil, nil
+}
+```
+
+This method takes care of marshaling the `res` parameter to protobuf and attaching any events on the `ctx.EventManager()` to the `sdk.Result`.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/proto/cosmos/base/abci/v1beta1/abci.proto#L93-L113
+```
+
+This diagram shows a typical structure of a Protobuf `Msg` service, and how the message propagates through the module.
+
+
+
+## Telemetry
+
+New [telemetry metrics](/sdk/v0.50/learn/advanced/telemetry) can be created from `msgServer` methods when handling messages.
+
+This is an example from the `x/auth/vesting` module:
+
+```go expandable
+package vesting
+
+import (
+
+ "context"
+ "github.com/armon/go-metrics"
+
+ errorsmod "cosmossdk.io/errors"
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/x/auth/keeper"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
+)
+
+type msgServer struct {
+ keeper.AccountKeeper
+ types.BankKeeper
+}
+
+// NewMsgServerImpl returns an implementation of the vesting MsgServer interface,
+// wrapping the corresponding AccountKeeper and BankKeeper.
+func NewMsgServerImpl(k keeper.AccountKeeper, bk types.BankKeeper)
+
+types.MsgServer {
+ return &msgServer{
+ AccountKeeper: k,
+ BankKeeper: bk
+}
+}
+
+var _ types.MsgServer = msgServer{
+}
+
+func (s msgServer)
+
+CreateVestingAccount(goCtx context.Context, msg *types.MsgCreateVestingAccount) (*types.MsgCreateVestingAccountResponse, error) {
+ from, err := s.AccountKeeper.AddressCodec().StringToBytes(msg.FromAddress)
+ if err != nil {
+ return nil, sdkerrors.ErrInvalidAddress.Wrapf("invalid 'from' address: %s", err)
+}
+
+to, err := s.AccountKeeper.AddressCodec().StringToBytes(msg.ToAddress)
+ if err != nil {
+ return nil, sdkerrors.ErrInvalidAddress.Wrapf("invalid 'to' address: %s", err)
+}
+ if err := validateAmount(msg.Amount); err != nil {
+ return nil, err
+}
+ if msg.EndTime <= 0 {
+ return nil, errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "invalid end time")
+}
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ if err := s.BankKeeper.IsSendEnabledCoins(ctx, msg.Amount...); err != nil {
+ return nil, err
+}
+ if s.BankKeeper.BlockedAddr(to) {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrUnauthorized, "%s is not allowed to receive funds", msg.ToAddress)
+}
+ if acc := s.AccountKeeper.GetAccount(ctx, to); acc != nil {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "account %s already exists", msg.ToAddress)
+}
+ baseAccount := authtypes.NewBaseAccountWithAddress(to)
+
+baseAccount = s.AccountKeeper.NewAccount(ctx, baseAccount).(*authtypes.BaseAccount)
+ baseVestingAccount := types.NewBaseVestingAccount(baseAccount, msg.Amount.Sort(), msg.EndTime)
+
+var vestingAccount sdk.AccountI
+ if msg.Delayed {
+ vestingAccount = types.NewDelayedVestingAccountRaw(baseVestingAccount)
+}
+
+else {
+ vestingAccount = types.NewContinuousVestingAccountRaw(baseVestingAccount, ctx.BlockTime().Unix())
+}
+
+s.AccountKeeper.SetAccount(ctx, vestingAccount)
+
+defer func() {
+ telemetry.IncrCounter(1, "new", "account")
+ for _, a := range msg.Amount {
+ if a.Amount.IsInt64() {
+ telemetry.SetGaugeWithLabels(
+ []string{"tx", "msg", "create_vesting_account"
+},
+ float32(a.Amount.Int64()),
+ []metrics.Label{
+ telemetry.NewLabel("denom", a.Denom)
+},
+ )
+}
+
+}
+
+}()
+ if err = s.BankKeeper.SendCoins(ctx, from, to, msg.Amount); err != nil {
+ return nil, err
+}
+
+return &types.MsgCreateVestingAccountResponse{
+}, nil
+}
+
+func (s msgServer)
+
+CreatePermanentLockedAccount(goCtx context.Context, msg *types.MsgCreatePermanentLockedAccount) (*types.MsgCreatePermanentLockedAccountResponse, error) {
+ from, err := s.AccountKeeper.AddressCodec().StringToBytes(msg.FromAddress)
+ if err != nil {
+ return nil, sdkerrors.ErrInvalidAddress.Wrapf("invalid 'from' address: %s", err)
+}
+
+to, err := s.AccountKeeper.AddressCodec().StringToBytes(msg.ToAddress)
+ if err != nil {
+ return nil, sdkerrors.ErrInvalidAddress.Wrapf("invalid 'to' address: %s", err)
+}
+ if err := validateAmount(msg.Amount); err != nil {
+ return nil, err
+}
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ if err := s.BankKeeper.IsSendEnabledCoins(ctx, msg.Amount...); err != nil {
+ return nil, err
+}
+ if s.BankKeeper.BlockedAddr(to) {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrUnauthorized, "%s is not allowed to receive funds", msg.ToAddress)
+}
+ if acc := s.AccountKeeper.GetAccount(ctx, to); acc != nil {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "account %s already exists", msg.ToAddress)
+}
+ baseAccount := authtypes.NewBaseAccountWithAddress(to)
+
+baseAccount = s.AccountKeeper.NewAccount(ctx, baseAccount).(*authtypes.BaseAccount)
+ vestingAccount := types.NewPermanentLockedAccount(baseAccount, msg.Amount)
+
+s.AccountKeeper.SetAccount(ctx, vestingAccount)
+
+defer func() {
+ telemetry.IncrCounter(1, "new", "account")
+ for _, a := range msg.Amount {
+ if a.Amount.IsInt64() {
+ telemetry.SetGaugeWithLabels(
+ []string{"tx", "msg", "create_permanent_locked_account"
+},
+ float32(a.Amount.Int64()),
+ []metrics.Label{
+ telemetry.NewLabel("denom", a.Denom)
+},
+ )
+}
+
+}
+
+}()
+ if err = s.BankKeeper.SendCoins(ctx, from, to, msg.Amount); err != nil {
+ return nil, err
+}
+
+return &types.MsgCreatePermanentLockedAccountResponse{
+}, nil
+}
+
+func (s msgServer)
+
+CreatePeriodicVestingAccount(goCtx context.Context, msg *types.MsgCreatePeriodicVestingAccount) (*types.MsgCreatePeriodicVestingAccountResponse, error) {
+ from, err := s.AccountKeeper.AddressCodec().StringToBytes(msg.FromAddress)
+ if err != nil {
+ return nil, sdkerrors.ErrInvalidAddress.Wrapf("invalid 'from' address: %s", err)
+}
+
+to, err := s.AccountKeeper.AddressCodec().StringToBytes(msg.ToAddress)
+ if err != nil {
+ return nil, sdkerrors.ErrInvalidAddress.Wrapf("invalid 'to' address: %s", err)
+}
+ if msg.StartTime < 1 {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "invalid start time of %d, length must be greater than 0", msg.StartTime)
+}
+
+var totalCoins sdk.Coins
+ for i, period := range msg.VestingPeriods {
+ if period.Length < 1 {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "invalid period length of %d in period %d, length must be greater than 0", period.Length, i)
+}
+
+totalCoins = totalCoins.Add(period.Amount...)
+}
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ if acc := s.AccountKeeper.GetAccount(ctx, to); acc != nil {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "account %s already exists", msg.ToAddress)
+}
+ if err := s.BankKeeper.IsSendEnabledCoins(ctx, totalCoins...); err != nil {
+ return nil, err
+}
+ baseAccount := authtypes.NewBaseAccountWithAddress(to)
+
+baseAccount = s.AccountKeeper.NewAccount(ctx, baseAccount).(*authtypes.BaseAccount)
+ vestingAccount := types.NewPeriodicVestingAccount(baseAccount, totalCoins.Sort(), msg.StartTime, msg.VestingPeriods)
+
+s.AccountKeeper.SetAccount(ctx, vestingAccount)
+
+defer func() {
+ telemetry.IncrCounter(1, "new", "account")
+ for _, a := range totalCoins {
+ if a.Amount.IsInt64() {
+ telemetry.SetGaugeWithLabels(
+ []string{"tx", "msg", "create_periodic_vesting_account"
+},
+ float32(a.Amount.Int64()),
+ []metrics.Label{
+ telemetry.NewLabel("denom", a.Denom)
+},
+ )
+}
+
+}
+
+}()
+ if err = s.BankKeeper.SendCoins(ctx, from, to, totalCoins); err != nil {
+ return nil, err
+}
+
+return &types.MsgCreatePeriodicVestingAccountResponse{
+}, nil
+}
+
+func validateAmount(amount sdk.Coins)
+
+error {
+ if !amount.IsValid() {
+ return sdkerrors.ErrInvalidCoins.Wrap(amount.String())
+}
+ if !amount.IsAllPositive() {
+ return sdkerrors.ErrInvalidCoins.Wrap(amount.String())
+}
+
+return nil
+}
+```
diff --git a/sdk/next/build/building-modules/preblock.mdx b/sdk/next/build/building-modules/preblock.mdx
new file mode 100644
index 000000000..e3ea5f6ac
--- /dev/null
+++ b/sdk/next/build/building-modules/preblock.mdx
@@ -0,0 +1,32 @@
+---
+title: PreBlocker
+---
+
+
+**Synopsis**
+`PreBlocker` is an optional method module developers can implement in their module. It is triggered before [`BeginBlock`](/sdk/v0.53/learn/advanced/baseapp#beginblock).
+
+
+
+**Prerequisite Readings**
+
+* [Module Manager](/sdk/v0.53/build/building-modules/module-manager)
+
+
+
+## PreBlocker
+
+There are two semantics around the new lifecycle method:
+
+* It runs before the `BeginBlocker` of all modules
+* It can modify consensus parameters in storage, and signal the caller through the return value.
+
+When it returns `ConsensusParamsChanged=true`, the caller must refresh the consensus parameter in the deliver context:
+
+```
+app.finalizeBlockState.ctx = app.finalizeBlockState.ctx.WithConsensusParams(app.GetConsensusParams())
+```
+
+The new ctx must be passed to all the other lifecycle methods.
+
+{/* TODO: leaving this here to update docs with core api changes */}
diff --git a/sdk/next/build/building-modules/protobuf-annotations.mdx b/sdk/next/build/building-modules/protobuf-annotations.mdx
new file mode 100644
index 000000000..24ee051af
--- /dev/null
+++ b/sdk/next/build/building-modules/protobuf-annotations.mdx
@@ -0,0 +1,134 @@
+---
+title: ProtocolBuffer Annotations
+description: >-
+ This document explains the various protobuf scalars that have been added to
+ make working with protobuf easier for Cosmos SDK application developers
+---
+
+This document explains the various protobuf scalars that have been added to make working with protobuf easier for Cosmos SDK application developers.
+
+## Signer
+
+Signer specifies which field should be used to determine the signer of a message for the Cosmos SDK. This field can be used for clients as well to infer which field should be used to determine the signer of a message.
+
+Read more about the signer field [here](/sdk/v0.50/build/building-modules/messages-and-queries).
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/e6848d99b55a65d014375b295bdd7f9641aac95e/proto/cosmos/bank/v1beta1/tx.proto#L40
+```
+
+```proto
+option (cosmos.msg.v1.signer) = "from_address";
+```
+
+## Scalar
+
+The scalar type defines a way for clients to understand how to construct protobuf messages according to what is expected by the module and the SDK.
+
+```proto
+(cosmos_proto.scalar) = "cosmos.AddressString"
+```
+
+Example of account address string scalar:
+
+```proto
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/e6848d99b55a65d014375b295bdd7f9641aac95e/proto/cosmos/bank/v1beta1/tx.proto#L46
+```
+
+Example of validator address string scalar:
+
+```proto
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/distribution/v1beta1/query.proto#L87
+```
+
+Example of Decimals scalar:
+
+```proto
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/distribution/v1beta1/distribution.proto#L26
+```
+
+Example of Int scalar:
+
+```proto
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/gov/v1/gov.proto#L137
+```
+
+There are a few options for what can be provided as a scalar: `cosmos.AddressString`, `cosmos.ValidatorAddressString`, `cosmos.ConsensusAddressString`, `cosmos.Int`, `cosmos.Dec`.
+
+## Implements\_Interface
+
+The implements interface annotation is used to provide information to client tooling like [telescope](https://github.com/cosmology-tech/telescope) on how to encode and decode protobuf messages.
+
+```proto
+option (cosmos_proto.implements_interface) = "cosmos.auth.v1beta1.AccountI";
+```
+
+## Method,Field,Message Added In
+
+`method_added_in`, `field_added_in` and `message_added_in` are annotations that denote to clients that a method, field, or message was added in a later version. This is useful when new methods or fields are added in later versions, so that clients know what they can call.
+
+The annotation should be worded as follows:
+
+```proto
+option (cosmos_proto.method_added_in) = "cosmos-sdk v0.50.1";
+option (cosmos_proto.method_added_in) = "x/epochs v1.0.0";
+option (cosmos_proto.method_added_in) = "simapp v24.0.0";
+```
+
+## Amino
+
+The amino codec was removed in `v0.50+`, which means there is no longer a need to register a `legacyAminoCodec`. To replace the amino codec, Amino protobuf annotations are used to provide information to the amino codec on how to encode and decode protobuf messages.
+
+
+Amino annotations are only used for backwards compatibility with amino. New modules are not required to use amino annotations.
+
+
+The below annotations are used to provide information to the amino codec on how to encode and decode protobuf messages in a backwards compatible manner.
+
+### Name
+
+Name specifies the amino name that shows up for users so they can see which message they are signing.
+
+```proto
+option (amino.name) = "cosmos-sdk/BaseAccount";
+```
+
+```proto
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/bank/v1beta1/tx.proto#L41
+```
+
+### Field\_Name
+
+Field name specifies the amino name that shows up for users so they can see which field they are signing.
+
+```proto
+uint64 height = 1 [(amino.field_name) = "public_key"];
+```
+
+```proto
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/distribution/v1beta1/distribution.proto#L166
+```
+
+### Dont\_OmitEmpty
+
+Dont omitempty specifies that the field should not be omitted when encoding to amino.
+
+```proto
+repeated cosmos.base.v1beta1.Coin amount = 3 [(amino.dont_omitempty) = true];
+```
+
+```proto
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/bank/v1beta1/bank.proto#L56
+```
+
+### Encoding
+
+Encoding instructs the amino json marshaler how to encode certain fields that may differ from the standard encoding behavior. The most common example of this is how `repeated cosmos.base.v1beta1.Coin` is encoded when using the amino json encoding format. The `legacy_coins` option tells the json marshaler [how to encode a null slice](https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/x/tx/signing/aminojson/json_marshal.go#L65) of `cosmos.base.v1beta1.Coin`.
+
+```proto
+(amino.encoding) = "legacy_coins",
+```
+
+```proto
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/bank/v1beta1/genesis.proto#L23
+```
diff --git a/sdk/next/build/building-modules/query-services.mdx b/sdk/next/build/building-modules/query-services.mdx
new file mode 100644
index 000000000..01783690e
--- /dev/null
+++ b/sdk/next/build/building-modules/query-services.mdx
@@ -0,0 +1,392 @@
+---
+title: Query Services
+---
+{/* force preview rebuild: 2025-09-30 */}
+
+
+**Synopsis**
+A Protobuf Query service processes [`queries`](/sdk/v0.53/build/building-modules/messages-and-queries). Query services are specific to the module in which they are defined, and only process `queries` defined within said module. They are called from `BaseApp`'s [`Query` method](/sdk/v0.53/learn/advanced/baseapp#query).
+
+
+
+**Prerequisite Readings**
+
+* [Module Manager](/sdk/v0.53/build/building-modules/module-manager)
+* [Messages and Queries](/sdk/v0.53/build/building-modules/messages-and-queries)
+
+
+
+## Implementation of a module query service
+
+### gRPC Service
+
+When defining a Protobuf `Query` service, a `QueryServer` interface is generated for each module with all the service methods:
+
+```go
+type QueryServer interface {
+ QueryBalance(context.Context, *QueryBalanceParams) (*types.Coin, error)
+
+QueryAllBalances(context.Context, *QueryAllBalancesParams) (*QueryAllBalancesResponse, error)
+}
+```
+
+These custom query methods should be implemented by a module's keeper, typically in `./keeper/grpc_query.go`. The first parameter of these methods is a generic `context.Context`. Therefore, the Cosmos SDK provides a function `sdk.UnwrapSDKContext` to retrieve the `sdk.Context` from the provided
+`context.Context`.
+
+Here's an example implementation for the bank module:
+
+```go expandable
+package keeper
+
+import (
+
+ "context"
+ "cosmossdk.io/collections"
+ "cosmossdk.io/math"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+ "cosmossdk.io/store/prefix"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/query"
+ "github.com/cosmos/cosmos-sdk/x/bank/types"
+)
+
+type Querier struct {
+ BaseKeeper
+}
+
+var _ types.QueryServer = BaseKeeper{
+}
+
+func NewQuerier(keeper *BaseKeeper)
+
+Querier {
+ return Querier{
+ BaseKeeper: *keeper
+}
+}
+
+// Balance implements the Query/Balance gRPC method
+func (k BaseKeeper)
+
+Balance(ctx context.Context, req *types.QueryBalanceRequest) (*types.QueryBalanceResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+}
+ if err := sdk.ValidateDenom(req.Denom); err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+}
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+
+address, err := k.ak.AddressCodec().StringToBytes(req.Address)
+ if err != nil {
+ return nil, status.Errorf(codes.InvalidArgument, "invalid address: %s", err.Error())
+}
+ balance := k.GetBalance(sdkCtx, address, req.Denom)
+
+return &types.QueryBalanceResponse{
+ Balance: &balance
+}, nil
+}
+
+// AllBalances implements the Query/AllBalances gRPC method
+func (k BaseKeeper)
+
+AllBalances(ctx context.Context, req *types.QueryAllBalancesRequest) (*types.QueryAllBalancesResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+}
+
+addr, err := k.ak.AddressCodec().StringToBytes(req.Address)
+ if err != nil {
+ return nil, status.Errorf(codes.InvalidArgument, "invalid address: %s", err.Error())
+}
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ balances := sdk.NewCoins()
+
+ _, pageRes, err := query.CollectionFilteredPaginate(ctx, k.Balances, req.Pagination, func(key collections.Pair[sdk.AccAddress, string], value math.Int) (include bool, err error) {
+ denom := key.K2()
+ if req.ResolveDenom {
+ if metadata, ok := k.GetDenomMetaData(sdkCtx, denom); ok {
+ denom = metadata.Display
+}
+
+}
+
+balances = append(balances, sdk.NewCoin(denom, value))
+
+return false, nil // we don't include results because we're appending them here.
+}, query.WithCollectionPaginationPairPrefix[sdk.AccAddress, string](addr))
+ if err != nil {
+ return nil, status.Errorf(codes.InvalidArgument, "paginate: %v", err)
+}
+
+return &types.QueryAllBalancesResponse{
+ Balances: balances,
+ Pagination: pageRes
+}, nil
+}
+
+// SpendableBalances implements a gRPC query handler for retrieving an account's
+// spendable balances.
+func (k BaseKeeper)
+
+SpendableBalances(ctx context.Context, req *types.QuerySpendableBalancesRequest) (*types.QuerySpendableBalancesResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+}
+
+addr, err := k.ak.AddressCodec().StringToBytes(req.Address)
+ if err != nil {
+ return nil, status.Errorf(codes.InvalidArgument, "invalid address: %s", err.Error())
+}
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ balances := sdk.NewCoins()
+ zeroAmt := math.ZeroInt()
+
+ _, pageRes, err := query.CollectionFilteredPaginate(ctx, k.Balances, req.Pagination, func(key collections.Pair[sdk.AccAddress, string], _ math.Int) (include bool, err error) {
+ balances = append(balances, sdk.NewCoin(key.K2(), zeroAmt))
+
+return false, nil // not including results as they're appended here
+}, query.WithCollectionPaginationPairPrefix[sdk.AccAddress, string](addr))
+ if err != nil {
+ return nil, status.Errorf(codes.InvalidArgument, "paginate: %v", err)
+}
+ result := sdk.NewCoins()
+ spendable := k.SpendableCoins(sdkCtx, addr)
+ for _, c := range balances {
+ result = append(result, sdk.NewCoin(c.Denom, spendable.AmountOf(c.Denom)))
+}
+
+return &types.QuerySpendableBalancesResponse{
+ Balances: result,
+ Pagination: pageRes
+}, nil
+}
+
+// SpendableBalanceByDenom implements a gRPC query handler for retrieving an account's
+// spendable balance for a specific denom.
+func (k BaseKeeper)
+
+SpendableBalanceByDenom(ctx context.Context, req *types.QuerySpendableBalanceByDenomRequest) (*types.QuerySpendableBalanceByDenomResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+}
+
+addr, err := k.ak.AddressCodec().StringToBytes(req.Address)
+ if err != nil {
+ return nil, status.Errorf(codes.InvalidArgument, "invalid address: %s", err.Error())
+}
+ if err := sdk.ValidateDenom(req.Denom); err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+}
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ spendable := k.SpendableCoin(sdkCtx, addr, req.Denom)
+
+return &types.QuerySpendableBalanceByDenomResponse{
+ Balance: &spendable
+}, nil
+}
+
+// TotalSupply implements the Query/TotalSupply gRPC method
+func (k BaseKeeper)
+
+TotalSupply(ctx context.Context, req *types.QueryTotalSupplyRequest) (*types.QueryTotalSupplyResponse, error) {
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+
+totalSupply, pageRes, err := k.GetPaginatedTotalSupply(sdkCtx, req.Pagination)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+}
+
+return &types.QueryTotalSupplyResponse{
+ Supply: totalSupply,
+ Pagination: pageRes
+}, nil
+}
+
+// SupplyOf implements the Query/SupplyOf gRPC method
+func (k BaseKeeper)
+
+SupplyOf(c context.Context, req *types.QuerySupplyOfRequest) (*types.QuerySupplyOfResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+}
+ if err := sdk.ValidateDenom(req.Denom); err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+}
+ ctx := sdk.UnwrapSDKContext(c)
+ supply := k.GetSupply(ctx, req.Denom)
+
+return &types.QuerySupplyOfResponse{
+ Amount: sdk.NewCoin(req.Denom, supply.Amount)
+}, nil
+}
+
+// Params implements the gRPC service handler for querying x/bank parameters.
+func (k BaseKeeper)
+
+Params(ctx context.Context, req *types.QueryParamsRequest) (*types.QueryParamsResponse, error) {
+ if req == nil {
+ return nil, status.Errorf(codes.InvalidArgument, "empty request")
+}
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ params := k.GetParams(sdkCtx)
+
+return &types.QueryParamsResponse{
+ Params: params
+}, nil
+}
+
+// DenomsMetadata implements Query/DenomsMetadata gRPC method.
+func (k BaseKeeper)
+
+DenomsMetadata(c context.Context, req *types.QueryDenomsMetadataRequest) (*types.QueryDenomsMetadataResponse, error) {
+ if req == nil {
+ return nil, status.Errorf(codes.InvalidArgument, "empty request")
+}
+ kvStore := runtime.KVStoreAdapter(k.storeService.OpenKVStore(c))
+ store := prefix.NewStore(kvStore, types.DenomMetadataPrefix)
+ metadatas := []types.Metadata{
+}
+
+pageRes, err := query.Paginate(store, req.Pagination, func(_, value []byte)
+
+error {
+ var metadata types.Metadata
+ k.cdc.MustUnmarshal(value, &metadata)
+
+metadatas = append(metadatas, metadata)
+
+return nil
+})
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+}
+
+return &types.QueryDenomsMetadataResponse{
+ Metadatas: metadatas,
+ Pagination: pageRes,
+}, nil
+}
+
+// DenomMetadata implements Query/DenomMetadata gRPC method.
+func (k BaseKeeper)
+
+DenomMetadata(c context.Context, req *types.QueryDenomMetadataRequest) (*types.QueryDenomMetadataResponse, error) {
+ if req == nil {
+ return nil, status.Errorf(codes.InvalidArgument, "empty request")
+}
+ if err := sdk.ValidateDenom(req.Denom); err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+}
+ ctx := sdk.UnwrapSDKContext(c)
+
+metadata, found := k.GetDenomMetaData(ctx, req.Denom)
+ if !found {
+ return nil, status.Errorf(codes.NotFound, "client metadata for denom %s", req.Denom)
+}
+
+return &types.QueryDenomMetadataResponse{
+ Metadata: metadata,
+}, nil
+}
+
+func (k BaseKeeper)
+
+DenomOwners(
+ goCtx context.Context,
+ req *types.QueryDenomOwnersRequest,
+) (*types.QueryDenomOwnersResponse, error) {
+ if req == nil {
+ return nil, status.Errorf(codes.InvalidArgument, "empty request")
+}
+ if err := sdk.ValidateDenom(req.Denom); err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+}
+
+var denomOwners []*types.DenomOwner
+
+ _, pageRes, err := query.CollectionFilteredPaginate(goCtx, k.Balances.Indexes.Denom, req.Pagination,
+ func(key collections.Pair[string, sdk.AccAddress], value collections.NoValue) (include bool, err error) {
+ amt, err := k.Balances.Get(goCtx, collections.Join(key.K2(), req.Denom))
+ if err != nil {
+ return false, err
+}
+
+denomOwners = append(denomOwners, &types.DenomOwner{
+ Address: key.K2().String(),
+ Balance: sdk.NewCoin(req.Denom, amt),
+})
+
+return false, nil
+},
+ query.WithCollectionPaginationPairPrefix[string, sdk.AccAddress](req.Denom),
+ )
+ if err != nil {
+ return nil, err
+}
+
+return &types.QueryDenomOwnersResponse{
+ DenomOwners: denomOwners,
+ Pagination: pageRes
+}, nil
+}
+
+func (k BaseKeeper)
+
+SendEnabled(goCtx context.Context, req *types.QuerySendEnabledRequest) (*types.QuerySendEnabledResponse, error) {
+ if req == nil {
+ return nil, status.Errorf(codes.InvalidArgument, "empty request")
+}
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ resp := &types.QuerySendEnabledResponse{
+}
+ if len(req.Denoms) > 0 {
+ for _, denom := range req.Denoms {
+ if se, ok := k.getSendEnabled(ctx, denom); ok {
+ resp.SendEnabled = append(resp.SendEnabled, types.NewSendEnabled(denom, se))
+}
+
+}
+
+}
+
+else {
+ results, pageResp, err := query.CollectionPaginate[string, bool](ctx, k.BaseViewKeeper.SendEnabled, req.Pagination)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+}
+ for _, r := range results {
+ resp.SendEnabled = append(resp.SendEnabled, &types.SendEnabled{
+ Denom: r.Key,
+ Enabled: r.Value,
+})
+}
+
+resp.Pagination = pageResp
+}
+
+return resp, nil
+}
+```
+
+### Calling queries from the State Machine
+
+The Cosmos SDK v0.47 introduces a new `cosmos.query.v1.module_query_safe` Protobuf annotation which is used to state that a query is safe to be called from within the state machine, for example:
+
+* a Keeper's query function can be called from another module's Keeper,
+* ADR-033 intermodule query calls,
+* CosmWasm contracts can also directly interact with these queries.
+
+If the `module_query_safe` annotation is set to `true`, it means:
+
+* The query is deterministic: given a block height it will return the same response upon multiple calls, and doesn't introduce any state-machine breaking changes across SDK patch versions.
+* Gas consumption never fluctuates across calls and across patch versions.
+
+If you are a module developer and want to use `module_query_safe` annotation for your own query, you have to ensure the following things:
+
+* the query is deterministic and won't introduce state-machine-breaking changes without coordinated upgrades
+* it has its gas tracked, to avoid the attack vector where no gas is accounted for
+ on potentially high-computation queries.
diff --git a/sdk/next/build/building-modules/simulator.mdx b/sdk/next/build/building-modules/simulator.mdx
new file mode 100644
index 000000000..8e9b0fe35
--- /dev/null
+++ b/sdk/next/build/building-modules/simulator.mdx
@@ -0,0 +1,4063 @@
+---
+title: Module Simulation
+---
+
+
+**Prerequisite Readings**
+
+* [Cosmos Blockchain Simulator](/sdk/v0.53/learn/advanced/simulation)
+
+
+
+## Synopsis
+
+This document guides developers on integrating their custom modules with the Cosmos SDK `Simulations`.
+Simulations are useful for testing edge cases in module implementations.
+
+* [Simulation Package](#simulation-package)
+* [Simulation App Module](#simulation-app-module)
+* [SimsX](#simsx)
+ * [Example Implementations](#example-implementations)
+* [Store decoders](#store-decoders)
+* [Randomized genesis](#randomized-genesis)
+* [Random weighted operations](#random-weighted-operations)
+ * [Using Simsx](#using-simsx)
+* [App Simulator manager](#app-simulator-manager)
+* [Running Simulations](#running-simulations)
+
+## Simulation Package
+
+The Cosmos SDK suggests organizing your simulation related code in a `x/<module>/simulation` package.
+
+## Simulation App Module
+
+To integrate with the Cosmos SDK `SimulationManager`, app modules must implement the `AppModuleSimulation` interface.
+
+```go expandable
+package module
+
+import (
+
+ "encoding/json"
+ "math/rand"
+ "sort"
+ "time"
+
+ sdkmath "cosmossdk.io/math"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/types/simulation"
+)
+
+// AppModuleSimulation defines the standard functions that every module should expose
+// for the SDK blockchain simulator
+type AppModuleSimulation interface {
+ // randomized genesis states
+ GenerateGenesisState(input *SimulationState)
+
+ // register a func to decode the each module's defined types from their corresponding store key
+ RegisterStoreDecoder(simulation.StoreDecoderRegistry)
+
+ // simulation operations (i.e msgs)
+
+with their respective weight
+ WeightedOperations(simState SimulationState) []simulation.WeightedOperation
+}
+
+// HasProposalMsgs defines the messages that can be used to simulate governance (v1)
+
+proposals
+type HasProposalMsgs interface {
+ // msg functions used to simulate governance proposals
+ ProposalMsgs(simState SimulationState) []simulation.WeightedProposalMsg
+}
+
+// HasProposalContents defines the contents that can be used to simulate legacy governance (v1beta1)
+
+proposals
+type HasProposalContents interface {
+ // content functions used to simulate governance proposals
+ ProposalContents(simState SimulationState) []simulation.WeightedProposalContent //nolint:staticcheck // legacy v1beta1 governance
+}
+
+// SimulationManager defines a simulation manager that provides the high level utility
+// for managing and executing simulation functionalities for a group of modules
+type SimulationManager struct {
+ Modules []AppModuleSimulation // array of app modules; we use an array for deterministic simulation tests
+ StoreDecoders simulation.StoreDecoderRegistry // functions to decode the key-value pairs from each module's store
+}
+
+// NewSimulationManager creates a new SimulationManager object
+//
+// CONTRACT: All the modules provided must be also registered on the module Manager
+func NewSimulationManager(modules ...AppModuleSimulation) *SimulationManager {
+ return &SimulationManager{
+ Modules: modules,
+ StoreDecoders: make(simulation.StoreDecoderRegistry),
+}
+}
+
+// NewSimulationManagerFromAppModules creates a new SimulationManager object.
+//
+// First it sets any SimulationModule provided by overrideModules, and ignores any AppModule
+// with the same moduleName.
+// Then it attempts to cast every provided AppModule into an AppModuleSimulation.
+// If the cast succeeds, its included, otherwise it is excluded.
+func NewSimulationManagerFromAppModules(modules map[string]any, overrideModules map[string]AppModuleSimulation) *SimulationManager {
+ simModules := []AppModuleSimulation{
+}
+ appModuleNamesSorted := make([]string, 0, len(modules))
+ for moduleName := range modules {
+ appModuleNamesSorted = append(appModuleNamesSorted, moduleName)
+}
+
+sort.Strings(appModuleNamesSorted)
+ for _, moduleName := range appModuleNamesSorted {
+ // for every module, see if we override it. If so, use override.
+ // Else, if we can cast the app module into a simulation module add it.
+ // otherwise no simulation module.
+ if simModule, ok := overrideModules[moduleName]; ok {
+ simModules = append(simModules, simModule)
+}
+
+else {
+ appModule := modules[moduleName]
+ if simModule, ok := appModule.(AppModuleSimulation); ok {
+ simModules = append(simModules, simModule)
+}
+ // cannot cast, so we continue
+}
+
+}
+
+return NewSimulationManager(simModules...)
+}
+
+// Deprecated: Use GetProposalMsgs instead.
+// GetProposalContents returns each module's proposal content generator function
+// with their default operation weight and key.
+func (sm *SimulationManager)
+
+GetProposalContents(simState SimulationState) []simulation.WeightedProposalContent {
+ wContents := make([]simulation.WeightedProposalContent, 0, len(sm.Modules))
+ for _, module := range sm.Modules {
+ if module, ok := module.(HasProposalContents); ok {
+ wContents = append(wContents, module.ProposalContents(simState)...)
+}
+
+}
+
+return wContents
+}
+
+// GetProposalMsgs returns each module's proposal msg generator function
+// with their default operation weight and key.
+func (sm *SimulationManager)
+
+GetProposalMsgs(simState SimulationState) []simulation.WeightedProposalMsg {
+ wContents := make([]simulation.WeightedProposalMsg, 0, len(sm.Modules))
+ for _, module := range sm.Modules {
+ if module, ok := module.(HasProposalMsgs); ok {
+ wContents = append(wContents, module.ProposalMsgs(simState)...)
+}
+
+}
+
+return wContents
+}
+
+// RegisterStoreDecoders registers each of the modules' store decoders into a map
+func (sm *SimulationManager)
+
+RegisterStoreDecoders() {
+ for _, module := range sm.Modules {
+ module.RegisterStoreDecoder(sm.StoreDecoders)
+}
+}
+
+// GenerateGenesisStates generates a randomized GenesisState for each of the
+// registered modules
+func (sm *SimulationManager)
+
+GenerateGenesisStates(simState *SimulationState) {
+ for _, module := range sm.Modules {
+ module.GenerateGenesisState(simState)
+}
+}
+
+// WeightedOperations returns all the modules' weighted operations of an application
+func (sm *SimulationManager)
+
+WeightedOperations(simState SimulationState) []simulation.WeightedOperation {
+ wOps := make([]simulation.WeightedOperation, 0, len(sm.Modules))
+ for _, module := range sm.Modules {
+ wOps = append(wOps, module.WeightedOperations(simState)...)
+}
+
+return wOps
+}
+
+// SimulationState is the input parameters used on each of the module's randomized
+// GenesisState generator function
+type SimulationState struct {
+ AppParams simulation.AppParams
+ Cdc codec.JSONCodec // application codec
+ TxConfig client.TxConfig // Shared TxConfig; this is expensive to create and stateless, so create it once up front.
+ Rand *rand.Rand // random number
+ GenState map[string]json.RawMessage // genesis state
+ Accounts []simulation.Account // simulation accounts
+ InitialStake sdkmath.Int // initial coins per account
+ NumBonded int64 // number of initially bonded accounts
+ BondDenom string // denom to be used as default
+ GenTimestamp time.Time // genesis timestamp
+ UnbondTime time.Duration // staking unbond time stored to use it as the slashing maximum evidence duration
+ LegacyParamChange []simulation.LegacyParamChange // simulated parameter changes from modules
+ //nolint:staticcheck // legacy used for testing
+ LegacyProposalContents []simulation.WeightedProposalContent // proposal content generator functions with their default weight and app sim key
+ ProposalMsgs []simulation.WeightedProposalMsg // proposal msg generator functions with their default weight and app sim key
+}
+```
+
+See an example implementation of these methods from `x/distribution` [here](https://github.com/cosmos/cosmos-sdk/blob/b55b9e14fb792cc8075effb373be9d26327fddea/x/distribution/module.go#L170-L194).
+
+## SimsX
+
+Cosmos SDK v0.53.0 introduced a new package, `simsx`, providing improved DevX for writing simulation code.
+
+It exposes the following extension interfaces that modules may implement to integrate with the new `simsx` runner.
+
+```go expandable
+package simsx
+
+import (
+
+ "encoding/json"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ dbm "github.com/cosmos/cosmos-db"
+ "github.com/stretchr/testify/require"
+ "cosmossdk.io/log"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ servertypes "github.com/cosmos/cosmos-sdk/server/types"
+ simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ "github.com/cosmos/cosmos-sdk/x/simulation"
+ "github.com/cosmos/cosmos-sdk/x/simulation/client/cli"
+)
+
+const SimAppChainID = "simulation-app"
+
+// this list of seeds was imported from the original simulation runner: https://github.com/cosmos/tools/blob/v1.0.0/cmd/runsim/main.go#L32
+var defaultSeeds = []int64{
+ 1, 2, 4, 7,
+ 32, 123, 124, 582, 1893, 2989,
+ 3012, 4728, 37827, 981928, 87821, 891823782,
+ 989182, 89182391, 11, 22, 44, 77, 99, 2020,
+ 3232, 123123, 124124, 582582, 18931893,
+ 29892989, 30123012, 47284728, 7601778, 8090485,
+ 977367484, 491163361, 424254581, 673398983,
+}
+
+// SimStateFactory is a factory type that provides a convenient way to create a simulation state for testing.
+// It contains the following fields:
+// - Codec: a codec used for serializing other objects
+// - AppStateFn: a function that returns the app state JSON bytes and the genesis accounts
+// - BlockedAddr: a map of blocked addresses
+// - AccountSource: an interface for retrieving accounts
+// - BalanceSource: an interface for retrieving balance-related information
+type SimStateFactory struct {
+ Codec codec.Codec
+ AppStateFn simtypes.AppStateFn
+ BlockedAddr map[string]bool
+ AccountSource AccountSourceX
+ BalanceSource BalanceSource
+}
+
+// SimulationApp abstract app that is used by sims
+type SimulationApp interface {
+ runtime.AppI
+ SetNotSigverifyTx()
+
+GetBaseApp() *baseapp.BaseApp
+ TxConfig()
+
+client.TxConfig
+ Close()
+
+error
+}
+
+// Run is a helper function that runs a simulation test with the given parameters.
+// It calls the RunWithSeeds function with the default seeds and parameters.
+//
+// This is the entrypoint to run simulation tests that used to run with the runsim binary.
+func Run[T SimulationApp](
+ t *testing.T,
+ appFactory func(
+ logger log.Logger,
+ db dbm.DB,
+ traceStore io.Writer,
+ loadLatest bool,
+ appOpts servertypes.AppOptions,
+ baseAppOptions ...func(*baseapp.BaseApp),
+ )
+
+T,
+ setupStateFactory func(app T)
+
+SimStateFactory,
+ postRunActions ...func(t testing.TB, app TestInstance[T], accs []simtypes.Account),
+) {
+ t.Helper()
+
+RunWithSeeds(t, appFactory, setupStateFactory, defaultSeeds, nil, postRunActions...)
+}
+
+// RunWithSeeds is a helper function that runs a simulation test with the given parameters.
+// It iterates over the provided seeds and runs the simulation test for each seed in parallel.
+//
+// It sets up the environment, creates an instance of the simulation app,
+// calls the simulation.SimulateFromSeed function to run the simulation, and performs post-run actions for each seed.
+// The execution is deterministic and can be used for fuzz tests as well.
+//
+// The system under test is isolated for each run but unlike the old runsim command, there is no Process separation.
+// This means, global caches may be reused for example. This implementation build upon the vanilla Go stdlib test framework.
+func RunWithSeeds[T SimulationApp](
+ t *testing.T,
+ appFactory func(
+ logger log.Logger,
+ db dbm.DB,
+ traceStore io.Writer,
+ loadLatest bool,
+ appOpts servertypes.AppOptions,
+ baseAppOptions ...func(*baseapp.BaseApp),
+ )
+
+T,
+ setupStateFactory func(app T)
+
+SimStateFactory,
+ seeds []int64,
+ fuzzSeed []byte,
+ postRunActions ...func(t testing.TB, app TestInstance[T], accs []simtypes.Account),
+) {
+ t.Helper()
+
+RunWithSeedsAndRandAcc(t, appFactory, setupStateFactory, seeds, fuzzSeed, simtypes.RandomAccounts, postRunActions...)
+}
+
+// RunWithSeedsAndRandAcc calls RunWithSeeds with randAccFn
+func RunWithSeedsAndRandAcc[T SimulationApp](
+ t *testing.T,
+ appFactory func(
+ logger log.Logger,
+ db dbm.DB,
+ traceStore io.Writer,
+ loadLatest bool,
+ appOpts servertypes.AppOptions,
+ baseAppOptions ...func(*baseapp.BaseApp),
+ )
+
+T,
+ setupStateFactory func(app T)
+
+SimStateFactory,
+ seeds []int64,
+ fuzzSeed []byte,
+ randAccFn simtypes.RandomAccountFn,
+ postRunActions ...func(t testing.TB, app TestInstance[T], accs []simtypes.Account),
+) {
+ t.Helper()
+ if deprecatedParams := cli.GetDeprecatedFlagUsed(); len(deprecatedParams) != 0 {
+ fmt.Printf("Warning: Deprecated flag are used: %s", strings.Join(deprecatedParams, ","))
+}
+ cfg := cli.NewConfigFromFlags()
+
+cfg.ChainID = SimAppChainID
+ for i := range seeds {
+ seed := seeds[i]
+ t.Run(fmt.Sprintf("seed: %d", seed), func(t *testing.T) {
+ t.Parallel()
+
+RunWithSeed(t, cfg, appFactory, setupStateFactory, seed, fuzzSeed, postRunActions...)
+})
+}
+}
+
+// RunWithSeed is a helper function that runs a simulation test with the given parameters.
+// It iterates over the provided seeds and runs the simulation test for each seed in parallel.
+//
+// It sets up the environment, creates an instance of the simulation app,
+// calls the simulation.SimulateFromSeed function to run the simulation, and performs post-run actions for the seed.
+// The execution is deterministic and can be used for fuzz tests as well.
+func RunWithSeed[T SimulationApp](
+ tb testing.TB,
+ cfg simtypes.Config,
+ appFactory func(logger log.Logger, db dbm.DB, traceStore io.Writer, loadLatest bool, appOpts servertypes.AppOptions, baseAppOptions ...func(*baseapp.BaseApp))
+
+T,
+ setupStateFactory func(app T)
+
+SimStateFactory,
+ seed int64,
+ fuzzSeed []byte,
+ postRunActions ...func(t testing.TB, app TestInstance[T], accs []simtypes.Account),
+) {
+ tb.Helper()
+
+RunWithSeedAndRandAcc(tb, cfg, appFactory, setupStateFactory, seed, fuzzSeed, simtypes.RandomAccounts, postRunActions...)
+}
+
+// RunWithSeedAndRandAcc calls RunWithSeed with randAccFn
+func RunWithSeedAndRandAcc[T SimulationApp](
+ tb testing.TB,
+ cfg simtypes.Config,
+ appFactory func(logger log.Logger, db dbm.DB, traceStore io.Writer, loadLatest bool, appOpts servertypes.AppOptions, baseAppOptions ...func(*baseapp.BaseApp))
+
+T,
+ setupStateFactory func(app T)
+
+SimStateFactory,
+ seed int64,
+ fuzzSeed []byte,
+ randAccFn simtypes.RandomAccountFn,
+ postRunActions ...func(t testing.TB, app TestInstance[T], accs []simtypes.Account),
+) {
+ tb.Helper()
+ // setup environment
+ tCfg := cfg.With(tb, seed, fuzzSeed)
+ testInstance := NewSimulationAppInstance(tb, tCfg, appFactory)
+
+var runLogger log.Logger
+ if cli.FlagVerboseValue {
+ runLogger = log.NewTestLogger(tb)
+}
+
+else {
+ runLogger = log.NewTestLoggerInfo(tb)
+}
+
+runLogger = runLogger.With("seed", tCfg.Seed)
+ app := testInstance.App
+ stateFactory := setupStateFactory(app)
+
+ops, reporter := prepareWeightedOps(app.SimulationManager(), stateFactory, tCfg, testInstance.App.TxConfig(), runLogger)
+
+simParams, accs, err := simulation.SimulateFromSeedX(
+ tb,
+ runLogger,
+ WriteToDebugLog(runLogger),
+ app.GetBaseApp(),
+ stateFactory.AppStateFn,
+ randAccFn,
+ ops,
+ stateFactory.BlockedAddr,
+ tCfg,
+ stateFactory.Codec,
+ testInstance.ExecLogWriter,
+ )
+
+require.NoError(tb, err)
+
+err = simtestutil.CheckExportSimulation(app, tCfg, simParams)
+
+require.NoError(tb, err)
+ if tCfg.Commit {
+ simtestutil.PrintStats(testInstance.DB)
+}
+ // not using tb.Log to always print the summary
+ fmt.Printf("+++ DONE (seed: %d): \n%s\n", seed, reporter.Summary().String())
+ for _, step := range postRunActions {
+ step(tb, testInstance, accs)
+}
+
+require.NoError(tb, app.Close())
+}
+
+type (
+ HasWeightedOperationsX interface {
+ WeightedOperationsX(weight WeightSource, reg Registry)
+}
+
+HasWeightedOperationsXWithProposals interface {
+ WeightedOperationsX(weights WeightSource, reg Registry, proposals WeightedProposalMsgIter,
+ legacyProposals []simtypes.WeightedProposalContent) //nolint: staticcheck // used for legacy proposal types
+}
+
+HasProposalMsgsX interface {
+ ProposalMsgsX(weights WeightSource, reg Registry)
+}
+)
+
+type (
+ HasLegacyWeightedOperations interface {
+ // WeightedOperations simulation operations (i.e msgs)
+
+with their respective weight
+ WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation
+}
+ // HasLegacyProposalMsgs defines the messages that can be used to simulate governance (v1)
+
+proposals
+ // Deprecated replaced by HasProposalMsgsX
+ HasLegacyProposalMsgs interface {
+ // ProposalMsgs msg fu nctions used to simulate governance proposals
+ ProposalMsgs(simState module.SimulationState) []simtypes.WeightedProposalMsg
+}
+
+ // HasLegacyProposalContents defines the contents that can be used to simulate legacy governance (v1beta1)
+
+proposals
+ // Deprecated replaced by HasProposalMsgsX
+ HasLegacyProposalContents interface {
+ // ProposalContents content functions used to simulate governance proposals
+ ProposalContents(simState module.SimulationState) []simtypes.WeightedProposalContent //nolint:staticcheck // legacy v1beta1 governance
+}
+)
+
+// TestInstance is a generic type that represents an instance of a SimulationApp used for testing simulations.
+// It contains the following fields:
+// - App: The instance of the SimulationApp under test.
+// - DB: The LevelDB database for the simulation app.
+// - WorkDir: The temporary working directory for the simulation app.
+// - Cfg: The configuration flags for the simulator.
+// - AppLogger: The logger used for logging in the app during the simulation, with seed value attached.
+// - ExecLogWriter: Captures block and operation data coming from the simulation
+type TestInstance[T SimulationApp] struct {
+ App T
+ DB dbm.DB
+ WorkDir string
+ Cfg simtypes.Config
+ AppLogger log.Logger
+ ExecLogWriter simulation.LogWriter
+}
+
+// included to avoid cyclic dependency in testutils/sims
+func prepareWeightedOps(
+ sm *module.SimulationManager,
+ stateFact SimStateFactory,
+ config simtypes.Config,
+ txConfig client.TxConfig,
+ logger log.Logger,
+) (simulation.WeightedOperations, *BasicSimulationReporter) {
+ cdc := stateFact.Codec
+ simState := module.SimulationState{
+ AppParams: make(simtypes.AppParams),
+ Cdc: cdc,
+ TxConfig: txConfig,
+ BondDenom: sdk.DefaultBondDenom,
+}
+ if config.ParamsFile != "" {
+ bz, err := os.ReadFile(config.ParamsFile)
+ if err != nil {
+ panic(err)
+}
+
+err = json.Unmarshal(bz, &simState.AppParams)
+ if err != nil {
+ panic(err)
+}
+
+}
+ weights := ParamWeightSource(simState.AppParams)
+ reporter := NewBasicSimulationReporter()
+ pReg := make(UniqueTypeRegistry)
+ wContent := make([]simtypes.WeightedProposalContent, 0) //nolint:staticcheck // required for legacy type
+ legacyPReg := NewWeightedFactoryMethods()
+ // add gov proposals types
+ for _, m := range sm.Modules {
+ switch xm := m.(type) {
+ case HasProposalMsgsX:
+ xm.ProposalMsgsX(weights, pReg)
+ case HasLegacyProposalMsgs:
+ for _, p := range xm.ProposalMsgs(simState) {
+ weight := weights.Get(p.AppParamsKey(), safeUint(p.DefaultWeight()))
+
+legacyPReg.Add(weight, legacyToMsgFactoryAdapter(p.MsgSimulatorFn()))
+}
+ case HasLegacyProposalContents:
+ wContent = append(wContent, xm.ProposalContents(simState)...)
+}
+
+}
+ oReg := NewSimsMsgRegistryAdapter(
+ reporter,
+ stateFact.AccountSource,
+ stateFact.BalanceSource,
+ txConfig,
+ logger,
+ )
+ wOps := make([]simtypes.WeightedOperation, 0, len(sm.Modules))
+ for _, m := range sm.Modules {
+ // add operations
+ switch xm := m.(type) {
+ case HasWeightedOperationsX:
+ xm.WeightedOperationsX(weights, oReg)
+ case HasWeightedOperationsXWithProposals:
+ xm.WeightedOperationsX(weights, oReg, AppendIterators(legacyPReg.Iterator(), pReg.Iterator()), wContent)
+ case HasLegacyWeightedOperations:
+ wOps = append(wOps, xm.WeightedOperations(simState)...)
+}
+
+}
+
+return append(wOps, Collect(oReg.items, func(a weightedOperation)
+
+simtypes.WeightedOperation {
+ return a
+})...), reporter
+}
+
+func safeUint(p int)
+
+uint32 {
+ if p < 0 || p > math.MaxUint32 {
+ panic(fmt.Sprintf("can not cast to uint32: %d", p))
+}
+
+return uint32(p)
+}
+
+// NewSimulationAppInstance initializes and returns a TestInstance of a SimulationApp.
+// The function takes a testing.T instance, a simtypes.Config instance, and an appFactory function as parameters.
+// It creates a temporary working directory and a LevelDB database for the simulation app.
+// The function then initializes a logger based on the verbosity flag and sets the logger's seed to the test configuration's seed.
+// The database is closed and cleaned up on test completion.
+func NewSimulationAppInstance[T SimulationApp](
+ tb testing.TB,
+ tCfg simtypes.Config,
+ appFactory func(logger log.Logger, db dbm.DB, traceStore io.Writer, loadLatest bool, appOpts servertypes.AppOptions, baseAppOptions ...func(*baseapp.BaseApp))
+
+T,
+)
+
+TestInstance[T] {
+ tb.Helper()
+ workDir := tb.TempDir()
+
+require.NoError(tb, os.Mkdir(filepath.Join(workDir, "data"), 0o750))
+ dbDir := filepath.Join(workDir, "leveldb-app-sim")
+
+var logger log.Logger
+ if cli.FlagVerboseValue {
+ logger = log.NewTestLogger(tb)
+}
+
+else {
+ logger = log.NewTestLoggerError(tb)
+}
+
+logger = logger.With("seed", tCfg.Seed)
+
+db, err := dbm.NewDB("Simulation", dbm.BackendType(tCfg.DBBackend), dbDir)
+
+require.NoError(tb, err)
+
+tb.Cleanup(func() {
+ _ = db.Close() // ensure db is closed
+})
+ appOptions := make(simtestutil.AppOptionsMap)
+
+appOptions[flags.FlagHome] = workDir
+ opts := []func(*baseapp.BaseApp) {
+ baseapp.SetChainID(tCfg.ChainID)
+}
+ if tCfg.FauxMerkle {
+ opts = append(opts, FauxMerkleModeOpt)
+}
+ app := appFactory(logger, db, nil, true, appOptions, opts...)
+ if !cli.FlagSigverifyTxValue {
+ app.SetNotSigverifyTx()
+}
+
+return TestInstance[T]{
+ App: app,
+ DB: db,
+ WorkDir: workDir,
+ Cfg: tCfg,
+ AppLogger: logger,
+ ExecLogWriter: &simulation.StandardLogWriter{
+ Seed: tCfg.Seed
+},
+}
+}
+
// writerFn adapts a plain function to the io.Writer interface.
var _ io.Writer = writerFn(nil)

type writerFn func(p []byte) (n int, err error)

// Write implements io.Writer by delegating to the wrapped function.
func (w writerFn) Write(p []byte) (n int, err error) {
	return w(p)
}
+
+// WriteToDebugLog is an adapter to io.Writer interface
+func WriteToDebugLog(logger log.Logger)
+
+io.Writer {
+ return writerFn(func(p []byte) (n int, err error) {
+ logger.Debug(string(p))
+
+return len(p), nil
+})
+}
+
// FauxMerkleModeOpt returns a BaseApp option to use a dbStoreAdapter instead of
// an IAVLStore for faster simulation speed.
func FauxMerkleModeOpt(bapp *baseapp.BaseApp) {
 bapp.SetFauxMerkleMode()
}
+```
+
+These methods allow constructing randomized messages and/or proposal messages.
+
+
Note that modules should not implement both `HasWeightedOperationsX` and `HasWeightedOperationsXWithProposals`.
See the runner code [here](https://github.com/cosmos/cosmos-sdk/blob/main/testutil/simsx/runner.go#L330-L339) for details.
+
If the module does not have message handlers or governance proposal handlers, these interface methods do not need to be implemented.
+
+
+### Example Implementations
+
+* `HasWeightedOperationsXWithProposals`: [x/gov](https://github.com/cosmos/cosmos-sdk/blob/main/x/gov/module.go#L242-L261)
+* `HasWeightedOperationsX`: [x/bank](https://github.com/cosmos/cosmos-sdk/blob/main/x/bank/module.go#L199-L203)
+* `HasProposalMsgsX`: [x/bank](https://github.com/cosmos/cosmos-sdk/blob/main/x/bank/module.go#L194-L197)
+
+## Store decoders
+
+Registering the store decoders is required for the `AppImportExport` simulation. This allows
+for the key-value pairs from the stores to be decoded to their corresponding types.
In particular, it matches the key to a concrete type and then unmarshals the value from the `KVPair` to the type provided.
+
+Modules using [collections](https://github.com/cosmos/cosmos-sdk/blob/main/collections/README.md) can use the `NewStoreDecoderFuncFromCollectionsSchema` function that builds the decoder for you:
+
+```go expandable
+package bank
+
+import (
+
+ "context"
+ "encoding/json"
+ "fmt"
+ "maps"
+ "slices"
+ "sort"
+
+ gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+
+ modulev1 "cosmossdk.io/api/cosmos/bank/module/v1"
+ "cosmossdk.io/core/address"
+ "cosmossdk.io/core/appmodule"
+ corestore "cosmossdk.io/core/store"
+ "cosmossdk.io/depinject"
+ "cosmossdk.io/log"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/testutil/simsx"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ "github.com/cosmos/cosmos-sdk/x/bank/client/cli"
+ "github.com/cosmos/cosmos-sdk/x/bank/exported"
+ "github.com/cosmos/cosmos-sdk/x/bank/keeper"
+ v1bank "github.com/cosmos/cosmos-sdk/x/bank/migrations/v1"
+ "github.com/cosmos/cosmos-sdk/x/bank/simulation"
+ "github.com/cosmos/cosmos-sdk/x/bank/types"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+)
+
+// ConsensusVersion defines the current x/bank module consensus version.
+const ConsensusVersion = 4
+
+var (
+ _ module.AppModuleBasic = AppModule{
+}
+ _ module.AppModuleSimulation = AppModule{
+}
+ _ module.HasGenesis = AppModule{
+}
+ _ module.HasServices = AppModule{
+}
+
+ _ appmodule.AppModule = AppModule{
+}
+)
+
+// AppModuleBasic defines the basic application module used by the bank module.
+type AppModuleBasic struct {
+ cdc codec.Codec
+ ac address.Codec
+}
+
+// Name returns the bank module's name.
+func (AppModuleBasic)
+
+Name()
+
+string {
+ return types.ModuleName
+}
+
+// RegisterLegacyAminoCodec registers the bank module's types on the LegacyAmino codec.
+func (AppModuleBasic)
+
+RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ types.RegisterLegacyAminoCodec(cdc)
+}
+
+// DefaultGenesis returns default genesis state as raw bytes for the bank
+// module.
+func (AppModuleBasic)
+
+DefaultGenesis(cdc codec.JSONCodec)
+
+json.RawMessage {
+ return cdc.MustMarshalJSON(types.DefaultGenesisState())
+}
+
+// ValidateGenesis performs genesis state validation for the bank module.
+func (AppModuleBasic)
+
+ValidateGenesis(cdc codec.JSONCodec, _ client.TxEncodingConfig, bz json.RawMessage)
+
+error {
+ var data types.GenesisState
+ if err := cdc.UnmarshalJSON(bz, &data); err != nil {
+ return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err)
+}
+
+return data.Validate()
+}
+
+// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the bank module.
+func (AppModuleBasic)
+
+RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *gwruntime.ServeMux) {
+ if err := types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)); err != nil {
+ panic(err)
+}
+}
+
+// GetTxCmd returns the root tx command for the bank module.
+func (ab AppModuleBasic)
+
+GetTxCmd() *cobra.Command {
+ return cli.NewTxCmd(ab.ac)
+}
+
+// RegisterInterfaces registers interfaces and implementations of the bank module.
+func (AppModuleBasic)
+
+RegisterInterfaces(registry codectypes.InterfaceRegistry) {
+ types.RegisterInterfaces(registry)
+
+ // Register legacy interfaces for migration scripts.
+ v1bank.RegisterInterfaces(registry)
+}
+
+// AppModule implements an application module for the bank module.
+type AppModule struct {
+ AppModuleBasic
+
+ keeper keeper.Keeper
+ accountKeeper types.AccountKeeper
+
+ // legacySubspace is used solely for migration of x/params managed parameters
+ legacySubspace exported.Subspace
+}
+
+// IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+func (am AppModule)
+
+IsOnePerModuleType() {
+}
+
+// IsAppModule implements the appmodule.AppModule interface.
+func (am AppModule)
+
+IsAppModule() {
+}
+
+// RegisterServices registers module services.
+func (am AppModule)
+
+RegisterServices(cfg module.Configurator) {
+ types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper))
+
+types.RegisterQueryServer(cfg.QueryServer(), am.keeper)
+ m := keeper.NewMigrator(am.keeper.(keeper.BaseKeeper), am.legacySubspace)
+ if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil {
+ panic(fmt.Sprintf("failed to migrate x/bank from version 1 to 2: %v", err))
+}
+ if err := cfg.RegisterMigration(types.ModuleName, 2, m.Migrate2to3); err != nil {
+ panic(fmt.Sprintf("failed to migrate x/bank from version 2 to 3: %v", err))
+}
+ if err := cfg.RegisterMigration(types.ModuleName, 3, m.Migrate3to4); err != nil {
+ panic(fmt.Sprintf("failed to migrate x/bank from version 3 to 4: %v", err))
+}
+}
+
+// NewAppModule creates a new AppModule object
+func NewAppModule(cdc codec.Codec, keeper keeper.Keeper, accountKeeper types.AccountKeeper, ss exported.Subspace)
+
+AppModule {
+ return AppModule{
+ AppModuleBasic: AppModuleBasic{
+ cdc: cdc, ac: accountKeeper.AddressCodec()
+},
+ keeper: keeper,
+ accountKeeper: accountKeeper,
+ legacySubspace: ss,
+}
+}
+
+// QuerierRoute returns the bank module's querier route name.
+func (AppModule)
+
+QuerierRoute()
+
+string {
+ return types.RouterKey
+}
+
+// InitGenesis performs genesis initialization for the bank module. It returns
+// no validator updates.
+func (am AppModule)
+
+InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) {
+ var genesisState types.GenesisState
+ cdc.MustUnmarshalJSON(data, &genesisState)
+
+am.keeper.InitGenesis(ctx, &genesisState)
+}
+
+// ExportGenesis returns the exported genesis state as raw bytes for the bank
+// module.
+func (am AppModule)
+
+ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec)
+
+json.RawMessage {
+ gs := am.keeper.ExportGenesis(ctx)
+
+return cdc.MustMarshalJSON(gs)
+}
+
+// ConsensusVersion implements AppModule/ConsensusVersion.
+func (AppModule)
+
+ConsensusVersion()
+
+uint64 {
+ return ConsensusVersion
+}
+
+// AppModuleSimulation functions
+
+// GenerateGenesisState creates a randomized GenState of the bank module.
+func (AppModule)
+
+GenerateGenesisState(simState *module.SimulationState) {
+ simulation.RandomizedGenState(simState)
+}
+
+// ProposalMsgs returns msgs used for governance proposals for simulations.
+// migrate to ProposalMsgsX. This method is ignored when ProposalMsgsX exists and will be removed in the future.
+func (AppModule)
+
+ProposalMsgs(simState module.SimulationState) []simtypes.WeightedProposalMsg {
+ return simulation.ProposalMsgs()
+}
+
+// RegisterStoreDecoder registers a decoder for supply module's types
+func (am AppModule)
+
+RegisterStoreDecoder(sdr simtypes.StoreDecoderRegistry) {
+ sdr[types.StoreKey] = simtypes.NewStoreDecoderFuncFromCollectionsSchema(am.keeper.(keeper.BaseKeeper).Schema)
+}
+
+// WeightedOperations returns the all the bank module operations with their respective weights.
+// migrate to WeightedOperationsX. This method is ignored when WeightedOperationsX exists and will be removed in the future
+func (am AppModule)
+
+WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation {
+ return simulation.WeightedOperations(
+ simState.AppParams, simState.Cdc, simState.TxConfig, am.accountKeeper, am.keeper,
+ )
+}
+
+// ProposalMsgsX registers governance proposal messages in the simulation registry.
+func (AppModule)
+
+ProposalMsgsX(weights simsx.WeightSource, reg simsx.Registry) {
+ reg.Add(weights.Get("msg_update_params", 100), simulation.MsgUpdateParamsFactory())
+}
+
+// WeightedOperationsX registers weighted bank module operations for simulation.
+func (am AppModule)
+
+WeightedOperationsX(weights simsx.WeightSource, reg simsx.Registry) {
+ reg.Add(weights.Get("msg_send", 100), simulation.MsgSendFactory())
+
+reg.Add(weights.Get("msg_multisend", 10), simulation.MsgMultiSendFactory())
+}
+
+// App Wiring Setup
+
+func init() {
+ appmodule.Register(
+ &modulev1.Module{
+},
+ appmodule.Provide(ProvideModule),
+ appmodule.Invoke(InvokeSetSendRestrictions),
+ )
+}
+
+type ModuleInputs struct {
+ depinject.In
+
+ Config *modulev1.Module
+ Cdc codec.Codec
+ StoreService corestore.KVStoreService
+ Logger log.Logger
+
+ AccountKeeper types.AccountKeeper
+
+ // LegacySubspace is used solely for migration of x/params managed parameters
+ LegacySubspace exported.Subspace `optional:"true"`
+}
+
+type ModuleOutputs struct {
+ depinject.Out
+
+ BankKeeper keeper.BaseKeeper
+ Module appmodule.AppModule
+}
+
+func ProvideModule(in ModuleInputs)
+
+ModuleOutputs {
+ // Configure blocked module accounts.
+ //
+ // Default behavior for blockedAddresses is to regard any module mentioned in
+ // AccountKeeper's module account permissions as blocked.
+ blockedAddresses := make(map[string]bool)
+ if len(in.Config.BlockedModuleAccountsOverride) > 0 {
+ for _, moduleName := range in.Config.BlockedModuleAccountsOverride {
+ blockedAddresses[authtypes.NewModuleAddress(moduleName).String()] = true
+}
+
+}
+
+else {
+ for _, permission := range in.AccountKeeper.GetModulePermissions() {
+ blockedAddresses[permission.GetAddress().String()] = true
+}
+
+}
+
+ // default to governance authority if not provided
+ authority := authtypes.NewModuleAddress(govtypes.ModuleName)
+ if in.Config.Authority != "" {
+ authority = authtypes.NewModuleAddressOrBech32Address(in.Config.Authority)
+}
+ bankKeeper := keeper.NewBaseKeeper(
+ in.Cdc,
+ in.StoreService,
+ in.AccountKeeper,
+ blockedAddresses,
+ authority.String(),
+ in.Logger,
+ )
+ m := NewAppModule(in.Cdc, bankKeeper, in.AccountKeeper, in.LegacySubspace)
+
+return ModuleOutputs{
+ BankKeeper: bankKeeper,
+ Module: m
+}
+}
+
+func InvokeSetSendRestrictions(
+ config *modulev1.Module,
+ keeper keeper.BaseKeeper,
+ restrictions map[string]types.SendRestrictionFn,
+)
+
+error {
+ if config == nil {
+ return nil
+}
+ modules := slices.Collect(maps.Keys(restrictions))
+ order := config.RestrictionsOrder
+ if len(order) == 0 {
+ order = modules
+ sort.Strings(order)
+}
+ if len(order) != len(modules) {
+ return fmt.Errorf("len(restrictions order: %v) != len(restriction modules: %v)", order, modules)
+}
+ if len(modules) == 0 {
+ return nil
+}
+ for _, module := range order {
+ restriction, ok := restrictions[module]
+ if !ok {
+ return fmt.Errorf("can't find send restriction for module %s", module)
+}
+
+keeper.AppendSendRestriction(restriction)
+}
+
+return nil
+}
+```
+
+Modules not using collections must manually build the store decoder.
+See the implementation [here](https://github.com/cosmos/cosmos-sdk/blob/main/x/distribution/simulation/decoder.go) from the distribution module for an example.
+
+## Randomized genesis
+
+The simulator tests different scenarios and values for genesis parameters.
+App modules must implement a `GenerateGenesisState` method to generate the initial random `GenesisState` from a given seed.
+
+```go expandable
+package module
+
+import (
+
+ "encoding/json"
+ "math/rand"
+ "sort"
+ "time"
+
+ sdkmath "cosmossdk.io/math"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/types/simulation"
+)
+
+// AppModuleSimulation defines the standard functions that every module should expose
+// for the SDK blockchain simulator
+type AppModuleSimulation interface {
+ // randomized genesis states
+ GenerateGenesisState(input *SimulationState)
+
+ // register a func to decode the each module's defined types from their corresponding store key
+ RegisterStoreDecoder(simulation.StoreDecoderRegistry)
+
+ // simulation operations (i.e msgs)
+
+with their respective weight
+ WeightedOperations(simState SimulationState) []simulation.WeightedOperation
+}
+
+// HasProposalMsgs defines the messages that can be used to simulate governance (v1)
+
+proposals
+type HasProposalMsgs interface {
+ // msg functions used to simulate governance proposals
+ ProposalMsgs(simState SimulationState) []simulation.WeightedProposalMsg
+}
+
+// HasProposalContents defines the contents that can be used to simulate legacy governance (v1beta1)
+
+proposals
+type HasProposalContents interface {
+ // content functions used to simulate governance proposals
+ ProposalContents(simState SimulationState) []simulation.WeightedProposalContent //nolint:staticcheck // legacy v1beta1 governance
+}
+
+// SimulationManager defines a simulation manager that provides the high level utility
+// for managing and executing simulation functionalities for a group of modules
+type SimulationManager struct {
+ Modules []AppModuleSimulation // array of app modules; we use an array for deterministic simulation tests
+ StoreDecoders simulation.StoreDecoderRegistry // functions to decode the key-value pairs from each module's store
+}
+
+// NewSimulationManager creates a new SimulationManager object
+//
+// CONTRACT: All the modules provided must be also registered on the module Manager
+func NewSimulationManager(modules ...AppModuleSimulation) *SimulationManager {
+ return &SimulationManager{
+ Modules: modules,
+ StoreDecoders: make(simulation.StoreDecoderRegistry),
+}
+}
+
+// NewSimulationManagerFromAppModules creates a new SimulationManager object.
+//
+// First it sets any SimulationModule provided by overrideModules, and ignores any AppModule
+// with the same moduleName.
+// Then it attempts to cast every provided AppModule into an AppModuleSimulation.
+// If the cast succeeds, its included, otherwise it is excluded.
+func NewSimulationManagerFromAppModules(modules map[string]any, overrideModules map[string]AppModuleSimulation) *SimulationManager {
+ simModules := []AppModuleSimulation{
+}
+ appModuleNamesSorted := make([]string, 0, len(modules))
+ for moduleName := range modules {
+ appModuleNamesSorted = append(appModuleNamesSorted, moduleName)
+}
+
+sort.Strings(appModuleNamesSorted)
+ for _, moduleName := range appModuleNamesSorted {
+ // for every module, see if we override it. If so, use override.
+ // Else, if we can cast the app module into a simulation module add it.
+ // otherwise no simulation module.
+ if simModule, ok := overrideModules[moduleName]; ok {
+ simModules = append(simModules, simModule)
+}
+
+else {
+ appModule := modules[moduleName]
+ if simModule, ok := appModule.(AppModuleSimulation); ok {
+ simModules = append(simModules, simModule)
+}
+ // cannot cast, so we continue
+}
+
+}
+
+return NewSimulationManager(simModules...)
+}
+
+// Deprecated: Use GetProposalMsgs instead.
+// GetProposalContents returns each module's proposal content generator function
+// with their default operation weight and key.
+func (sm *SimulationManager)
+
+GetProposalContents(simState SimulationState) []simulation.WeightedProposalContent {
+ wContents := make([]simulation.WeightedProposalContent, 0, len(sm.Modules))
+ for _, module := range sm.Modules {
+ if module, ok := module.(HasProposalContents); ok {
+ wContents = append(wContents, module.ProposalContents(simState)...)
+}
+
+}
+
+return wContents
+}
+
+// GetProposalMsgs returns each module's proposal msg generator function
+// with their default operation weight and key.
+func (sm *SimulationManager)
+
+GetProposalMsgs(simState SimulationState) []simulation.WeightedProposalMsg {
+ wContents := make([]simulation.WeightedProposalMsg, 0, len(sm.Modules))
+ for _, module := range sm.Modules {
+ if module, ok := module.(HasProposalMsgs); ok {
+ wContents = append(wContents, module.ProposalMsgs(simState)...)
+}
+
+}
+
+return wContents
+}
+
+// RegisterStoreDecoders registers each of the modules' store decoders into a map
+func (sm *SimulationManager)
+
+RegisterStoreDecoders() {
+ for _, module := range sm.Modules {
+ module.RegisterStoreDecoder(sm.StoreDecoders)
+}
+}
+
+// GenerateGenesisStates generates a randomized GenesisState for each of the
+// registered modules
+func (sm *SimulationManager)
+
+GenerateGenesisStates(simState *SimulationState) {
+ for _, module := range sm.Modules {
+ module.GenerateGenesisState(simState)
+}
+}
+
+// WeightedOperations returns all the modules' weighted operations of an application
+func (sm *SimulationManager)
+
+WeightedOperations(simState SimulationState) []simulation.WeightedOperation {
+ wOps := make([]simulation.WeightedOperation, 0, len(sm.Modules))
+ for _, module := range sm.Modules {
+ wOps = append(wOps, module.WeightedOperations(simState)...)
+}
+
+return wOps
+}
+
+// SimulationState is the input parameters used on each of the module's randomized
+// GenesisState generator function
+type SimulationState struct {
+ AppParams simulation.AppParams
+ Cdc codec.JSONCodec // application codec
+ TxConfig client.TxConfig // Shared TxConfig; this is expensive to create and stateless, so create it once up front.
+ Rand *rand.Rand // random number
+ GenState map[string]json.RawMessage // genesis state
+ Accounts []simulation.Account // simulation accounts
+ InitialStake sdkmath.Int // initial coins per account
+ NumBonded int64 // number of initially bonded accounts
+ BondDenom string // denom to be used as default
+ GenTimestamp time.Time // genesis timestamp
+ UnbondTime time.Duration // staking unbond time stored to use it as the slashing maximum evidence duration
+ LegacyParamChange []simulation.LegacyParamChange // simulated parameter changes from modules
+ //nolint:staticcheck // legacy used for testing
+ LegacyProposalContents []simulation.WeightedProposalContent // proposal content generator functions with their default weight and app sim key
+ ProposalMsgs []simulation.WeightedProposalMsg // proposal msg generator functions with their default weight and app sim key
+}
+```
+
+See an example from `x/auth` [here](https://github.com/cosmos/cosmos-sdk/blob/main/x/auth/module.go#L169-L172).
+
+Once the module's genesis parameters are generated randomly (or with the key and
+values defined in a `params` file), they are marshaled to JSON format and added
+to the app genesis JSON for the simulation.
+
+## Random weighted operations
+
+Operations are one of the crucial parts of the Cosmos SDK simulation. They are the transactions
+(`Msg`) that are simulated with random field values. The sender of the operation
+is also assigned randomly.
+
+Operations on the simulation are simulated using the full [transaction cycle](/sdk/v0.53/learn/advanced/transactions) of a
+`ABCI` application that exposes the `BaseApp`.
+
+### Using Simsx
+
+Simsx introduces the ability to define a `MsgFactory` for each of a module's messages.
+
+These factories are registered in `WeightedOperationsX` and/or `ProposalMsgsX`.
+
+```go expandable
+package distribution
+
+import (
+
+ "context"
+ "encoding/json"
+ "fmt"
+
+ gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+
+ modulev1 "cosmossdk.io/api/cosmos/distribution/module/v1"
+ "cosmossdk.io/core/address"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/core/store"
+ "cosmossdk.io/depinject"
+
+ sdkclient "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ cdctypes "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/testutil/simsx"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ "github.com/cosmos/cosmos-sdk/x/distribution/client/cli"
+ "github.com/cosmos/cosmos-sdk/x/distribution/exported"
+ "github.com/cosmos/cosmos-sdk/x/distribution/keeper"
+ "github.com/cosmos/cosmos-sdk/x/distribution/simulation"
+ "github.com/cosmos/cosmos-sdk/x/distribution/types"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ staking "github.com/cosmos/cosmos-sdk/x/staking/types"
+)
+
+// ConsensusVersion defines the current x/distribution module consensus version.
+const ConsensusVersion = 3
+
+var (
+ _ module.AppModuleBasic = AppModule{
+}
+ _ module.AppModuleSimulation = AppModule{
+}
+ _ module.HasGenesis = AppModule{
+}
+ _ module.HasServices = AppModule{
+}
+
+ _ appmodule.AppModule = AppModule{
+}
+ _ appmodule.HasBeginBlocker = AppModule{
+}
+)
+
+// AppModuleBasic defines the basic application module used by the distribution module.
+type AppModuleBasic struct {
+ cdc codec.Codec
+ ac address.Codec
+}
+
+// Name returns the distribution module's name.
+func (AppModuleBasic)
+
+Name()
+
+string {
+ return types.ModuleName
+}
+
+// RegisterLegacyAminoCodec registers the distribution module's types for the given codec.
+func (AppModuleBasic)
+
+RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ types.RegisterLegacyAminoCodec(cdc)
+}
+
+// DefaultGenesis returns default genesis state as raw bytes for the distribution
+// module.
+func (AppModuleBasic)
+
+DefaultGenesis(cdc codec.JSONCodec)
+
+json.RawMessage {
+ return cdc.MustMarshalJSON(types.DefaultGenesisState())
+}
+
+// ValidateGenesis performs genesis state validation for the distribution module.
+func (AppModuleBasic)
+
+ValidateGenesis(cdc codec.JSONCodec, _ sdkclient.TxEncodingConfig, bz json.RawMessage)
+
+error {
+ var data types.GenesisState
+ if err := cdc.UnmarshalJSON(bz, &data); err != nil {
+ return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err)
+}
+
+return types.ValidateGenesis(&data)
+}
+
+// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the distribution module.
+func (AppModuleBasic)
+
+RegisterGRPCGatewayRoutes(clientCtx sdkclient.Context, mux *gwruntime.ServeMux) {
+ if err := types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)); err != nil {
+ panic(err)
+}
+}
+
+// GetTxCmd returns the root tx command for the distribution module.
+func (ab AppModuleBasic)
+
+GetTxCmd() *cobra.Command {
+ return cli.NewTxCmd(ab.cdc.InterfaceRegistry().SigningContext().ValidatorAddressCodec(), ab.cdc.InterfaceRegistry().SigningContext().AddressCodec())
+}
+
+// RegisterInterfaces implements InterfaceModule
+func (AppModuleBasic)
+
+RegisterInterfaces(registry cdctypes.InterfaceRegistry) {
+ types.RegisterInterfaces(registry)
+}
+
+// AppModule implements an application module for the distribution module.
+type AppModule struct {
+ AppModuleBasic
+
+ keeper keeper.Keeper
+ accountKeeper types.AccountKeeper
+ bankKeeper types.BankKeeper
+ stakingKeeper types.StakingKeeper
+
+ // legacySubspace is used solely for migration of x/params managed parameters
+ legacySubspace exported.Subspace
+}
+
+// NewAppModule creates a new AppModule object
+func NewAppModule(
+ cdc codec.Codec, keeper keeper.Keeper, accountKeeper types.AccountKeeper,
+ bankKeeper types.BankKeeper, stakingKeeper types.StakingKeeper, ss exported.Subspace,
+)
+
+AppModule {
+ return AppModule{
+ AppModuleBasic: AppModuleBasic{
+ cdc: cdc, ac: accountKeeper.AddressCodec()
+},
+ keeper: keeper,
+ accountKeeper: accountKeeper,
+ bankKeeper: bankKeeper,
+ stakingKeeper: stakingKeeper,
+ legacySubspace: ss,
+}
+}
+
+// IsOnePerModuleType implements the depinject.OnePerModuleType interface.
+func (am AppModule)
+
+IsOnePerModuleType() {
+}
+
+// IsAppModule implements the appmodule.AppModule interface.
+func (am AppModule)
+
+IsAppModule() {
+}
+
+// RegisterServices registers module services.
+func (am AppModule)
+
+RegisterServices(cfg module.Configurator) {
+ types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper))
+
+types.RegisterQueryServer(cfg.QueryServer(), keeper.NewQuerier(am.keeper))
+ m := keeper.NewMigrator(am.keeper, am.legacySubspace)
+ if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil {
+ panic(fmt.Sprintf("failed to migrate x/%s from version 1 to 2: %v", types.ModuleName, err))
+}
+ if err := cfg.RegisterMigration(types.ModuleName, 2, m.Migrate2to3); err != nil {
+ panic(fmt.Sprintf("failed to migrate x/%s from version 2 to 3: %v", types.ModuleName, err))
+}
+}
+
+// InitGenesis performs genesis initialization for the distribution module. It returns
+// no validator updates.
+func (am AppModule)
+
+InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) {
+ var genesisState types.GenesisState
+ cdc.MustUnmarshalJSON(data, &genesisState)
+
+am.keeper.InitGenesis(ctx, genesisState)
+}
+
+// ExportGenesis returns the exported genesis state as raw bytes for the distribution
+// module.
+func (am AppModule)
+
+ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec)
+
+json.RawMessage {
+ gs := am.keeper.ExportGenesis(ctx)
+
+return cdc.MustMarshalJSON(gs)
+}
+
+// ConsensusVersion implements AppModule/ConsensusVersion.
+func (AppModule)
+
+ConsensusVersion()
+
+uint64 {
+ return ConsensusVersion
+}
+
+// BeginBlock returns the begin blocker for the distribution module.
+func (am AppModule)
+
+BeginBlock(ctx context.Context)
+
+error {
+ c := sdk.UnwrapSDKContext(ctx)
+
+return BeginBlocker(c, am.keeper)
+}
+
+// AppModuleSimulation functions
+
+// GenerateGenesisState creates a randomized GenState of the distribution module.
+func (AppModule)
+
+GenerateGenesisState(simState *module.SimulationState) {
+ simulation.RandomizedGenState(simState)
+}
+
+// ProposalMsgs returns msgs used for governance proposals for simulations.
+// migrate to ProposalMsgsX. This method is ignored when ProposalMsgsX exists and will be removed in the future.
+func (AppModule)
+
+ProposalMsgs(_ module.SimulationState) []simtypes.WeightedProposalMsg {
+ return simulation.ProposalMsgs()
+}
+
+// RegisterStoreDecoder registers a decoder for distribution module's types
+func (am AppModule)
+
+RegisterStoreDecoder(sdr simtypes.StoreDecoderRegistry) {
+ sdr[types.StoreKey] = simulation.NewDecodeStore(am.cdc)
+}
+
+// WeightedOperations returns the all the gov module operations with their respective weights.
+// migrate to WeightedOperationsX. This method is ignored when WeightedOperationsX exists and will be removed in the future
+func (am AppModule)
+
+WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation {
+ return simulation.WeightedOperations(
+ simState.AppParams, simState.Cdc, simState.TxConfig,
+ am.accountKeeper, am.bankKeeper, am.keeper, am.stakingKeeper,
+ )
+}
+
+// ProposalMsgsX registers governance proposal messages in the simulation registry.
+func (AppModule)
+
+ProposalMsgsX(weights simsx.WeightSource, reg simsx.Registry) {
+ reg.Add(weights.Get("msg_update_params", 100), simulation.MsgUpdateParamsFactory())
+}
+
+// WeightedOperationsX registers weighted distribution module operations for simulation.
+func (am AppModule) WeightedOperationsX(weights simsx.WeightSource, reg simsx.Registry) {
+    reg.Add(weights.Get("msg_set_withdraw_address", 50), simulation.MsgSetWithdrawAddressFactory(am.keeper))
+    reg.Add(weights.Get("msg_withdraw_delegation_reward", 50), simulation.MsgWithdrawDelegatorRewardFactory(am.keeper, am.stakingKeeper))
+    reg.Add(weights.Get("msg_withdraw_validator_commission", 50), simulation.MsgWithdrawValidatorCommissionFactory(am.keeper, am.stakingKeeper))
+}
+
+//
+// App Wiring Setup
+//
+
+// init registers the distribution module with the app-wiring (depinject) framework.
+func init() {
+    appmodule.Register(
+        &modulev1.Module{},
+        appmodule.Provide(ProvideModule),
+    )
+}
+
+// ModuleInputs defines the dependencies injected into ProvideModule via depinject.
+type ModuleInputs struct {
+    depinject.In
+
+    Config       *modulev1.Module
+    StoreService store.KVStoreService
+    Cdc          codec.Codec
+
+    AccountKeeper      types.AccountKeeper
+    BankKeeper         types.BankKeeper
+    StakingKeeper      types.StakingKeeper
+    ExternalPoolKeeper types.ExternalCommunityPoolKeeper `optional:"true"`
+
+    // LegacySubspace is used solely for migration of x/params managed parameters
+    LegacySubspace exported.Subspace `optional:"true"`
+}
+
+// ModuleOutputs declares the values this module makes available to the rest of the app.
+type ModuleOutputs struct {
+    depinject.Out
+
+    DistrKeeper keeper.Keeper
+    Module      appmodule.AppModule
+    Hooks       staking.StakingHooksWrapper
+}
+
+// ProvideModule constructs the distribution keeper and module for app wiring.
+func ProvideModule(in ModuleInputs) ModuleOutputs {
+    feeCollectorName := in.Config.FeeCollectorName
+    if feeCollectorName == "" {
+        feeCollectorName = authtypes.FeeCollectorName
+    }
+
+    // default to governance authority if not provided
+    authority := authtypes.NewModuleAddress(govtypes.ModuleName)
+    if in.Config.Authority != "" {
+        authority = authtypes.NewModuleAddressOrBech32Address(in.Config.Authority)
+    }
+
+    // route community pool funds externally only when an external pool keeper was wired in
+    var opts []keeper.InitOption
+    if in.ExternalPoolKeeper != nil {
+        opts = append(opts, keeper.WithExternalCommunityPool(in.ExternalPoolKeeper))
+    }
+
+    k := keeper.NewKeeper(
+        in.Cdc,
+        in.StoreService,
+        in.AccountKeeper,
+        in.BankKeeper,
+        in.StakingKeeper,
+        feeCollectorName,
+        authority.String(),
+        opts...,
+    )
+    m := NewAppModule(in.Cdc, k, in.AccountKeeper, in.BankKeeper, in.StakingKeeper, in.LegacySubspace)
+
+    return ModuleOutputs{
+        DistrKeeper: k,
+        Module:      m,
+        Hooks: staking.StakingHooksWrapper{
+            StakingHooks: k.Hooks(),
+        },
+    }
+}
+```
+
+Note that the name passed to `weights.Get` must match the name of the operation defined in `WeightedOperations`.
+
+For example, if the module contains an operation `op_weight_msg_set_withdraw_address`, the name passed to `weights.Get` should be `msg_set_withdraw_address`.
+
+See the `x/distribution` module for an example of implementing message factories [here](https://github.com/cosmos/cosmos-sdk/blob/main/x/distribution/simulation/msg_factory.go).
+
+## App Simulator manager
+
+The next step is setting up the `SimulationManager` at the app level. This
+is required for the simulation test files in the next step.
+
+```go
+type CoolApp struct {
+    ...
+    // simulation manager used by the simulation test framework
+    sm *module.SimulationManager
+}
+```
+
+Within the constructor of the application, construct the simulation manager using the modules from `ModuleManager` and call the `RegisterStoreDecoders` method.
+
+```go expandable
+//go:build app_v1
+
+package simapp
+
+import (
+
+ "encoding/json"
+ "fmt"
+ "io"
+ "maps"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ dbm "github.com/cosmos/cosmos-db"
+ "github.com/cosmos/gogoproto/proto"
+ "github.com/spf13/cast"
+
+ autocliv1 "cosmossdk.io/api/cosmos/autocli/v1"
+ reflectionv1 "cosmossdk.io/api/cosmos/reflection/v1"
+ "cosmossdk.io/client/v2/autocli"
+ clienthelpers "cosmossdk.io/client/v2/helpers"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/log"
+ storetypes "cosmossdk.io/store/types"
+ "cosmossdk.io/x/tx/signing"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/client/grpc/cmtservice"
+ nodeservice "github.com/cosmos/cosmos-sdk/client/grpc/node"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/codec/address"
+ "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ runtimeservices "github.com/cosmos/cosmos-sdk/runtime/services"
+ "github.com/cosmos/cosmos-sdk/server"
+ "github.com/cosmos/cosmos-sdk/server/api"
+ "github.com/cosmos/cosmos-sdk/server/config"
+ servertypes "github.com/cosmos/cosmos-sdk/server/types"
+ "github.com/cosmos/cosmos-sdk/std"
+ testdata_pulsar "github.com/cosmos/cosmos-sdk/testutil/testdata/testpb"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ sigtypes "github.com/cosmos/cosmos-sdk/types/tx/signing"
+ "github.com/cosmos/cosmos-sdk/version"
+ "github.com/cosmos/cosmos-sdk/x/auth"
+ "github.com/cosmos/cosmos-sdk/x/auth/ante"
+ authcodec "github.com/cosmos/cosmos-sdk/x/auth/codec"
+ authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
+ "github.com/cosmos/cosmos-sdk/x/auth/posthandler"
+ authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation"
+ "github.com/cosmos/cosmos-sdk/x/auth/tx"
+ authtx "github.com/cosmos/cosmos-sdk/x/auth/tx"
+ txmodule "github.com/cosmos/cosmos-sdk/x/auth/tx/config"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ "github.com/cosmos/cosmos-sdk/x/auth/vesting"
+ vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
+ "github.com/cosmos/cosmos-sdk/x/authz"
+ authzkeeper "github.com/cosmos/cosmos-sdk/x/authz/keeper"
+ authzmodule "github.com/cosmos/cosmos-sdk/x/authz/module"
+ "github.com/cosmos/cosmos-sdk/x/bank"
+ bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ consensus "github.com/cosmos/cosmos-sdk/x/consensus"
+ consensusparamkeeper "github.com/cosmos/cosmos-sdk/x/consensus/keeper"
+ consensusparamtypes "github.com/cosmos/cosmos-sdk/x/consensus/types"
+ distr "github.com/cosmos/cosmos-sdk/x/distribution"
+ distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper"
+ distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
+ "github.com/cosmos/cosmos-sdk/x/epochs"
+ epochskeeper "github.com/cosmos/cosmos-sdk/x/epochs/keeper"
+ epochstypes "github.com/cosmos/cosmos-sdk/x/epochs/types"
+ "github.com/cosmos/cosmos-sdk/x/evidence"
+ evidencekeeper "github.com/cosmos/cosmos-sdk/x/evidence/keeper"
+ evidencetypes "github.com/cosmos/cosmos-sdk/x/evidence/types"
+ "github.com/cosmos/cosmos-sdk/x/feegrant"
+ feegrantkeeper "github.com/cosmos/cosmos-sdk/x/feegrant/keeper"
+ feegrantmodule "github.com/cosmos/cosmos-sdk/x/feegrant/module"
+ "github.com/cosmos/cosmos-sdk/x/genutil"
+ genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
+ "github.com/cosmos/cosmos-sdk/x/gov"
+ govclient "github.com/cosmos/cosmos-sdk/x/gov/client"
+ govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ govv1beta1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1"
+ "github.com/cosmos/cosmos-sdk/x/mint"
+ mintkeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper"
+ minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
+ "github.com/cosmos/cosmos-sdk/x/protocolpool"
+ protocolpoolkeeper "github.com/cosmos/cosmos-sdk/x/protocolpool/keeper"
+ protocolpooltypes "github.com/cosmos/cosmos-sdk/x/protocolpool/types"
+ "github.com/cosmos/cosmos-sdk/x/slashing"
+ slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper"
+ slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types"
+ "github.com/cosmos/cosmos-sdk/x/staking"
+ stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper"
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+ "github.com/cosmos/cosmos-sdk/x/upgrade"
+ upgradekeeper "github.com/cosmos/cosmos-sdk/x/upgrade/keeper"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
+)
+
+const appName = "SimApp"
+
+var (
+    // DefaultNodeHome default home directories for the application daemon
+    DefaultNodeHome string
+
+    // module account permissions
+    maccPerms = map[string][]string{
+        authtypes.FeeCollectorName:                  nil,
+        distrtypes.ModuleName:                       nil,
+        minttypes.ModuleName:                        {authtypes.Minter},
+        stakingtypes.BondedPoolName:                 {authtypes.Burner, authtypes.Staking},
+        stakingtypes.NotBondedPoolName:              {authtypes.Burner, authtypes.Staking},
+        govtypes.ModuleName:                         {authtypes.Burner},
+        protocolpooltypes.ModuleName:                nil,
+        protocolpooltypes.ProtocolPoolEscrowAccount: nil,
+    }
+)
+
+var (
+ _ runtime.AppI = (*SimApp)(nil)
+ _ servertypes.Application = (*SimApp)(nil)
+)
+
+// SimApp extends an ABCI application, but with most of its parameters exported.
+// They are exported for convenience in creating helper functions, as object
+// capabilities aren't needed for testing.
+type SimApp struct {
+ *baseapp.BaseApp
+ legacyAmino *codec.LegacyAmino
+ appCodec codec.Codec
+ txConfig client.TxConfig
+ interfaceRegistry types.InterfaceRegistry
+
+ // keys to access the substores
+ keys map[string]*storetypes.KVStoreKey
+
+ // essential keepers
+ AccountKeeper authkeeper.AccountKeeper
+ BankKeeper bankkeeper.BaseKeeper
+ StakingKeeper *stakingkeeper.Keeper
+ SlashingKeeper slashingkeeper.Keeper
+ MintKeeper mintkeeper.Keeper
+ DistrKeeper distrkeeper.Keeper
+ GovKeeper govkeeper.Keeper
+ UpgradeKeeper *upgradekeeper.Keeper
+ EvidenceKeeper evidencekeeper.Keeper
+ ConsensusParamsKeeper consensusparamkeeper.Keeper
+
+ // supplementary keepers
+ FeeGrantKeeper feegrantkeeper.Keeper
+ AuthzKeeper authzkeeper.Keeper
+ EpochsKeeper epochskeeper.Keeper
+ ProtocolPoolKeeper protocolpoolkeeper.Keeper
+
+ // the module manager
+ ModuleManager *module.Manager
+ BasicModuleManager module.BasicManager
+
+ // simulation manager
+ sm *module.SimulationManager
+
+ // module configurator
+ configurator module.Configurator
+}
+
+// init resolves the default node home directory for the simapp daemon.
+func init() {
+    var err error
+    DefaultNodeHome, err = clienthelpers.GetNodeHomeDirectory(".simapp")
+    if err != nil {
+        panic(err)
+    }
+}
+
+// NewSimApp returns a reference to an initialized SimApp.
+func NewSimApp(
+ logger log.Logger,
+ db dbm.DB,
+ traceStore io.Writer,
+ loadLatest bool,
+ appOpts servertypes.AppOptions,
+ baseAppOptions ...func(*baseapp.BaseApp),
+) *SimApp {
+ interfaceRegistry, _ := types.NewInterfaceRegistryWithOptions(types.InterfaceRegistryOptions{
+ ProtoFiles: proto.HybridResolver,
+ SigningOptions: signing.Options{
+ AddressCodec: address.Bech32Codec{
+ Bech32Prefix: sdk.GetConfig().GetBech32AccountAddrPrefix(),
+},
+ ValidatorAddressCodec: address.Bech32Codec{
+ Bech32Prefix: sdk.GetConfig().GetBech32ValidatorAddrPrefix(),
+},
+},
+})
+ appCodec := codec.NewProtoCodec(interfaceRegistry)
+ legacyAmino := codec.NewLegacyAmino()
+ txConfig := tx.NewTxConfig(appCodec, tx.DefaultSignModes)
+ if err := interfaceRegistry.SigningContext().Validate(); err != nil {
+ panic(err)
+}
+
+std.RegisterLegacyAminoCodec(legacyAmino)
+
+std.RegisterInterfaces(interfaceRegistry)
+
+ // Below we could construct and set an application specific mempool and
+ // ABCI 1.0 PrepareProposal and ProcessProposal handlers. These defaults are
+ // already set in the SDK's BaseApp, this shows an example of how to override
+ // them.
+ //
+ // Example:
+ //
+ // bApp := baseapp.NewBaseApp(...)
+ // nonceMempool := mempool.NewSenderNonceMempool()
+ // abciPropHandler := NewDefaultProposalHandler(nonceMempool, bApp)
+ //
+ // bApp.SetMempool(nonceMempool)
+ // bApp.SetPrepareProposal(abciPropHandler.PrepareProposalHandler())
+ // bApp.SetProcessProposal(abciPropHandler.ProcessProposalHandler())
+ //
+ // Alternatively, you can construct BaseApp options, append those to
+ // baseAppOptions and pass them to NewBaseApp.
+ //
+ // Example:
+ //
+ // prepareOpt = func(app *baseapp.BaseApp) {
+ // abciPropHandler := baseapp.NewDefaultProposalHandler(nonceMempool, app)
+ // app.SetPrepareProposal(abciPropHandler.PrepareProposalHandler())
+ //
+}
+ // baseAppOptions = append(baseAppOptions, prepareOpt)
+
+ // create and set dummy vote extension handler
+ voteExtOp := func(bApp *baseapp.BaseApp) {
+ voteExtHandler := NewVoteExtensionHandler()
+
+voteExtHandler.SetHandlers(bApp)
+}
+
+baseAppOptions = append(baseAppOptions, voteExtOp, baseapp.SetOptimisticExecution())
+ bApp := baseapp.NewBaseApp(appName, logger, db, txConfig.TxDecoder(), baseAppOptions...)
+
+bApp.SetCommitMultiStoreTracer(traceStore)
+
+bApp.SetVersion(version.Version)
+
+bApp.SetInterfaceRegistry(interfaceRegistry)
+
+bApp.SetTxEncoder(txConfig.TxEncoder())
+ keys := storetypes.NewKVStoreKeys(
+ authtypes.StoreKey,
+ banktypes.StoreKey,
+ stakingtypes.StoreKey,
+ minttypes.StoreKey,
+ distrtypes.StoreKey,
+ slashingtypes.StoreKey,
+ govtypes.StoreKey,
+ consensusparamtypes.StoreKey,
+ upgradetypes.StoreKey,
+ feegrant.StoreKey,
+ evidencetypes.StoreKey,
+ authzkeeper.StoreKey,
+ epochstypes.StoreKey,
+ protocolpooltypes.StoreKey,
+ )
+
+ // register streaming services
+ if err := bApp.RegisterStreamingServices(appOpts, keys); err != nil {
+ panic(err)
+}
+ app := &SimApp{
+ BaseApp: bApp,
+ legacyAmino: legacyAmino,
+ appCodec: appCodec,
+ txConfig: txConfig,
+ interfaceRegistry: interfaceRegistry,
+ keys: keys,
+}
+
+ // set the BaseApp's parameter store
+ app.ConsensusParamsKeeper = consensusparamkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[consensusparamtypes.StoreKey]),
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ runtime.EventService{
+},
+ )
+
+bApp.SetParamStore(app.ConsensusParamsKeeper.ParamsStore)
+
+ // add keepers
+ app.AccountKeeper = authkeeper.NewAccountKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[authtypes.StoreKey]),
+ authtypes.ProtoBaseAccount,
+ maccPerms,
+ authcodec.NewBech32Codec(sdk.Bech32MainPrefix),
+ sdk.Bech32MainPrefix,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ authkeeper.WithUnorderedTransactions(true),
+ )
+
+app.BankKeeper = bankkeeper.NewBaseKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[banktypes.StoreKey]),
+ app.AccountKeeper,
+ BlockedAddresses(),
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ logger,
+ )
+
+ // optional: enable sign mode textual by overwriting the default tx config (after setting the bank keeper)
+ enabledSignModes := append(tx.DefaultSignModes, sigtypes.SignMode_SIGN_MODE_TEXTUAL)
+ txConfigOpts := tx.ConfigOptions{
+ EnabledSignModes: enabledSignModes,
+ TextualCoinMetadataQueryFn: txmodule.NewBankKeeperCoinMetadataQueryFn(app.BankKeeper),
+}
+
+txConfig, err := tx.NewTxConfigWithOptions(
+ appCodec,
+ txConfigOpts,
+ )
+ if err != nil {
+ panic(err)
+}
+
+app.txConfig = txConfig
+
+ app.StakingKeeper = stakingkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[stakingtypes.StoreKey]),
+ app.AccountKeeper,
+ app.BankKeeper,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ authcodec.NewBech32Codec(sdk.Bech32PrefixValAddr),
+ authcodec.NewBech32Codec(sdk.Bech32PrefixConsAddr),
+ )
+
+app.MintKeeper = mintkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[minttypes.StoreKey]),
+ app.StakingKeeper,
+ app.AccountKeeper,
+ app.BankKeeper,
+ authtypes.FeeCollectorName,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ // mintkeeper.WithMintFn(mintkeeper.DefaultMintFn(minttypes.DefaultInflationCalculationFn)), custom mintFn can be added here
+ )
+
+app.ProtocolPoolKeeper = protocolpoolkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[protocolpooltypes.StoreKey]),
+ app.AccountKeeper,
+ app.BankKeeper,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ )
+
+app.DistrKeeper = distrkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[distrtypes.StoreKey]),
+ app.AccountKeeper,
+ app.BankKeeper,
+ app.StakingKeeper,
+ authtypes.FeeCollectorName,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ distrkeeper.WithExternalCommunityPool(app.ProtocolPoolKeeper),
+ )
+
+app.SlashingKeeper = slashingkeeper.NewKeeper(
+ appCodec,
+ legacyAmino,
+ runtime.NewKVStoreService(keys[slashingtypes.StoreKey]),
+ app.StakingKeeper,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ )
+
+app.FeeGrantKeeper = feegrantkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[feegrant.StoreKey]),
+ app.AccountKeeper,
+ )
+
+ // register the staking hooks
+ // NOTE: stakingKeeper above is passed by reference, so that it will contain these hooks
+ app.StakingKeeper.SetHooks(
+ stakingtypes.NewMultiStakingHooks(
+ app.DistrKeeper.Hooks(),
+ app.SlashingKeeper.Hooks(),
+ ),
+ )
+
+app.AuthzKeeper = authzkeeper.NewKeeper(
+ runtime.NewKVStoreService(keys[authzkeeper.StoreKey]),
+ appCodec,
+ app.MsgServiceRouter(),
+ app.AccountKeeper,
+ )
+
+ // get skipUpgradeHeights from the app options
+ skipUpgradeHeights := map[int64]bool{
+}
+ for _, h := range cast.ToIntSlice(appOpts.Get(server.FlagUnsafeSkipUpgrades)) {
+ skipUpgradeHeights[int64(h)] = true
+}
+ homePath := cast.ToString(appOpts.Get(flags.FlagHome))
+ // set the governance module account as the authority for conducting upgrades
+ app.UpgradeKeeper = upgradekeeper.NewKeeper(
+ skipUpgradeHeights,
+ runtime.NewKVStoreService(keys[upgradetypes.StoreKey]),
+ appCodec,
+ homePath,
+ app.BaseApp,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ )
+
+ // Register the proposal types
+ // Deprecated: Avoid adding new handlers, instead use the new proposal flow
+ // by granting the governance module the right to execute the message.
+ // See: /sdk/v0.53/build/modules/gov#proposal-messages
+ govRouter := govv1beta1.NewRouter()
+
+govRouter.AddRoute(govtypes.RouterKey, govv1beta1.ProposalHandler)
+ govConfig := govtypes.DefaultConfig()
+ /*
+ Example of setting gov params:
+ govConfig.MaxMetadataLen = 10000
+ */
+ govKeeper := govkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[govtypes.StoreKey]),
+ app.AccountKeeper,
+ app.BankKeeper,
+ app.StakingKeeper,
+ app.DistrKeeper,
+ app.MsgServiceRouter(),
+ govConfig,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ // govkeeper.WithCustomCalculateVoteResultsAndVotingPowerFn(...), // Add if you want to use a custom vote calculation function.
+ )
+
+ // Set legacy router for backwards compatibility with gov v1beta1
+ govKeeper.SetLegacyRouter(govRouter)
+
+app.GovKeeper = *govKeeper.SetHooks(
+ govtypes.NewMultiGovHooks(
+ // register the governance hooks
+ ),
+ )
+
+ // create evidence keeper with router
+ evidenceKeeper := evidencekeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[evidencetypes.StoreKey]),
+ app.StakingKeeper,
+ app.SlashingKeeper,
+ app.AccountKeeper.AddressCodec(),
+ runtime.ProvideCometInfoService(),
+ )
+ // If evidence needs to be handled for the app, set routes in router here and seal
+ app.EvidenceKeeper = *evidenceKeeper
+
+ app.EpochsKeeper = epochskeeper.NewKeeper(
+ runtime.NewKVStoreService(keys[epochstypes.StoreKey]),
+ appCodec,
+ )
+
+app.EpochsKeeper.SetHooks(
+ epochstypes.NewMultiEpochHooks(
+ // insert epoch hooks receivers here
+ ),
+ )
+
+ /**** Module Options ****/
+
+ // NOTE: Any module instantiated in the module manager that is later modified
+ // must be passed by reference here.
+ app.ModuleManager = module.NewManager(
+ genutil.NewAppModule(
+ app.AccountKeeper, app.StakingKeeper, app,
+ txConfig,
+ ),
+ auth.NewAppModule(appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, nil),
+ vesting.NewAppModule(app.AccountKeeper, app.BankKeeper),
+ bank.NewAppModule(appCodec, app.BankKeeper, app.AccountKeeper, nil),
+ feegrantmodule.NewAppModule(appCodec, app.AccountKeeper, app.BankKeeper, app.FeeGrantKeeper, app.interfaceRegistry),
+ gov.NewAppModule(appCodec, &app.GovKeeper, app.AccountKeeper, app.BankKeeper, nil),
+ mint.NewAppModule(appCodec, app.MintKeeper, app.AccountKeeper, nil, nil),
+ slashing.NewAppModule(appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, nil, app.interfaceRegistry),
+ distr.NewAppModule(appCodec, app.DistrKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, nil),
+ staking.NewAppModule(appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper, nil),
+ upgrade.NewAppModule(app.UpgradeKeeper, app.AccountKeeper.AddressCodec()),
+ evidence.NewAppModule(app.EvidenceKeeper),
+ authzmodule.NewAppModule(appCodec, app.AuthzKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry),
+ consensus.NewAppModule(appCodec, app.ConsensusParamsKeeper),
+ epochs.NewAppModule(app.EpochsKeeper),
+ protocolpool.NewAppModule(app.ProtocolPoolKeeper, app.AccountKeeper, app.BankKeeper),
+ )
+
+ // BasicModuleManager defines the module BasicManager is in charge of setting up basic,
+ // non-dependent module elements, such as codec registration and genesis verification.
+ // By default it is composed of all the module from the module manager.
+ // Additionally, app module basics can be overwritten by passing them as argument.
+ app.BasicModuleManager = module.NewBasicManagerFromManager(
+ app.ModuleManager,
+ map[string]module.AppModuleBasic{
+ genutiltypes.ModuleName: genutil.NewAppModuleBasic(genutiltypes.DefaultMessageValidator),
+ govtypes.ModuleName: gov.NewAppModuleBasic(
+ []govclient.ProposalHandler{
+},
+ ),
+})
+
+app.BasicModuleManager.RegisterLegacyAminoCodec(legacyAmino)
+
+app.BasicModuleManager.RegisterInterfaces(interfaceRegistry)
+
+ // NOTE: upgrade module is required to be prioritized
+ app.ModuleManager.SetOrderPreBlockers(
+ upgradetypes.ModuleName,
+ authtypes.ModuleName,
+ )
+ // During begin block slashing happens after distr.BeginBlocker so that
+ // there is nothing left over in the validator fee pool, so as to keep the
+ // CanWithdrawInvariant invariant.
+ // NOTE: staking module is required if HistoricalEntries param > 0
+ app.ModuleManager.SetOrderBeginBlockers(
+ minttypes.ModuleName,
+ distrtypes.ModuleName,
+ protocolpooltypes.ModuleName,
+ slashingtypes.ModuleName,
+ evidencetypes.ModuleName,
+ stakingtypes.ModuleName,
+ genutiltypes.ModuleName,
+ authz.ModuleName,
+ epochstypes.ModuleName,
+ )
+
+app.ModuleManager.SetOrderEndBlockers(
+ govtypes.ModuleName,
+ stakingtypes.ModuleName,
+ genutiltypes.ModuleName,
+ feegrant.ModuleName,
+ protocolpooltypes.ModuleName,
+ )
+
+ // NOTE: The genutils module must occur after staking so that pools are
+ // properly initialized with tokens from genesis accounts.
+ // NOTE: The genutils module must also occur after auth so that it can access the params from auth.
+ genesisModuleOrder := []string{
+ authtypes.ModuleName,
+ banktypes.ModuleName,
+ distrtypes.ModuleName,
+ stakingtypes.ModuleName,
+ slashingtypes.ModuleName,
+ govtypes.ModuleName,
+ minttypes.ModuleName,
+ genutiltypes.ModuleName,
+ evidencetypes.ModuleName,
+ authz.ModuleName,
+ feegrant.ModuleName,
+ upgradetypes.ModuleName,
+ vestingtypes.ModuleName,
+ consensusparamtypes.ModuleName,
+ epochstypes.ModuleName,
+ protocolpooltypes.ModuleName,
+}
+ exportModuleOrder := []string{
+ consensusparamtypes.ModuleName,
+ authtypes.ModuleName,
+ protocolpooltypes.ModuleName, // Must be exported before bank
+ banktypes.ModuleName,
+ distrtypes.ModuleName,
+ stakingtypes.ModuleName,
+ slashingtypes.ModuleName,
+ govtypes.ModuleName,
+ minttypes.ModuleName,
+ genutiltypes.ModuleName,
+ evidencetypes.ModuleName,
+ authz.ModuleName,
+ feegrant.ModuleName,
+ upgradetypes.ModuleName,
+ vestingtypes.ModuleName,
+ epochstypes.ModuleName,
+}
+
+app.ModuleManager.SetOrderInitGenesis(genesisModuleOrder...)
+
+app.ModuleManager.SetOrderExportGenesis(exportModuleOrder...)
+
+ // Uncomment if you want to set a custom migration order here.
+ // app.ModuleManager.SetOrderMigrations(custom order)
+
+app.configurator = module.NewConfigurator(app.appCodec, app.MsgServiceRouter(), app.GRPCQueryRouter())
+
+err = app.ModuleManager.RegisterServices(app.configurator)
+ if err != nil {
+ panic(err)
+}
+
+ // RegisterUpgradeHandlers is used for registering any on-chain upgrades.
+ // Make sure it's called after `app.ModuleManager` and `app.configurator` are set.
+ app.RegisterUpgradeHandlers()
+
+autocliv1.RegisterQueryServer(app.GRPCQueryRouter(), runtimeservices.NewAutoCLIQueryService(app.ModuleManager.Modules))
+
+reflectionSvc, err := runtimeservices.NewReflectionService()
+ if err != nil {
+ panic(err)
+}
+
+reflectionv1.RegisterReflectionServiceServer(app.GRPCQueryRouter(), reflectionSvc)
+
+ // add test gRPC service for testing gRPC queries in isolation
+ testdata_pulsar.RegisterQueryServer(app.GRPCQueryRouter(), testdata_pulsar.QueryImpl{
+})
+
+ // create the simulation manager and define the order of the modules for deterministic simulations
+ //
+ // NOTE: this is not required apps that don't use the simulator for fuzz testing
+ // transactions
+ overrideModules := map[string]module.AppModuleSimulation{
+ authtypes.ModuleName: auth.NewAppModule(app.appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, nil),
+}
+
+app.sm = module.NewSimulationManagerFromAppModules(app.ModuleManager.Modules, overrideModules)
+
+app.sm.RegisterStoreDecoders()
+
+ // initialize stores
+ app.MountKVStores(keys)
+
+ // initialize BaseApp
+ app.SetInitChainer(app.InitChainer)
+
+app.SetPreBlocker(app.PreBlocker)
+
+app.SetBeginBlocker(app.BeginBlocker)
+
+app.SetEndBlocker(app.EndBlocker)
+
+app.setAnteHandler(txConfig)
+
+ // In v0.46, the SDK introduces _postHandlers_. PostHandlers are like
+ // antehandlers, but are run _after_ the `runMsgs` execution. They are also
+ // defined as a chain, and have the same signature as antehandlers.
+ //
+ // In baseapp, postHandlers are run in the same store branch as `runMsgs`,
+ // meaning that both `runMsgs` and `postHandler` state will be committed if
+ // both are successful, and both will be reverted if any of the two fails.
+ //
+ // The SDK exposes a default postHandlers chain
+ //
+ // Please note that changing any of the anteHandler or postHandler chain is
+ // likely to be a state-machine breaking change, which needs a coordinated
+ // upgrade.
+ app.setPostHandler()
+ if loadLatest {
+ if err := app.LoadLatestVersion(); err != nil {
+ panic(fmt.Errorf("error loading last version: %w", err))
+}
+
+}
+
+return app
+}
+
+// setAnteHandler constructs the ante handler chain and registers it on the app.
+func (app *SimApp) setAnteHandler(txConfig client.TxConfig) {
+    anteHandler, err := ante.NewAnteHandler(
+        ante.HandlerOptions{
+            AccountKeeper:   app.AccountKeeper,
+            BankKeeper:      app.BankKeeper,
+            SignModeHandler: txConfig.SignModeHandler(),
+            FeegrantKeeper:  app.FeeGrantKeeper,
+            SigGasConsumer:  ante.DefaultSigVerificationGasConsumer,
+            SigVerifyOptions: []ante.SigVerificationDecoratorOption{
+                // change below as needed.
+                ante.WithUnorderedTxGasCost(ante.DefaultUnorderedTxGasCost),
+                ante.WithMaxUnorderedTxTimeoutDuration(ante.DefaultMaxTimeoutDuration),
+            },
+        },
+    )
+    if err != nil {
+        panic(err)
+    }
+
+    // Set the AnteHandler for the app
+    app.SetAnteHandler(anteHandler)
+}
+
+// setPostHandler constructs the default post handler chain and registers it on the app.
+func (app *SimApp) setPostHandler() {
+    postHandler, err := posthandler.NewPostHandler(
+        posthandler.HandlerOptions{},
+    )
+    if err != nil {
+        panic(err)
+    }
+
+    app.SetPostHandler(postHandler)
+}
+
+// Name returns the name of the App
+func (app *SimApp) Name() string {
+    return app.BaseApp.Name()
+}
+
+// PreBlocker application updates every pre block
+func (app *SimApp) PreBlocker(ctx sdk.Context, _ *abci.RequestFinalizeBlock) (*sdk.ResponsePreBlock, error) {
+    return app.ModuleManager.PreBlock(ctx)
+}
+
+// BeginBlocker application updates every begin block
+func (app *SimApp) BeginBlocker(ctx sdk.Context) (sdk.BeginBlock, error) {
+    return app.ModuleManager.BeginBlock(ctx)
+}
+
+// EndBlocker application updates every end block
+func (app *SimApp) EndBlocker(ctx sdk.Context) (sdk.EndBlock, error) {
+    return app.ModuleManager.EndBlock(ctx)
+}
+
+// Configurator returns the app's module configurator.
+func (a *SimApp) Configurator() module.Configurator {
+    return a.configurator
+}
+
+// InitChainer application update at chain initialization
+func (app *SimApp) InitChainer(ctx sdk.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) {
+    var genesisState GenesisState
+    if err := json.Unmarshal(req.AppStateBytes, &genesisState); err != nil {
+        panic(err)
+    }
+
+    app.UpgradeKeeper.SetModuleVersionMap(ctx, app.ModuleManager.GetVersionMap())
+
+    return app.ModuleManager.InitGenesis(ctx, app.appCodec, genesisState)
+}
+
+// LoadHeight loads a particular height
+func (app *SimApp) LoadHeight(height int64) error {
+    return app.LoadVersion(height)
+}
+
+// LegacyAmino returns SimApp's amino codec.
+//
+// NOTE: This is solely to be used for testing purposes as it may be desirable
+// for modules to register their own custom testing types.
+func (app *SimApp) LegacyAmino() *codec.LegacyAmino {
+    return app.legacyAmino
+}
+
+// AppCodec returns SimApp's app codec.
+//
+// NOTE: This is solely to be used for testing purposes as it may be desirable
+// for modules to register their own custom testing types.
+func (app *SimApp) AppCodec() codec.Codec {
+    return app.appCodec
+}
+
+// InterfaceRegistry returns SimApp's InterfaceRegistry
+func (app *SimApp) InterfaceRegistry() types.InterfaceRegistry {
+    return app.interfaceRegistry
+}
+
+// TxConfig returns SimApp's TxConfig
+func (app *SimApp) TxConfig() client.TxConfig {
+    return app.txConfig
+}
+
+// AutoCliOpts returns the autocli options for the app.
+func (app *SimApp) AutoCliOpts() autocli.AppOptions {
+    modules := make(map[string]appmodule.AppModule)
+    for _, m := range app.ModuleManager.Modules {
+        if moduleWithName, ok := m.(module.HasName); ok {
+            moduleName := moduleWithName.Name()
+            if appModule, ok := moduleWithName.(appmodule.AppModule); ok {
+                modules[moduleName] = appModule
+            }
+        }
+    }
+
+    return autocli.AppOptions{
+        Modules:               modules,
+        ModuleOptions:         runtimeservices.ExtractAutoCLIOptions(app.ModuleManager.Modules),
+        AddressCodec:          authcodec.NewBech32Codec(sdk.GetConfig().GetBech32AccountAddrPrefix()),
+        ValidatorAddressCodec: authcodec.NewBech32Codec(sdk.GetConfig().GetBech32ValidatorAddrPrefix()),
+        ConsensusAddressCodec: authcodec.NewBech32Codec(sdk.GetConfig().GetBech32ConsensusAddrPrefix()),
+    }
+}
+
+// DefaultGenesis returns a default genesis from the registered AppModuleBasic's.
+func (a *SimApp) DefaultGenesis() map[string]json.RawMessage {
+    return a.BasicModuleManager.DefaultGenesis(a.appCodec)
+}
+
+// GetKey returns the KVStoreKey for the provided store key.
+//
+// NOTE: This is solely to be used for testing purposes.
+func (app *SimApp) GetKey(storeKey string) *storetypes.KVStoreKey {
+    return app.keys[storeKey]
+}
+
+// GetStoreKeys returns all the stored store keys.
+func (app *SimApp) GetStoreKeys() []storetypes.StoreKey {
+    keys := make([]storetypes.StoreKey, 0, len(app.keys))
+    for _, key := range app.keys {
+        keys = append(keys, key)
+    }
+
+    return keys
+}
+
+// SimulationManager implements the SimulationApp interface
+func (app *SimApp) SimulationManager() *module.SimulationManager {
+    return app.sm
+}
+
+// RegisterAPIRoutes registers all application module routes with the provided
+// API server.
+func (app *SimApp) RegisterAPIRoutes(apiSvr *api.Server, apiConfig config.APIConfig) {
+    clientCtx := apiSvr.ClientCtx
+    // Register new tx routes from grpc-gateway.
+    authtx.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter)
+
+    // Register new CometBFT queries routes from grpc-gateway.
+    cmtservice.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter)
+
+    // Register node gRPC service for grpc-gateway.
+    nodeservice.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter)
+
+    // Register grpc-gateway routes for all modules.
+    app.BasicModuleManager.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter)
+
+    // register swagger API from root so that other applications can override easily
+    if err := server.RegisterSwaggerAPI(apiSvr.ClientCtx, apiSvr.Router, apiConfig.Swagger); err != nil {
+        panic(err)
+    }
+}
+
+// RegisterTxService implements the Application.RegisterTxService method.
+func (app *SimApp) RegisterTxService(clientCtx client.Context) {
+    authtx.RegisterTxService(app.BaseApp.GRPCQueryRouter(), clientCtx, app.BaseApp.Simulate, app.interfaceRegistry)
+}
+
+// RegisterTendermintService implements the Application.RegisterTendermintService method.
+func (app *SimApp) RegisterTendermintService(clientCtx client.Context) {
+    cmtApp := server.NewCometABCIWrapper(app)
+
+    cmtservice.RegisterTendermintService(
+        clientCtx,
+        app.BaseApp.GRPCQueryRouter(),
+        app.interfaceRegistry,
+        cmtApp.Query,
+    )
+}
+
+// RegisterNodeService registers the node gRPC query service on the app's query router.
+func (app *SimApp) RegisterNodeService(clientCtx client.Context, cfg config.Config) {
+    nodeservice.RegisterNodeService(clientCtx, app.GRPCQueryRouter(), cfg)
+}
+
+// GetMaccPerms returns a copy of the module account permissions
+//
+// NOTE: This is solely to be used for testing purposes.
+func GetMaccPerms() map[string][]string {
+    return maps.Clone(maccPerms)
+}
+
+// BlockedAddresses returns all the app's blocked account addresses.
+func BlockedAddresses()
+
+map[string]bool {
+ modAccAddrs := make(map[string]bool)
+ for acc := range GetMaccPerms() {
+ modAccAddrs[authtypes.NewModuleAddress(acc).String()] = true
+}
+
+ // allow the following addresses to receive funds
+ delete(modAccAddrs, authtypes.NewModuleAddress(govtypes.ModuleName).String())
+
+return modAccAddrs
+}
+```
+
+Note that you may override some modules.
+This is useful if the existing module configuration in the `ModuleManager` should be different in the `SimulationManager`.
+
+Finally, the application should expose the `SimulationManager` via the following method defined in the `Runtime` interface:
+
+```go
+// SimulationManager implements the SimulationApp interface
+func (app *SimApp)
+
+SimulationManager() *module.SimulationManager {
+ return app.sm
+}
+```
+
+## Running Simulations
+
+To run the simulation, use the `simsx` runner.
+
+Call the following function from the `simsx` package to begin simulating with a default seed:
+
+```go expandable
+package simsx
+
+import (
+
+ "encoding/json"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ dbm "github.com/cosmos/cosmos-db"
+ "github.com/stretchr/testify/require"
+ "cosmossdk.io/log"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ servertypes "github.com/cosmos/cosmos-sdk/server/types"
+ simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ "github.com/cosmos/cosmos-sdk/x/simulation"
+ "github.com/cosmos/cosmos-sdk/x/simulation/client/cli"
+)
+
+const SimAppChainID = "simulation-app"
+
+// this list of seeds was imported from the original simulation runner: https://github.com/cosmos/tools/blob/v1.0.0/cmd/runsim/main.go#L32
+var defaultSeeds = []int64{
+ 1, 2, 4, 7,
+ 32, 123, 124, 582, 1893, 2989,
+ 3012, 4728, 37827, 981928, 87821, 891823782,
+ 989182, 89182391, 11, 22, 44, 77, 99, 2020,
+ 3232, 123123, 124124, 582582, 18931893,
+ 29892989, 30123012, 47284728, 7601778, 8090485,
+ 977367484, 491163361, 424254581, 673398983,
+}
+
+// SimStateFactory is a factory type that provides a convenient way to create a simulation state for testing.
+// It contains the following fields:
+// - Codec: a codec used for serializing other objects
+// - AppStateFn: a function that returns the app state JSON bytes and the genesis accounts
+// - BlockedAddr: a map of blocked addresses
+// - AccountSource: an interface for retrieving accounts
+// - BalanceSource: an interface for retrieving balance-related information
+type SimStateFactory struct {
+ Codec codec.Codec
+ AppStateFn simtypes.AppStateFn
+ BlockedAddr map[string]bool
+ AccountSource AccountSourceX
+ BalanceSource BalanceSource
+}
+
+// SimulationApp abstract app that is used by sims
+type SimulationApp interface {
+ runtime.AppI
+ SetNotSigverifyTx()
+
+GetBaseApp() *baseapp.BaseApp
+ TxConfig()
+
+client.TxConfig
+ Close()
+
+error
+}
+
+// Run is a helper function that runs a simulation test with the given parameters.
+// It calls the RunWithSeeds function with the default seeds and parameters.
+//
+// This is the entrypoint to run simulation tests that used to run with the runsim binary.
+func Run[T SimulationApp](
+ t *testing.T,
+ appFactory func(
+ logger log.Logger,
+ db dbm.DB,
+ traceStore io.Writer,
+ loadLatest bool,
+ appOpts servertypes.AppOptions,
+ baseAppOptions ...func(*baseapp.BaseApp),
+ )
+
+T,
+ setupStateFactory func(app T)
+
+SimStateFactory,
+ postRunActions ...func(t testing.TB, app TestInstance[T], accs []simtypes.Account),
+) {
+ t.Helper()
+
+RunWithSeeds(t, appFactory, setupStateFactory, defaultSeeds, nil, postRunActions...)
+}
+
+// RunWithSeeds is a helper function that runs a simulation test with the given parameters.
+// It iterates over the provided seeds and runs the simulation test for each seed in parallel.
+//
+// It sets up the environment, creates an instance of the simulation app,
+// calls the simulation.SimulateFromSeed function to run the simulation, and performs post-run actions for each seed.
+// The execution is deterministic and can be used for fuzz tests as well.
+//
+// The system under test is isolated for each run but unlike the old runsim command, there is no Process separation.
+// This means, global caches may be reused for example. This implementation builds upon the vanilla Go stdlib test framework.
+func RunWithSeeds[T SimulationApp](
+ t *testing.T,
+ appFactory func(
+ logger log.Logger,
+ db dbm.DB,
+ traceStore io.Writer,
+ loadLatest bool,
+ appOpts servertypes.AppOptions,
+ baseAppOptions ...func(*baseapp.BaseApp),
+ )
+
+T,
+ setupStateFactory func(app T)
+
+SimStateFactory,
+ seeds []int64,
+ fuzzSeed []byte,
+ postRunActions ...func(t testing.TB, app TestInstance[T], accs []simtypes.Account),
+) {
+ t.Helper()
+
+RunWithSeedsAndRandAcc(t, appFactory, setupStateFactory, seeds, fuzzSeed, simtypes.RandomAccounts, postRunActions...)
+}
+
+// RunWithSeedsAndRandAcc calls RunWithSeeds with randAccFn
+func RunWithSeedsAndRandAcc[T SimulationApp](
+ t *testing.T,
+ appFactory func(
+ logger log.Logger,
+ db dbm.DB,
+ traceStore io.Writer,
+ loadLatest bool,
+ appOpts servertypes.AppOptions,
+ baseAppOptions ...func(*baseapp.BaseApp),
+ )
+
+T,
+ setupStateFactory func(app T)
+
+SimStateFactory,
+ seeds []int64,
+ fuzzSeed []byte,
+ randAccFn simtypes.RandomAccountFn,
+ postRunActions ...func(t testing.TB, app TestInstance[T], accs []simtypes.Account),
+) {
+ t.Helper()
+ if deprecatedParams := cli.GetDeprecatedFlagUsed(); len(deprecatedParams) != 0 {
+ fmt.Printf("Warning: Deprecated flag are used: %s", strings.Join(deprecatedParams, ","))
+}
+ cfg := cli.NewConfigFromFlags()
+
+cfg.ChainID = SimAppChainID
+ for i := range seeds {
+ seed := seeds[i]
+ t.Run(fmt.Sprintf("seed: %d", seed), func(t *testing.T) {
+ t.Parallel()
+
+RunWithSeed(t, cfg, appFactory, setupStateFactory, seed, fuzzSeed, postRunActions...)
+})
+}
+}
+
+// RunWithSeed is a helper function that runs a simulation test with the given parameters.
+// It iterates over the provided seeds and runs the simulation test for each seed in parallel.
+//
+// It sets up the environment, creates an instance of the simulation app,
+// calls the simulation.SimulateFromSeed function to run the simulation, and performs post-run actions for the seed.
+// The execution is deterministic and can be used for fuzz tests as well.
+func RunWithSeed[T SimulationApp](
+ tb testing.TB,
+ cfg simtypes.Config,
+ appFactory func(logger log.Logger, db dbm.DB, traceStore io.Writer, loadLatest bool, appOpts servertypes.AppOptions, baseAppOptions ...func(*baseapp.BaseApp))
+
+T,
+ setupStateFactory func(app T)
+
+SimStateFactory,
+ seed int64,
+ fuzzSeed []byte,
+ postRunActions ...func(t testing.TB, app TestInstance[T], accs []simtypes.Account),
+) {
+ tb.Helper()
+
+RunWithSeedAndRandAcc(tb, cfg, appFactory, setupStateFactory, seed, fuzzSeed, simtypes.RandomAccounts, postRunActions...)
+}
+
+// RunWithSeedAndRandAcc calls RunWithSeed with randAccFn
+func RunWithSeedAndRandAcc[T SimulationApp](
+ tb testing.TB,
+ cfg simtypes.Config,
+ appFactory func(logger log.Logger, db dbm.DB, traceStore io.Writer, loadLatest bool, appOpts servertypes.AppOptions, baseAppOptions ...func(*baseapp.BaseApp))
+
+T,
+ setupStateFactory func(app T)
+
+SimStateFactory,
+ seed int64,
+ fuzzSeed []byte,
+ randAccFn simtypes.RandomAccountFn,
+ postRunActions ...func(t testing.TB, app TestInstance[T], accs []simtypes.Account),
+) {
+ tb.Helper()
+ // setup environment
+ tCfg := cfg.With(tb, seed, fuzzSeed)
+ testInstance := NewSimulationAppInstance(tb, tCfg, appFactory)
+
+var runLogger log.Logger
+ if cli.FlagVerboseValue {
+ runLogger = log.NewTestLogger(tb)
+}
+
+else {
+ runLogger = log.NewTestLoggerInfo(tb)
+}
+
+runLogger = runLogger.With("seed", tCfg.Seed)
+ app := testInstance.App
+ stateFactory := setupStateFactory(app)
+
+ops, reporter := prepareWeightedOps(app.SimulationManager(), stateFactory, tCfg, testInstance.App.TxConfig(), runLogger)
+
+simParams, accs, err := simulation.SimulateFromSeedX(
+ tb,
+ runLogger,
+ WriteToDebugLog(runLogger),
+ app.GetBaseApp(),
+ stateFactory.AppStateFn,
+ randAccFn,
+ ops,
+ stateFactory.BlockedAddr,
+ tCfg,
+ stateFactory.Codec,
+ testInstance.ExecLogWriter,
+ )
+
+require.NoError(tb, err)
+
+err = simtestutil.CheckExportSimulation(app, tCfg, simParams)
+
+require.NoError(tb, err)
+ if tCfg.Commit {
+ simtestutil.PrintStats(testInstance.DB)
+}
+ // not using tb.Log to always print the summary
+ fmt.Printf("+++ DONE (seed: %d): \n%s\n", seed, reporter.Summary().String())
+ for _, step := range postRunActions {
+ step(tb, testInstance, accs)
+}
+
+require.NoError(tb, app.Close())
+}
+
+type (
+ HasWeightedOperationsX interface {
+ WeightedOperationsX(weight WeightSource, reg Registry)
+}
+
+HasWeightedOperationsXWithProposals interface {
+ WeightedOperationsX(weights WeightSource, reg Registry, proposals WeightedProposalMsgIter,
+ legacyProposals []simtypes.WeightedProposalContent) //nolint: staticcheck // used for legacy proposal types
+}
+
+HasProposalMsgsX interface {
+ ProposalMsgsX(weights WeightSource, reg Registry)
+}
+)
+
+type (
+ HasLegacyWeightedOperations interface {
+ // WeightedOperations simulation operations (i.e msgs)
+
+with their respective weight
+ WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation
+}
+ // HasLegacyProposalMsgs defines the messages that can be used to simulate governance (v1)
+
+proposals
+ // Deprecated replaced by HasProposalMsgsX
+ HasLegacyProposalMsgs interface {
+ // ProposalMsgs msg functions used to simulate governance proposals
+ ProposalMsgs(simState module.SimulationState) []simtypes.WeightedProposalMsg
+}
+
+ // HasLegacyProposalContents defines the contents that can be used to simulate legacy governance (v1beta1)
+
+proposals
+ // Deprecated replaced by HasProposalMsgsX
+ HasLegacyProposalContents interface {
+ // ProposalContents content functions used to simulate governance proposals
+ ProposalContents(simState module.SimulationState) []simtypes.WeightedProposalContent //nolint:staticcheck // legacy v1beta1 governance
+}
+)
+
+// TestInstance is a generic type that represents an instance of a SimulationApp used for testing simulations.
+// It contains the following fields:
+// - App: The instance of the SimulationApp under test.
+// - DB: The LevelDB database for the simulation app.
+// - WorkDir: The temporary working directory for the simulation app.
+// - Cfg: The configuration flags for the simulator.
+// - AppLogger: The logger used for logging in the app during the simulation, with seed value attached.
+// - ExecLogWriter: Captures block and operation data coming from the simulation
+type TestInstance[T SimulationApp] struct {
+ App T
+ DB dbm.DB
+ WorkDir string
+ Cfg simtypes.Config
+ AppLogger log.Logger
+ ExecLogWriter simulation.LogWriter
+}
+
+// included to avoid cyclic dependency in testutils/sims
+func prepareWeightedOps(
+ sm *module.SimulationManager,
+ stateFact SimStateFactory,
+ config simtypes.Config,
+ txConfig client.TxConfig,
+ logger log.Logger,
+) (simulation.WeightedOperations, *BasicSimulationReporter) {
+ cdc := stateFact.Codec
+ simState := module.SimulationState{
+ AppParams: make(simtypes.AppParams),
+ Cdc: cdc,
+ TxConfig: txConfig,
+ BondDenom: sdk.DefaultBondDenom,
+}
+ if config.ParamsFile != "" {
+ bz, err := os.ReadFile(config.ParamsFile)
+ if err != nil {
+ panic(err)
+}
+
+err = json.Unmarshal(bz, &simState.AppParams)
+ if err != nil {
+ panic(err)
+}
+
+}
+ weights := ParamWeightSource(simState.AppParams)
+ reporter := NewBasicSimulationReporter()
+ pReg := make(UniqueTypeRegistry)
+ wContent := make([]simtypes.WeightedProposalContent, 0) //nolint:staticcheck // required for legacy type
+ legacyPReg := NewWeightedFactoryMethods()
+ // add gov proposals types
+ for _, m := range sm.Modules {
+ switch xm := m.(type) {
+ case HasProposalMsgsX:
+ xm.ProposalMsgsX(weights, pReg)
+ case HasLegacyProposalMsgs:
+ for _, p := range xm.ProposalMsgs(simState) {
+ weight := weights.Get(p.AppParamsKey(), safeUint(p.DefaultWeight()))
+
+legacyPReg.Add(weight, legacyToMsgFactoryAdapter(p.MsgSimulatorFn()))
+}
+ case HasLegacyProposalContents:
+ wContent = append(wContent, xm.ProposalContents(simState)...)
+}
+
+}
+ oReg := NewSimsMsgRegistryAdapter(
+ reporter,
+ stateFact.AccountSource,
+ stateFact.BalanceSource,
+ txConfig,
+ logger,
+ )
+ wOps := make([]simtypes.WeightedOperation, 0, len(sm.Modules))
+ for _, m := range sm.Modules {
+ // add operations
+ switch xm := m.(type) {
+ case HasWeightedOperationsX:
+ xm.WeightedOperationsX(weights, oReg)
+ case HasWeightedOperationsXWithProposals:
+ xm.WeightedOperationsX(weights, oReg, AppendIterators(legacyPReg.Iterator(), pReg.Iterator()), wContent)
+ case HasLegacyWeightedOperations:
+ wOps = append(wOps, xm.WeightedOperations(simState)...)
+}
+
+}
+
+return append(wOps, Collect(oReg.items, func(a weightedOperation)
+
+simtypes.WeightedOperation {
+ return a
+})...), reporter
+}
+
+func safeUint(p int)
+
+uint32 {
+ if p < 0 || p > math.MaxUint32 {
+ panic(fmt.Sprintf("can not cast to uint32: %d", p))
+}
+
+return uint32(p)
+}
+
+// NewSimulationAppInstance initializes and returns a TestInstance of a SimulationApp.
+// The function takes a testing.T instance, a simtypes.Config instance, and an appFactory function as parameters.
+// It creates a temporary working directory and a LevelDB database for the simulation app.
+// The function then initializes a logger based on the verbosity flag and sets the logger's seed to the test configuration's seed.
+// The database is closed and cleaned up on test completion.
+func NewSimulationAppInstance[T SimulationApp](
+ tb testing.TB,
+ tCfg simtypes.Config,
+ appFactory func(logger log.Logger, db dbm.DB, traceStore io.Writer, loadLatest bool, appOpts servertypes.AppOptions, baseAppOptions ...func(*baseapp.BaseApp))
+
+T,
+)
+
+TestInstance[T] {
+ tb.Helper()
+ workDir := tb.TempDir()
+
+require.NoError(tb, os.Mkdir(filepath.Join(workDir, "data"), 0o750))
+ dbDir := filepath.Join(workDir, "leveldb-app-sim")
+
+var logger log.Logger
+ if cli.FlagVerboseValue {
+ logger = log.NewTestLogger(tb)
+}
+
+else {
+ logger = log.NewTestLoggerError(tb)
+}
+
+logger = logger.With("seed", tCfg.Seed)
+
+db, err := dbm.NewDB("Simulation", dbm.BackendType(tCfg.DBBackend), dbDir)
+
+require.NoError(tb, err)
+
+tb.Cleanup(func() {
+ _ = db.Close() // ensure db is closed
+})
+ appOptions := make(simtestutil.AppOptionsMap)
+
+appOptions[flags.FlagHome] = workDir
+ opts := []func(*baseapp.BaseApp) {
+ baseapp.SetChainID(tCfg.ChainID)
+}
+ if tCfg.FauxMerkle {
+ opts = append(opts, FauxMerkleModeOpt)
+}
+ app := appFactory(logger, db, nil, true, appOptions, opts...)
+ if !cli.FlagSigverifyTxValue {
+ app.SetNotSigverifyTx()
+}
+
+return TestInstance[T]{
+ App: app,
+ DB: db,
+ WorkDir: workDir,
+ Cfg: tCfg,
+ AppLogger: logger,
+ ExecLogWriter: &simulation.StandardLogWriter{
+ Seed: tCfg.Seed
+},
+}
+}
+
+var _ io.Writer = writerFn(nil)
+
+type writerFn func(p []byte) (n int, err error)
+
+func (w writerFn)
+
+Write(p []byte) (n int, err error) {
+ return w(p)
+}
+
+// WriteToDebugLog is an adapter to io.Writer interface
+func WriteToDebugLog(logger log.Logger)
+
+io.Writer {
+ return writerFn(func(p []byte) (n int, err error) {
+ logger.Debug(string(p))
+
+return len(p), nil
+})
+}
+
+// FauxMerkleModeOpt returns a BaseApp option to use a dbStoreAdapter instead of
+// an IAVLStore for faster simulation speed.
+func FauxMerkleModeOpt(bapp *baseapp.BaseApp) {
+ bapp.SetFauxMerkleMode()
+}
+```
+
+If a custom seed is desired, tests should use `RunWithSeed`:
+
+```go expandable
+package simsx
+
+import (
+
+ "encoding/json"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ dbm "github.com/cosmos/cosmos-db"
+ "github.com/stretchr/testify/require"
+ "cosmossdk.io/log"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ servertypes "github.com/cosmos/cosmos-sdk/server/types"
+ simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ "github.com/cosmos/cosmos-sdk/x/simulation"
+ "github.com/cosmos/cosmos-sdk/x/simulation/client/cli"
+)
+
+const SimAppChainID = "simulation-app"
+
+// this list of seeds was imported from the original simulation runner: https://github.com/cosmos/tools/blob/v1.0.0/cmd/runsim/main.go#L32
+var defaultSeeds = []int64{
+ 1, 2, 4, 7,
+ 32, 123, 124, 582, 1893, 2989,
+ 3012, 4728, 37827, 981928, 87821, 891823782,
+ 989182, 89182391, 11, 22, 44, 77, 99, 2020,
+ 3232, 123123, 124124, 582582, 18931893,
+ 29892989, 30123012, 47284728, 7601778, 8090485,
+ 977367484, 491163361, 424254581, 673398983,
+}
+
+// SimStateFactory is a factory type that provides a convenient way to create a simulation state for testing.
+// It contains the following fields:
+// - Codec: a codec used for serializing other objects
+// - AppStateFn: a function that returns the app state JSON bytes and the genesis accounts
+// - BlockedAddr: a map of blocked addresses
+// - AccountSource: an interface for retrieving accounts
+// - BalanceSource: an interface for retrieving balance-related information
+type SimStateFactory struct {
+ Codec codec.Codec
+ AppStateFn simtypes.AppStateFn
+ BlockedAddr map[string]bool
+ AccountSource AccountSourceX
+ BalanceSource BalanceSource
+}
+
+// SimulationApp abstract app that is used by sims
+type SimulationApp interface {
+ runtime.AppI
+ SetNotSigverifyTx()
+
+GetBaseApp() *baseapp.BaseApp
+ TxConfig()
+
+client.TxConfig
+ Close()
+
+error
+}
+
+// Run is a helper function that runs a simulation test with the given parameters.
+// It calls the RunWithSeeds function with the default seeds and parameters.
+//
+// This is the entrypoint to run simulation tests that used to run with the runsim binary.
+func Run[T SimulationApp](
+ t *testing.T,
+ appFactory func(
+ logger log.Logger,
+ db dbm.DB,
+ traceStore io.Writer,
+ loadLatest bool,
+ appOpts servertypes.AppOptions,
+ baseAppOptions ...func(*baseapp.BaseApp),
+ )
+
+T,
+ setupStateFactory func(app T)
+
+SimStateFactory,
+ postRunActions ...func(t testing.TB, app TestInstance[T], accs []simtypes.Account),
+) {
+ t.Helper()
+
+RunWithSeeds(t, appFactory, setupStateFactory, defaultSeeds, nil, postRunActions...)
+}
+
+// RunWithSeeds is a helper function that runs a simulation test with the given parameters.
+// It iterates over the provided seeds and runs the simulation test for each seed in parallel.
+//
+// It sets up the environment, creates an instance of the simulation app,
+// calls the simulation.SimulateFromSeed function to run the simulation, and performs post-run actions for each seed.
+// The execution is deterministic and can be used for fuzz tests as well.
+//
+// The system under test is isolated for each run but unlike the old runsim command, there is no Process separation.
+// This means, global caches may be reused for example. This implementation builds upon the vanilla Go stdlib test framework.
+func RunWithSeeds[T SimulationApp](
+ t *testing.T,
+ appFactory func(
+ logger log.Logger,
+ db dbm.DB,
+ traceStore io.Writer,
+ loadLatest bool,
+ appOpts servertypes.AppOptions,
+ baseAppOptions ...func(*baseapp.BaseApp),
+ )
+
+T,
+ setupStateFactory func(app T)
+
+SimStateFactory,
+ seeds []int64,
+ fuzzSeed []byte,
+ postRunActions ...func(t testing.TB, app TestInstance[T], accs []simtypes.Account),
+) {
+ t.Helper()
+
+RunWithSeedsAndRandAcc(t, appFactory, setupStateFactory, seeds, fuzzSeed, simtypes.RandomAccounts, postRunActions...)
+}
+
+// RunWithSeedsAndRandAcc calls RunWithSeeds with randAccFn
+func RunWithSeedsAndRandAcc[T SimulationApp](
+ t *testing.T,
+ appFactory func(
+ logger log.Logger,
+ db dbm.DB,
+ traceStore io.Writer,
+ loadLatest bool,
+ appOpts servertypes.AppOptions,
+ baseAppOptions ...func(*baseapp.BaseApp),
+ )
+
+T,
+ setupStateFactory func(app T)
+
+SimStateFactory,
+ seeds []int64,
+ fuzzSeed []byte,
+ randAccFn simtypes.RandomAccountFn,
+ postRunActions ...func(t testing.TB, app TestInstance[T], accs []simtypes.Account),
+) {
+ t.Helper()
+ if deprecatedParams := cli.GetDeprecatedFlagUsed(); len(deprecatedParams) != 0 {
+ fmt.Printf("Warning: Deprecated flag are used: %s", strings.Join(deprecatedParams, ","))
+}
+ cfg := cli.NewConfigFromFlags()
+
+cfg.ChainID = SimAppChainID
+ for i := range seeds {
+ seed := seeds[i]
+ t.Run(fmt.Sprintf("seed: %d", seed), func(t *testing.T) {
+ t.Parallel()
+
+RunWithSeed(t, cfg, appFactory, setupStateFactory, seed, fuzzSeed, postRunActions...)
+})
+}
+}
+
+// RunWithSeed is a helper function that runs a simulation test with the given parameters.
+// It iterates over the provided seeds and runs the simulation test for each seed in parallel.
+//
+// It sets up the environment, creates an instance of the simulation app,
+// calls the simulation.SimulateFromSeed function to run the simulation, and performs post-run actions for the seed.
+// The execution is deterministic and can be used for fuzz tests as well.
+func RunWithSeed[T SimulationApp](
+ tb testing.TB,
+ cfg simtypes.Config,
+ appFactory func(logger log.Logger, db dbm.DB, traceStore io.Writer, loadLatest bool, appOpts servertypes.AppOptions, baseAppOptions ...func(*baseapp.BaseApp))
+
+T,
+ setupStateFactory func(app T)
+
+SimStateFactory,
+ seed int64,
+ fuzzSeed []byte,
+ postRunActions ...func(t testing.TB, app TestInstance[T], accs []simtypes.Account),
+) {
+ tb.Helper()
+
+RunWithSeedAndRandAcc(tb, cfg, appFactory, setupStateFactory, seed, fuzzSeed, simtypes.RandomAccounts, postRunActions...)
+}
+
+// RunWithSeedAndRandAcc calls RunWithSeed with randAccFn
+func RunWithSeedAndRandAcc[T SimulationApp](
+ tb testing.TB,
+ cfg simtypes.Config,
+ appFactory func(logger log.Logger, db dbm.DB, traceStore io.Writer, loadLatest bool, appOpts servertypes.AppOptions, baseAppOptions ...func(*baseapp.BaseApp))
+
+T,
+ setupStateFactory func(app T)
+
+SimStateFactory,
+ seed int64,
+ fuzzSeed []byte,
+ randAccFn simtypes.RandomAccountFn,
+ postRunActions ...func(t testing.TB, app TestInstance[T], accs []simtypes.Account),
+) {
+ tb.Helper()
+ // setup environment
+ tCfg := cfg.With(tb, seed, fuzzSeed)
+ testInstance := NewSimulationAppInstance(tb, tCfg, appFactory)
+
+var runLogger log.Logger
+ if cli.FlagVerboseValue {
+ runLogger = log.NewTestLogger(tb)
+}
+
+else {
+ runLogger = log.NewTestLoggerInfo(tb)
+}
+
+runLogger = runLogger.With("seed", tCfg.Seed)
+ app := testInstance.App
+ stateFactory := setupStateFactory(app)
+
+ops, reporter := prepareWeightedOps(app.SimulationManager(), stateFactory, tCfg, testInstance.App.TxConfig(), runLogger)
+
+simParams, accs, err := simulation.SimulateFromSeedX(
+ tb,
+ runLogger,
+ WriteToDebugLog(runLogger),
+ app.GetBaseApp(),
+ stateFactory.AppStateFn,
+ randAccFn,
+ ops,
+ stateFactory.BlockedAddr,
+ tCfg,
+ stateFactory.Codec,
+ testInstance.ExecLogWriter,
+ )
+
+require.NoError(tb, err)
+
+err = simtestutil.CheckExportSimulation(app, tCfg, simParams)
+
+require.NoError(tb, err)
+ if tCfg.Commit {
+ simtestutil.PrintStats(testInstance.DB)
+}
+ // not using tb.Log to always print the summary
+ fmt.Printf("+++ DONE (seed: %d): \n%s\n", seed, reporter.Summary().String())
+ for _, step := range postRunActions {
+ step(tb, testInstance, accs)
+}
+
+require.NoError(tb, app.Close())
+}
+
+type (
+ HasWeightedOperationsX interface {
+ WeightedOperationsX(weight WeightSource, reg Registry)
+}
+
+HasWeightedOperationsXWithProposals interface {
+ WeightedOperationsX(weights WeightSource, reg Registry, proposals WeightedProposalMsgIter,
+ legacyProposals []simtypes.WeightedProposalContent) //nolint: staticcheck // used for legacy proposal types
+}
+
+HasProposalMsgsX interface {
+ ProposalMsgsX(weights WeightSource, reg Registry)
+}
+)
+
+type (
+ HasLegacyWeightedOperations interface {
+ // WeightedOperations simulation operations (i.e msgs)
+
+with their respective weight
+ WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation
+}
+ // HasLegacyProposalMsgs defines the messages that can be used to simulate governance (v1)
+
+proposals
+ // Deprecated replaced by HasProposalMsgsX
+ HasLegacyProposalMsgs interface {
+ // ProposalMsgs msg functions used to simulate governance proposals
+ ProposalMsgs(simState module.SimulationState) []simtypes.WeightedProposalMsg
+}
+
+ // HasLegacyProposalContents defines the contents that can be used to simulate legacy governance (v1beta1)
+
+proposals
+ // Deprecated replaced by HasProposalMsgsX
+ HasLegacyProposalContents interface {
+ // ProposalContents content functions used to simulate governance proposals
+ ProposalContents(simState module.SimulationState) []simtypes.WeightedProposalContent //nolint:staticcheck // legacy v1beta1 governance
+}
+)
+
+// TestInstance is a generic type that represents an instance of a SimulationApp used for testing simulations.
+// It contains the following fields:
+// - App: The instance of the SimulationApp under test.
+// - DB: The LevelDB database for the simulation app.
+// - WorkDir: The temporary working directory for the simulation app.
+// - Cfg: The configuration flags for the simulator.
+// - AppLogger: The logger used for logging in the app during the simulation, with seed value attached.
+// - ExecLogWriter: Captures block and operation data coming from the simulation
+type TestInstance[T SimulationApp] struct {
+ App T
+ DB dbm.DB
+ WorkDir string
+ Cfg simtypes.Config
+ AppLogger log.Logger
+ ExecLogWriter simulation.LogWriter
+}
+
+// included to avoid cyclic dependency in testutils/sims
+func prepareWeightedOps(
+ sm *module.SimulationManager,
+ stateFact SimStateFactory,
+ config simtypes.Config,
+ txConfig client.TxConfig,
+ logger log.Logger,
+) (simulation.WeightedOperations, *BasicSimulationReporter) {
+ cdc := stateFact.Codec
+ simState := module.SimulationState{
+ AppParams: make(simtypes.AppParams),
+ Cdc: cdc,
+ TxConfig: txConfig,
+ BondDenom: sdk.DefaultBondDenom,
+}
+ if config.ParamsFile != "" {
+ bz, err := os.ReadFile(config.ParamsFile)
+ if err != nil {
+ panic(err)
+}
+
+err = json.Unmarshal(bz, &simState.AppParams)
+ if err != nil {
+ panic(err)
+}
+
+}
+ weights := ParamWeightSource(simState.AppParams)
+ reporter := NewBasicSimulationReporter()
+ pReg := make(UniqueTypeRegistry)
+ wContent := make([]simtypes.WeightedProposalContent, 0) //nolint:staticcheck // required for legacy type
+ legacyPReg := NewWeightedFactoryMethods()
+ // add gov proposals types
+ for _, m := range sm.Modules {
+ switch xm := m.(type) {
+ case HasProposalMsgsX:
+ xm.ProposalMsgsX(weights, pReg)
+ case HasLegacyProposalMsgs:
+ for _, p := range xm.ProposalMsgs(simState) {
+ weight := weights.Get(p.AppParamsKey(), safeUint(p.DefaultWeight()))
+
+legacyPReg.Add(weight, legacyToMsgFactoryAdapter(p.MsgSimulatorFn()))
+}
+ case HasLegacyProposalContents:
+ wContent = append(wContent, xm.ProposalContents(simState)...)
+}
+
+}
+ oReg := NewSimsMsgRegistryAdapter(
+ reporter,
+ stateFact.AccountSource,
+ stateFact.BalanceSource,
+ txConfig,
+ logger,
+ )
+ wOps := make([]simtypes.WeightedOperation, 0, len(sm.Modules))
+ for _, m := range sm.Modules {
+ // add operations
+ switch xm := m.(type) {
+ case HasWeightedOperationsX:
+ xm.WeightedOperationsX(weights, oReg)
+ case HasWeightedOperationsXWithProposals:
+ xm.WeightedOperationsX(weights, oReg, AppendIterators(legacyPReg.Iterator(), pReg.Iterator()), wContent)
+ case HasLegacyWeightedOperations:
+ wOps = append(wOps, xm.WeightedOperations(simState)...)
+}
+
+}
+
+return append(wOps, Collect(oReg.items, func(a weightedOperation)
+
+simtypes.WeightedOperation {
+ return a
+})...), reporter
+}
+
+func safeUint(p int)
+
+uint32 {
+ if p < 0 || p > math.MaxUint32 {
+ panic(fmt.Sprintf("can not cast to uint32: %d", p))
+}
+
+return uint32(p)
+}
+
+// NewSimulationAppInstance initializes and returns a TestInstance of a SimulationApp.
+// The function takes a testing.T instance, a simtypes.Config instance, and an appFactory function as parameters.
+// It creates a temporary working directory and a LevelDB database for the simulation app.
+// The function then initializes a logger based on the verbosity flag and sets the logger's seed to the test configuration's seed.
+// The database is closed and cleaned up on test completion.
+func NewSimulationAppInstance[T SimulationApp](
+ tb testing.TB,
+ tCfg simtypes.Config,
+ appFactory func(logger log.Logger, db dbm.DB, traceStore io.Writer, loadLatest bool, appOpts servertypes.AppOptions, baseAppOptions ...func(*baseapp.BaseApp))
+
+T,
+)
+
+TestInstance[T] {
+ tb.Helper()
+ workDir := tb.TempDir()
+
+require.NoError(tb, os.Mkdir(filepath.Join(workDir, "data"), 0o750))
+ dbDir := filepath.Join(workDir, "leveldb-app-sim")
+
+var logger log.Logger
+ if cli.FlagVerboseValue {
+ logger = log.NewTestLogger(tb)
+}
+
+else {
+ logger = log.NewTestLoggerError(tb)
+}
+
+logger = logger.With("seed", tCfg.Seed)
+
+db, err := dbm.NewDB("Simulation", dbm.BackendType(tCfg.DBBackend), dbDir)
+
+require.NoError(tb, err)
+
+tb.Cleanup(func() {
+ _ = db.Close() // ensure db is closed
+})
+ appOptions := make(simtestutil.AppOptionsMap)
+
+appOptions[flags.FlagHome] = workDir
+ opts := []func(*baseapp.BaseApp) {
+ baseapp.SetChainID(tCfg.ChainID)
+}
+ if tCfg.FauxMerkle {
+ opts = append(opts, FauxMerkleModeOpt)
+}
+ app := appFactory(logger, db, nil, true, appOptions, opts...)
+ if !cli.FlagSigverifyTxValue {
+ app.SetNotSigverifyTx()
+}
+
+return TestInstance[T]{
+ App: app,
+ DB: db,
+ WorkDir: workDir,
+ Cfg: tCfg,
+ AppLogger: logger,
+ ExecLogWriter: &simulation.StandardLogWriter{
+ Seed: tCfg.Seed
+},
+}
+}
+
+var _ io.Writer = writerFn(nil)
+
+type writerFn func(p []byte) (n int, err error)
+
+func (w writerFn)
+
+Write(p []byte) (n int, err error) {
+ return w(p)
+}
+
+// WriteToDebugLog is an adapter to io.Writer interface
+func WriteToDebugLog(logger log.Logger)
+
+io.Writer {
+ return writerFn(func(p []byte) (n int, err error) {
+ logger.Debug(string(p))
+
+return len(p), nil
+})
+}
+
+// FauxMerkleModeOpt returns a BaseApp option to use a dbStoreAdapter instead of
+// an IAVLStore for faster simulation speed.
+func FauxMerkleModeOpt(bapp *baseapp.BaseApp) {
+ bapp.SetFauxMerkleMode()
+}
+```
+
+These functions should be called in tests (e.g., app\_test.go, app\_sim\_test.go).
+
+Example:
+
+```go expandable
+//go:build sims
+
+package simapp
+
+import (
+
+ "encoding/binary"
+ "encoding/json"
+ "flag"
+ "io"
+ "math/rand"
+ "strings"
+ "sync"
+ "testing"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ dbm "github.com/cosmos/cosmos-db"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "cosmossdk.io/log"
+ "cosmossdk.io/store"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ servertypes "github.com/cosmos/cosmos-sdk/server/types"
+ simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims"
+ sims "github.com/cosmos/cosmos-sdk/testutil/simsx"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ authzkeeper "github.com/cosmos/cosmos-sdk/x/authz/keeper"
+ "github.com/cosmos/cosmos-sdk/x/feegrant"
+ "github.com/cosmos/cosmos-sdk/x/simulation"
+ simcli "github.com/cosmos/cosmos-sdk/x/simulation/client/cli"
+ slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types"
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+)
+
+var FlagEnableStreamingValue bool
+
+// Get flags every time the simulator is run
+func init() {
+ simcli.GetSimulatorFlags()
+
+flag.BoolVar(&FlagEnableStreamingValue, "EnableStreaming", false, "Enable streaming service")
+}
+
+// interBlockCacheOpt returns a BaseApp option function that sets the persistent
+// inter-block write-through cache.
+func interBlockCacheOpt()
+
+func(*baseapp.BaseApp) {
+ return baseapp.SetInterBlockCache(store.NewCommitKVStoreCacheManager())
+}
+
+func TestFullAppSimulation(t *testing.T) {
+ sims.Run(t, NewSimApp, setupStateFactory)
+}
+
+func setupStateFactory(app *SimApp)
+
+sims.SimStateFactory {
+ return sims.SimStateFactory{
+ Codec: app.AppCodec(),
+ AppStateFn: simtestutil.AppStateFn(app.AppCodec(), app.SimulationManager(), app.DefaultGenesis()),
+ BlockedAddr: BlockedAddresses(),
+ AccountSource: app.AccountKeeper,
+ BalanceSource: app.BankKeeper,
+}
+}
+
+var (
+ exportAllModules = []string{
+}
+
+exportWithValidatorSet = []string{
+}
+)
+
+func TestAppImportExport(t *testing.T) {
+ sims.Run(t, NewSimApp, setupStateFactory, func(tb testing.TB, ti sims.TestInstance[*SimApp], accs []simtypes.Account) {
+ tb.Helper()
+ app := ti.App
+ tb.Log("exporting genesis...\n")
+
+exported, err := app.ExportAppStateAndValidators(false, exportWithValidatorSet, exportAllModules)
+
+require.NoError(tb, err)
+
+tb.Log("importing genesis...\n")
+ newTestInstance := sims.NewSimulationAppInstance(tb, ti.Cfg, NewSimApp)
+ newApp := newTestInstance.App
+ var genesisState GenesisState
+ require.NoError(tb, json.Unmarshal(exported.AppState, &genesisState))
+ ctxB := newApp.NewContextLegacy(true, cmtproto.Header{
+ Height: app.LastBlockHeight()
+})
+ _, err = newApp.ModuleManager.InitGenesis(ctxB, newApp.appCodec, genesisState)
+ if IsEmptyValidatorSetErr(err) {
+ tb.Skip("Skipping simulation as all validators have been unbonded")
+
+return
+}
+
+require.NoError(tb, err)
+
+err = newApp.StoreConsensusParams(ctxB, exported.ConsensusParams)
+
+require.NoError(tb, err)
+
+tb.Log("comparing stores...")
+ // skip certain prefixes
+ skipPrefixes := map[string][][]byte{
+ stakingtypes.StoreKey: {
+ stakingtypes.UnbondingQueueKey, stakingtypes.RedelegationQueueKey, stakingtypes.ValidatorQueueKey,
+ stakingtypes.HistoricalInfoKey, stakingtypes.UnbondingIDKey, stakingtypes.UnbondingIndexKey,
+ stakingtypes.UnbondingTypeKey,
+ stakingtypes.ValidatorUpdatesKey, // todo (Alex): double check why there is a diff with test-sim-import-export
+},
+ authzkeeper.StoreKey: {
+ authzkeeper.GrantQueuePrefix
+},
+ feegrant.StoreKey: {
+ feegrant.FeeAllowanceQueueKeyPrefix
+},
+ slashingtypes.StoreKey: {
+ slashingtypes.ValidatorMissedBlockBitmapKeyPrefix
+},
+}
+
+AssertEqualStores(tb, app, newApp, app.SimulationManager().StoreDecoders, skipPrefixes)
+})
+}
+
+// Scenario:
+//
+// Start a fresh node and run n blocks, export state
+// set up a new node instance, Init chain from exported genesis
+// run new instance for n blocks
+func TestAppSimulationAfterImport(t *testing.T) {
+ sims.Run(t, NewSimApp, setupStateFactory, func(tb testing.TB, ti sims.TestInstance[*SimApp], accs []simtypes.Account) {
+ tb.Helper()
+ app := ti.App
+ tb.Log("exporting genesis...\n")
+
+exported, err := app.ExportAppStateAndValidators(false, exportWithValidatorSet, exportAllModules)
+
+require.NoError(tb, err)
+
+tb.Log("importing genesis...\n")
+ newTestInstance := sims.NewSimulationAppInstance(tb, ti.Cfg, NewSimApp)
+ newApp := newTestInstance.App
+ _, err = newApp.InitChain(&abci.RequestInitChain{
+ AppStateBytes: exported.AppState,
+ ChainId: sims.SimAppChainID,
+})
+ if IsEmptyValidatorSetErr(err) {
+ tb.Skip("Skipping simulation as all validators have been unbonded")
+
+return
+}
+
+require.NoError(tb, err)
+ newStateFactory := setupStateFactory(newApp)
+ _, _, err = simulation.SimulateFromSeedX(
+ tb,
+ newTestInstance.AppLogger,
+ sims.WriteToDebugLog(newTestInstance.AppLogger),
+ newApp.BaseApp,
+ newStateFactory.AppStateFn,
+ simtypes.RandomAccounts,
+ simtestutil.BuildSimulationOperations(newApp, newApp.AppCodec(), newTestInstance.Cfg, newApp.TxConfig()),
+ newStateFactory.BlockedAddr,
+ newTestInstance.Cfg,
+ newStateFactory.Codec,
+ ti.ExecLogWriter,
+ )
+
+require.NoError(tb, err)
+})
+}
+
+func IsEmptyValidatorSetErr(err error)
+
+bool {
+ return err != nil && strings.Contains(err.Error(), "validator set is empty after InitGenesis")
+}
+
+func TestAppStateDeterminism(t *testing.T) {
+ const numTimesToRunPerSeed = 3
+ var seeds []int64
+ if s := simcli.NewConfigFromFlags().Seed; s != simcli.DefaultSeedValue {
+ // We will be overriding the random seed and just run a single simulation on the provided seed value
+ for j := 0; j < numTimesToRunPerSeed; j++ { // multiple rounds
+ seeds = append(seeds, s)
+}
+
+}
+
+else {
+ // setup with 3 random seeds
+ for i := 0; i < 3; i++ {
+ seed := rand.Int63()
+ for j := 0; j < numTimesToRunPerSeed; j++ { // multiple rounds
+ seeds = append(seeds, seed)
+}
+
+}
+
+}
+ // overwrite default app config
+ interBlockCachingAppFactory := func(logger log.Logger, db dbm.DB, traceStore io.Writer, loadLatest bool, appOpts servertypes.AppOptions, baseAppOptions ...func(*baseapp.BaseApp)) *SimApp {
+ if FlagEnableStreamingValue {
+ m := map[string]any{
+ "streaming.abci.keys": []string{"*"
+},
+ "streaming.abci.plugin": "abci_v1",
+ "streaming.abci.stop-node-on-err": true,
+}
+ others := appOpts
+ appOpts = appOptionsFn(func(k string)
+
+any {
+ if v, ok := m[k]; ok {
+ return v
+}
+
+return others.Get(k)
+})
+}
+
+return NewSimApp(logger, db, nil, true, appOpts, append(baseAppOptions, interBlockCacheOpt())...)
+}
+
+var mx sync.Mutex
+ appHashResults := make(map[int64][][]byte)
+ appSimLogger := make(map[int64][]simulation.LogWriter)
+ captureAndCheckHash := func(tb testing.TB, ti sims.TestInstance[*SimApp], _ []simtypes.Account) {
+ tb.Helper()
+
+seed, appHash := ti.Cfg.Seed, ti.App.LastCommitID().Hash
+ mx.Lock()
+
+otherHashes, execWriters := appHashResults[seed], appSimLogger[seed]
+ if len(otherHashes) < numTimesToRunPerSeed-1 {
+ appHashResults[seed], appSimLogger[seed] = append(otherHashes, appHash), append(execWriters, ti.ExecLogWriter)
+}
+
+else { // cleanup
+ delete(appHashResults, seed)
+
+delete(appSimLogger, seed)
+}
+
+mx.Unlock()
+
+var failNow bool
+ // and check that all app hashes per seed are equal for each iteration
+ for i := 0; i < len(otherHashes); i++ {
+ if !assert.Equal(tb, otherHashes[i], appHash) {
+ execWriters[i].PrintLogs()
+
+failNow = true
+}
+
+}
+ if failNow {
+ ti.ExecLogWriter.PrintLogs()
+
+tb.Fatalf("non-determinism in seed %d", seed)
+}
+
+}
+ // run simulations
+ sims.RunWithSeeds(t, interBlockCachingAppFactory, setupStateFactory, seeds, []byte{
+}, captureAndCheckHash)
+}
+
+type ComparableStoreApp interface {
+ LastBlockHeight()
+
+int64
+ NewContextLegacy(isCheckTx bool, header cmtproto.Header)
+
+sdk.Context
+ GetKey(storeKey string) *storetypes.KVStoreKey
+ GetStoreKeys() []storetypes.StoreKey
+}
+
+func AssertEqualStores(
+ tb testing.TB,
+ app, newApp ComparableStoreApp,
+ storeDecoders simtypes.StoreDecoderRegistry,
+ skipPrefixes map[string][][]byte,
+) {
+ tb.Helper()
+ ctxA := app.NewContextLegacy(true, cmtproto.Header{
+ Height: app.LastBlockHeight()
+})
+ ctxB := newApp.NewContextLegacy(true, cmtproto.Header{
+ Height: app.LastBlockHeight()
+})
+ storeKeys := app.GetStoreKeys()
+
+require.NotEmpty(tb, storeKeys)
+ for _, appKeyA := range storeKeys {
+ // only compare kvstores
+ if _, ok := appKeyA.(*storetypes.KVStoreKey); !ok {
+ continue
+}
+ keyName := appKeyA.Name()
+ appKeyB := newApp.GetKey(keyName)
+ storeA := ctxA.KVStore(appKeyA)
+ storeB := ctxB.KVStore(appKeyB)
+
+failedKVAs, failedKVBs := simtestutil.DiffKVStores(storeA, storeB, skipPrefixes[keyName])
+
+require.Equal(tb, len(failedKVAs), len(failedKVBs), "unequal sets of key-values to compare %s, key stores %s and %s", keyName, appKeyA, appKeyB)
+
+tb.Logf("compared %d different key/value pairs between %s and %s\n", len(failedKVAs), appKeyA, appKeyB)
+ if !assert.Equal(tb, 0, len(failedKVAs), simtestutil.GetSimulationLog(keyName, storeDecoders, failedKVAs, failedKVBs)) {
+ for _, v := range failedKVAs {
+ tb.Logf("store mismatch: %q\n", v)
+}
+
+tb.FailNow()
+}
+
+}
+}
+
+// appOptionsFn is an adapter to the single method AppOptions interface
+type appOptionsFn func(string)
+
+any
+
+func (f appOptionsFn)
+
+Get(k string)
+
+any {
+ return f(k)
+}
+
+// FauxMerkleModeOpt returns a BaseApp option to use a dbStoreAdapter instead of
+// an IAVLStore for faster simulation speed.
+func FauxMerkleModeOpt(bapp *baseapp.BaseApp) {
+ bapp.SetFauxMerkleMode()
+}
+
+func FuzzFullAppSimulation(f *testing.F) {
+ f.Fuzz(func(t *testing.T, rawSeed []byte) {
+ if len(rawSeed) < 8 {
+ t.Skip()
+
+return
+}
+
+sims.RunWithSeeds(
+ t,
+ NewSimApp,
+ setupStateFactory,
+ []int64{
+ int64(binary.BigEndian.Uint64(rawSeed))
+},
+ rawSeed[8:],
+ )
+})
+}
+```
diff --git a/sdk/next/build/building-modules/structure.mdx b/sdk/next/build/building-modules/structure.mdx
new file mode 100644
index 000000000..ab8ec25ad
--- /dev/null
+++ b/sdk/next/build/building-modules/structure.mdx
@@ -0,0 +1,94 @@
+---
+title: Recommended Folder Structure
+---
+
+
+**Synopsis**
+This document outlines the recommended structure of Cosmos SDK modules. These ideas are meant to be applied as suggestions. Application developers are encouraged to improve upon and contribute to module structure and development design.
+
+
+## Structure
+
+A typical Cosmos SDK module can be structured as follows:
+
+```shell
+proto
+└── {project_name}
+ └── {module_name}
+ └── {proto_version}
+ ├── {module_name}.proto
+ ├── event.proto
+ ├── genesis.proto
+ ├── query.proto
+ └── tx.proto
+```
+
+* `{module_name}.proto`: The module's common message type definitions.
+* `event.proto`: The module's message type definitions related to events.
+* `genesis.proto`: The module's message type definitions related to genesis state.
+* `query.proto`: The module's Query service and related message type definitions.
+* `tx.proto`: The module's Msg service and related message type definitions.
+
+```shell expandable
+x/{module_name}
+├── client
+│ ├── cli
+│ │ ├── query.go
+│ │ └── tx.go
+│ └── testutil
+│ ├── cli_test.go
+│ └── suite.go
+├── exported
+│ └── exported.go
+├── keeper
+│ ├── genesis.go
+│ ├── grpc_query.go
+│ ├── hooks.go
+│ ├── invariants.go
+│ ├── keeper.go
+│ ├── keys.go
+│ ├── msg_server.go
+│ └── querier.go
+├── module
+│ └── module.go
+│ └── abci.go
+│ └── autocli.go
+├── simulation
+│ ├── decoder.go
+│ ├── genesis.go
+│ ├── operations.go
+│ └── params.go
+├── {module_name}.pb.go
+├── codec.go
+├── errors.go
+├── events.go
+├── events.pb.go
+├── expected_keepers.go
+├── genesis.go
+├── genesis.pb.go
+├── keys.go
+├── msgs.go
+├── params.go
+├── query.pb.go
+├── tx.pb.go
+└── README.md
+```
+
+* `client/`: The module's CLI client functionality implementation and the module's CLI testing suite.
+* `exported/`: The module's exported types - typically interface types. If a module relies on keepers from another module, it is expected to receive the keepers as interface contracts through the `expected_keepers.go` file (see below) in order to avoid a direct dependency on the module implementing the keepers. However, these interface contracts can define methods that operate on and/or return types that are specific to the module that is implementing the keepers and this is where `exported/` comes into play. The interface types that are defined in `exported/` use canonical types, allowing for the module to receive the keepers as interface contracts through the `expected_keepers.go` file. This pattern allows for code to remain DRY and also alleviates import cycle chaos.
+* `keeper/`: The module's `Keeper` and `MsgServer` implementation.
+* `module/`: The module's `AppModule` and `AppModuleBasic` implementation.
+ * `abci.go`: The module's `BeginBlocker` and `EndBlocker` implementations (this file is only required if `BeginBlocker` and/or `EndBlocker` need to be defined).
+ * `autocli.go`: The module [autocli](/sdk/v0.53/learn/advanced/autocli) options.
+* `simulation/`: The module's [simulation](/sdk/v0.53/build/building-modules/simulator) package defines functions used by the blockchain simulator application (`simapp`).
+* `README.md`: The module's specification documents outlining important concepts, state storage structure, and message and event type definitions. Learn how to write module specs in the [spec guidelines](/sdk/v0.50/build/spec/SPEC_MODULE).
+* The root directory includes type definitions for messages, events, and genesis state, including the type definitions generated by Protocol Buffers.
+ * `codec.go`: The module's registry methods for interface types.
+ * `errors.go`: The module's sentinel errors.
+ * `events.go`: The module's event types and constructors.
+ * `expected_keepers.go`: The module's [expected keeper](/sdk/v0.53/build/building-modules/keeper#type-definition) interfaces.
+ * `genesis.go`: The module's genesis state methods and helper functions.
+ * `keys.go`: The module's store keys and associated helper functions.
+ * `msgs.go`: The module's message type definitions and associated methods.
+ * `params.go`: The module's parameter type definitions and associated methods.
+ * `*.pb.go`: The module's type definitions generated by Protocol Buffers (as defined in the respective `*.proto` files above).
diff --git a/sdk/next/build/building-modules/testing.mdx b/sdk/next/build/building-modules/testing.mdx
new file mode 100644
index 000000000..cfb64b19a
--- /dev/null
+++ b/sdk/next/build/building-modules/testing.mdx
@@ -0,0 +1,2922 @@
+---
+title: Testing
+---
+
+The Cosmos SDK contains different types of [tests](https://martinfowler.com/articles/practical-test-pyramid.html).
+These tests have different goals and are used at different stages of the development cycle.
+As a general rule, use tests at all stages of the development cycle.
+As a chain developer, it is advised to test your application and modules in a similar way to the SDK.
+
+The rationale behind testing can be found in [ADR-59](/sdk/v0.53/build/architecture/adr-059-test-scopes).
+
+## Unit Tests
+
+Unit tests are the lowest test category of the [test pyramid](https://martinfowler.com/articles/practical-test-pyramid.html).
+All packages and modules should have unit test coverage. Modules should have their dependencies mocked: this means mocking keepers.
+
+The SDK uses `mockgen` to generate mocks for keepers:
+
+```bash expandable
+#!/usr/bin/env bash
+
+mockgen_cmd="mockgen"
+$mockgen_cmd -source=baseapp/abci_utils.go -package mock -destination baseapp/testutil/mock/mocks.go
+$mockgen_cmd -source=client/account_retriever.go -package mock -destination testutil/mock/account_retriever.go
+$mockgen_cmd -package mock -destination store/mock/cosmos_cosmos_db_DB.go github.com/cosmos/cosmos-db DB
+$mockgen_cmd -source=types/module/module.go -package mock -destination testutil/mock/types_module_module.go
+$mockgen_cmd -source=types/module/mock_appmodule_test.go -package mock -destination testutil/mock/types_mock_appmodule.go
+$mockgen_cmd -source=types/invariant.go -package mock -destination testutil/mock/types_invariant.go
+$mockgen_cmd -package mock -destination testutil/mock/grpc_server.go github.com/cosmos/gogoproto/grpc Server
+$mockgen_cmd -package mock -destination testutil/mock/logger.go cosmossdk.io/log Logger
+$mockgen_cmd -source=x/nft/expected_keepers.go -package testutil -destination x/nft/testutil/expected_keepers_mocks.go
+$mockgen_cmd -source=x/feegrant/expected_keepers.go -package testutil -destination x/feegrant/testutil/expected_keepers_mocks.go
+$mockgen_cmd -source=x/mint/types/expected_keepers.go -package testutil -destination x/mint/testutil/expected_keepers_mocks.go
+$mockgen_cmd -source=x/params/proposal_handler_test.go -package testutil -destination x/params/testutil/staking_keeper_mock.go
+$mockgen_cmd -source=x/auth/tx/config/expected_keepers.go -package testutil -destination x/auth/tx/testutil/expected_keepers_mocks.go
+$mockgen_cmd -source=x/auth/types/expected_keepers.go -package testutil -destination x/auth/testutil/expected_keepers_mocks.go
+$mockgen_cmd -source=x/auth/ante/expected_keepers.go -package testutil -destination x/auth/ante/testutil/expected_keepers_mocks.go
+$mockgen_cmd -source=x/authz/expected_keepers.go -package testutil -destination x/authz/testutil/expected_keepers_mocks.go
+$mockgen_cmd -source=x/bank/types/expected_keepers.go -package testutil -destination x/bank/testutil/expected_keepers_mocks.go
+$mockgen_cmd -source=x/group/testutil/expected_keepers.go -package testutil -destination x/group/testutil/expected_keepers_mocks.go
+$mockgen_cmd -source=x/evidence/types/expected_keepers.go -package testutil -destination x/evidence/testutil/expected_keepers_mocks.go
+$mockgen_cmd -source=x/distribution/types/expected_keepers.go -package testutil -destination x/distribution/testutil/expected_keepers_mocks.go
+$mockgen_cmd -source=x/slashing/types/expected_keepers.go -package testutil -destination x/slashing/testutil/expected_keepers_mocks.go
+$mockgen_cmd -source=x/genutil/types/expected_keepers.go -package testutil -destination x/genutil/testutil/expected_keepers_mocks.go
+$mockgen_cmd -source=x/gov/testutil/expected_keepers.go -package testutil -destination x/gov/testutil/expected_keepers_mocks.go
+$mockgen_cmd -source=x/staking/types/expected_keepers.go -package testutil -destination x/staking/testutil/expected_keepers_mocks.go
+$mockgen_cmd -source=x/auth/vesting/types/expected_keepers.go -package testutil -destination x/auth/vesting/testutil/expected_keepers_mocks.go
+$mockgen_cmd -source=x/protocolpool/types/expected_keepers.go -package testutil -destination x/protocolpool/testutil/expected_keepers_mocks.go
+```
+
+You can read more about mockgen [here](https://go.uber.org/mock).
+
+### Example
+
+As an example, we will walk through the [keeper tests](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0/x/gov/keeper/keeper_test.go) of the `x/gov` module.
+
+The `x/gov` module has a `Keeper` type, which requires a few external dependencies (i.e., imports outside `x/gov`) to work properly.
+
+```go expandable
+package keeper
+
+import (
+
+ "context"
+ "fmt"
+ "time"
+ "cosmossdk.io/collections"
+ corestoretypes "cosmossdk.io/core/store"
+ "cosmossdk.io/log"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/gov/types"
+ v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1"
+ "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1"
+)
+
+// Keeper defines the governance module Keeper
+type Keeper struct {
+ authKeeper types.AccountKeeper
+ bankKeeper types.BankKeeper
+ distrKeeper types.DistributionKeeper
+
+ // The reference to the DelegationSet and ValidatorSet to get information about validators and delegators
+ sk types.StakingKeeper
+
+ // GovHooks
+ hooks types.GovHooks
+
+ // The (unexposed)
+
+keys used to access the stores from the Context.
+ storeService corestoretypes.KVStoreService
+
+ // The codec for binary encoding/decoding.
+ cdc codec.Codec
+
+ // Legacy Proposal router
+ legacyRouter v1beta1.Router
+
+ // Msg server router
+ router baseapp.MessageRouter
+
+ config types.Config
+
+ calculateVoteResultsAndVotingPowerFn CalculateVoteResultsAndVotingPowerFn
+
+ // the address capable of executing a MsgUpdateParams message. Typically, this
+ // should be the x/gov module account.
+ authority string
+
+ Schema collections.Schema
+ Constitution collections.Item[string]
+ Params collections.Item[v1.Params]
+ Deposits collections.Map[collections.Pair[uint64, sdk.AccAddress], v1.Deposit]
+ Votes collections.Map[collections.Pair[uint64, sdk.AccAddress], v1.Vote]
+ ProposalID collections.Sequence
+ Proposals collections.Map[uint64, v1.Proposal]
+ ActiveProposalsQueue collections.Map[collections.Pair[time.Time, uint64], uint64] // TODO(tip): this should be simplified and go into an index.
+ InactiveProposalsQueue collections.Map[collections.Pair[time.Time, uint64], uint64] // TODO(tip): this should be simplified and go into an index.
+ VotingPeriodProposals collections.Map[uint64, []byte] // TODO(tip): this could be a keyset or index.
+}
+
+type InitOption func(*Keeper)
+
+// WithCustomCalculateVoteResultsAndVotingPowerFn is an optional input to set a custom CalculateVoteResultsAndVotingPowerFn.
+// If this function is not provided, the default function is used.
+func WithCustomCalculateVoteResultsAndVotingPowerFn(calculateVoteResultsAndVotingPowerFn CalculateVoteResultsAndVotingPowerFn)
+
+InitOption {
+ return func(k *Keeper) {
+ if calculateVoteResultsAndVotingPowerFn == nil {
+ panic("calculateVoteResultsAndVotingPowerFn cannot be nil")
+}
+
+k.calculateVoteResultsAndVotingPowerFn = calculateVoteResultsAndVotingPowerFn
+}
+}
+
+// GetAuthority returns the x/gov module's authority.
+func (k Keeper)
+
+GetAuthority()
+
+string {
+ return k.authority
+}
+
+// NewKeeper returns a governance keeper. It handles:
+// - submitting governance proposals
+// - depositing funds into proposals, and activating upon sufficient funds being deposited
+// - users voting on proposals, with weight proportional to stake in the system
+// - and tallying the result of the vote.
+//
+// CONTRACT: the parameter Subspace must have the param key table already initialized
+func NewKeeper(
+ cdc codec.Codec, storeService corestoretypes.KVStoreService, authKeeper types.AccountKeeper,
+ bankKeeper types.BankKeeper, sk types.StakingKeeper, distrKeeper types.DistributionKeeper,
+ router baseapp.MessageRouter, config types.Config, authority string, initOptions ...InitOption,
+) *Keeper {
+ // ensure governance module account is set
+ if addr := authKeeper.GetModuleAddress(types.ModuleName); addr == nil {
+ panic(fmt.Sprintf("%s module account has not been set", types.ModuleName))
+}
+ if _, err := authKeeper.AddressCodec().StringToBytes(authority); err != nil {
+ panic(fmt.Sprintf("invalid authority address: %s", authority))
+}
+
+ // If MaxMetadataLen not set by app developer, set to default value.
+ if config.MaxMetadataLen == 0 {
+ config.MaxMetadataLen = types.DefaultConfig().MaxMetadataLen
+}
+ sb := collections.NewSchemaBuilder(storeService)
+ k := &Keeper{
+ storeService: storeService,
+ authKeeper: authKeeper,
+ bankKeeper: bankKeeper,
+ distrKeeper: distrKeeper,
+ sk: sk,
+ cdc: cdc,
+ router: router,
+ config: config,
+ calculateVoteResultsAndVotingPowerFn: defaultCalculateVoteResultsAndVotingPower,
+ authority: authority,
+ Constitution: collections.NewItem(sb, types.ConstitutionKey, "constitution", collections.StringValue),
+ Params: collections.NewItem(sb, types.ParamsKey, "params", codec.CollValue[v1.Params](cdc)),
+ Deposits: collections.NewMap(sb, types.DepositsKeyPrefix, "deposits", collections.PairKeyCodec(collections.Uint64Key, sdk.LengthPrefixedAddressKey(sdk.AccAddressKey)), codec.CollValue[v1.Deposit](cdc)), // nolint: staticcheck // sdk.LengthPrefixedAddressKey is needed to retain state compatibility
+ Votes: collections.NewMap(sb, types.VotesKeyPrefix, "votes", collections.PairKeyCodec(collections.Uint64Key, sdk.LengthPrefixedAddressKey(sdk.AccAddressKey)), codec.CollValue[v1.Vote](cdc)), // nolint: staticcheck // sdk.LengthPrefixedAddressKey is needed to retain state compatibility
+ ProposalID: collections.NewSequence(sb, types.ProposalIDKey, "proposal_id"),
+ Proposals: collections.NewMap(sb, types.ProposalsKeyPrefix, "proposals", collections.Uint64Key, codec.CollValue[v1.Proposal](cdc)),
+ ActiveProposalsQueue: collections.NewMap(sb, types.ActiveProposalQueuePrefix, "active_proposals_queue", collections.PairKeyCodec(sdk.TimeKey, collections.Uint64Key), collections.Uint64Value), // sdk.TimeKey is needed to retain state compatibility
+ InactiveProposalsQueue: collections.NewMap(sb, types.InactiveProposalQueuePrefix, "inactive_proposals_queue", collections.PairKeyCodec(sdk.TimeKey, collections.Uint64Key), collections.Uint64Value), // sdk.TimeKey is needed to retain state compatibility
+ VotingPeriodProposals: collections.NewMap(sb, types.VotingPeriodProposalKeyPrefix, "voting_period_proposals", collections.Uint64Key, collections.BytesValue),
+}
+ for _, opt := range initOptions {
+ opt(k)
+}
+
+schema, err := sb.Build()
+ if err != nil {
+ panic(err)
+}
+
+k.Schema = schema
+ return k
+}
+
+// Hooks gets the hooks for governance *Keeper {
+ func (k *Keeper)
+
+Hooks()
+
+types.GovHooks {
+ if k.hooks == nil {
+ // return a no-op implementation if no hooks are set
+ return types.MultiGovHooks{
+}
+
+}
+
+return k.hooks
+}
+
+// SetHooks sets the hooks for governance
+func (k *Keeper)
+
+SetHooks(gh types.GovHooks) *Keeper {
+ if k.hooks != nil {
+ panic("cannot set governance hooks twice")
+}
+
+k.hooks = gh
+
+ return k
+}
+
+// SetLegacyRouter sets the legacy router for governance
+func (k *Keeper)
+
+SetLegacyRouter(router v1beta1.Router) {
+ // It is vital to seal the governance proposal router here as to not allow
+ // further handlers to be registered after the keeper is created since this
+ // could create invalid or non-deterministic behavior.
+ router.Seal()
+
+k.legacyRouter = router
+}
+
+// Logger returns a module-specific logger.
+func (k Keeper)
+
+Logger(ctx context.Context)
+
+log.Logger {
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+
+return sdkCtx.Logger().With("module", "x/"+types.ModuleName)
+}
+
+// Router returns the gov keeper's router
+func (k Keeper)
+
+Router()
+
+baseapp.MessageRouter {
+ return k.router
+}
+
+// LegacyRouter returns the gov keeper's legacy router
+func (k Keeper)
+
+LegacyRouter()
+
+v1beta1.Router {
+ return k.legacyRouter
+}
+
+// GetGovernanceAccount returns the governance ModuleAccount
+func (k Keeper)
+
+GetGovernanceAccount(ctx context.Context)
+
+sdk.ModuleAccountI {
+ return k.authKeeper.GetModuleAccount(ctx, types.ModuleName)
+}
+
+// ModuleAccountAddress returns gov module account address
+func (k Keeper)
+
+ModuleAccountAddress()
+
+sdk.AccAddress {
+ return k.authKeeper.GetModuleAddress(types.ModuleName)
+}
+
+// assertMetadataLength returns an error if given metadata length
+// is greater than a pre-defined MaxMetadataLen.
+func (k Keeper)
+
+assertMetadataLength(metadata string)
+
+error {
+ if metadata != "" && uint64(len(metadata)) > k.config.MaxMetadataLen {
+ return types.ErrMetadataTooLong.Wrapf("got metadata with length %d", len(metadata))
+}
+
+return nil
+}
+
+// assertSummaryLength returns an error if given summary length
+// is greater than a pre-defined 40*MaxMetadataLen.
+func (k Keeper)
+
+assertSummaryLength(summary string)
+
+error {
+ if summary != "" && uint64(len(summary)) > 40*k.config.MaxMetadataLen {
+ return types.ErrSummaryTooLong.Wrapf("got summary with length %d", len(summary))
+}
+
+return nil
+}
+```
+
+In order to only test `x/gov`, we mock the [expected keepers](/sdk/v0.53/build/building-modules/keeper#type-definition) and instantiate the `Keeper` with the mocked dependencies. Note that we may need to configure the mocked dependencies to return the expected values:
+
+```go expandable
+package keeper_test
+
+import (
+
+ "fmt"
+ "testing"
+
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ cmttime "github.com/cometbft/cometbft/types/time"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/mock/gomock"
+ "cosmossdk.io/math"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/codec/address"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ "github.com/cosmos/cosmos-sdk/testutil"
+ "github.com/cosmos/cosmos-sdk/testutil/testdata"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ moduletestutil "github.com/cosmos/cosmos-sdk/types/module/testutil"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ disttypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
+ "github.com/cosmos/cosmos-sdk/x/gov/keeper"
+ govtestutil "github.com/cosmos/cosmos-sdk/x/gov/testutil"
+ "github.com/cosmos/cosmos-sdk/x/gov/types"
+ v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1"
+ "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1"
+ minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
+)
+
+var (
+ _, _, addr = testdata.KeyTestPubAddr()
+
+govAcct = authtypes.NewModuleAddress(types.ModuleName)
+
+distAcct = authtypes.NewModuleAddress(disttypes.ModuleName)
+
+TestProposal = getTestProposal()
+)
+
+// getTestProposal creates and returns a test proposal message.
+func getTestProposal() []sdk.Msg {
+ legacyProposalMsg, err := v1.NewLegacyContent(v1beta1.NewTextProposal("Title", "description"), authtypes.NewModuleAddress(types.ModuleName).String())
+ if err != nil {
+ panic(err)
+}
+
+return []sdk.Msg{
+ banktypes.NewMsgSend(govAcct, addr, sdk.NewCoins(sdk.NewCoin("stake", math.NewInt(1000)))),
+ legacyProposalMsg,
+}
+}
+
+// setupGovKeeper creates a govKeeper as well as all its dependencies.
+func setupGovKeeper(t *testing.T) (
+ *keeper.Keeper,
+ *govtestutil.MockAccountKeeper,
+ *govtestutil.MockBankKeeper,
+ *govtestutil.MockStakingKeeper,
+ *govtestutil.MockDistributionKeeper,
+ moduletestutil.TestEncodingConfig,
+ sdk.Context,
+) {
+ t.Helper()
+ key := storetypes.NewKVStoreKey(types.StoreKey)
+ storeService := runtime.NewKVStoreService(key)
+ testCtx := testutil.DefaultContextWithDB(t, key, storetypes.NewTransientStoreKey("transient_test"))
+ ctx := testCtx.Ctx.WithBlockHeader(cmtproto.Header{
+ Time: cmttime.Now()
+})
+ encCfg := moduletestutil.MakeTestEncodingConfig()
+
+v1.RegisterInterfaces(encCfg.InterfaceRegistry)
+
+v1beta1.RegisterInterfaces(encCfg.InterfaceRegistry)
+
+banktypes.RegisterInterfaces(encCfg.InterfaceRegistry)
+
+ // Create MsgServiceRouter, but don't populate it before creating the gov
+ // keeper.
+ msr := baseapp.NewMsgServiceRouter()
+
+ // gomock initializations
+ ctrl := gomock.NewController(t)
+ acctKeeper := govtestutil.NewMockAccountKeeper(ctrl)
+ bankKeeper := govtestutil.NewMockBankKeeper(ctrl)
+ stakingKeeper := govtestutil.NewMockStakingKeeper(ctrl)
+ distributionKeeper := govtestutil.NewMockDistributionKeeper(ctrl)
+
+acctKeeper.EXPECT().GetModuleAddress(types.ModuleName).Return(govAcct).AnyTimes()
+
+acctKeeper.EXPECT().GetModuleAddress(disttypes.ModuleName).Return(distAcct).AnyTimes()
+
+acctKeeper.EXPECT().GetModuleAccount(gomock.Any(), types.ModuleName).Return(authtypes.NewEmptyModuleAccount(types.ModuleName)).AnyTimes()
+
+acctKeeper.EXPECT().AddressCodec().Return(address.NewBech32Codec("cosmos")).AnyTimes()
+
+trackMockBalances(bankKeeper, distributionKeeper)
+
+stakingKeeper.EXPECT().TokensFromConsensusPower(ctx, gomock.Any()).DoAndReturn(func(ctx sdk.Context, power int64)
+
+math.Int {
+ return sdk.TokensFromConsensusPower(power, math.NewIntFromUint64(1000000))
+}).AnyTimes()
+
+stakingKeeper.EXPECT().BondDenom(ctx).Return("stake", nil).AnyTimes()
+
+stakingKeeper.EXPECT().IterateBondedValidatorsByPower(gomock.Any(), gomock.Any()).AnyTimes()
+
+stakingKeeper.EXPECT().IterateDelegations(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
+
+stakingKeeper.EXPECT().TotalBondedTokens(gomock.Any()).Return(math.NewInt(10000000), nil).AnyTimes()
+
+distributionKeeper.EXPECT().FundCommunityPool(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
+
+ // Gov keeper initializations
+ govKeeper := keeper.NewKeeper(encCfg.Codec, storeService, acctKeeper, bankKeeper, stakingKeeper, distributionKeeper, msr, types.DefaultConfig(), govAcct.String())
+
+require.NoError(t, govKeeper.ProposalID.Set(ctx, 1))
+ govRouter := v1beta1.NewRouter() // Also register legacy gov handlers to test them too.
+ govRouter.AddRoute(types.RouterKey, v1beta1.ProposalHandler)
+
+govKeeper.SetLegacyRouter(govRouter)
+ err := govKeeper.Params.Set(ctx, v1.DefaultParams())
+
+require.NoError(t, err)
+
+err = govKeeper.Constitution.Set(ctx, "constitution")
+
+require.NoError(t, err)
+
+ // Register all handlers for the MsgServiceRouter.
+ msr.SetInterfaceRegistry(encCfg.InterfaceRegistry)
+
+v1.RegisterMsgServer(msr, keeper.NewMsgServerImpl(govKeeper))
+
+banktypes.RegisterMsgServer(msr, nil) // Nil is fine here as long as we never execute the proposal's Msgs.
+
+ return govKeeper, acctKeeper, bankKeeper, stakingKeeper, distributionKeeper, encCfg, ctx
+}
+
+// trackMockBalances sets up expected calls on the Mock BankKeeper, and also
+// locally tracks accounts balances (not modules balances).
+func trackMockBalances(bankKeeper *govtestutil.MockBankKeeper, distributionKeeper *govtestutil.MockDistributionKeeper) {
+ balances := make(map[string]sdk.Coins)
+
+balances[distAcct.String()] = sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, math.NewInt(0)))
+
+ // We don't track module account balances.
+ bankKeeper.EXPECT().MintCoins(gomock.Any(), minttypes.ModuleName, gomock.Any()).AnyTimes()
+
+bankKeeper.EXPECT().BurnCoins(gomock.Any(), types.ModuleName, gomock.Any()).AnyTimes()
+
+bankKeeper.EXPECT().SendCoinsFromModuleToModule(gomock.Any(), minttypes.ModuleName, types.ModuleName, gomock.Any()).AnyTimes()
+
+ // But we do track normal account balances.
+ bankKeeper.EXPECT().SendCoinsFromAccountToModule(gomock.Any(), gomock.Any(), types.ModuleName, gomock.Any()).DoAndReturn(func(_ sdk.Context, sender sdk.AccAddress, _ string, coins sdk.Coins)
+
+error {
+ newBalance, negative := balances[sender.String()].SafeSub(coins...)
+ if negative {
+ return fmt.Errorf("not enough balance")
+}
+
+balances[sender.String()] = newBalance
+ return nil
+}).AnyTimes()
+
+bankKeeper.EXPECT().SendCoinsFromModuleToAccount(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(_ sdk.Context, module string, rcpt sdk.AccAddress, coins sdk.Coins)
+
+error {
+ balances[rcpt.String()] = balances[rcpt.String()].Add(coins...)
+
+return nil
+}).AnyTimes()
+
+bankKeeper.EXPECT().GetAllBalances(gomock.Any(), gomock.Any()).DoAndReturn(func(_ sdk.Context, addr sdk.AccAddress)
+
+sdk.Coins {
+ return balances[addr.String()]
+}).AnyTimes()
+
+bankKeeper.EXPECT().GetBalance(gomock.Any(), gomock.Any(), sdk.DefaultBondDenom).DoAndReturn(func(_ sdk.Context, addr sdk.AccAddress, _ string)
+
+sdk.Coin {
+ balances := balances[addr.String()]
+ for _, balance := range balances {
+ if balance.Denom == sdk.DefaultBondDenom {
+ return balance
+}
+
+}
+
+return sdk.NewCoin(sdk.DefaultBondDenom, math.NewInt(0))
+}).AnyTimes()
+
+distributionKeeper.EXPECT().FundCommunityPool(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(_ sdk.Context, coins sdk.Coins, sender sdk.AccAddress)
+
+error {
+ // sender balance
+ newBalance, negative := balances[sender.String()].SafeSub(coins...)
+ if negative {
+ return fmt.Errorf("not enough balance")
+}
+
+balances[sender.String()] = newBalance
+ // receiver balance
+ balances[distAcct.String()] = balances[distAcct.String()].Add(coins...)
+
+return nil
+}).AnyTimes()
+}
+```
+
+This allows testing the `x/gov` module without having to import other modules.
+
+```go expandable
+package keeper_test
+
+import (
+
+ "testing"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+ "cosmossdk.io/collections"
+ sdkmath "cosmossdk.io/math"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/codec/address"
+ simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/gov/keeper"
+ govtestutil "github.com/cosmos/cosmos-sdk/x/gov/testutil"
+ "github.com/cosmos/cosmos-sdk/x/gov/types"
+ v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1"
+ "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1"
+ minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
+)
+
+var address1 = "cosmos1ghekyjucln7y67ntx7cf27m9dpuxxemn4c8g4r"
+
+type KeeperTestSuite struct {
+ suite.Suite
+
+ cdc codec.Codec
+ ctx sdk.Context
+ govKeeper *keeper.Keeper
+ acctKeeper *govtestutil.MockAccountKeeper
+ bankKeeper *govtestutil.MockBankKeeper
+ stakingKeeper *govtestutil.MockStakingKeeper
+ distKeeper *govtestutil.MockDistributionKeeper
+ queryClient v1.QueryClient
+ legacyQueryClient v1beta1.QueryClient
+ addrs []sdk.AccAddress
+ msgSrvr v1.MsgServer
+ legacyMsgSrvr v1beta1.MsgServer
+}
+
+func (suite *KeeperTestSuite)
+
+SetupSuite() {
+ suite.reset()
+}
+
+func (suite *KeeperTestSuite)
+
+reset() {
+ govKeeper, acctKeeper, bankKeeper, stakingKeeper, distKeeper, encCfg, ctx := setupGovKeeper(suite.T())
+
+ // Populate the gov account with some coins, as the TestProposal we have
+ // is a MsgSend from the gov account.
+ coins := sdk.NewCoins(sdk.NewCoin("stake", sdkmath.NewInt(100000)))
+ err := bankKeeper.MintCoins(suite.ctx, minttypes.ModuleName, coins)
+
+suite.NoError(err)
+
+err = bankKeeper.SendCoinsFromModuleToModule(ctx, minttypes.ModuleName, types.ModuleName, coins)
+
+suite.NoError(err)
+ queryHelper := baseapp.NewQueryServerTestHelper(ctx, encCfg.InterfaceRegistry)
+
+v1.RegisterQueryServer(queryHelper, keeper.NewQueryServer(govKeeper))
+ legacyQueryHelper := baseapp.NewQueryServerTestHelper(ctx, encCfg.InterfaceRegistry)
+
+v1beta1.RegisterQueryServer(legacyQueryHelper, keeper.NewLegacyQueryServer(govKeeper))
+ queryClient := v1.NewQueryClient(queryHelper)
+ legacyQueryClient := v1beta1.NewQueryClient(legacyQueryHelper)
+
+suite.ctx = ctx
+ suite.govKeeper = govKeeper
+ suite.acctKeeper = acctKeeper
+ suite.bankKeeper = bankKeeper
+ suite.stakingKeeper = stakingKeeper
+ suite.distKeeper = distKeeper
+ suite.cdc = encCfg.Codec
+ suite.queryClient = queryClient
+ suite.legacyQueryClient = legacyQueryClient
+ suite.msgSrvr = keeper.NewMsgServerImpl(suite.govKeeper)
+
+suite.legacyMsgSrvr = keeper.NewLegacyMsgServerImpl(govAcct.String(), suite.msgSrvr)
+
+suite.addrs = simtestutil.AddTestAddrsIncremental(bankKeeper, stakingKeeper, ctx, 3, sdkmath.NewInt(30000000))
+
+suite.acctKeeper.EXPECT().AddressCodec().Return(address.NewBech32Codec("cosmos")).AnyTimes()
+}
+
+func TestIncrementProposalNumber(t *testing.T) {
+ govKeeper, authKeeper, _, _, _, _, ctx := setupGovKeeper(t)
+
+authKeeper.EXPECT().AddressCodec().Return(address.NewBech32Codec("cosmos")).AnyTimes()
+ ac := address.NewBech32Codec("cosmos")
+
+addrBz, err := ac.StringToBytes(address1)
+
+require.NoError(t, err)
+ tp := TestProposal
+ _, err = govKeeper.SubmitProposal(ctx, tp, "", "test", "summary", addrBz, false)
+
+require.NoError(t, err)
+ _, err = govKeeper.SubmitProposal(ctx, tp, "", "test", "summary", addrBz, false)
+
+require.NoError(t, err)
+ _, err = govKeeper.SubmitProposal(ctx, tp, "", "test", "summary", addrBz, true)
+
+require.NoError(t, err)
+ _, err = govKeeper.SubmitProposal(ctx, tp, "", "test", "summary", addrBz, true)
+
+require.NoError(t, err)
+ _, err = govKeeper.SubmitProposal(ctx, tp, "", "test", "summary", addrBz, false)
+
+require.NoError(t, err)
+
+proposal6, err := govKeeper.SubmitProposal(ctx, tp, "", "test", "summary", addrBz, false)
+
+require.NoError(t, err)
+
+require.Equal(t, uint64(6), proposal6.Id)
+}
+
+func TestProposalQueues(t *testing.T) {
+ govKeeper, authKeeper, _, _, _, _, ctx := setupGovKeeper(t)
+ ac := address.NewBech32Codec("cosmos")
+
+addrBz, err := ac.StringToBytes(address1)
+
+require.NoError(t, err)
+
+authKeeper.EXPECT().AddressCodec().Return(address.NewBech32Codec("cosmos")).AnyTimes()
+
+ // create test proposals
+ tp := TestProposal
+ proposal, err := govKeeper.SubmitProposal(ctx, tp, "", "test", "summary", addrBz, false)
+
+require.NoError(t, err)
+
+has, err := govKeeper.InactiveProposalsQueue.Has(ctx, collections.Join(*proposal.DepositEndTime, proposal.Id))
+
+require.NoError(t, err)
+
+require.True(t, has)
+
+require.NoError(t, govKeeper.ActivateVotingPeriod(ctx, proposal))
+
+proposal, err = govKeeper.Proposals.Get(ctx, proposal.Id)
+
+require.Nil(t, err)
+
+has, err = govKeeper.ActiveProposalsQueue.Has(ctx, collections.Join(*proposal.VotingEndTime, proposal.Id))
+
+require.NoError(t, err)
+
+require.True(t, has)
+}
+
+func TestSetHooks(t *testing.T) {
+ govKeeper, _, _, _, _, _, _ := setupGovKeeper(t)
+
+require.Empty(t, govKeeper.Hooks())
+ govHooksReceiver := MockGovHooksReceiver{
+}
+
+govKeeper.SetHooks(types.NewMultiGovHooks(&govHooksReceiver))
+
+require.NotNil(t, govKeeper.Hooks())
+
+require.Panics(t, func() {
+ govKeeper.SetHooks(&govHooksReceiver)
+})
+}
+
+func TestGetGovGovernanceAndModuleAccountAddress(t *testing.T) {
+ govKeeper, authKeeper, _, _, _, _, ctx := setupGovKeeper(t)
+ mAcc := authKeeper.GetModuleAccount(ctx, "gov")
+
+require.Equal(t, mAcc, govKeeper.GetGovernanceAccount(ctx))
+ mAddr := authKeeper.GetModuleAddress("gov")
+
+require.Equal(t, mAddr, govKeeper.ModuleAccountAddress())
+}
+
+func TestKeeperTestSuite(t *testing.T) {
+ suite.Run(t, new(KeeperTestSuite))
+}
+```
+
+Unit tests can then be created using the newly created `Keeper` instance.
+
+```go expandable
+package keeper_test
+
+import (
+
+ "testing"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+ "cosmossdk.io/collections"
+ sdkmath "cosmossdk.io/math"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/codec/address"
+ simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/gov/keeper"
+ govtestutil "github.com/cosmos/cosmos-sdk/x/gov/testutil"
+ "github.com/cosmos/cosmos-sdk/x/gov/types"
+ v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1"
+ "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1"
+ minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
+)
+
+var address1 = "cosmos1ghekyjucln7y67ntx7cf27m9dpuxxemn4c8g4r"
+
+type KeeperTestSuite struct {
+ suite.Suite
+
+ cdc codec.Codec
+ ctx sdk.Context
+ govKeeper *keeper.Keeper
+ acctKeeper *govtestutil.MockAccountKeeper
+ bankKeeper *govtestutil.MockBankKeeper
+ stakingKeeper *govtestutil.MockStakingKeeper
+ distKeeper *govtestutil.MockDistributionKeeper
+ queryClient v1.QueryClient
+ legacyQueryClient v1beta1.QueryClient
+ addrs []sdk.AccAddress
+ msgSrvr v1.MsgServer
+ legacyMsgSrvr v1beta1.MsgServer
+}
+
+func (suite *KeeperTestSuite)
+
+SetupSuite() {
+ suite.reset()
+}
+
+func (suite *KeeperTestSuite)
+
+reset() {
+ govKeeper, acctKeeper, bankKeeper, stakingKeeper, distKeeper, encCfg, ctx := setupGovKeeper(suite.T())
+
+ // Populate the gov account with some coins, as the TestProposal we have
+ // is a MsgSend from the gov account.
+ coins := sdk.NewCoins(sdk.NewCoin("stake", sdkmath.NewInt(100000)))
+ err := bankKeeper.MintCoins(suite.ctx, minttypes.ModuleName, coins)
+
+suite.NoError(err)
+
+err = bankKeeper.SendCoinsFromModuleToModule(ctx, minttypes.ModuleName, types.ModuleName, coins)
+
+suite.NoError(err)
+ queryHelper := baseapp.NewQueryServerTestHelper(ctx, encCfg.InterfaceRegistry)
+
+v1.RegisterQueryServer(queryHelper, keeper.NewQueryServer(govKeeper))
+ legacyQueryHelper := baseapp.NewQueryServerTestHelper(ctx, encCfg.InterfaceRegistry)
+
+v1beta1.RegisterQueryServer(legacyQueryHelper, keeper.NewLegacyQueryServer(govKeeper))
+ queryClient := v1.NewQueryClient(queryHelper)
+ legacyQueryClient := v1beta1.NewQueryClient(legacyQueryHelper)
+
+suite.ctx = ctx
+ suite.govKeeper = govKeeper
+ suite.acctKeeper = acctKeeper
+ suite.bankKeeper = bankKeeper
+ suite.stakingKeeper = stakingKeeper
+ suite.distKeeper = distKeeper
+ suite.cdc = encCfg.Codec
+ suite.queryClient = queryClient
+ suite.legacyQueryClient = legacyQueryClient
+ suite.msgSrvr = keeper.NewMsgServerImpl(suite.govKeeper)
+
+suite.legacyMsgSrvr = keeper.NewLegacyMsgServerImpl(govAcct.String(), suite.msgSrvr)
+
+suite.addrs = simtestutil.AddTestAddrsIncremental(bankKeeper, stakingKeeper, ctx, 3, sdkmath.NewInt(30000000))
+
+suite.acctKeeper.EXPECT().AddressCodec().Return(address.NewBech32Codec("cosmos")).AnyTimes()
+}
+
+func TestIncrementProposalNumber(t *testing.T) {
+ govKeeper, authKeeper, _, _, _, _, ctx := setupGovKeeper(t)
+
+authKeeper.EXPECT().AddressCodec().Return(address.NewBech32Codec("cosmos")).AnyTimes()
+ ac := address.NewBech32Codec("cosmos")
+
+addrBz, err := ac.StringToBytes(address1)
+
+require.NoError(t, err)
+ tp := TestProposal
+ _, err = govKeeper.SubmitProposal(ctx, tp, "", "test", "summary", addrBz, false)
+
+require.NoError(t, err)
+ _, err = govKeeper.SubmitProposal(ctx, tp, "", "test", "summary", addrBz, false)
+
+require.NoError(t, err)
+ _, err = govKeeper.SubmitProposal(ctx, tp, "", "test", "summary", addrBz, true)
+
+require.NoError(t, err)
+ _, err = govKeeper.SubmitProposal(ctx, tp, "", "test", "summary", addrBz, true)
+
+require.NoError(t, err)
+ _, err = govKeeper.SubmitProposal(ctx, tp, "", "test", "summary", addrBz, false)
+
+require.NoError(t, err)
+
+proposal6, err := govKeeper.SubmitProposal(ctx, tp, "", "test", "summary", addrBz, false)
+
+require.NoError(t, err)
+
+require.Equal(t, uint64(6), proposal6.Id)
+}
+
+func TestProposalQueues(t *testing.T) {
+ govKeeper, authKeeper, _, _, _, _, ctx := setupGovKeeper(t)
+ ac := address.NewBech32Codec("cosmos")
+
+addrBz, err := ac.StringToBytes(address1)
+
+require.NoError(t, err)
+
+authKeeper.EXPECT().AddressCodec().Return(address.NewBech32Codec("cosmos")).AnyTimes()
+
+ // create test proposals
+ tp := TestProposal
+ proposal, err := govKeeper.SubmitProposal(ctx, tp, "", "test", "summary", addrBz, false)
+
+require.NoError(t, err)
+
+has, err := govKeeper.InactiveProposalsQueue.Has(ctx, collections.Join(*proposal.DepositEndTime, proposal.Id))
+
+require.NoError(t, err)
+
+require.True(t, has)
+
+require.NoError(t, govKeeper.ActivateVotingPeriod(ctx, proposal))
+
+proposal, err = govKeeper.Proposals.Get(ctx, proposal.Id)
+
+require.Nil(t, err)
+
+has, err = govKeeper.ActiveProposalsQueue.Has(ctx, collections.Join(*proposal.VotingEndTime, proposal.Id))
+
+require.NoError(t, err)
+
+require.True(t, has)
+}
+
+func TestSetHooks(t *testing.T) {
+ govKeeper, _, _, _, _, _, _ := setupGovKeeper(t)
+
+require.Empty(t, govKeeper.Hooks())
+ govHooksReceiver := MockGovHooksReceiver{
+}
+
+govKeeper.SetHooks(types.NewMultiGovHooks(&govHooksReceiver))
+
+require.NotNil(t, govKeeper.Hooks())
+
+require.Panics(t, func() {
+ govKeeper.SetHooks(&govHooksReceiver)
+})
+}
+
+func TestGetGovGovernanceAndModuleAccountAddress(t *testing.T) {
+ govKeeper, authKeeper, _, _, _, _, ctx := setupGovKeeper(t)
+ mAcc := authKeeper.GetModuleAccount(ctx, "gov")
+
+require.Equal(t, mAcc, govKeeper.GetGovernanceAccount(ctx))
+ mAddr := authKeeper.GetModuleAddress("gov")
+
+require.Equal(t, mAddr, govKeeper.ModuleAccountAddress())
+}
+
+func TestKeeperTestSuite(t *testing.T) {
+ suite.Run(t, new(KeeperTestSuite))
+}
+```
+
+## Integration Tests
+
+Integration tests are at the second level of the [test pyramid](https://martinfowler.com/articles/practical-test-pyramid.html).
+In the SDK, integration tests are located under [`/tests/integration`](https://github.com/cosmos/cosmos-sdk/tree/main/tests/integration).
+
+The goal of these integration tests is to test how a component interacts with other dependencies. Compared to unit tests, integration tests do not mock dependencies. Instead, they use the direct dependencies of the component. This differs as well from end-to-end tests, which test the component with a full application.
+
+Integration tests interact with the tested module via the defined `Msg` and `Query` services. The result of the test can be verified by checking the state of the application, by checking the emitted events or the response. It is advised to combine two of these methods to verify the result of the test.
+
+The SDK provides small helpers for quickly setting up integration tests. These helpers can be found at [`testutil/integration`](https://github.com/cosmos/cosmos-sdk/blob/main/testutil/integration).
+
+### Example
+
+```go expandable
+package integration_test
+
+import (
+
+ "fmt"
+ "io"
+
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ "github.com/google/go-cmp/cmp"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/log"
+ storetypes "cosmossdk.io/store/types"
+
+ addresscodec "github.com/cosmos/cosmos-sdk/codec/address"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ "github.com/cosmos/cosmos-sdk/testutil/integration"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ moduletestutil "github.com/cosmos/cosmos-sdk/types/module/testutil"
+ "github.com/cosmos/cosmos-sdk/x/auth"
+ authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
+ authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ "github.com/cosmos/cosmos-sdk/x/mint"
+ mintkeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper"
+ minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
+)
+
+// Example shows how to use the integration test framework to test the integration of SDK modules.
+// Panics are used in this example, but in a real test case, you should use the testing.T object and assertions.
+func Example() {
+ // in this example we are testing the integration of the following modules:
+ // - mint, which directly depends on auth, bank and staking
+ encodingCfg := moduletestutil.MakeTestEncodingConfig(auth.AppModuleBasic{
+}, mint.AppModuleBasic{
+})
+ keys := storetypes.NewKVStoreKeys(authtypes.StoreKey, minttypes.StoreKey)
+ authority := authtypes.NewModuleAddress("gov").String()
+
+ // replace the logger by testing values in a real test case (e.g. log.NewTestLogger(t))
+ logger := log.NewNopLogger()
+ cms := integration.CreateMultiStore(keys, logger)
+ newCtx := sdk.NewContext(cms, cmtproto.Header{
+}, true, logger)
+ accountKeeper := authkeeper.NewAccountKeeper(
+ encodingCfg.Codec,
+ runtime.NewKVStoreService(keys[authtypes.StoreKey]),
+ authtypes.ProtoBaseAccount,
+ map[string][]string{
+ minttypes.ModuleName: {
+ authtypes.Minter
+}},
+ addresscodec.NewBech32Codec("cosmos"),
+ "cosmos",
+ authority,
+ )
+
+ // subspace is nil because we don't test params (which is legacy anyway)
+ authModule := auth.NewAppModule(encodingCfg.Codec, accountKeeper, authsims.RandomGenesisAccounts, nil)
+
+ // here bankkeeper and staking keeper is nil because we are not testing them
+ // subspace is nil because we don't test params (which is legacy anyway)
+ mintKeeper := mintkeeper.NewKeeper(encodingCfg.Codec, runtime.NewKVStoreService(keys[minttypes.StoreKey]), nil, accountKeeper, nil, authtypes.FeeCollectorName, authority)
+ mintModule := mint.NewAppModule(encodingCfg.Codec, mintKeeper, accountKeeper, nil, nil)
+
+ // create the application and register all the modules from the previous step
+ integrationApp := integration.NewIntegrationApp(
+ newCtx,
+ logger,
+ keys,
+ encodingCfg.Codec,
+ map[string]appmodule.AppModule{
+ authtypes.ModuleName: authModule,
+ minttypes.ModuleName: mintModule,
+},
+ )
+
+ // register the message and query servers
+ authtypes.RegisterMsgServer(integrationApp.MsgServiceRouter(), authkeeper.NewMsgServerImpl(accountKeeper))
+
+minttypes.RegisterMsgServer(integrationApp.MsgServiceRouter(), mintkeeper.NewMsgServerImpl(mintKeeper))
+
+minttypes.RegisterQueryServer(integrationApp.QueryHelper(), mintkeeper.NewQueryServerImpl(mintKeeper))
+ params := minttypes.DefaultParams()
+
+params.BlocksPerYear = 10000
+
+ // now we can use the application to test a mint message
+ result, err := integrationApp.RunMsg(&minttypes.MsgUpdateParams{
+ Authority: authority,
+ Params: params,
+})
+ if err != nil {
+ panic(err)
+}
+
+ // in this example the result is an empty response, a nil check is enough
+ // in other cases, it is recommended to check the result value.
+ if result == nil {
+ panic(fmt.Errorf("unexpected nil result"))
+}
+
+ // we now check the result
+ resp := minttypes.MsgUpdateParamsResponse{
+}
+
+err = encodingCfg.Codec.Unmarshal(result.Value, &resp)
+ if err != nil {
+ panic(err)
+}
+ sdkCtx := sdk.UnwrapSDKContext(integrationApp.Context())
+
+ // we should also check the state of the application
+ got, err := mintKeeper.Params.Get(sdkCtx)
+ if err != nil {
+ panic(err)
+}
+ if diff := cmp.Diff(got, params); diff != "" {
+ panic(diff)
+}
+
+fmt.Println(got.BlocksPerYear)
+ // Output: 10000
+}
+
+// ExampleOneModule shows how to use the integration test framework to test the integration of a single module.
+// That module has no dependency on other modules.
+func Example_oneModule() {
+ // in this example we are testing the integration of the auth module:
+ encodingCfg := moduletestutil.MakeTestEncodingConfig(auth.AppModuleBasic{
+})
+ keys := storetypes.NewKVStoreKeys(authtypes.StoreKey)
+ authority := authtypes.NewModuleAddress("gov").String()
+
+ // replace the logger by testing values in a real test case (e.g. log.NewTestLogger(t))
+ logger := log.NewLogger(io.Discard)
+ cms := integration.CreateMultiStore(keys, logger)
+ newCtx := sdk.NewContext(cms, cmtproto.Header{
+}, true, logger)
+ accountKeeper := authkeeper.NewAccountKeeper(
+ encodingCfg.Codec,
+ runtime.NewKVStoreService(keys[authtypes.StoreKey]),
+ authtypes.ProtoBaseAccount,
+ map[string][]string{
+ minttypes.ModuleName: {
+ authtypes.Minter
+}},
+ addresscodec.NewBech32Codec("cosmos"),
+ "cosmos",
+ authority,
+ )
+
+ // subspace is nil because we don't test params (which is legacy anyway)
+ authModule := auth.NewAppModule(encodingCfg.Codec, accountKeeper, authsims.RandomGenesisAccounts, nil)
+
+ // create the application and register all the modules from the previous step
+ integrationApp := integration.NewIntegrationApp(
+ newCtx,
+ logger,
+ keys,
+ encodingCfg.Codec,
+ map[string]appmodule.AppModule{
+ authtypes.ModuleName: authModule,
+},
+ )
+
+ // register the message and query servers
+ authtypes.RegisterMsgServer(integrationApp.MsgServiceRouter(), authkeeper.NewMsgServerImpl(accountKeeper))
+ params := authtypes.DefaultParams()
+
+params.MaxMemoCharacters = 1000
+
+ // now we can use the application to test a mint message
+ result, err := integrationApp.RunMsg(&authtypes.MsgUpdateParams{
+ Authority: authority,
+ Params: params,
+},
+ // this allows to the begin and end blocker of the module before and after the message
+ integration.WithAutomaticFinalizeBlock(),
+ // this allows to commit the state after the message
+ integration.WithAutomaticCommit(),
+ )
+ if err != nil {
+ panic(err)
+}
+
+ // verify that the begin and end blocker were called
+ // NOTE: in this example, we are testing auth, which doesn't have any begin or end blocker
+ // so verifying the block height is enough
+ if integrationApp.LastBlockHeight() != 2 {
+ panic(fmt.Errorf("expected block height to be 2, got %d", integrationApp.LastBlockHeight()))
+}
+
+ // in this example the result is an empty response, a nil check is enough
+ // in other cases, it is recommended to check the result value.
+ if result == nil {
+ panic(fmt.Errorf("unexpected nil result"))
+}
+
+ // we now check the result
+ resp := authtypes.MsgUpdateParamsResponse{
+}
+
+err = encodingCfg.Codec.Unmarshal(result.Value, &resp)
+ if err != nil {
+ panic(err)
+}
+ sdkCtx := sdk.UnwrapSDKContext(integrationApp.Context())
+
+ // we should also check the state of the application
+ got := accountKeeper.GetParams(sdkCtx)
+ if diff := cmp.Diff(got, params); diff != "" {
+ panic(diff)
+}
+
+fmt.Println(got.MaxMemoCharacters)
+ // Output: 1000
+}
+```
+
+## Deterministic and Regression tests
+
+Tests are written for queries in the Cosmos SDK which have the `module_query_safe` Protobuf annotation.
+
+Each query is tested using 2 methods:
+
+* Use property-based testing with the [`rapid`](https://pkg.go.dev/pgregory.net/rapid@v0.5.3) library. The property that is tested is that the query response and gas consumption are the same upon 1000 query calls.
+* Regression tests are written with hardcoded responses and gas, and verify they don't change upon 1000 calls and between SDK patch versions.
+
+Here's an example of regression tests:
+
+```go expandable
+package keeper_test
+
+import (
+
+ "testing"
+
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ "github.com/stretchr/testify/require"
+ "gotest.tools/v3/assert"
+ "pgregory.net/rapid"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/log"
+ "cosmossdk.io/math"
+ storetypes "cosmossdk.io/store/types"
+
+ addresscodec "github.com/cosmos/cosmos-sdk/codec/address"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ "github.com/cosmos/cosmos-sdk/testutil/integration"
+ "github.com/cosmos/cosmos-sdk/testutil/testdata"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ moduletestutil "github.com/cosmos/cosmos-sdk/types/module/testutil"
+ "github.com/cosmos/cosmos-sdk/x/auth"
+ authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
+ authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation"
+ _ "github.com/cosmos/cosmos-sdk/x/auth/tx/config"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ "github.com/cosmos/cosmos-sdk/x/bank"
+ "github.com/cosmos/cosmos-sdk/x/bank/keeper"
+ banktestutil "github.com/cosmos/cosmos-sdk/x/bank/testutil"
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ _ "github.com/cosmos/cosmos-sdk/x/consensus"
+ minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
+ _ "github.com/cosmos/cosmos-sdk/x/params"
+ _ "github.com/cosmos/cosmos-sdk/x/staking"
+)
+
+var (
+ denomRegex = sdk.DefaultCoinDenomRegex()
+
+addr1 = sdk.MustAccAddressFromBech32("cosmos139f7kncmglres2nf3h4hc4tade85ekfr8sulz5")
+
+coin1 = sdk.NewCoin("denom", math.NewInt(10))
+
+metadataAtom = banktypes.Metadata{
+ Description: "The native staking token of the Cosmos Hub.",
+ DenomUnits: []*banktypes.DenomUnit{
+ {
+ Denom: "uatom",
+ Exponent: 0,
+ Aliases: []string{"microatom"
+},
+},
+ {
+ Denom: "atom",
+ Exponent: 6,
+ Aliases: []string{"ATOM"
+},
+},
+},
+ Base: "uatom",
+ Display: "atom",
+}
+)
+
+type deterministicFixture struct {
+ ctx sdk.Context
+ bankKeeper keeper.BaseKeeper
+ queryClient banktypes.QueryClient
+}
+
+func initDeterministicFixture(t *testing.T) *deterministicFixture {
+ t.Helper()
+ keys := storetypes.NewKVStoreKeys(authtypes.StoreKey, banktypes.StoreKey)
+ cdc := moduletestutil.MakeTestEncodingConfig(auth.AppModuleBasic{
+}, bank.AppModuleBasic{
+}).Codec
+ logger := log.NewTestLogger(t)
+ cms := integration.CreateMultiStore(keys, logger)
+ newCtx := sdk.NewContext(cms, cmtproto.Header{
+}, true, logger)
+ authority := authtypes.NewModuleAddress("gov")
+ maccPerms := map[string][]string{
+ minttypes.ModuleName: {
+ authtypes.Minter
+},
+}
+ accountKeeper := authkeeper.NewAccountKeeper(
+ cdc,
+ runtime.NewKVStoreService(keys[authtypes.StoreKey]),
+ authtypes.ProtoBaseAccount,
+ maccPerms,
+ addresscodec.NewBech32Codec(sdk.Bech32MainPrefix),
+ sdk.Bech32MainPrefix,
+ authority.String(),
+ )
+ blockedAddresses := map[string]bool{
+ accountKeeper.GetAuthority(): false,
+}
+ bankKeeper := keeper.NewBaseKeeper(
+ cdc,
+ runtime.NewKVStoreService(keys[banktypes.StoreKey]),
+ accountKeeper,
+ blockedAddresses,
+ authority.String(),
+ log.NewNopLogger(),
+ )
+ authModule := auth.NewAppModule(cdc, accountKeeper, authsims.RandomGenesisAccounts, nil)
+ bankModule := bank.NewAppModule(cdc, bankKeeper, accountKeeper, nil)
+ integrationApp := integration.NewIntegrationApp(newCtx, logger, keys, cdc, map[string]appmodule.AppModule{
+ authtypes.ModuleName: authModule,
+ banktypes.ModuleName: bankModule,
+})
+ sdkCtx := sdk.UnwrapSDKContext(integrationApp.Context())
+
+ // Register MsgServer and QueryServer
+ banktypes.RegisterMsgServer(integrationApp.MsgServiceRouter(), keeper.NewMsgServerImpl(bankKeeper))
+
+banktypes.RegisterQueryServer(integrationApp.QueryHelper(), keeper.NewQuerier(&bankKeeper))
+ qr := integrationApp.QueryHelper()
+ queryClient := banktypes.NewQueryClient(qr)
+ f := deterministicFixture{
+ ctx: sdkCtx,
+ bankKeeper: bankKeeper,
+ queryClient: queryClient,
+}
+
+return &f
+}
+
+func fundAccount(f *deterministicFixture, addr sdk.AccAddress, coin ...sdk.Coin) {
+ err := banktestutil.FundAccount(f.ctx, f.bankKeeper, addr, sdk.NewCoins(coin...))
+
+assert.NilError(&testing.T{
+}, err)
+}
+
+func getCoin(rt *rapid.T)
+
+sdk.Coin {
+ return sdk.NewCoin(
+ rapid.StringMatching(denomRegex).Draw(rt, "denom"),
+ math.NewInt(rapid.Int64Min(1).Draw(rt, "amount")),
+ )
+}
+
+func TestGRPCQueryBalance(t *testing.T) {
+ t.Parallel()
+ f := initDeterministicFixture(t)
+
+rapid.Check(t, func(rt *rapid.T) {
+ addr := testdata.AddressGenerator(rt).Draw(rt, "address")
+ coin := getCoin(rt)
+
+fundAccount(f, addr, coin)
+ req := banktypes.NewQueryBalanceRequest(addr, coin.GetDenom())
+
+testdata.DeterministicIterations(f.ctx, t, req, f.queryClient.Balance, 0, true)
+})
+
+fundAccount(f, addr1, coin1)
+ req := banktypes.NewQueryBalanceRequest(addr1, coin1.GetDenom())
+
+testdata.DeterministicIterations(f.ctx, t, req, f.queryClient.Balance, 1087, false)
+}
+
+func TestGRPCQueryAllBalances(t *testing.T) {
+ t.Parallel()
+ f := initDeterministicFixture(t)
+
+rapid.Check(t, func(rt *rapid.T) {
+ addr := testdata.AddressGenerator(rt).Draw(rt, "address")
+ numCoins := rapid.IntRange(1, 10).Draw(rt, "num-count")
+ coins := make(sdk.Coins, 0, numCoins)
+ for i := 0; i < numCoins; i++ {
+ coin := getCoin(rt)
+
+ // NewCoins sorts the denoms
+ coins = sdk.NewCoins(append(coins, coin)...)
+}
+
+fundAccount(f, addr, coins...)
+ req := banktypes.NewQueryAllBalancesRequest(addr, testdata.PaginationGenerator(rt, uint64(numCoins)).Draw(rt, "pagination"), false)
+
+testdata.DeterministicIterations(f.ctx, t, req, f.queryClient.AllBalances, 0, true)
+})
+ coins := sdk.NewCoins(
+ sdk.NewCoin("stake", math.NewInt(10)),
+ sdk.NewCoin("denom", math.NewInt(100)),
+ )
+
+fundAccount(f, addr1, coins...)
+ req := banktypes.NewQueryAllBalancesRequest(addr1, nil, false)
+
+testdata.DeterministicIterations(f.ctx, t, req, f.queryClient.AllBalances, 357, false)
+}
+
+func TestGRPCQuerySpendableBalances(t *testing.T) {
+ t.Parallel()
+ f := initDeterministicFixture(t)
+
+rapid.Check(t, func(rt *rapid.T) {
+ addr := testdata.AddressGenerator(rt).Draw(rt, "address")
+
+ // Denoms must be unique, otherwise sdk.NewCoins will panic.
+ denoms := rapid.SliceOfNDistinct(rapid.StringMatching(denomRegex), 1, 10, rapid.ID[string]).Draw(rt, "denoms")
+ coins := make(sdk.Coins, 0, len(denoms))
+ for _, denom := range denoms {
+ coin := sdk.NewCoin(
+ denom,
+ math.NewInt(rapid.Int64Min(1).Draw(rt, "amount")),
+ )
+
+ // NewCoins sorts the denoms
+ coins = sdk.NewCoins(append(coins, coin)...)
+}
+ err := banktestutil.FundAccount(f.ctx, f.bankKeeper, addr, coins)
+
+assert.NilError(t, err)
+ req := banktypes.NewQuerySpendableBalancesRequest(addr, testdata.PaginationGenerator(rt, uint64(len(denoms))).Draw(rt, "pagination"))
+
+testdata.DeterministicIterations(f.ctx, t, req, f.queryClient.SpendableBalances, 0, true)
+})
+ coins := sdk.NewCoins(
+ sdk.NewCoin("stake", math.NewInt(10)),
+ sdk.NewCoin("denom", math.NewInt(100)),
+ )
+ err := banktestutil.FundAccount(f.ctx, f.bankKeeper, addr1, coins)
+
+assert.NilError(t, err)
+ req := banktypes.NewQuerySpendableBalancesRequest(addr1, nil)
+
+testdata.DeterministicIterations(f.ctx, t, req, f.queryClient.SpendableBalances, 2032, false)
+}
+
+func TestGRPCQueryTotalSupply(t *testing.T) {
+ t.Parallel()
+ f := initDeterministicFixture(t)
+
+res, err := f.queryClient.TotalSupply(f.ctx, &banktypes.QueryTotalSupplyRequest{
+})
+
+assert.NilError(t, err)
+ initialSupply := res.GetSupply()
+
+rapid.Check(t, func(rt *rapid.T) {
+ numCoins := rapid.IntRange(1, 3).Draw(rt, "num-count")
+ coins := make(sdk.Coins, 0, numCoins)
+ for i := 0; i < numCoins; i++ {
+ coin := sdk.NewCoin(
+ rapid.StringMatching(denomRegex).Draw(rt, "denom"),
+ math.NewInt(rapid.Int64Min(1).Draw(rt, "amount")),
+ )
+
+coins = coins.Add(coin)
+}
+
+assert.NilError(t, f.bankKeeper.MintCoins(f.ctx, minttypes.ModuleName, coins))
+
+initialSupply = initialSupply.Add(coins...)
+ req := &banktypes.QueryTotalSupplyRequest{
+ Pagination: testdata.PaginationGenerator(rt, uint64(len(initialSupply))).Draw(rt, "pagination"),
+}
+
+testdata.DeterministicIterations(f.ctx, t, req, f.queryClient.TotalSupply, 0, true)
+})
+
+f = initDeterministicFixture(t) // reset
+ coins := sdk.NewCoins(
+ sdk.NewCoin("foo", math.NewInt(10)),
+ sdk.NewCoin("bar", math.NewInt(100)),
+ )
+
+assert.NilError(t, f.bankKeeper.MintCoins(f.ctx, minttypes.ModuleName, coins))
+ req := &banktypes.QueryTotalSupplyRequest{
+}
+
+testdata.DeterministicIterations(f.ctx, t, req, f.queryClient.TotalSupply, 150, false)
+}
+
+func TestGRPCQueryTotalSupplyOf(t *testing.T) {
+ t.Parallel()
+ f := initDeterministicFixture(t)
+
+rapid.Check(t, func(rt *rapid.T) {
+ coin := sdk.NewCoin(
+ rapid.StringMatching(denomRegex).Draw(rt, "denom"),
+ math.NewInt(rapid.Int64Min(1).Draw(rt, "amount")),
+ )
+
+assert.NilError(t, f.bankKeeper.MintCoins(f.ctx, minttypes.ModuleName, sdk.NewCoins(coin)))
+ req := &banktypes.QuerySupplyOfRequest{
+ Denom: coin.GetDenom()
+}
+
+testdata.DeterministicIterations(f.ctx, t, req, f.queryClient.SupplyOf, 0, true)
+})
+ coin := sdk.NewCoin("bar", math.NewInt(100))
+
+assert.NilError(t, f.bankKeeper.MintCoins(f.ctx, minttypes.ModuleName, sdk.NewCoins(coin)))
+ req := &banktypes.QuerySupplyOfRequest{
+ Denom: coin.GetDenom()
+}
+
+testdata.DeterministicIterations(f.ctx, t, req, f.queryClient.SupplyOf, 1021, false)
+}
+
+func TestGRPCQueryParams(t *testing.T) {
+ t.Parallel()
+ f := initDeterministicFixture(t)
+
+rapid.Check(t, func(rt *rapid.T) {
+ enabledStatus := banktypes.SendEnabled{
+ Denom: rapid.StringMatching(denomRegex).Draw(rt, "denom"),
+ Enabled: rapid.Bool().Draw(rt, "status"),
+}
+ params := banktypes.Params{
+ SendEnabled: []*banktypes.SendEnabled{&enabledStatus
+},
+ DefaultSendEnabled: rapid.Bool().Draw(rt, "send"),
+}
+
+require.NoError(t, f.bankKeeper.SetParams(f.ctx, params))
+ req := &banktypes.QueryParamsRequest{
+}
+
+testdata.DeterministicIterations(f.ctx, t, req, f.queryClient.Params, 0, true)
+})
+ enabledStatus := banktypes.SendEnabled{
+ Denom: "denom",
+ Enabled: true,
+}
+ params := banktypes.Params{
+ SendEnabled: []*banktypes.SendEnabled{&enabledStatus
+},
+ DefaultSendEnabled: false,
+}
+
+require.NoError(t, f.bankKeeper.SetParams(f.ctx, params))
+ req := &banktypes.QueryParamsRequest{
+}
+
+testdata.DeterministicIterations(f.ctx, t, req, f.queryClient.Params, 1003, false)
+}
+
+func createAndReturnMetadatas(t *rapid.T, count int) []banktypes.Metadata {
+ denomsMetadata := make([]banktypes.Metadata, 0, count)
+ for i := 0; i < count; i++ {
+ denom := rapid.StringMatching(denomRegex).Draw(t, "denom")
+ aliases := rapid.SliceOf(rapid.String()).Draw(t, "aliases")
+ // In the GRPC server code, empty arrays are returned as nil
+ if len(aliases) == 0 {
+ aliases = nil
+}
+ metadata := banktypes.Metadata{
+ Description: rapid.StringN(1, 100, 100).Draw(t, "desc"),
+ DenomUnits: []*banktypes.DenomUnit{
+ {
+ Denom: denom,
+ Exponent: rapid.Uint32().Draw(t, "exponent"),
+ Aliases: aliases,
+},
+},
+ Base: denom,
+ Display: denom,
+ Name: rapid.String().Draw(t, "name"),
+ Symbol: rapid.String().Draw(t, "symbol"),
+ URI: rapid.String().Draw(t, "uri"),
+ URIHash: rapid.String().Draw(t, "uri-hash"),
+}
+
+denomsMetadata = append(denomsMetadata, metadata)
+}
+
+return denomsMetadata
+}
+
+func TestGRPCDenomsMetadata(t *testing.T) {
+ t.Parallel()
+ f := initDeterministicFixture(t)
+
+rapid.Check(t, func(rt *rapid.T) {
+ count := rapid.IntRange(1, 3).Draw(rt, "count")
+ denomsMetadata := createAndReturnMetadatas(rt, count)
+
+assert.Assert(t, len(denomsMetadata) == count)
+ for i := 0; i < count; i++ {
+ f.bankKeeper.SetDenomMetaData(f.ctx, denomsMetadata[i])
+}
+ req := &banktypes.QueryDenomsMetadataRequest{
+ Pagination: testdata.PaginationGenerator(rt, uint64(count)).Draw(rt, "pagination"),
+}
+
+testdata.DeterministicIterations(f.ctx, t, req, f.queryClient.DenomsMetadata, 0, true)
+})
+
+f = initDeterministicFixture(t) // reset
+
+ f.bankKeeper.SetDenomMetaData(f.ctx, metadataAtom)
+ req := &banktypes.QueryDenomsMetadataRequest{
+}
+
+testdata.DeterministicIterations(f.ctx, t, req, f.queryClient.DenomsMetadata, 660, false)
+}
+
+func TestGRPCDenomMetadata(t *testing.T) {
+ t.Parallel()
+ f := initDeterministicFixture(t)
+
+rapid.Check(t, func(rt *rapid.T) {
+ denomMetadata := createAndReturnMetadatas(rt, 1)
+
+assert.Assert(t, len(denomMetadata) == 1)
+
+f.bankKeeper.SetDenomMetaData(f.ctx, denomMetadata[0])
+ req := &banktypes.QueryDenomMetadataRequest{
+ Denom: denomMetadata[0].Base,
+}
+
+testdata.DeterministicIterations(f.ctx, t, req, f.queryClient.DenomMetadata, 0, true)
+})
+
+f.bankKeeper.SetDenomMetaData(f.ctx, metadataAtom)
+ req := &banktypes.QueryDenomMetadataRequest{
+ Denom: metadataAtom.Base,
+}
+
+testdata.DeterministicIterations(f.ctx, t, req, f.queryClient.DenomMetadata, 1300, false)
+}
+
+func TestGRPCSendEnabled(t *testing.T) {
+ t.Parallel()
+ f := initDeterministicFixture(t)
+ allDenoms := []string{
+}
+
+rapid.Check(t, func(rt *rapid.T) {
+ count := rapid.IntRange(0, 10).Draw(rt, "count")
+ denoms := make([]string, 0, count)
+ for i := 0; i < count; i++ {
+ coin := banktypes.SendEnabled{
+ Denom: rapid.StringMatching(denomRegex).Draw(rt, "denom"),
+ Enabled: rapid.Bool().Draw(rt, "enabled-status"),
+}
+
+f.bankKeeper.SetSendEnabled(f.ctx, coin.Denom, coin.Enabled)
+
+denoms = append(denoms, coin.Denom)
+}
+
+allDenoms = append(allDenoms, denoms...)
+ req := &banktypes.QuerySendEnabledRequest{
+ Denoms: denoms,
+ // Pagination is only taken into account when `denoms` is an empty array
+ Pagination: testdata.PaginationGenerator(rt, uint64(len(allDenoms))).Draw(rt, "pagination"),
+}
+
+testdata.DeterministicIterations(f.ctx, t, req, f.queryClient.SendEnabled, 0, true)
+})
+
+coin1 := banktypes.SendEnabled{
+ Denom: "falsecoin",
+ Enabled: false,
+}
+
+coin2 := banktypes.SendEnabled{
+ Denom: "truecoin",
+ Enabled: true,
+}
+
+f.bankKeeper.SetSendEnabled(f.ctx, coin1.Denom, false)
+
+f.bankKeeper.SetSendEnabled(f.ctx, coin2.Denom, true)
+ req := &banktypes.QuerySendEnabledRequest{
+ Denoms: []string{
+ coin1.GetDenom(), coin2.GetDenom()
+},
+}
+
+testdata.DeterministicIterations(f.ctx, t, req, f.queryClient.SendEnabled, 4063, false)
+}
+
+func TestGRPCDenomOwners(t *testing.T) {
+ t.Parallel()
+ f := initDeterministicFixture(t)
+
+rapid.Check(t, func(rt *rapid.T) {
+ denom := rapid.StringMatching(denomRegex).Draw(rt, "denom")
+ numAddr := rapid.IntRange(1, 10).Draw(rt, "number-address")
+ for i := 0; i < numAddr; i++ {
+ addr := testdata.AddressGenerator(rt).Draw(rt, "address")
+ coin := sdk.NewCoin(
+ denom,
+ math.NewInt(rapid.Int64Min(1).Draw(rt, "amount")),
+ )
+ err := banktestutil.FundAccount(f.ctx, f.bankKeeper, addr, sdk.NewCoins(coin))
+
+assert.NilError(t, err)
+}
+ req := &banktypes.QueryDenomOwnersRequest{
+ Denom: denom,
+ Pagination: testdata.PaginationGenerator(rt, uint64(numAddr)).Draw(rt, "pagination"),
+}
+
+testdata.DeterministicIterations(f.ctx, t, req, f.queryClient.DenomOwners, 0, true)
+})
+ denomOwners := []*banktypes.DenomOwner{
+ {
+ Address: "cosmos1qg65a9q6k2sqq7l3ycp428sqqpmqcucgzze299",
+ Balance: coin1,
+},
+ {
+ Address: "cosmos1qglnsqgpq48l7qqzgs8qdshr6fh3gqq9ej3qut",
+ Balance: coin1,
+},
+}
+ for i := 0; i < len(denomOwners); i++ {
+ addr, err := sdk.AccAddressFromBech32(denomOwners[i].Address)
+
+assert.NilError(t, err)
+
+err = banktestutil.FundAccount(f.ctx, f.bankKeeper, addr, sdk.NewCoins(coin1))
+
+assert.NilError(t, err)
+}
+ req := &banktypes.QueryDenomOwnersRequest{
+ Denom: coin1.GetDenom(),
+}
+
+testdata.DeterministicIterations(f.ctx, t, req, f.queryClient.DenomOwners, 2516, false)
+}
+```
+
+## Simulations
+
+Simulations also use a minimal application, built with [`depinject`](/sdk/v0.53/build/packages/depinject):
+
+
+You can also use the `AppConfig` `configurator` to create an `AppConfig` [inline](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0/x/slashing/app_test.go#L54-L62). There is no difference between these two approaches; use whichever you prefer.
+
+
+The following is an example of `x/gov` simulations:
+
+```go expandable
+package simulation_test
+
+import (
+
+ "fmt"
+ "math/rand"
+ "testing"
+ "time"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ "github.com/cosmos/gogoproto/proto"
+ "github.com/stretchr/testify/require"
+ "cosmossdk.io/depinject"
+ "cosmossdk.io/log"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ "github.com/cosmos/cosmos-sdk/testutil/configurator"
+ simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ _ "github.com/cosmos/cosmos-sdk/x/auth"
+ authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
+ _ "github.com/cosmos/cosmos-sdk/x/auth/tx/config"
+ _ "github.com/cosmos/cosmos-sdk/x/bank"
+ bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
+ "github.com/cosmos/cosmos-sdk/x/bank/testutil"
+ _ "github.com/cosmos/cosmos-sdk/x/consensus"
+ _ "github.com/cosmos/cosmos-sdk/x/distribution"
+ dk "github.com/cosmos/cosmos-sdk/x/distribution/keeper"
+ _ "github.com/cosmos/cosmos-sdk/x/gov"
+ "github.com/cosmos/cosmos-sdk/x/gov/keeper"
+ "github.com/cosmos/cosmos-sdk/x/gov/simulation"
+ "github.com/cosmos/cosmos-sdk/x/gov/types"
+ v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1"
+ "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1"
+ _ "github.com/cosmos/cosmos-sdk/x/params"
+ _ "github.com/cosmos/cosmos-sdk/x/staking"
+ stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper"
+)
+
+var (
+ _ simtypes.WeightedProposalMsg = MockWeightedProposals{
+}
+ //nolint:staticcheck // keeping around for legacy testing
+ _ simtypes.WeightedProposalContent = MockWeightedProposals{
+}
+)
+
+type MockWeightedProposals struct {
+ n int
+}
+
+func (m MockWeightedProposals)
+
+AppParamsKey()
+
+string {
+ return fmt.Sprintf("AppParamsKey-%d", m.n)
+}
+
+func (m MockWeightedProposals)
+
+DefaultWeight()
+
+int {
+ return m.n
+}
+
+func (m MockWeightedProposals)
+
+MsgSimulatorFn()
+
+simtypes.MsgSimulatorFn {
+ return func(r *rand.Rand, _ sdk.Context, _ []simtypes.Account)
+
+sdk.Msg {
+ return nil
+}
+}
+
+//nolint:staticcheck // retaining legacy content to maintain gov functionality
+func (m MockWeightedProposals)
+
+ContentSimulatorFn()
+
+simtypes.ContentSimulatorFn {
+ return func(r *rand.Rand, _ sdk.Context, _ []simtypes.Account)
+
+simtypes.Content {
+ return v1beta1.NewTextProposal(
+ fmt.Sprintf("title-%d: %s", m.n, simtypes.RandStringOfLength(r, 100)),
+ fmt.Sprintf("description-%d: %s", m.n, simtypes.RandStringOfLength(r, 4000)),
+ )
+}
+}
+
+func mockWeightedProposalMsg(n int) []simtypes.WeightedProposalMsg {
+ wpc := make([]simtypes.WeightedProposalMsg, n)
+ for i := range n {
+ wpc[i] = MockWeightedProposals{
+ i
+}
+
+}
+
+return wpc
+}
+
+// nolint // keeping this legacy proposal for testing
+func mockWeightedLegacyProposalContent(n int) []simtypes.WeightedProposalContent {
+ wpc := make([]simtypes.WeightedProposalContent, n)
+ for i := range n {
+ wpc[i] = MockWeightedProposals{
+ i
+}
+
+}
+
+return wpc
+}
+
+// TestWeightedOperations tests the weights of the operations.
+func TestWeightedOperations(t *testing.T) {
+ suite, ctx := createTestSuite(t, false)
+ app := suite.App
+ ctx.WithChainID("test-chain")
+ appParams := make(simtypes.AppParams)
+ weightesOps := simulation.WeightedOperations(appParams, suite.TxConfig, suite.AccountKeeper,
+ suite.BankKeeper, suite.GovKeeper, mockWeightedProposalMsg(3), mockWeightedLegacyProposalContent(1),
+ )
+
+ // setup 3 accounts
+ s := rand.NewSource(1)
+ r := rand.New(s)
+ accs := getTestingAccounts(t, r, suite.AccountKeeper, suite.BankKeeper, suite.StakingKeeper, ctx, 3)
+ expected := []struct {
+ weight int
+ opMsgRoute string
+ opMsgName string
+}{
+ {
+ simulation.DefaultWeightMsgDeposit, types.ModuleName, simulation.TypeMsgDeposit
+},
+ {
+ simulation.DefaultWeightMsgVote, types.ModuleName, simulation.TypeMsgVote
+},
+ {
+ simulation.DefaultWeightMsgVoteWeighted, types.ModuleName, simulation.TypeMsgVoteWeighted
+},
+ {
+ simulation.DefaultWeightMsgCancelProposal, types.ModuleName, simulation.TypeMsgCancelProposal
+},
+ {0, types.ModuleName, simulation.TypeMsgSubmitProposal
+},
+ {1, types.ModuleName, simulation.TypeMsgSubmitProposal
+},
+ {2, types.ModuleName, simulation.TypeMsgSubmitProposal
+},
+ {0, types.ModuleName, simulation.TypeMsgSubmitProposal
+},
+}
+
+require.Equal(t, len(weightesOps), len(expected), "number of operations should be the same")
+ for i, w := range weightesOps {
+ operationMsg, _, err := w.Op()(r, app.BaseApp, ctx, accs, ctx.ChainID())
+
+require.NoError(t, err)
+
+ // the following checks are very much dependent from the ordering of the output given
+ // by WeightedOperations. if the ordering in WeightedOperations changes some tests
+ // will fail
+ require.Equal(t, expected[i].weight, w.Weight(), "weight should be the same")
+
+require.Equal(t, expected[i].opMsgRoute, operationMsg.Route, "route should be the same")
+
+require.Equal(t, expected[i].opMsgName, operationMsg.Name, "operation Msg name should be the same")
+}
+}
+
+// TestSimulateMsgSubmitProposal tests the normal scenario of a valid message of type TypeMsgSubmitProposal.
+// Abnormal scenarios, where errors occur, are not tested here.
+func TestSimulateMsgSubmitProposal(t *testing.T) {
+ suite, ctx := createTestSuite(t, false)
+ app := suite.App
+
+ // setup 3 accounts
+ s := rand.NewSource(1)
+ r := rand.New(s)
+ accounts := getTestingAccounts(t, r, suite.AccountKeeper, suite.BankKeeper, suite.StakingKeeper, ctx, 3)
+
+ _, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{
+ Height: app.LastBlockHeight() + 1,
+ Hash: app.LastCommitID().Hash,
+})
+
+require.NoError(t, err)
+
+ // execute operation
+ op := simulation.SimulateMsgSubmitProposal(suite.TxConfig, suite.AccountKeeper, suite.BankKeeper, suite.GovKeeper, MockWeightedProposals{3
+}.MsgSimulatorFn())
+
+operationMsg, _, err := op(r, app.BaseApp, ctx, accounts, "")
+
+require.NoError(t, err)
+
+var msg v1.MsgSubmitProposal
+ err = proto.Unmarshal(operationMsg.Msg, &msg)
+
+require.NoError(t, err)
+
+require.True(t, operationMsg.OK)
+
+require.Equal(t, "cosmos1ghekyjucln7y67ntx7cf27m9dpuxxemn4c8g4r", msg.Proposer)
+
+require.NotEqual(t, len(msg.InitialDeposit), 0)
+
+require.Equal(t, "47841094stake", msg.InitialDeposit[0].String())
+
+require.Equal(t, simulation.TypeMsgSubmitProposal, sdk.MsgTypeURL(&msg))
+}
+
+// TestSimulateMsgSubmitProposal tests the normal scenario of a valid message of type TypeMsgSubmitProposal.
+// Abnormal scenarios, where errors occur, are not tested here.
+func TestSimulateMsgSubmitLegacyProposal(t *testing.T) {
+ suite, ctx := createTestSuite(t, false)
+ app := suite.App
+
+ // setup 3 accounts
+ s := rand.NewSource(1)
+ r := rand.New(s)
+ accounts := getTestingAccounts(t, r, suite.AccountKeeper, suite.BankKeeper, suite.StakingKeeper, ctx, 3)
+
+ _, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{
+ Height: app.LastBlockHeight() + 1,
+ Hash: app.LastCommitID().Hash,
+})
+
+require.NoError(t, err)
+ // execute operation
+ op := simulation.SimulateMsgSubmitLegacyProposal(suite.TxConfig, suite.AccountKeeper, suite.BankKeeper, suite.GovKeeper, MockWeightedProposals{3
+}.ContentSimulatorFn())
+
+operationMsg, _, err := op(r, app.BaseApp, ctx, accounts, "")
+
+require.NoError(t, err)
+
+var msg v1.MsgSubmitProposal
+ err = proto.Unmarshal(operationMsg.Msg, &msg)
+
+require.NoError(t, err)
+
+var msgLegacyContent v1.MsgExecLegacyContent
+ err = proto.Unmarshal(msg.Messages[0].Value, &msgLegacyContent)
+
+require.NoError(t, err)
+
+var textProposal v1beta1.TextProposal
+ err = proto.Unmarshal(msgLegacyContent.Content.Value, &textProposal)
+
+require.NoError(t, err)
+
+require.True(t, operationMsg.OK)
+
+require.Equal(t, "cosmos1p8wcgrjr4pjju90xg6u9cgq55dxwq8j7u4x9a0", msg.Proposer)
+
+require.NotEqual(t, len(msg.InitialDeposit), 0)
+
+require.Equal(t, "25166256stake", msg.InitialDeposit[0].String())
+
+require.Equal(t, "title-3: ZBSpYuLyYggwexjxusrBqDOTtGTOWeLrQKjLxzIivHSlcxgdXhhuTSkuxKGLwQvuyNhYFmBZHeAerqyNEUzXPFGkqEGqiQWIXnku",
+ textProposal.GetTitle())
+
+require.Equal(t, "description-3: NJWzHdBNpAXKJPHWQdrGYcAHSctgVlqwqHoLfHsXUdStwfefwzqLuKEhmMyYLdbZrcPgYqjNHxPexsruwEGStAneKbWkQDDIlCWBLSiAASNhZqNFlPtfqPJoxKsgMdzjWqLWdqKQuJqWPMvwPQWZUtVMOTMYKJbfdlZsjdsomuScvDmbDkgRualsxDvRJuCAmPOXitIbcyWsKGSdrEunFAOdmXnsuyFVgJqEjbklvmwrUlsxjRSfKZxGcpayDdgoFcnVSutxjRgOSFzPwidAjubMncNweqpbxhXGchpZUxuFDOtpnhNUycJICRYqsPhPSCjPTWZFLkstHWJxvdPEAyEIxXgLwbNOjrgzmaujiBABBIXvcXpLrbcEWNNQsbjvgJFgJkflpRohHUutvnaUqoopuKjTDaemDeSdqbnOzcfJpcTuAQtZoiLZOoAIlboFDAeGmSNwkvObPRvRWQgWkGkxwtPauYgdkmypLjbqhlHJIQTntgWjXwZdOyYEdQRRLfMSdnxqppqUofqLbLQDUjwKVKfZJUJQPsWIPwIVaSTrmKskoAhvmZyJgeRpkaTfGgrJzAigcxtfshmiDCFkuiluqtMOkidknnTBtumyJYlIsWLnCQclqdVmikUoMOPdPWwYbJxXyqUVicNxFxyqJTenNblyyKSdlCbiXxUiYUiMwXZASYfvMDPFgxniSjWaZTjHkqlJvtBsXqwPpyVxnJVGFWhfSxgOcduoxkiopJvFjMmFabrGYeVtTXLhxVUEiGwYUvndjFGzDVntUvibiyZhfMQdMhgsiuysLMiePBNXifRLMsSmXPkwlPloUbJveCvUlaalhZHuvdkCnkSHbMbmOnrfEGPwQiACiPlnihiaOdbjPqPiTXaHDoJXjSlZmltGqNHHNrcKdlFSCdmVOuvDcBLdSklyGJmcLTbSFtALdGlPkqqecJrpLCXNPWefoTJNgEJlyMEPneVaxxduAAEqQpHWZodWyRkDAxzyMnFMcjSVqeRXLqsNyNtQBbuRvunZflWSbbvXXdkyLikYqutQhLPONXbvhcQZJPSWnOulqQaXmbfFxAkqfYeseSHOQidHwbcsOaMnSrrmGjjRmEMQNuknupMxJiIeVjmgZvbmjPIQTEhQFULQLBMPrxcFPvBinaOPYWGvYGRKxLZdwamfRQQFngcdSlvwjfaPbURasIsGJVHtcEAxnIIrhSriiXLOlbEBLXFElXJFGxHJczRBIxAuPKtBisjKBwfzZFagdNmjdwIRvwzLkFKWRTDPxJCmpzHUcrPiiXXHnOIlqNVoGSXZewdnCRhuxeYGPVTfrNTQNOxZmxInOazUYNTNDgzsxlgiVEHPKMfbesvPHUqpNkUqbzeuzfdrsuLDpKHMUbBMKczKKWOdYoIXoPYtEjfOnlQLoGnbQUCuERdEFaptwnsHzTJDsuZkKtzMpFaZobynZdzNydEeJJHDYaQcwUxcqvwfWwNUsCiLvkZQiSfzAHftYgAmVsXgtmcYgTqJIawstRYJrZdSxlfRiqTufgEQVambeZZmaAyRQbcmdjVUZZCgqDrSeltJGXPMgZnGDZqISrGDOClxXCxMjmKqEPwKHoOfOeyGmqWqihqjINXLqnyTesZePQRqaWDQNqpLgNrAUKulklmckTijUltQKuWQDwpLmDyxLppPVMwsmBIpOwQttYFMjgJQZLYFPmxWFLIeZihkRNnkzoypBICIxgEuYsVWGIGRbbxqVasYnstWomJnHwmtOhAFSpttRYYzBmyEtZXiCthvKvWszTXDbiJbGXMcrYpKAgvUVFtdKUfvdMfhAryctklUCEdjetjuGNfJjajZtvzdYaqInKtFPPLYmRaXPdQzxdSQfmZDEVHlHGEGNSPRFJuIfKLLfUmnHxHnRjmzQPNlqrXgifUdzAGKVabYqvcDeYoTYgPsBUqehrBhmQUgTvDnsdpuhUoxsk
DdppTsYMcnDIPSwKIqhXDCIxOuXrywahvVavvHkPuaenjLmEbMgrkrQLHEAwrhHkPRNvonNQKqprqOFVZKAtpRSpvQUxMoXCMZLSSbnLEFsjVfANdQNQVwTmGxqVjVqRuxREAhuaDrFgEZpYKhwWPEKBevBfsOIcaZKyykQafzmGPLRAKDtTcJxJVgiiuUkmyMYuDUNEUhBEdoBLJnamtLmMJQgmLiUELIhLpiEvpOXOvXCPUeldLFqkKOwfacqIaRcnnZvERKRMCKUkMABbDHytQqQblrvoxOZkwzosQfDKGtIdfcXRJNqlBNwOCWoQBcEWyqrMlYZIAXYJmLfnjoJepgSFvrgajaBAIksoyeHqgqbGvpAstMIGmIhRYGGNPRIfOQKsGoKgxtsidhTaAePRCBFqZgPDWCIkqOJezGVkjfYUCZTlInbxBXwUAVRsxHTQtJFnnpmMvXDYCVlEmnZBKhmmxQOIQzxFWpJQkQoSAYzTEiDWEOsVLNrbfzeHFRyeYATakQQWmFDLPbVMCJcWjFGJjfqCoVzlbNNEsqxdSmNPjTjHYOkuEMFLkXYGaoJlraLqayMeCsTjWNRDPBywBJLAPVkGQqTwApVVwYAetlwSbzsdHWsTwSIcctkyKDuRWYDQikRqsKTMJchrliONJeaZIzwPQrNbTwxsGdwuduvibtYndRwpdsvyCktRHFalvUuEKMqXbItfGcNGWsGzubdPMYayOUOINjpcFBeESdwpdlTYmrPsLsVDhpTzoMegKrytNVZkfJRPuDCUXxSlSthOohmsuxmIZUedzxKmowKOdXTMcEtdpHaPWgIsIjrViKrQOCONlSuazmLuCUjLltOGXeNgJKedTVrrVCpWYWHyVrdXpKgNaMJVjbXxnVMSChdWKuZdqpisvrkBJPoURDYxWOtpjzZoOpWzyUuYNhCzRoHsMjmmWDcXzQiHIyjwdhPNwiPqFxeUfMVFQGImhykFgMIlQEoZCaRoqSBXTSWAeDumdbsOGtATwEdZlLfoBKiTvodQBGOEcuATWXfiinSjPmJKcWgQrTVYVrwlyMWhxqNbCMpIQNoSMGTiWfPTCezUjYcdWppnsYJihLQCqbNLRGgqrwHuIvsazapTpoPZIyZyeeSueJuTIhpHMEJfJpScshJubJGfkusuVBgfTWQoywSSliQQSfbvaHKiLnyjdSbpMkdBgXepoSsHnCQaYuHQqZsoEOmJCiuQUpJkmfyfbIShzlZpHFmLCsbknEAkKXKfRTRnuwdBeuOGgFbJLbDksHVapaRayWzwoYBEpmrlAxrUxYMUekKbpjPNfjUCjhbdMAnJmYQVZBQZkFVweHDAlaqJjRqoQPoOMLhyvYCzqEuQsAFoxWrzRnTVjStPadhsESlERnKhpEPsfDxNvxqcOyIulaCkmPdambLHvGhTZzysvqFauEgkFRItPfvisehFmoBhQqmkfbHVsgfHXDPJVyhwPllQpuYLRYvGodxKjkarnSNgsXoKEMlaSKxKdcVgvOkuLcfLFfdtXGTclqfPOfeoVLbqcjcXCUEBgAGplrkgsmIEhWRZLlGPGCwKWRaCKMkBHTAcypUrYjWwCLtOPVygMwMANGoQwFnCqFrUGMCRZUGJKTZIGPyldsifauoMnJPLTcDHmilcmahlqOELaAUYDBuzsVywnDQfwRLGIWozYaOAilMBcObErwgTDNGWnwQMUgFFSKtPDMEoEQCTKVREqrXZSGLqwTMcxHfWotDllNkIJPMbXzjDVjPOOjCFuIvTyhXKLyhUScOXvYthRXpPfKwMhptXaxIxgqBoUqzrWbaoLTVpQoottZyPFfNOoMioXHRuFwMRYUiKvcWPkrayyTLOCFJlAyslDameIuqVAuxErqFPEWIScKpBORIuZqoXlZuTvAjEdlEWDODFRregDTqGNoFBIHxvimmIZwLfFyKUfEWAnNBdtdzDmTPXtpHRGdIbuucfTjOygZsTxPjfweXh
SUkMhPjMaxKlMIJMOXcnQfyzeOcbWwNbeH",
+ textProposal.GetDescription())
+
+require.Equal(t, simulation.TypeMsgSubmitProposal, sdk.MsgTypeURL(&msg))
+}
+
+// TestSimulateMsgCancelProposal tests the normal scenario of a valid message of type TypeMsgCancelProposal.
+// Abnormal scenarios, where errors occur, are not tested here.
+func TestSimulateMsgCancelProposal(t *testing.T) {
+ suite, ctx := createTestSuite(t, false)
+ app := suite.App
+ blockTime := time.Now().UTC()
+
+ctx = ctx.WithBlockTime(blockTime)
+
+ // setup 3 accounts
+ s := rand.NewSource(1)
+ r := rand.New(s)
+ accounts := getTestingAccounts(t, r, suite.AccountKeeper, suite.BankKeeper, suite.StakingKeeper, ctx, 3)
+ // setup a proposal
+ proposer := accounts[0].Address
+ content := v1beta1.NewTextProposal("Test", "description")
+
+contentMsg, err := v1.NewLegacyContent(content, suite.GovKeeper.GetGovernanceAccount(ctx).GetAddress().String())
+
+require.NoError(t, err)
+ submitTime := ctx.BlockHeader().Time
+ params, _ := suite.GovKeeper.Params.Get(ctx)
+ depositPeriod := params.MaxDepositPeriod
+
+ proposal, err := v1.NewProposal([]sdk.Msg{
+ contentMsg
+}, 1, submitTime, submitTime.Add(*depositPeriod), "", "title", "summary", proposer, false)
+
+require.NoError(t, err)
+
+require.NoError(t, suite.GovKeeper.SetProposal(ctx, proposal))
+
+ _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{
+ Height: app.LastBlockHeight() + 1,
+ Hash: app.LastCommitID().Hash,
+})
+
+require.NoError(t, err)
+
+ // execute operation
+ op := simulation.SimulateMsgCancelProposal(suite.TxConfig, suite.AccountKeeper, suite.BankKeeper, suite.GovKeeper)
+
+operationMsg, _, err := op(r, app.BaseApp, ctx, accounts, "")
+
+require.NoError(t, err)
+
+var msg v1.MsgCancelProposal
+ err = proto.Unmarshal(operationMsg.Msg, &msg)
+
+require.NoError(t, err)
+
+require.NoError(t, err)
+
+require.True(t, operationMsg.OK)
+
+require.Equal(t, uint64(1), msg.ProposalId)
+
+require.Equal(t, proposer.String(), msg.Proposer)
+
+require.Equal(t, simulation.TypeMsgCancelProposal, sdk.MsgTypeURL(&msg))
+}
+
+// TestSimulateMsgDeposit tests the normal scenario of a valid message of type TypeMsgDeposit.
+// Abnormal scenarios, where errors occur, are not tested here.
+func TestSimulateMsgDeposit(t *testing.T) {
+ suite, ctx := createTestSuite(t, false)
+ app := suite.App
+ blockTime := time.Now().UTC()
+
+ctx = ctx.WithBlockTime(blockTime)
+
+ // setup 3 accounts
+ s := rand.NewSource(1)
+ r := rand.New(s)
+ accounts := getTestingAccounts(t, r, suite.AccountKeeper, suite.BankKeeper, suite.StakingKeeper, ctx, 3)
+
+ // setup a proposal
+ content := v1beta1.NewTextProposal("Test", "description")
+
+contentMsg, err := v1.NewLegacyContent(content, suite.GovKeeper.GetGovernanceAccount(ctx).GetAddress().String())
+
+require.NoError(t, err)
+ submitTime := ctx.BlockHeader().Time
+ params, _ := suite.GovKeeper.Params.Get(ctx)
+ depositPeriod := params.MaxDepositPeriod
+
+ proposal, err := v1.NewProposal([]sdk.Msg{
+ contentMsg
+}, 1, submitTime, submitTime.Add(*depositPeriod), "", "text proposal", "description", sdk.AccAddress("cosmos1ghekyjucln7y67ntx7cf27m9dpuxxemn4c8g4r"), false)
+
+require.NoError(t, err)
+
+require.NoError(t, suite.GovKeeper.SetProposal(ctx, proposal))
+
+ _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{
+ Height: app.LastBlockHeight() + 1,
+ Hash: app.LastCommitID().Hash,
+})
+
+require.NoError(t, err)
+
+ // execute operation
+ op := simulation.SimulateMsgDeposit(suite.TxConfig, suite.AccountKeeper, suite.BankKeeper, suite.GovKeeper)
+
+operationMsg, _, err := op(r, app.BaseApp, ctx, accounts, "")
+
+require.NoError(t, err)
+
+var msg v1.MsgDeposit
+ err = proto.Unmarshal(operationMsg.Msg, &msg)
+
+require.NoError(t, err)
+
+require.True(t, operationMsg.OK)
+
+require.Equal(t, uint64(1), msg.ProposalId)
+
+require.Equal(t, "cosmos1ghekyjucln7y67ntx7cf27m9dpuxxemn4c8g4r", msg.Depositor)
+
+require.NotEqual(t, len(msg.Amount), 0)
+
+require.Equal(t, "560969stake", msg.Amount[0].String())
+
+require.Equal(t, simulation.TypeMsgDeposit, sdk.MsgTypeURL(&msg))
+}
+
+// TestSimulateMsgVote tests the normal scenario of a valid message of type TypeMsgVote.
+// Abnormal scenarios, where errors occur, are not tested here.
+func TestSimulateMsgVote(t *testing.T) {
+ suite, ctx := createTestSuite(t, false)
+ app := suite.App
+ blockTime := time.Now().UTC()
+
+ctx = ctx.WithBlockTime(blockTime)
+
+ // setup 3 accounts
+ s := rand.NewSource(1)
+ r := rand.New(s)
+ accounts := getTestingAccounts(t, r, suite.AccountKeeper, suite.BankKeeper, suite.StakingKeeper, ctx, 3)
+
+ // setup a proposal
+ govAcc := suite.GovKeeper.GetGovernanceAccount(ctx).GetAddress().String()
+
+contentMsg, err := v1.NewLegacyContent(v1beta1.NewTextProposal("Test", "description"), govAcc)
+
+require.NoError(t, err)
+ submitTime := ctx.BlockHeader().Time
+ params, _ := suite.GovKeeper.Params.Get(ctx)
+ depositPeriod := params.MaxDepositPeriod
+
+ proposal, err := v1.NewProposal([]sdk.Msg{
+ contentMsg
+}, 1, submitTime, submitTime.Add(*depositPeriod), "", "text proposal", "description", sdk.AccAddress("cosmos1ghekyjucln7y67ntx7cf27m9dpuxxemn4c8g4r"), false)
+
+require.NoError(t, err)
+
+require.NoError(t, suite.GovKeeper.ActivateVotingPeriod(ctx, proposal))
+
+ _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{
+ Height: app.LastBlockHeight() + 1,
+ Hash: app.LastCommitID().Hash,
+})
+
+require.NoError(t, err)
+
+ // execute operation
+ op := simulation.SimulateMsgVote(suite.TxConfig, suite.AccountKeeper, suite.BankKeeper, suite.GovKeeper)
+
+operationMsg, _, err := op(r, app.BaseApp, ctx, accounts, "")
+
+require.NoError(t, err)
+
+var msg v1.MsgVote
+ err = proto.Unmarshal(operationMsg.Msg, &msg)
+
+require.NoError(t, err)
+
+require.True(t, operationMsg.OK)
+
+require.Equal(t, uint64(1), msg.ProposalId)
+
+require.Equal(t, "cosmos1ghekyjucln7y67ntx7cf27m9dpuxxemn4c8g4r", msg.Voter)
+
+require.Equal(t, v1.OptionYes, msg.Option)
+
+require.Equal(t, simulation.TypeMsgVote, sdk.MsgTypeURL(&msg))
+}
+
+// TestSimulateMsgVoteWeighted tests the normal scenario of a valid message of type TypeMsgVoteWeighted.
+// Abnormal scenarios, where errors occur, are not tested here.
+func TestSimulateMsgVoteWeighted(t *testing.T) {
+ suite, ctx := createTestSuite(t, false)
+ app := suite.App
+ blockTime := time.Now().UTC()
+
+ctx = ctx.WithBlockTime(blockTime)
+
+ // setup 3 accounts
+ s := rand.NewSource(1)
+ r := rand.New(s)
+ accounts := getTestingAccounts(t, r, suite.AccountKeeper, suite.BankKeeper, suite.StakingKeeper, ctx, 3)
+
+ // setup a proposal
+ govAcc := suite.GovKeeper.GetGovernanceAccount(ctx).GetAddress().String()
+
+contentMsg, err := v1.NewLegacyContent(v1beta1.NewTextProposal("Test", "description"), govAcc)
+
+require.NoError(t, err)
+ submitTime := ctx.BlockHeader().Time
+ params, _ := suite.GovKeeper.Params.Get(ctx)
+ depositPeriod := params.MaxDepositPeriod
+
+ proposal, err := v1.NewProposal([]sdk.Msg{
+ contentMsg
+}, 1, submitTime, submitTime.Add(*depositPeriod), "", "text proposal", "test", sdk.AccAddress("cosmos1ghekyjucln7y67ntx7cf27m9dpuxxemn4c8g4r"), false)
+
+require.NoError(t, err)
+
+require.NoError(t, suite.GovKeeper.ActivateVotingPeriod(ctx, proposal))
+
+ _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{
+ Height: app.LastBlockHeight() + 1,
+ Hash: app.LastCommitID().Hash,
+})
+
+require.NoError(t, err)
+
+ // execute operation
+ op := simulation.SimulateMsgVoteWeighted(suite.TxConfig, suite.AccountKeeper, suite.BankKeeper, suite.GovKeeper)
+
+operationMsg, _, err := op(r, app.BaseApp, ctx, accounts, "")
+
+require.NoError(t, err)
+
+var msg v1.MsgVoteWeighted
+ err = proto.Unmarshal(operationMsg.Msg, &msg)
+
+require.NoError(t, err)
+
+require.True(t, operationMsg.OK)
+
+require.Equal(t, uint64(1), msg.ProposalId)
+
+require.Equal(t, "cosmos1ghekyjucln7y67ntx7cf27m9dpuxxemn4c8g4r", msg.Voter)
+
+require.True(t, len(msg.Options) >= 1)
+
+require.Equal(t, simulation.TypeMsgVoteWeighted, sdk.MsgTypeURL(&msg))
+}
+
+type suite struct {
+ TxConfig client.TxConfig
+ AccountKeeper authkeeper.AccountKeeper
+ BankKeeper bankkeeper.Keeper
+ GovKeeper *keeper.Keeper
+ StakingKeeper *stakingkeeper.Keeper
+ DistributionKeeper dk.Keeper
+ App *runtime.App
+}
+
+// returns context and an app with updated mint keeper
+func createTestSuite(t *testing.T, isCheckTx bool) (suite, sdk.Context) {
+ t.Helper()
+ res := suite{
+}
+
+app, err := simtestutil.Setup(
+ depinject.Configs(
+ configurator.NewAppConfig(
+ configurator.AuthModule(),
+ configurator.TxModule(),
+ configurator.ParamsModule(),
+ configurator.BankModule(),
+ configurator.StakingModule(),
+ configurator.ConsensusModule(),
+ configurator.DistributionModule(),
+ configurator.GovModule(),
+ ),
+ depinject.Supply(log.NewNopLogger()),
+ ),
+ &res.TxConfig, &res.AccountKeeper, &res.BankKeeper, &res.GovKeeper, &res.StakingKeeper, &res.DistributionKeeper)
+
+require.NoError(t, err)
+ ctx := app.NewContext(isCheckTx)
+
+res.App = app
+ return res, ctx
+}
+
+func getTestingAccounts(
+ t *testing.T,
+ r *rand.Rand,
+ accountKeeper authkeeper.AccountKeeper,
+ bankKeeper bankkeeper.Keeper,
+ stakingKeeper *stakingkeeper.Keeper,
+ ctx sdk.Context,
+ n int,
+) []simtypes.Account {
+ t.Helper()
+ accounts := simtypes.RandomAccounts(r, n)
+ initAmt := stakingKeeper.TokensFromConsensusPower(ctx, 200)
+ initCoins := sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, initAmt))
+
+ // add coins to the accounts
+ for _, account := range accounts {
+ acc := accountKeeper.NewAccountWithAddress(ctx, account.Address)
+
+accountKeeper.SetAccount(ctx, acc)
+
+require.NoError(t, testutil.FundAccount(ctx, bankKeeper, account.Address, initCoins))
+}
+
+return accounts
+}
+```
+
+```go expandable
+package simulation_test
+
+import (
+
+ "fmt"
+ "math/rand"
+ "testing"
+ "time"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ "github.com/cosmos/gogoproto/proto"
+ "github.com/stretchr/testify/require"
+ "cosmossdk.io/depinject"
+ "cosmossdk.io/log"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ "github.com/cosmos/cosmos-sdk/testutil/configurator"
+ simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ _ "github.com/cosmos/cosmos-sdk/x/auth"
+ authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
+ _ "github.com/cosmos/cosmos-sdk/x/auth/tx/config"
+ _ "github.com/cosmos/cosmos-sdk/x/bank"
+ bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
+ "github.com/cosmos/cosmos-sdk/x/bank/testutil"
+ _ "github.com/cosmos/cosmos-sdk/x/consensus"
+ _ "github.com/cosmos/cosmos-sdk/x/distribution"
+ dk "github.com/cosmos/cosmos-sdk/x/distribution/keeper"
+ _ "github.com/cosmos/cosmos-sdk/x/gov"
+ "github.com/cosmos/cosmos-sdk/x/gov/keeper"
+ "github.com/cosmos/cosmos-sdk/x/gov/simulation"
+ "github.com/cosmos/cosmos-sdk/x/gov/types"
+ v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1"
+ "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1"
+ _ "github.com/cosmos/cosmos-sdk/x/params"
+ _ "github.com/cosmos/cosmos-sdk/x/staking"
+ stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper"
+)
+
+var (
+ _ simtypes.WeightedProposalMsg = MockWeightedProposals{
+}
+ //nolint:staticcheck // keeping around for legacy testing
+ _ simtypes.WeightedProposalContent = MockWeightedProposals{
+}
+)
+
+type MockWeightedProposals struct {
+ n int
+}
+
+func (m MockWeightedProposals)
+
+AppParamsKey()
+
+string {
+ return fmt.Sprintf("AppParamsKey-%d", m.n)
+}
+
+func (m MockWeightedProposals)
+
+DefaultWeight()
+
+int {
+ return m.n
+}
+
+func (m MockWeightedProposals)
+
+MsgSimulatorFn()
+
+simtypes.MsgSimulatorFn {
+ return func(r *rand.Rand, _ sdk.Context, _ []simtypes.Account)
+
+sdk.Msg {
+ return nil
+}
+}
+
+//nolint:staticcheck // retaining legacy content to maintain gov functionality
+func (m MockWeightedProposals)
+
+ContentSimulatorFn()
+
+simtypes.ContentSimulatorFn {
+ return func(r *rand.Rand, _ sdk.Context, _ []simtypes.Account)
+
+simtypes.Content {
+ return v1beta1.NewTextProposal(
+ fmt.Sprintf("title-%d: %s", m.n, simtypes.RandStringOfLength(r, 100)),
+ fmt.Sprintf("description-%d: %s", m.n, simtypes.RandStringOfLength(r, 4000)),
+ )
+}
+}
+
+func mockWeightedProposalMsg(n int) []simtypes.WeightedProposalMsg {
+ wpc := make([]simtypes.WeightedProposalMsg, n)
+ for i := range n {
+ wpc[i] = MockWeightedProposals{
+ i
+}
+
+}
+
+return wpc
+}
+
+// nolint // keeping this legacy proposal for testing
+func mockWeightedLegacyProposalContent(n int) []simtypes.WeightedProposalContent {
+ wpc := make([]simtypes.WeightedProposalContent, n)
+ for i := range n {
+ wpc[i] = MockWeightedProposals{
+ i
+}
+
+}
+
+return wpc
+}
+
+// TestWeightedOperations tests the weights of the operations.
+func TestWeightedOperations(t *testing.T) {
+ suite, ctx := createTestSuite(t, false)
+ app := suite.App
+ ctx.WithChainID("test-chain")
+ appParams := make(simtypes.AppParams)
+ weightesOps := simulation.WeightedOperations(appParams, suite.TxConfig, suite.AccountKeeper,
+ suite.BankKeeper, suite.GovKeeper, mockWeightedProposalMsg(3), mockWeightedLegacyProposalContent(1),
+ )
+
+ // setup 3 accounts
+ s := rand.NewSource(1)
+ r := rand.New(s)
+ accs := getTestingAccounts(t, r, suite.AccountKeeper, suite.BankKeeper, suite.StakingKeeper, ctx, 3)
+ expected := []struct {
+ weight int
+ opMsgRoute string
+ opMsgName string
+}{
+ {
+ simulation.DefaultWeightMsgDeposit, types.ModuleName, simulation.TypeMsgDeposit
+},
+ {
+ simulation.DefaultWeightMsgVote, types.ModuleName, simulation.TypeMsgVote
+},
+ {
+ simulation.DefaultWeightMsgVoteWeighted, types.ModuleName, simulation.TypeMsgVoteWeighted
+},
+ {
+ simulation.DefaultWeightMsgCancelProposal, types.ModuleName, simulation.TypeMsgCancelProposal
+},
+ {0, types.ModuleName, simulation.TypeMsgSubmitProposal
+},
+ {1, types.ModuleName, simulation.TypeMsgSubmitProposal
+},
+ {2, types.ModuleName, simulation.TypeMsgSubmitProposal
+},
+ {0, types.ModuleName, simulation.TypeMsgSubmitProposal
+},
+}
+
+require.Equal(t, len(weightesOps), len(expected), "number of operations should be the same")
+ for i, w := range weightesOps {
+ operationMsg, _, err := w.Op()(r, app.BaseApp, ctx, accs, ctx.ChainID())
+
+require.NoError(t, err)
+
+ // the following checks are very much dependent from the ordering of the output given
+ // by WeightedOperations. if the ordering in WeightedOperations changes some tests
+ // will fail
+ require.Equal(t, expected[i].weight, w.Weight(), "weight should be the same")
+
+require.Equal(t, expected[i].opMsgRoute, operationMsg.Route, "route should be the same")
+
+require.Equal(t, expected[i].opMsgName, operationMsg.Name, "operation Msg name should be the same")
+}
+}
+
+// TestSimulateMsgSubmitProposal tests the normal scenario of a valid message of type TypeMsgSubmitProposal.
+// Abnormal scenarios, where errors occur, are not tested here.
+func TestSimulateMsgSubmitProposal(t *testing.T) {
+ suite, ctx := createTestSuite(t, false)
+ app := suite.App
+
+ // setup 3 accounts
+ s := rand.NewSource(1)
+ r := rand.New(s)
+ accounts := getTestingAccounts(t, r, suite.AccountKeeper, suite.BankKeeper, suite.StakingKeeper, ctx, 3)
+
+ _, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{
+ Height: app.LastBlockHeight() + 1,
+ Hash: app.LastCommitID().Hash,
+})
+
+require.NoError(t, err)
+
+ // execute operation
+ op := simulation.SimulateMsgSubmitProposal(suite.TxConfig, suite.AccountKeeper, suite.BankKeeper, suite.GovKeeper, MockWeightedProposals{3
+}.MsgSimulatorFn())
+
+operationMsg, _, err := op(r, app.BaseApp, ctx, accounts, "")
+
+require.NoError(t, err)
+
+var msg v1.MsgSubmitProposal
+ err = proto.Unmarshal(operationMsg.Msg, &msg)
+
+require.NoError(t, err)
+
+require.True(t, operationMsg.OK)
+
+require.Equal(t, "cosmos1ghekyjucln7y67ntx7cf27m9dpuxxemn4c8g4r", msg.Proposer)
+
+require.NotEqual(t, len(msg.InitialDeposit), 0)
+
+require.Equal(t, "47841094stake", msg.InitialDeposit[0].String())
+
+require.Equal(t, simulation.TypeMsgSubmitProposal, sdk.MsgTypeURL(&msg))
+}
+
+// TestSimulateMsgSubmitLegacyProposal tests the normal scenario of a valid message of type TypeMsgSubmitProposal.
+// Abnormal scenarios, where errors occur, are not tested here.
+func TestSimulateMsgSubmitLegacyProposal(t *testing.T) {
+ suite, ctx := createTestSuite(t, false)
+ app := suite.App
+
+ // setup 3 accounts
+ s := rand.NewSource(1)
+ r := rand.New(s)
+ accounts := getTestingAccounts(t, r, suite.AccountKeeper, suite.BankKeeper, suite.StakingKeeper, ctx, 3)
+
+ _, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{
+ Height: app.LastBlockHeight() + 1,
+ Hash: app.LastCommitID().Hash,
+})
+
+require.NoError(t, err)
+ // execute operation
+ op := simulation.SimulateMsgSubmitLegacyProposal(suite.TxConfig, suite.AccountKeeper, suite.BankKeeper, suite.GovKeeper, MockWeightedProposals{3
+}.ContentSimulatorFn())
+
+operationMsg, _, err := op(r, app.BaseApp, ctx, accounts, "")
+
+require.NoError(t, err)
+
+var msg v1.MsgSubmitProposal
+ err = proto.Unmarshal(operationMsg.Msg, &msg)
+
+require.NoError(t, err)
+
+var msgLegacyContent v1.MsgExecLegacyContent
+ err = proto.Unmarshal(msg.Messages[0].Value, &msgLegacyContent)
+
+require.NoError(t, err)
+
+var textProposal v1beta1.TextProposal
+ err = proto.Unmarshal(msgLegacyContent.Content.Value, &textProposal)
+
+require.NoError(t, err)
+
+require.True(t, operationMsg.OK)
+
+require.Equal(t, "cosmos1p8wcgrjr4pjju90xg6u9cgq55dxwq8j7u4x9a0", msg.Proposer)
+
+require.NotEqual(t, len(msg.InitialDeposit), 0)
+
+require.Equal(t, "25166256stake", msg.InitialDeposit[0].String())
+
+require.Equal(t, "title-3: ZBSpYuLyYggwexjxusrBqDOTtGTOWeLrQKjLxzIivHSlcxgdXhhuTSkuxKGLwQvuyNhYFmBZHeAerqyNEUzXPFGkqEGqiQWIXnku",
+ textProposal.GetTitle())
+
+require.Equal(t, "description-3: NJWzHdBNpAXKJPHWQdrGYcAHSctgVlqwqHoLfHsXUdStwfefwzqLuKEhmMyYLdbZrcPgYqjNHxPexsruwEGStAneKbWkQDDIlCWBLSiAASNhZqNFlPtfqPJoxKsgMdzjWqLWdqKQuJqWPMvwPQWZUtVMOTMYKJbfdlZsjdsomuScvDmbDkgRualsxDvRJuCAmPOXitIbcyWsKGSdrEunFAOdmXnsuyFVgJqEjbklvmwrUlsxjRSfKZxGcpayDdgoFcnVSutxjRgOSFzPwidAjubMncNweqpbxhXGchpZUxuFDOtpnhNUycJICRYqsPhPSCjPTWZFLkstHWJxvdPEAyEIxXgLwbNOjrgzmaujiBABBIXvcXpLrbcEWNNQsbjvgJFgJkflpRohHUutvnaUqoopuKjTDaemDeSdqbnOzcfJpcTuAQtZoiLZOoAIlboFDAeGmSNwkvObPRvRWQgWkGkxwtPauYgdkmypLjbqhlHJIQTntgWjXwZdOyYEdQRRLfMSdnxqppqUofqLbLQDUjwKVKfZJUJQPsWIPwIVaSTrmKskoAhvmZyJgeRpkaTfGgrJzAigcxtfshmiDCFkuiluqtMOkidknnTBtumyJYlIsWLnCQclqdVmikUoMOPdPWwYbJxXyqUVicNxFxyqJTenNblyyKSdlCbiXxUiYUiMwXZASYfvMDPFgxniSjWaZTjHkqlJvtBsXqwPpyVxnJVGFWhfSxgOcduoxkiopJvFjMmFabrGYeVtTXLhxVUEiGwYUvndjFGzDVntUvibiyZhfMQdMhgsiuysLMiePBNXifRLMsSmXPkwlPloUbJveCvUlaalhZHuvdkCnkSHbMbmOnrfEGPwQiACiPlnihiaOdbjPqPiTXaHDoJXjSlZmltGqNHHNrcKdlFSCdmVOuvDcBLdSklyGJmcLTbSFtALdGlPkqqecJrpLCXNPWefoTJNgEJlyMEPneVaxxduAAEqQpHWZodWyRkDAxzyMnFMcjSVqeRXLqsNyNtQBbuRvunZflWSbbvXXdkyLikYqutQhLPONXbvhcQZJPSWnOulqQaXmbfFxAkqfYeseSHOQidHwbcsOaMnSrrmGjjRmEMQNuknupMxJiIeVjmgZvbmjPIQTEhQFULQLBMPrxcFPvBinaOPYWGvYGRKxLZdwamfRQQFngcdSlvwjfaPbURasIsGJVHtcEAxnIIrhSriiXLOlbEBLXFElXJFGxHJczRBIxAuPKtBisjKBwfzZFagdNmjdwIRvwzLkFKWRTDPxJCmpzHUcrPiiXXHnOIlqNVoGSXZewdnCRhuxeYGPVTfrNTQNOxZmxInOazUYNTNDgzsxlgiVEHPKMfbesvPHUqpNkUqbzeuzfdrsuLDpKHMUbBMKczKKWOdYoIXoPYtEjfOnlQLoGnbQUCuERdEFaptwnsHzTJDsuZkKtzMpFaZobynZdzNydEeJJHDYaQcwUxcqvwfWwNUsCiLvkZQiSfzAHftYgAmVsXgtmcYgTqJIawstRYJrZdSxlfRiqTufgEQVambeZZmaAyRQbcmdjVUZZCgqDrSeltJGXPMgZnGDZqISrGDOClxXCxMjmKqEPwKHoOfOeyGmqWqihqjINXLqnyTesZePQRqaWDQNqpLgNrAUKulklmckTijUltQKuWQDwpLmDyxLppPVMwsmBIpOwQttYFMjgJQZLYFPmxWFLIeZihkRNnkzoypBICIxgEuYsVWGIGRbbxqVasYnstWomJnHwmtOhAFSpttRYYzBmyEtZXiCthvKvWszTXDbiJbGXMcrYpKAgvUVFtdKUfvdMfhAryctklUCEdjetjuGNfJjajZtvzdYaqInKtFPPLYmRaXPdQzxdSQfmZDEVHlHGEGNSPRFJuIfKLLfUmnHxHnRjmzQPNlqrXgifUdzAGKVabYqvcDeYoTYgPsBUqehrBhmQUgTvDnsdpuhUoxsk
DdppTsYMcnDIPSwKIqhXDCIxOuXrywahvVavvHkPuaenjLmEbMgrkrQLHEAwrhHkPRNvonNQKqprqOFVZKAtpRSpvQUxMoXCMZLSSbnLEFsjVfANdQNQVwTmGxqVjVqRuxREAhuaDrFgEZpYKhwWPEKBevBfsOIcaZKyykQafzmGPLRAKDtTcJxJVgiiuUkmyMYuDUNEUhBEdoBLJnamtLmMJQgmLiUELIhLpiEvpOXOvXCPUeldLFqkKOwfacqIaRcnnZvERKRMCKUkMABbDHytQqQblrvoxOZkwzosQfDKGtIdfcXRJNqlBNwOCWoQBcEWyqrMlYZIAXYJmLfnjoJepgSFvrgajaBAIksoyeHqgqbGvpAstMIGmIhRYGGNPRIfOQKsGoKgxtsidhTaAePRCBFqZgPDWCIkqOJezGVkjfYUCZTlInbxBXwUAVRsxHTQtJFnnpmMvXDYCVlEmnZBKhmmxQOIQzxFWpJQkQoSAYzTEiDWEOsVLNrbfzeHFRyeYATakQQWmFDLPbVMCJcWjFGJjfqCoVzlbNNEsqxdSmNPjTjHYOkuEMFLkXYGaoJlraLqayMeCsTjWNRDPBywBJLAPVkGQqTwApVVwYAetlwSbzsdHWsTwSIcctkyKDuRWYDQikRqsKTMJchrliONJeaZIzwPQrNbTwxsGdwuduvibtYndRwpdsvyCktRHFalvUuEKMqXbItfGcNGWsGzubdPMYayOUOINjpcFBeESdwpdlTYmrPsLsVDhpTzoMegKrytNVZkfJRPuDCUXxSlSthOohmsuxmIZUedzxKmowKOdXTMcEtdpHaPWgIsIjrViKrQOCONlSuazmLuCUjLltOGXeNgJKedTVrrVCpWYWHyVrdXpKgNaMJVjbXxnVMSChdWKuZdqpisvrkBJPoURDYxWOtpjzZoOpWzyUuYNhCzRoHsMjmmWDcXzQiHIyjwdhPNwiPqFxeUfMVFQGImhykFgMIlQEoZCaRoqSBXTSWAeDumdbsOGtATwEdZlLfoBKiTvodQBGOEcuATWXfiinSjPmJKcWgQrTVYVrwlyMWhxqNbCMpIQNoSMGTiWfPTCezUjYcdWppnsYJihLQCqbNLRGgqrwHuIvsazapTpoPZIyZyeeSueJuTIhpHMEJfJpScshJubJGfkusuVBgfTWQoywSSliQQSfbvaHKiLnyjdSbpMkdBgXepoSsHnCQaYuHQqZsoEOmJCiuQUpJkmfyfbIShzlZpHFmLCsbknEAkKXKfRTRnuwdBeuOGgFbJLbDksHVapaRayWzwoYBEpmrlAxrUxYMUekKbpjPNfjUCjhbdMAnJmYQVZBQZkFVweHDAlaqJjRqoQPoOMLhyvYCzqEuQsAFoxWrzRnTVjStPadhsESlERnKhpEPsfDxNvxqcOyIulaCkmPdambLHvGhTZzysvqFauEgkFRItPfvisehFmoBhQqmkfbHVsgfHXDPJVyhwPllQpuYLRYvGodxKjkarnSNgsXoKEMlaSKxKdcVgvOkuLcfLFfdtXGTclqfPOfeoVLbqcjcXCUEBgAGplrkgsmIEhWRZLlGPGCwKWRaCKMkBHTAcypUrYjWwCLtOPVygMwMANGoQwFnCqFrUGMCRZUGJKTZIGPyldsifauoMnJPLTcDHmilcmahlqOELaAUYDBuzsVywnDQfwRLGIWozYaOAilMBcObErwgTDNGWnwQMUgFFSKtPDMEoEQCTKVREqrXZSGLqwTMcxHfWotDllNkIJPMbXzjDVjPOOjCFuIvTyhXKLyhUScOXvYthRXpPfKwMhptXaxIxgqBoUqzrWbaoLTVpQoottZyPFfNOoMioXHRuFwMRYUiKvcWPkrayyTLOCFJlAyslDameIuqVAuxErqFPEWIScKpBORIuZqoXlZuTvAjEdlEWDODFRregDTqGNoFBIHxvimmIZwLfFyKUfEWAnNBdtdzDmTPXtpHRGdIbuucfTjOygZsTxPjfweXh
SUkMhPjMaxKlMIJMOXcnQfyzeOcbWwNbeH",
+ textProposal.GetDescription())
+
+require.Equal(t, simulation.TypeMsgSubmitProposal, sdk.MsgTypeURL(&msg))
+}
+
+// TestSimulateMsgCancelProposal tests the normal scenario of a valid message of type TypeMsgCancelProposal.
+// Abnormal scenarios, where errors occur, are not tested here.
+func TestSimulateMsgCancelProposal(t *testing.T) {
+ suite, ctx := createTestSuite(t, false)
+ app := suite.App
+ blockTime := time.Now().UTC()
+
+ctx = ctx.WithBlockTime(blockTime)
+
+ // setup 3 accounts
+ s := rand.NewSource(1)
+ r := rand.New(s)
+ accounts := getTestingAccounts(t, r, suite.AccountKeeper, suite.BankKeeper, suite.StakingKeeper, ctx, 3)
+ // setup a proposal
+ proposer := accounts[0].Address
+ content := v1beta1.NewTextProposal("Test", "description")
+
+contentMsg, err := v1.NewLegacyContent(content, suite.GovKeeper.GetGovernanceAccount(ctx).GetAddress().String())
+
+require.NoError(t, err)
+ submitTime := ctx.BlockHeader().Time
+ params, _ := suite.GovKeeper.Params.Get(ctx)
+ depositPeriod := params.MaxDepositPeriod
+
+ proposal, err := v1.NewProposal([]sdk.Msg{
+ contentMsg
+}, 1, submitTime, submitTime.Add(*depositPeriod), "", "title", "summary", proposer, false)
+
+require.NoError(t, err)
+
+require.NoError(t, suite.GovKeeper.SetProposal(ctx, proposal))
+
+ _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{
+ Height: app.LastBlockHeight() + 1,
+ Hash: app.LastCommitID().Hash,
+})
+
+require.NoError(t, err)
+
+ // execute operation
+ op := simulation.SimulateMsgCancelProposal(suite.TxConfig, suite.AccountKeeper, suite.BankKeeper, suite.GovKeeper)
+
+operationMsg, _, err := op(r, app.BaseApp, ctx, accounts, "")
+
+require.NoError(t, err)
+
+var msg v1.MsgCancelProposal
+ err = proto.Unmarshal(operationMsg.Msg, &msg)
+
+require.NoError(t, err)
+
+require.NoError(t, err)
+
+require.True(t, operationMsg.OK)
+
+require.Equal(t, uint64(1), msg.ProposalId)
+
+require.Equal(t, proposer.String(), msg.Proposer)
+
+require.Equal(t, simulation.TypeMsgCancelProposal, sdk.MsgTypeURL(&msg))
+}
+
+// TestSimulateMsgDeposit tests the normal scenario of a valid message of type TypeMsgDeposit.
+// Abnormal scenarios, where errors occur, are not tested here.
+func TestSimulateMsgDeposit(t *testing.T) {
+ suite, ctx := createTestSuite(t, false)
+ app := suite.App
+ blockTime := time.Now().UTC()
+
+ctx = ctx.WithBlockTime(blockTime)
+
+ // setup 3 accounts
+ s := rand.NewSource(1)
+ r := rand.New(s)
+ accounts := getTestingAccounts(t, r, suite.AccountKeeper, suite.BankKeeper, suite.StakingKeeper, ctx, 3)
+
+ // setup a proposal
+ content := v1beta1.NewTextProposal("Test", "description")
+
+contentMsg, err := v1.NewLegacyContent(content, suite.GovKeeper.GetGovernanceAccount(ctx).GetAddress().String())
+
+require.NoError(t, err)
+ submitTime := ctx.BlockHeader().Time
+ params, _ := suite.GovKeeper.Params.Get(ctx)
+ depositPeriod := params.MaxDepositPeriod
+
+ proposal, err := v1.NewProposal([]sdk.Msg{
+ contentMsg
+}, 1, submitTime, submitTime.Add(*depositPeriod), "", "text proposal", "description", sdk.AccAddress("cosmos1ghekyjucln7y67ntx7cf27m9dpuxxemn4c8g4r"), false)
+
+require.NoError(t, err)
+
+require.NoError(t, suite.GovKeeper.SetProposal(ctx, proposal))
+
+ _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{
+ Height: app.LastBlockHeight() + 1,
+ Hash: app.LastCommitID().Hash,
+})
+
+require.NoError(t, err)
+
+ // execute operation
+ op := simulation.SimulateMsgDeposit(suite.TxConfig, suite.AccountKeeper, suite.BankKeeper, suite.GovKeeper)
+
+operationMsg, _, err := op(r, app.BaseApp, ctx, accounts, "")
+
+require.NoError(t, err)
+
+var msg v1.MsgDeposit
+ err = proto.Unmarshal(operationMsg.Msg, &msg)
+
+require.NoError(t, err)
+
+require.True(t, operationMsg.OK)
+
+require.Equal(t, uint64(1), msg.ProposalId)
+
+require.Equal(t, "cosmos1ghekyjucln7y67ntx7cf27m9dpuxxemn4c8g4r", msg.Depositor)
+
+require.NotEqual(t, len(msg.Amount), 0)
+
+require.Equal(t, "560969stake", msg.Amount[0].String())
+
+require.Equal(t, simulation.TypeMsgDeposit, sdk.MsgTypeURL(&msg))
+}
+
+// TestSimulateMsgVote tests the normal scenario of a valid message of type TypeMsgVote.
+// Abnormal scenarios, where errors occur, are not tested here.
+func TestSimulateMsgVote(t *testing.T) {
+ suite, ctx := createTestSuite(t, false)
+ app := suite.App
+ blockTime := time.Now().UTC()
+
+ctx = ctx.WithBlockTime(blockTime)
+
+ // setup 3 accounts
+ s := rand.NewSource(1)
+ r := rand.New(s)
+ accounts := getTestingAccounts(t, r, suite.AccountKeeper, suite.BankKeeper, suite.StakingKeeper, ctx, 3)
+
+ // setup a proposal
+ govAcc := suite.GovKeeper.GetGovernanceAccount(ctx).GetAddress().String()
+
+contentMsg, err := v1.NewLegacyContent(v1beta1.NewTextProposal("Test", "description"), govAcc)
+
+require.NoError(t, err)
+ submitTime := ctx.BlockHeader().Time
+ params, _ := suite.GovKeeper.Params.Get(ctx)
+ depositPeriod := params.MaxDepositPeriod
+
+ proposal, err := v1.NewProposal([]sdk.Msg{
+ contentMsg
+}, 1, submitTime, submitTime.Add(*depositPeriod), "", "text proposal", "description", sdk.AccAddress("cosmos1ghekyjucln7y67ntx7cf27m9dpuxxemn4c8g4r"), false)
+
+require.NoError(t, err)
+
+require.NoError(t, suite.GovKeeper.ActivateVotingPeriod(ctx, proposal))
+
+ _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{
+ Height: app.LastBlockHeight() + 1,
+ Hash: app.LastCommitID().Hash,
+})
+
+require.NoError(t, err)
+
+ // execute operation
+ op := simulation.SimulateMsgVote(suite.TxConfig, suite.AccountKeeper, suite.BankKeeper, suite.GovKeeper)
+
+operationMsg, _, err := op(r, app.BaseApp, ctx, accounts, "")
+
+require.NoError(t, err)
+
+var msg v1.MsgVote
+ err = proto.Unmarshal(operationMsg.Msg, &msg)
+
+require.NoError(t, err)
+
+require.True(t, operationMsg.OK)
+
+require.Equal(t, uint64(1), msg.ProposalId)
+
+require.Equal(t, "cosmos1ghekyjucln7y67ntx7cf27m9dpuxxemn4c8g4r", msg.Voter)
+
+require.Equal(t, v1.OptionYes, msg.Option)
+
+require.Equal(t, simulation.TypeMsgVote, sdk.MsgTypeURL(&msg))
+}
+
+// TestSimulateMsgVoteWeighted tests the normal scenario of a valid message of type TypeMsgVoteWeighted.
+// Abnormal scenarios, where errors occur, are not tested here.
+func TestSimulateMsgVoteWeighted(t *testing.T) {
+ suite, ctx := createTestSuite(t, false)
+ app := suite.App
+ blockTime := time.Now().UTC()
+
+ctx = ctx.WithBlockTime(blockTime)
+
+ // setup 3 accounts
+ s := rand.NewSource(1)
+ r := rand.New(s)
+ accounts := getTestingAccounts(t, r, suite.AccountKeeper, suite.BankKeeper, suite.StakingKeeper, ctx, 3)
+
+ // setup a proposal
+ govAcc := suite.GovKeeper.GetGovernanceAccount(ctx).GetAddress().String()
+
+contentMsg, err := v1.NewLegacyContent(v1beta1.NewTextProposal("Test", "description"), govAcc)
+
+require.NoError(t, err)
+ submitTime := ctx.BlockHeader().Time
+ params, _ := suite.GovKeeper.Params.Get(ctx)
+ depositPeriod := params.MaxDepositPeriod
+
+ proposal, err := v1.NewProposal([]sdk.Msg{
+ contentMsg
+}, 1, submitTime, submitTime.Add(*depositPeriod), "", "text proposal", "test", sdk.AccAddress("cosmos1ghekyjucln7y67ntx7cf27m9dpuxxemn4c8g4r"), false)
+
+require.NoError(t, err)
+
+require.NoError(t, suite.GovKeeper.ActivateVotingPeriod(ctx, proposal))
+
+ _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{
+ Height: app.LastBlockHeight() + 1,
+ Hash: app.LastCommitID().Hash,
+})
+
+require.NoError(t, err)
+
+ // execute operation
+ op := simulation.SimulateMsgVoteWeighted(suite.TxConfig, suite.AccountKeeper, suite.BankKeeper, suite.GovKeeper)
+
+operationMsg, _, err := op(r, app.BaseApp, ctx, accounts, "")
+
+require.NoError(t, err)
+
+var msg v1.MsgVoteWeighted
+ err = proto.Unmarshal(operationMsg.Msg, &msg)
+
+require.NoError(t, err)
+
+require.True(t, operationMsg.OK)
+
+require.Equal(t, uint64(1), msg.ProposalId)
+
+require.Equal(t, "cosmos1ghekyjucln7y67ntx7cf27m9dpuxxemn4c8g4r", msg.Voter)
+
+require.True(t, len(msg.Options) >= 1)
+
+require.Equal(t, simulation.TypeMsgVoteWeighted, sdk.MsgTypeURL(&msg))
+}
+
+type suite struct {
+ TxConfig client.TxConfig
+ AccountKeeper authkeeper.AccountKeeper
+ BankKeeper bankkeeper.Keeper
+ GovKeeper *keeper.Keeper
+ StakingKeeper *stakingkeeper.Keeper
+ DistributionKeeper dk.Keeper
+ App *runtime.App
+}
+
+// returns context and an app with updated mint keeper
+func createTestSuite(t *testing.T, isCheckTx bool) (suite, sdk.Context) {
+ t.Helper()
+ res := suite{
+}
+
+app, err := simtestutil.Setup(
+ depinject.Configs(
+ configurator.NewAppConfig(
+ configurator.AuthModule(),
+ configurator.TxModule(),
+ configurator.ParamsModule(),
+ configurator.BankModule(),
+ configurator.StakingModule(),
+ configurator.ConsensusModule(),
+ configurator.DistributionModule(),
+ configurator.GovModule(),
+ ),
+ depinject.Supply(log.NewNopLogger()),
+ ),
+ &res.TxConfig, &res.AccountKeeper, &res.BankKeeper, &res.GovKeeper, &res.StakingKeeper, &res.DistributionKeeper)
+
+require.NoError(t, err)
+ ctx := app.NewContext(isCheckTx)
+
+res.App = app
+ return res, ctx
+}
+
+func getTestingAccounts(
+ t *testing.T,
+ r *rand.Rand,
+ accountKeeper authkeeper.AccountKeeper,
+ bankKeeper bankkeeper.Keeper,
+ stakingKeeper *stakingkeeper.Keeper,
+ ctx sdk.Context,
+ n int,
+) []simtypes.Account {
+ t.Helper()
+ accounts := simtypes.RandomAccounts(r, n)
+ initAmt := stakingKeeper.TokensFromConsensusPower(ctx, 200)
+ initCoins := sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, initAmt))
+
+ // add coins to the accounts
+ for _, account := range accounts {
+ acc := accountKeeper.NewAccountWithAddress(ctx, account.Address)
+
+accountKeeper.SetAccount(ctx, acc)
+
+require.NoError(t, testutil.FundAccount(ctx, bankKeeper, account.Address, initCoins))
+}
+
+return accounts
+}
+```
+
+## End-to-end Tests
+
+End-to-end tests are at the top of the [test pyramid](https://martinfowler.com/articles/practical-test-pyramid.html).
+They must test the whole application flow, from the user perspective (for instance, CLI tests). They are located under [`/tests/e2e`](https://github.com/cosmos/cosmos-sdk/tree/main/tests/e2e).
+
+{/* @julienrbrt: makes more sense to use an app wired app to have 0 simapp dependencies */}
+For this, the SDK uses `simapp`, but you should use your own application (`appd`).
+Here are some examples:
+
+* SDK E2E tests: [Link](https://github.com/cosmos/cosmos-sdk/tree/main/tests/e2e).
+* Cosmos Hub E2E tests: [Link](https://github.com/cosmos/gaia/tree/main/tests/e2e).
+* Osmosis E2E tests: [Link](https://github.com/osmosis-labs/osmosis/tree/main/tests/e2e).
+
+
+**warning**
+The SDK is in the process of creating its E2E tests, as defined in [ADR-59](/sdk/v0.53/build/architecture/adr-059-test-scopes). This page will eventually be updated with better examples.
+
+
+## Learn More
+
+Learn more about testing scope in [ADR-59](/sdk/v0.53/build/architecture/adr-059-test-scopes).
diff --git a/sdk/next/build/building-modules/upgrade.mdx b/sdk/next/build/building-modules/upgrade.mdx
new file mode 100644
index 000000000..95950b455
--- /dev/null
+++ b/sdk/next/build/building-modules/upgrade.mdx
@@ -0,0 +1,125 @@
+---
+title: Upgrading Modules
+---
+
+
+**Synopsis**
+[In-Place Store Migrations](/sdk/next/learn/advanced/upgrade) allow your modules to upgrade to new versions that include breaking changes. This document outlines how to build modules to take advantage of this functionality.
+
+
+
+**Prerequisite Readings**
+
+* [In-Place Store Migration](/sdk/next/learn/advanced/upgrade)
+
+
+
+## Consensus Version
+
+Successful upgrades of existing modules require each `AppModule` to implement the function `ConsensusVersion() uint64`.
+
+* The versions must be hard-coded by the module developer.
+* The initial version **must** be set to 1.
+
+Consensus versions serve as state-breaking versions of app modules and must be incremented when the module introduces breaking changes.
+
+## Registering Migrations
+
+To register the functionality that takes place during a module upgrade, you must register which migrations you want to take place.
+
+Migration registration takes place in the `Configurator` using the `RegisterMigration` method. The `AppModule` reference to the configurator is in the `RegisterServices` method.
+
+You can register one or more migrations. If you register more than one migration script, list the migrations in increasing order and ensure there are enough migrations that lead to the desired consensus version. For example, to migrate to version 3 of a module, register separate migrations for version 1 and version 2 as shown in the following example:
+
+```go
+func (am AppModule)
+
+RegisterServices(cfg module.Configurator) {
+ // --snip--
+ cfg.RegisterMigration(types.ModuleName, 1, func(ctx sdk.Context)
+
+error {
+ // Perform in-place store migrations from ConsensusVersion 1 to 2.
+})
+
+cfg.RegisterMigration(types.ModuleName, 2, func(ctx sdk.Context)
+
+error {
+ // Perform in-place store migrations from ConsensusVersion 2 to 3.
+})
+}
+```
+
+Since these migrations are functions that need access to a Keeper's store, use a wrapper around the keepers called `Migrator` as shown in this example:
+
+```go expandable
+package keeper
+
+import (
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/bank/exported"
+ v2 "github.com/cosmos/cosmos-sdk/x/bank/migrations/v2"
+ v3 "github.com/cosmos/cosmos-sdk/x/bank/migrations/v3"
+ v4 "github.com/cosmos/cosmos-sdk/x/bank/migrations/v4"
+)
+
+// Migrator is a struct for handling in-place store migrations.
+type Migrator struct {
+ keeper BaseKeeper
+ legacySubspace exported.Subspace
+}
+
+// NewMigrator returns a new Migrator.
+func NewMigrator(keeper BaseKeeper, legacySubspace exported.Subspace)
+
+Migrator {
+ return Migrator{
+ keeper: keeper, legacySubspace: legacySubspace
+}
+}
+
+// Migrate1to2 migrates from version 1 to 2.
+func (m Migrator)
+
+Migrate1to2(ctx sdk.Context)
+
+error {
+ return v2.MigrateStore(ctx, m.keeper.storeService, m.keeper.cdc)
+}
+
+// Migrate2to3 migrates x/bank storage from version 2 to 3.
+func (m Migrator)
+
+Migrate2to3(ctx sdk.Context)
+
+error {
+ return v3.MigrateStore(ctx, m.keeper.storeService, m.keeper.cdc)
+}
+
+// Migrate3to4 migrates x/bank storage from version 3 to 4.
+func (m Migrator)
+
+Migrate3to4(ctx sdk.Context)
+
+error {
+ return v4.MigrateStore(ctx, m.keeper.storeService, m.legacySubspace, m.keeper.cdc)
+}
+```
+
+## Writing Migration Scripts
+
+To define the functionality that takes place during an upgrade, write a migration script and place the functions in a `migrations/` directory. For example, to write migration scripts for the bank module, place the functions in `x/bank/migrations/`. Use the recommended naming convention for these functions. For example, `v2bank` is the script that migrates the package `x/bank/migrations/v2`:
+
+```go
+// Migrating bank module from version 1 to 2
+func (m Migrator)
+
+Migrate1to2(ctx sdk.Context)
+
+error {
+ return v2bank.MigrateStore(ctx, m.keeper.storeKey) // v2bank is package `x/bank/migrations/v2`.
+}
+```
+
+To see example code of changes that were implemented in a migration of balance keys, check out [migrateBalanceKeys](https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/x/bank/migrations/v2/store.go#L55-L76). For context, this code introduced migrations of the bank store that updated addresses to be prefixed by their length in bytes as outlined in [ADR-028](/sdk/v0.53/build/architecture/adr-028-public-key-addresses).
diff --git a/sdk/next/build/migrations/intro.mdx b/sdk/next/build/migrations/intro.mdx
new file mode 100644
index 000000000..f5e3d2380
--- /dev/null
+++ b/sdk/next/build/migrations/intro.mdx
@@ -0,0 +1,13 @@
+---
+title: SDK Migrations
+---
+
+To smooth the update to the latest stable release, the SDK includes a CLI command for hard-fork migrations (under the `genesis migrate` subcommand).
+Additionally, the SDK includes in-place migrations for its core modules. These in-place migrations are useful to migrate between major releases.
+
+* Hard-fork migrations are supported from the last major release to the current one.
+* [In-place module migrations](/sdk/v0.53/build/migrations/upgrade-guide#overwriting-genesis-functions) are supported from the last two major releases to the current one.
+
+Migration from a version older than the last two major releases is not supported.
+
+When migrating from a previous version, refer to the [`UPGRADING.md`](/sdk/v0.53/build/migrations/upgrade-guide) and the `CHANGELOG.md` of the version you are migrating to.
diff --git a/sdk/next/build/migrations/upgrade-guide.mdx b/sdk/next/build/migrations/upgrade-guide.mdx
new file mode 100644
index 000000000..564627d4e
--- /dev/null
+++ b/sdk/next/build/migrations/upgrade-guide.mdx
@@ -0,0 +1,227 @@
+---
+title: Upgrade Guide
+description: >-
+ This document provides a full guide for upgrading a Cosmos SDK chain from
+ v0.53.x to v0.54.x.
+---
+
+This document provides a full guide for upgrading a Cosmos SDK chain from `v0.53.x` to `v0.54.x`.
+
+This guide includes several **required** breaking changes for upgrading to v0.54.
+
+Key changes in v0.54:
+
+* Decouple `x/gov` from `x/staking`
+* Remove deprecated modules (`x/group`, `x/nft`, `x/circuit`, `x/crisis`)
+* Remove sr25519 cryptographic support
+* Update to log v2
+* New BondDenom validation in `x/staking`
+
+## Table of Contents
+
+* [Update x/gov Keeper (REQUIRED)](#update-xgov-keeper-required)
+* [Handle Deprecated Modules](#handle-deprecated-modules)
+* [Remove sr25519 Support](#remove-sr25519-support)
+* [Update to Log v2](#update-to-log-v2)
+* [Update x/staking BondDenom Usage](#update-xstaking-bonddenom-usage)
+* [Upgrade Handler](#upgrade-handler)
+
+## Update x/gov Keeper **REQUIRED**
+
+The `x/gov` module has been decoupled from `x/staking`. The `CalculateVoteResultsAndVotingPowerFn` is now a required parameter to `keeper.NewKeeper` instead of requiring a `StakingKeeper`.
+
+### Update Keeper Initialization
+
+Replace the `StakingKeeper` parameter with `CalculateVoteResultsAndVotingPowerFn`:
+
+**Before (v0.53)**:
+```go
+app.GovKeeper = govkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[govtypes.StoreKey]),
+ app.AccountKeeper,
+ app.BankKeeper,
+ app.StakingKeeper, // OLD: StakingKeeper required
+ app.DistrKeeper,
+ app.MsgServiceRouter(),
+ govtypes.DefaultConfig(),
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+)
+```
+
+**After (v0.54)**:
+```go
+app.GovKeeper = govkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[govtypes.StoreKey]),
+ app.AccountKeeper,
+ app.BankKeeper,
+ govkeeper.NewStakingVotePowerFn(app.StakingKeeper), // NEW: voting power function
+ app.DistrKeeper,
+ app.MsgServiceRouter(),
+ govtypes.DefaultConfig(),
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+)
+```
+
+### Custom Voting Power Calculation
+
+If your application requires custom voting power calculations, implement a custom `CalculateVoteResultsAndVotingPowerFn`:
+
+```go
+customVotingFn := func(
+ ctx context.Context,
+ k govkeeper.Keeper,
+ proposal v1.Proposal,
+) (totalVoterPower math.LegacyDec, totalValPower math.Int, results map[v1.VoteOption]math.LegacyDec, err error) {
+ // Your custom implementation
+ return totalVoterPower, totalValPower, results, nil
+}
+
+app.GovKeeper = govkeeper.NewKeeper(
+ // ... other params ...
+ customVotingFn, // Use your custom function
+ // ... remaining params ...
+)
+```
+
+
+The `DistrKeeper` is now optional. If your application does not use the distribution module for proposal cancel destinations, you can pass `nil`.
+
+
+## Handle Deprecated Modules
+
+Four modules have been moved to `./contrib` and are no longer actively maintained:
+
+* `x/group`
+* `x/nft`
+* `x/circuit`
+* `x/crisis`
+
+
+These modules are still available in the `contrib` directory but will not receive the same level of support as core modules. They will not be covered by the Cosmos SDK Bug Bounty program.
+
+
+### If You Use These Modules
+
+If your application uses any of these modules, you have two options:
+
+**Option 1: Continue Using from contrib**
+
+Update your imports to reference the contrib directory:
+
+```go
+// Before
+import "github.com/cosmos/cosmos-sdk/x/group"
+
+// After
+import "github.com/cosmos/cosmos-sdk/contrib/x/group"
+```
+
+**Option 2: Migrate to Alternatives**
+
+Consider migrating to alternative solutions or maintaining your own fork of these modules.
+
+## Remove sr25519 Support
+
+Support for the sr25519 cryptographic signature scheme has been removed in v0.54 as it was removed from CometBFT v1.x.
+
+
+If your application uses sr25519 keys, you must migrate to a supported cryptographic scheme such as secp256k1, ed25519, or BLS 12-381.
+
+
+### Action Required
+
+1. Audit your codebase for any sr25519 imports or usage
+2. Remove or replace any sr25519-specific code
+3. Migrate existing sr25519 keys to supported schemes before upgrading
+
+```go
+// Remove any imports like:
+// import "github.com/cosmos/cosmos-sdk/crypto/keys/sr25519"
+
+// Replace sr25519 usage with supported alternatives:
+import "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
+import "github.com/cosmos/cosmos-sdk/crypto/keys/ed25519"
+import "github.com/cosmos/cosmos-sdk/crypto/keys/bls12_381"
+```
+
+## Update to Log v2
+
+The Cosmos SDK has updated from log v1 to log v2. Applications using the logging package must update their imports and usage.
+
+### Update Imports
+
+```go
+// Before (v0.53)
+import "cosmossdk.io/log"
+
+// After (v0.54)
+import "cosmossdk.io/log/v2"
+```
+
+### Update Logger Initialization
+
+Review your logger initialization code to ensure compatibility with log v2 APIs. Refer to the log v2 documentation for detailed migration instructions.
+
+## Update x/staking BondDenom Usage
+
+The `x/staking` module now validates that the `bond_denom` parameter in `MsgUpdateParams` references an existing denomination with non-zero supply.
+
+### Action Required
+
+If your application uses `MsgUpdateParams` to update staking parameters, ensure that:
+
+1. The `bond_denom` exists in the chain state
+2. The denomination has a non-zero supply
+
+
+This validation prevents setting invalid or non-existent bond denominations that could cause unexpected behavior.
+
+
+```go
+// When calling MsgUpdateParams, ensure bond_denom is valid:
+params := stakingtypes.Params{
+ BondDenom: "ustake", // Must exist with non-zero supply
+ // ... other params ...
+}
+```
+
+## Upgrade Handler
+
+The v0.54 upgrade primarily involves API changes and does not require store upgrades. The upgrade handler should run the standard module migrations.
+
+```go expandable
+// UpgradeName defines the on-chain upgrade name for the sample SimApp upgrade
+// from v0.53 to v0.54.
+//
+// NOTE: This upgrade defines a reference implementation of what an upgrade
+// could look like when an application is migrating from Cosmos SDK version
+// v0.53.x to v0.54.x.
+const UpgradeName = "v053-to-v054"
+
+func (app SimApp) RegisterUpgradeHandlers() {
+ app.UpgradeKeeper.SetUpgradeHandler(
+ UpgradeName,
+ func(ctx context.Context, _ upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
+ // Run module migrations
+ return app.ModuleManager.RunMigrations(ctx, app.Configurator(), fromVM)
+ },
+ )
+
+ upgradeInfo, err := app.UpgradeKeeper.ReadUpgradeInfoFromDisk()
+ if err != nil {
+ panic(err)
+ }
+
+ if upgradeInfo.Name == UpgradeName && !app.UpgradeKeeper.IsSkipHeight(upgradeInfo.Height) {
+ // No store upgrades required for v0.54
+ // The deprecated modules (x/group, x/nft, x/circuit, x/crisis) remain in store
+ // but are moved to contrib in the codebase
+ }
+}
+```
+
+
+No store keys need to be added or removed for the v0.54 upgrade. The deprecated modules remain in the store but have moved to the `contrib` directory in the codebase.
+
diff --git a/sdk/next/build/migrations/upgrade-reference.mdx b/sdk/next/build/migrations/upgrade-reference.mdx
new file mode 100644
index 000000000..728a1547a
--- /dev/null
+++ b/sdk/next/build/migrations/upgrade-reference.mdx
@@ -0,0 +1,234 @@
+---
+title: Upgrade Reference
+description: >-
+ This document provides a quick reference for the upgrades from v0.50.x to
+ v0.53.x of Cosmos SDK.
+---
+
+This document provides a quick reference for the upgrades from `v0.50.x` to `v0.53.x` of Cosmos SDK.
+
+Note: always read the **App Wiring Changes** section for more information on application wiring updates.
+
+🚨Upgrading to v0.53.x will require a **coordinated** chain upgrade.🚨
+
+### TL;DR
+
+Unordered transactions, `x/protocolpool`, and `x/epoch` are the major new features added in v0.53.x.
+
+We also added the ability to add a `CheckTx` handler and enabled ed25519 signature verification.
+
+For a full list of changes, see the [Changelog](https://github.com/cosmos/cosmos-sdk/blob/release/v0.53.x/CHANGELOG.md).
+
+### Unordered Transactions
+
+The Cosmos SDK now supports unordered transactions. *This is an opt-in feature*.
+
+Clients that use this feature may now submit their transactions in a fire-and-forget manner to chains that enabled unordered transactions.
+
+To submit an unordered transaction, clients must set the `unordered` flag to
+`true` and ensure a reasonable `timeout_timestamp` is set. The `timeout_timestamp` is
+used as a TTL for the transaction and provides replay protection. Each transaction's `timeout_timestamp` must be
+unique to the account; however, the difference may be as small as a nanosecond. See [ADR-070](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-070-unordered-transactions.md) for more details.
+
+Note that unordered transactions require sequence values to be zero, and will **FAIL** if a non-zero sequence value is set.
+Please ensure no sequence value is set when submitting an unordered transaction.
+Services that rely on prior assumptions about sequence values should be updated to handle unordered transactions.
+Services should be aware that when the transaction is unordered, the transaction sequence will always be zero.
+
+#### Enabling Unordered Transactions
+
+To enable unordered transactions, supply the `WithUnorderedTransactions` option to the `x/auth` keeper:
+
+```go
+app.AccountKeeper = authkeeper.NewAccountKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[authtypes.StoreKey]),
+ authtypes.ProtoBaseAccount,
+ maccPerms,
+ authcodec.NewBech32Codec(sdk.Bech32MainPrefix),
+ sdk.Bech32MainPrefix,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ authkeeper.WithUnorderedTransactions(true), // new option!
+ )
+```
+
+If using dependency injection, update the auth module config.
+
+```go
+{
+ Name: authtypes.ModuleName,
+ Config: appconfig.WrapAny(&authmodulev1.Module{
+ Bech32Prefix: "cosmos",
+ ModuleAccountPermissions: moduleAccPerms,
+ EnableUnorderedTransactions: true, // remove this line if you do not want unordered transactions.
+}),
+},
+```
+
+By default, unordered transactions use a transaction timeout duration of 10 minutes and a default gas charge of 2240 gas units.
+To modify these default values, pass in the corresponding options to the new `SigVerifyOptions` field in `x/auth's` `ante.HandlerOptions`.
+
+```go
+options := ante.HandlerOptions{
+ SigVerifyOptions: []ante.SigVerificationDecoratorOption{
+ // change below as needed.
+ ante.WithUnorderedTxGasCost(ante.DefaultUnorderedTxGasCost),
+ ante.WithMaxUnorderedTxTimeoutDuration(ante.DefaultMaxTimeoutDuration),
+},
+}
+```
+
+```go
+anteDecorators := []sdk.AnteDecorator{
+ // ... other decorators ...
+ ante.NewSigVerificationDecorator(options.AccountKeeper, options.SignModeHandler, options.SigVerifyOptions...), // supply new options
+}
+```
+
+### App Wiring Changes
+
+In this section, we describe the required app wiring changes to run a v0.53.x Cosmos SDK application.
+
+**These changes are directly applicable to your application wiring.**
+
+The `x/auth` module now contains a `PreBlocker` that *must* be set in the module manager's `SetOrderPreBlockers` method.
+
+```go
+app.ModuleManager.SetOrderPreBlockers(
+ upgradetypes.ModuleName,
+ authtypes.ModuleName, // NEW
+)
+```
+
+That's it.
+
+### New Modules
+
+Below are some **optional** new modules you can include in your chain.
+To see a full example of wiring these modules, please check out the [SimApp](https://github.com/cosmos/cosmos-sdk/blob/release/v0.53.x/simapp/app.go).
+
+#### Epochs
+
+⚠️Adding this module requires a `StoreUpgrade`⚠️
+
+The new, supplemental `x/epochs` module provides Cosmos SDK modules functionality to register and execute custom logic at fixed time-intervals.
+
+Required wiring:
+
+* Keeper Instantiation
+* StoreKey addition
+* Hooks Registration
+* App Module Registration
+* entry in SetOrderBeginBlockers
+* entry in SetGenesisModuleOrder
+* entry in SetExportModuleOrder
+
+#### ProtocolPool
+
+
+
+Using `protocolpool` will cause the following `x/distribution` handlers to return an error:
+
+**QueryService**
+
+* `CommunityPool`
+
+**MsgService**
+
+* `CommunityPoolSpend`
+* `FundCommunityPool`
+
+If you have services that rely on this functionality from `x/distribution`, please update them to use the `x/protocolpool` equivalents.
+
+
+
+⚠️Adding this module requires a `StoreUpgrade`⚠️
+
+The new, supplemental `x/protocolpool` module provides extended functionality for managing and distributing block reward revenue.
+
+Required wiring:
+
+* Module Account Permissions
+ * protocolpooltypes.ModuleName (nil)
+ * protocolpooltypes.ProtocolPoolEscrowAccount (nil)
+* Keeper Instantiation
+* StoreKey addition
+* Passing the keeper to the Distribution Keeper
+ * `distrkeeper.WithExternalCommunityPool(app.ProtocolPoolKeeper)`
+* App Module Registration
+* entry in SetOrderBeginBlockers
+* entry in SetOrderEndBlockers
+* entry in SetGenesisModuleOrder
+* entry in SetExportModuleOrder **before `x/bank`**
+
+## Custom Minting Function in `x/mint`
+
+This release introduces the ability to configure a custom mint function in `x/mint`. The minting logic is now abstracted as a `MintFn` with a default implementation that can be overridden.
+
+### What’s New
+
+* **Configurable Mint Function:**\
+ A new `MintFn` abstraction is introduced. By default, the module uses `DefaultMintFn`, but you can supply your own implementation.
+
+* **Deprecated InflationCalculationFn Parameter:**\
+ The `InflationCalculationFn` argument previously provided to `mint.NewAppModule()` is now ignored and must be `nil`. To customize the default minter’s inflation behavior, wrap your custom function with `mintkeeper.DefaultMintFn` and pass it via the `WithMintFn` option:
+
+```go
+mintkeeper.WithMintFn(mintkeeper.DefaultMintFn(customInflationFn))
+```
+
+### How to Upgrade
+
+1. **Using the Default Minting Function**
+
+ No action is needed if you’re happy with the default behavior. Make sure your application wiring initializes the MintKeeper like this:
+
+```go
+mintKeeper := mintkeeper.NewKeeper(
+ appCodec,
+ storeService,
+ stakingKeeper,
+ accountKeeper,
+ bankKeeper,
+ authtypes.FeeCollectorName,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ )
+```
+
+2. **Using a Custom Minting Function**
+
+ To use a custom minting function, define it as follows and pass it to your mintKeeper when constructing it:
+
+```go expandable
+func myCustomMintFunc(ctx sdk.Context, k *mintkeeper.Keeper) {
+ // do minting...
+}
+
+// ...
+ mintKeeper := mintkeeper.NewKeeper(
+ appCodec,
+ storeService,
+ stakingKeeper,
+ accountKeeper,
+ bankKeeper,
+ authtypes.FeeCollectorName,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ mintkeeper.WithMintFn(myCustomMintFunc), // Use custom minting function
+ )
+```
+
+### Misc Changes
+
+#### Testnet's init-files Command
+
+Some changes were made to `testnet`'s `init-files` command to support our new testing framework, `Systemtest`.
+
+##### Flag Changes
+
+* The flag for validator count was changed from `--v` to `--validator-count` (shorthand: `-v`).
+
+##### Flag Additions
+
+* `--staking-denom` allows changing the default stake denom, `stake`.
+* `--commit-timeout` enables changing the commit timeout of the chain.
+* `--single-host` enables running a multi-node network on a single host. This bumps each subsequent node's network addresses by 1. For example, node1's gRPC address will be 9090, node2's 9091, etc...
diff --git a/sdk/next/build/modules.mdx b/sdk/next/build/modules.mdx
new file mode 100644
index 000000000..95654d7aa
--- /dev/null
+++ b/sdk/next/build/modules.mdx
@@ -0,0 +1,60 @@
+---
+title: "List of Modules"
+description: "Version: v0.53"
+---
+
+Here are some production-grade modules that can be used in Cosmos SDK applications, along with their respective documentation:
+
+## Essential Modules[](#essential-modules "Direct link to Essential Modules")
+
+Essential modules include functionality that *must* be included in your Cosmos SDK blockchain. These modules provide the core behaviors that are needed for users and operators such as balance tracking, proof-of-stake capabilities and governance.
+
+* [Auth](/sdk/v0.53/build/modules/auth/auth) - Authentication of accounts and transactions for Cosmos SDK applications.
+* [Bank](/sdk/v0.53/build/modules/bank/README) - Token transfer functionalities.
+* [Circuit](/sdk/v0.53/build/modules/circuit/README) - Circuit breaker module for pausing messages.
+* [Consensus](/sdk/v0.53/build/modules/consensus/README) - Consensus module for modifying CometBFT's ABCI consensus params.
+* [Distribution](/sdk/v0.53/build/modules/distribution/README) - Fee distribution, and staking token provision distribution.
+* [Evidence](/sdk/v0.53/build/modules/evidence/README) - Evidence handling for double signing, misbehaviour, etc.
+* [Governance](/sdk/v0.53/build/modules/gov/README) - On-chain proposals and voting.
+* [Genutil](/sdk/v0.53/build/modules/genutil/README) - Genesis utilities for the Cosmos SDK.
+* [Mint](/sdk/v0.53/build/modules/mint/README) - Creation of new units of staking token.
+* [Slashing](/sdk/v0.53/build/modules/slashing/README) - Validator punishment mechanisms.
+* [Staking](/sdk/v0.53/build/modules/staking/README) - Proof-of-Stake layer for public blockchains.
+* [Upgrade](/sdk/v0.53/build/modules/upgrade/README) - Software upgrades handling and coordination.
+
+## Supplementary Modules
+
+Supplementary modules are modules that are maintained in the Cosmos SDK but are not necessary for
+the core functionality of your blockchain. They can be thought of as ways to extend the
+capabilities of your blockchain or further specialize it.
+
+* [Authz](/sdk/v0.53/build/modules/authz/README) - Authorization for accounts to perform actions on behalf of other accounts.
+* [Epochs](/sdk/v0.53/build/modules/epochs/README) - Registration so SDK modules can have logic to be executed at the timed tickers.
+* [Feegrant](/sdk/v0.53/build/modules/feegrant/README) - Grant fee allowances for executing transactions.
+* [Group](/sdk/v0.53/build/modules/group/README) - Allows for the creation and management of on-chain multisig accounts.
+* [NFT](/sdk/v0.53/build/modules/nft/README) - NFT module implemented based on [ADR43](/sdk/v0.53/build/architecture/adr-043-nft-module).
+* [ProtocolPool](/sdk/v0.53/build/modules/protocolpool/README) - Extended management of community pool functionality.
+
+## Deprecated Modules
+
+The following modules are deprecated. They will no longer be maintained and eventually will be removed
+in an upcoming release of the Cosmos SDK per our [release process](https://github.com/cosmos/cosmos-sdk/blob/main/RELEASE_PROCESS.md).
+
+* [Crisis](/sdk/v0.53/build/modules/crisis/README) - *Deprecated* halting the blockchain under certain circumstances (e.g. if an invariant is broken).
+* [Params](/sdk/v0.53/build/modules/params/README) - *Deprecated* Globally available parameter store.
+
+To learn more about the process of building modules, visit the [building modules reference documentation](/sdk/v0.53/build/building-modules/intro).
+
+## IBC
+
+The IBC module for the SDK is maintained by the IBC Go team in its [own repository](https://github.com/cosmos/ibc-go).
+
+Additionally, since v0.50 the [capability module](https://github.com/cosmos/ibc-go/tree/fdd664698d79864f1e00e147f9879e58497b5ef1/modules/capability) is maintained by the IBC Go team in its [own repository](https://github.com/cosmos/ibc-go/tree/fdd664698d79864f1e00e147f9879e58497b5ef1/modules/capability).
+
+## CosmWasm
+
+The CosmWasm module enables smart contracts, learn more by going to their [documentation site](https://book.cosmwasm.com/), or visit [the repository](https://github.com/CosmWasm/cosmwasm).
+
+## EVM
+
+Read more about writing smart contracts with solidity at the official [`evm` documentation page](https://evm.cosmos.network/).
\ No newline at end of file
diff --git a/sdk/next/build/modules/auth/auth.mdx b/sdk/next/build/modules/auth/auth.mdx
new file mode 100644
index 000000000..81a4344d4
--- /dev/null
+++ b/sdk/next/build/modules/auth/auth.mdx
@@ -0,0 +1,742 @@
+---
+title: 'x/auth'
+description: This document specifies the auth module of the Cosmos SDK.
+---
+
+## Abstract
+
+This document specifies the auth module of the Cosmos SDK.
+
+The auth module is responsible for specifying the base transaction and account types
+for an application, since the SDK itself is agnostic to these particulars. It contains
+the middlewares, where all basic transaction validity checks (signatures, nonces, auxiliary fields)
+are performed, and exposes the account keeper, which allows other modules to read, write, and modify accounts.
+
+This module is used in the Cosmos Hub.
+
+## Contents
+
+* [Concepts](#concepts)
+ * [Gas & Fees](#gas--fees)
+* [State](#state)
+ * [Accounts](#accounts)
+* [AnteHandlers](#antehandlers)
+* [Keepers](#keepers)
+ * [Account Keeper](#account-keeper)
+* [Parameters](#parameters)
+* [Client](#client)
+ * [CLI](#cli)
+ * [gRPC](#grpc)
+ * [REST](#rest)
+
+## Concepts
+
+**Note:** The auth module is different from the [authz module](/sdk/v0.53/build/modules/authz/README).
+
+The differences are:
+
+* `auth` - authentication of accounts and transactions for Cosmos SDK applications and is responsible for specifying the base transaction and account types.
+* `authz` - authorization for accounts to perform actions on behalf of other accounts and enables a granter to grant authorizations to a grantee that allows the grantee to execute messages on behalf of the granter.
+
+### Gas & Fees
+
+Fees serve two purposes for an operator of the network.
+
+Fees limit the growth of the state stored by every full node and allow for
+general purpose censorship of transactions of little economic value. Fees
+are best suited as an anti-spam mechanism where validators are disinterested in
+the use of the network and identities of users.
+
+Fees are determined by the gas limits and gas prices transactions provide, where
+`fees = ceil(gasLimit * gasPrices)`. Txs incur gas costs for all state reads/writes,
+signature verification, as well as costs proportional to the tx size. Operators
+should set minimum gas prices when starting their nodes. They must set the unit
+costs of gas in each token denomination they wish to support:
+
+`simd start ... --minimum-gas-prices=0.00001stake;0.05photinos`
+
+When adding transactions to mempool or gossipping transactions, validators check
+if the transaction's gas prices, which are determined by the provided fees, meet
+any of the validator's minimum gas prices. In other words, a transaction must
+provide a fee of at least one denomination that matches a validator's minimum
+gas price.
+
+CometBFT does not currently provide fee-based mempool prioritization, and fee-based
+mempool filtering is local to the node and not part of consensus. But with
+minimum gas prices set, such a mechanism could be implemented by node operators.
+
+Because the market value for tokens will fluctuate, validators are expected to
+dynamically adjust their minimum gas prices to a level that would encourage the
+use of the network.
+
+## State
+
+### Accounts
+
+Accounts contain authentication information for a uniquely identified external user of an SDK blockchain,
+including public key, address, and account number / sequence number for replay protection. For efficiency,
+since account balances must also be fetched to pay fees, account structs also store the balance of a user
+as `sdk.Coins`.
+
+Accounts are exposed externally as an interface, and stored internally as
+either a base account or vesting account. Module clients wishing to add more
+account types may do so.
+
+* `0x01 | Address -> ProtocolBuffer(account)`
+
+#### Account Interface
+
+The account interface exposes methods to read and write standard account information.
+Note that all of these methods operate on an account struct conforming to the
+interface - in order to write the account to the store, the account keeper will
+need to be used.
+
+```go expandable
+// AccountI is an interface used to store coins at a given address within state.
+// It presumes a notion of sequence numbers for replay protection,
+// a notion of account numbers for replay protection for previously pruned accounts,
+// and a pubkey for authentication purposes.
+//
+// Many complex conditions can be used in the concrete struct which implements AccountI.
+type AccountI interface {
+ proto.Message
+
+ GetAddress()
+
+sdk.AccAddress
+ SetAddress(sdk.AccAddress)
+
+error // errors if already set.
+
+ GetPubKey()
+
+crypto.PubKey // can return nil.
+ SetPubKey(crypto.PubKey)
+
+error
+
+ GetAccountNumber()
+
+uint64
+ SetAccountNumber(uint64)
+
+error
+
+ GetSequence()
+
+uint64
+ SetSequence(uint64)
+
+error
+
+ // Ensure that account implements stringer
+ String()
+
+string
+}
+```
+
+##### Base Account
+
+A base account is the simplest and most common account type, which just stores all requisite
+fields directly in a struct.
+
+```protobuf
+// BaseAccount defines a base account type. It contains all the necessary fields
+// for basic account functionality. Any custom account type should extend this
+// type for additional functionality (e.g. vesting).
+message BaseAccount {
+ string address = 1;
+ google.protobuf.Any pub_key = 2;
+ uint64 account_number = 3;
+ uint64 sequence = 4;
+}
+```
+
+### Vesting Account
+
+See [Vesting](/sdk/v0.53/build/modules/auth/vesting).
+
+## AnteHandlers
+
+The `x/auth` module presently has no transaction handlers of its own, but does expose the special `AnteHandler`, used for performing basic validity checks on a transaction, such that it could be thrown out of the mempool.
+The `AnteHandler` can be seen as a set of decorators that check transactions within the current context, per [ADR 010](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-010-modular-antehandler.md).
+
+Note that the `AnteHandler` is called on both `CheckTx` and `DeliverTx`, as CometBFT proposers presently have the ability to include in their proposed block transactions which fail `CheckTx`.
+
+### Decorators
+
+The auth module provides `AnteDecorator`s that are recursively chained together into a single `AnteHandler` in the following order:
+
+* `SetUpContextDecorator`: Sets the `GasMeter` in the `Context` and wraps the next `AnteHandler` with a defer clause to recover from any downstream `OutOfGas` panics in the `AnteHandler` chain to return an error with information on gas provided and gas used.
+
+* `RejectExtensionOptionsDecorator`: Rejects all extension options which can optionally be included in protobuf transactions.
+
+* `MempoolFeeDecorator`: Checks if the `tx` fee is above local mempool `minFee` parameter during `CheckTx`.
+
+* `ValidateBasicDecorator`: Calls `tx.ValidateBasic` and returns any non-nil error.
+
+* `TxTimeoutHeightDecorator`: Check for a `tx` height timeout.
+
+* `ValidateMemoDecorator`: Validates `tx` memo with application parameters and returns any non-nil error.
+
+* `ConsumeGasTxSizeDecorator`: Consumes gas proportional to the `tx` size based on application parameters.
+
+* `DeductFeeDecorator`: Deducts the `FeeAmount` from first signer of the `tx`. If the `x/feegrant` module is enabled and a fee granter is set, it deducts fees from the fee granter account.
+
+* `SetPubKeyDecorator`: Sets the pubkey from a `tx`'s signers that does not already have its corresponding pubkey saved in the state machine and in the current context.
+
+* `ValidateSigCountDecorator`: Validates the number of signatures in `tx` based on app-parameters.
+
+* `SigGasConsumeDecorator`: Consumes parameter-defined amount of gas for each signature. This requires pubkeys to be set in context for all signers as part of `SetPubKeyDecorator`.
+
+* `SigVerificationDecorator`: Verifies all signatures are valid. This requires pubkeys to be set in context for all signers as part of `SetPubKeyDecorator`.
+
+* `IncrementSequenceDecorator`: Increments the account sequence for each signer to prevent replay attacks.
+
+
+As of v0.54, ed25519 keys can be used for transaction signing. Previously, ed25519 keys were only supported for consensus validation. This expands the available key types for user accounts.
+
+
+## Keepers
+
+The auth module only exposes one keeper, the account keeper, which can be used to read and write accounts.
+
+### Account Keeper
+
+Presently only one fully-permissioned account keeper is exposed, which has the ability to both read and write
+all fields of all accounts, and to iterate over all stored accounts.
+
+```go expandable
+// AccountKeeperI is the interface contract that x/auth's keeper implements.
+type AccountKeeperI interface {
+ // Return a new account with the next account number and the specified address. Does not save the new account to the store.
+ NewAccountWithAddress(sdk.Context, sdk.AccAddress)
+
+types.AccountI
+
+ // Return a new account with the next account number. Does not save the new account to the store.
+ NewAccount(sdk.Context, types.AccountI)
+
+types.AccountI
+
+ // Check if an account exists in the store.
+ HasAccount(sdk.Context, sdk.AccAddress)
+
+bool
+
+ // Retrieve an account from the store.
+ GetAccount(sdk.Context, sdk.AccAddress)
+
+types.AccountI
+
+ // Set an account in the store.
+ SetAccount(sdk.Context, types.AccountI)
+
+ // Remove an account from the store.
+ RemoveAccount(sdk.Context, types.AccountI)
+
+ // Iterate over all accounts, calling the provided function. Stop iteration when it returns true.
+ IterateAccounts(sdk.Context, func(types.AccountI)
+
+bool)
+
+ // Fetch the public key of an account at a specified address
+ GetPubKey(sdk.Context, sdk.AccAddress) (crypto.PubKey, error)
+
+ // Fetch the sequence of an account at a specified address.
+ GetSequence(sdk.Context, sdk.AccAddress) (uint64, error)
+
+ // Fetch the next account number, and increment the internal counter.
+ NextAccountNumber(sdk.Context)
+
+uint64
+}
+```
+
+## Parameters
+
+The auth module contains the following parameters:
+
+| Key | Type | Example |
+| ---------------------- | ------ | ------- |
+| MaxMemoCharacters | uint64 | 256 |
+| TxSigLimit | uint64 | 7 |
+| TxSizeCostPerByte | uint64 | 10 |
+| SigVerifyCostED25519 | uint64 | 590 |
+| SigVerifyCostSecp256k1 | uint64 | 1000 |
+
+## Client
+
+### CLI
+
+A user can query and interact with the `auth` module using the CLI.
+
+### Query
+
+The `query` commands allow users to query `auth` state.
+
+```bash
+simd query auth --help
+```
+
+#### account
+
+The `account` command allows users to query for an account by its address.
+
+```bash
+simd query auth account [address] [flags]
+```
+
+Example:
+
+```bash
+simd query auth account cosmos1...
+```
+
+Example Output:
+
+```bash
+'@type': /cosmos.auth.v1beta1.BaseAccount
+account_number: "0"
+address: cosmos1zwg6tpl8aw4rawv8sgag9086lpw5hv33u5ctr2
+pub_key:
+ '@type': /cosmos.crypto.secp256k1.PubKey
+ key: ApDrE38zZdd7wLmFS9YmqO684y5DG6fjZ4rVeihF/AQD
+sequence: "1"
+```
+
+#### accounts
+
+The `accounts` command allows users to query all the available accounts.
+
+```bash
+simd query auth accounts [flags]
+```
+
+Example:
+
+```bash
+simd query auth accounts
+```
+
+Example Output:
+
+```bash expandable
+accounts:
+- '@type': /cosmos.auth.v1beta1.BaseAccount
+ account_number: "0"
+ address: cosmos1zwg6tpl8aw4rawv8sgag9086lpw5hv33u5ctr2
+ pub_key:
+ '@type': /cosmos.crypto.secp256k1.PubKey
+ key: ApDrE38zZdd7wLmFS9YmqO684y5DG6fjZ4rVeihF/AQD
+ sequence: "1"
+- '@type': /cosmos.auth.v1beta1.ModuleAccount
+ base_account:
+ account_number: "8"
+ address: cosmos1yl6hdjhmkf37639730gffanpzndzdpmhwlkfhr
+ pub_key: null
+ sequence: "0"
+ name: transfer
+ permissions:
+ - minter
+ - burner
+- '@type': /cosmos.auth.v1beta1.ModuleAccount
+ base_account:
+ account_number: "4"
+ address: cosmos1fl48vsnmsdzcv85q5d2q4z5ajdha8yu34mf0eh
+ pub_key: null
+ sequence: "0"
+ name: bonded_tokens_pool
+ permissions:
+ - burner
+ - staking
+- '@type': /cosmos.auth.v1beta1.ModuleAccount
+ base_account:
+ account_number: "5"
+ address: cosmos1tygms3xhhs3yv487phx3dw4a95jn7t7lpm470r
+ pub_key: null
+ sequence: "0"
+ name: not_bonded_tokens_pool
+ permissions:
+ - burner
+ - staking
+- '@type': /cosmos.auth.v1beta1.ModuleAccount
+ base_account:
+ account_number: "6"
+ address: cosmos10d07y265gmmuvt4z0w9aw880jnsr700j6zn9kn
+ pub_key: null
+ sequence: "0"
+ name: gov
+ permissions:
+ - burner
+- '@type': /cosmos.auth.v1beta1.ModuleAccount
+ base_account:
+ account_number: "3"
+ address: cosmos1jv65s3grqf6v6jl3dp4t6c9t9rk99cd88lyufl
+ pub_key: null
+ sequence: "0"
+ name: distribution
+ permissions: []
+- '@type': /cosmos.auth.v1beta1.BaseAccount
+ account_number: "1"
+ address: cosmos147k3r7v2tvwqhcmaxcfql7j8rmkrlsemxshd3j
+ pub_key: null
+ sequence: "0"
+- '@type': /cosmos.auth.v1beta1.ModuleAccount
+ base_account:
+ account_number: "7"
+ address: cosmos1m3h30wlvsf8llruxtpukdvsy0km2kum8g38c8q
+ pub_key: null
+ sequence: "0"
+ name: mint
+ permissions:
+ - minter
+- '@type': /cosmos.auth.v1beta1.ModuleAccount
+ base_account:
+ account_number: "2"
+ address: cosmos17xpfvakm2amg962yls6f84z3kell8c5lserqta
+ pub_key: null
+ sequence: "0"
+ name: fee_collector
+ permissions: []
+pagination:
+ next_key: null
+ total: "0"
+```
+
+#### params
+
+The `params` command allows users to query the current auth parameters.
+
+```bash
+simd query auth params [flags]
+```
+
+Example:
+
+```bash
+simd query auth params
+```
+
+Example Output:
+
+```bash
+max_memo_characters: "256"
+sig_verify_cost_ed25519: "590"
+sig_verify_cost_secp256k1: "1000"
+tx_sig_limit: "7"
+tx_size_cost_per_byte: "10"
+```
+
+### Transactions
+
+The `auth` module supports transaction commands to help you with signing and more. Unlike other modules, the `auth` module transaction commands are accessed directly through the top-level `tx` command.
+
+Use the `--help` flag directly to get more information about the `tx` command.
+
+```bash
+simd tx --help
+```
+
+#### `sign`
+
+The `sign` command allows users to sign transactions that were generated offline.
+
+```bash
+simd tx sign tx.json --from $ALICE > tx.signed.json
+```
+
+The result is a signed transaction that can be broadcasted to the network thanks to the broadcast command.
+
+More information about the `sign` command can be found running `simd tx sign --help`.
+
+#### `sign-batch`
+
+The `sign-batch` command allows users to sign multiple offline-generated transactions.
+The transactions can be in one file, with one tx per line, or in multiple files.
+
+```bash
+simd tx sign txs.json --from $ALICE > tx.signed.json
+```
+
+or
+
+```bash
+simd tx sign tx1.json tx2.json tx3.json --from $ALICE > tx.signed.json
+```
+
+The result is multiple signed transactions. To combine the signed transactions into one transaction, use the `--append` flag.
+
+More information about the `sign-batch` command can be found running `simd tx sign-batch --help`.
+
+#### `multi-sign`
+
+The `multi-sign` command allows users to sign transactions that were generated offline by a multisig account.
+
+```bash
+simd tx multisign transaction.json k1k2k3 k1sig.json k2sig.json k3sig.json
+```
+
+Where `k1k2k3` is the multisig account address, `k1sig.json` is the signature of the first signer, `k2sig.json` is the signature of the second signer, and `k3sig.json` is the signature of the third signer.
+
+##### Nested multisig transactions
+
+To allow transactions to be signed by nested multisigs, meaning that a participant of a multisig account can be another multisig account, the `--skip-signature-verification` flag must be used.
+
+```bash
+# First aggregate signatures of the multisig participant
+simd tx multi-sign transaction.json ms1 ms1p1sig.json ms1p2sig.json --signature-only --skip-signature-verification > ms1sig.json
+
+# Then use the aggregated signatures and the other signatures to sign the final transaction
+simd tx multi-sign transaction.json k1ms1 k1sig.json ms1sig.json --skip-signature-verification
+```
+
+Where `ms1` is the nested multisig account address, `ms1p1sig.json` is the signature of the first participant of the nested multisig account, `ms1p2sig.json` is the signature of the second participant of the nested multisig account, and `ms1sig.json` is the aggregated signature of the nested multisig account.
+
+`k1ms1` is a multisig account comprised of an individual signer and another nested multisig account (`ms1`). `k1sig.json` is the signature of the first signer of the individual member.
+
+More information about the `multi-sign` command can be found running `simd tx multi-sign --help`.
+
+#### `multisign-batch`
+
+The `multisign-batch` command works the same way as `sign-batch`, but for multisig accounts.
+With the difference that the `multisign-batch` command requires all transactions to be in one file, and the `--append` flag does not exist.
+
+More information about the `multisign-batch` command can be found running `simd tx multisign-batch --help`.
+
+#### `validate-signatures`
+
+The `validate-signatures` command allows users to validate the signatures of a signed transaction.
+
+```bash
+$ simd tx validate-signatures tx.signed.json
+Signers:
+ 0: cosmos1l6vsqhh7rnwsyr2kyz3jjg3qduaz8gwgyl8275
+
+Signatures:
+ 0: cosmos1l6vsqhh7rnwsyr2kyz3jjg3qduaz8gwgyl8275 [OK]
+```
+
+More information about the `validate-signatures` command can be found running `simd tx validate-signatures --help`.
+
+#### `broadcast`
+
+The `broadcast` command allows users to broadcast a signed transaction to the network.
+
+```bash
+simd tx broadcast tx.signed.json
+```
+
+More information about the `broadcast` command can be found running `simd tx broadcast --help`.
+
+### gRPC
+
+A user can query the `auth` module using gRPC endpoints.
+
+#### Account
+
+The `account` endpoint allows users to query for an account by its address.
+
+```bash
+cosmos.auth.v1beta1.Query/Account
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"address":"cosmos1.."}' \
+ localhost:9090 \
+ cosmos.auth.v1beta1.Query/Account
+```
+
+Example Output:
+
+```bash expandable
+{
+ "account":{
+ "@type":"/cosmos.auth.v1beta1.BaseAccount",
+ "address":"cosmos1zwg6tpl8aw4rawv8sgag9086lpw5hv33u5ctr2",
+ "pubKey":{
+ "@type":"/cosmos.crypto.secp256k1.PubKey",
+ "key":"ApDrE38zZdd7wLmFS9YmqO684y5DG6fjZ4rVeihF/AQD"
+ },
+ "sequence":"1"
+ }
+}
+```
+
+#### Accounts
+
+The `accounts` endpoint allows users to query all the available accounts.
+
+```bash
+cosmos.auth.v1beta1.Query/Accounts
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ localhost:9090 \
+ cosmos.auth.v1beta1.Query/Accounts
+```
+
+Example Output:
+
+```bash expandable
+{
+ "accounts":[
+ {
+ "@type":"/cosmos.auth.v1beta1.BaseAccount",
+ "address":"cosmos1zwg6tpl8aw4rawv8sgag9086lpw5hv33u5ctr2",
+ "pubKey":{
+ "@type":"/cosmos.crypto.secp256k1.PubKey",
+ "key":"ApDrE38zZdd7wLmFS9YmqO684y5DG6fjZ4rVeihF/AQD"
+ },
+ "sequence":"1"
+ },
+ {
+ "@type":"/cosmos.auth.v1beta1.ModuleAccount",
+ "baseAccount":{
+ "address":"cosmos1yl6hdjhmkf37639730gffanpzndzdpmhwlkfhr",
+ "accountNumber":"8"
+ },
+ "name":"transfer",
+ "permissions":[
+ "minter",
+ "burner"
+ ]
+ },
+ {
+ "@type":"/cosmos.auth.v1beta1.ModuleAccount",
+ "baseAccount":{
+ "address":"cosmos1fl48vsnmsdzcv85q5d2q4z5ajdha8yu34mf0eh",
+ "accountNumber":"4"
+ },
+ "name":"bonded_tokens_pool",
+ "permissions":[
+ "burner",
+ "staking"
+ ]
+ },
+ {
+ "@type":"/cosmos.auth.v1beta1.ModuleAccount",
+ "baseAccount":{
+ "address":"cosmos1tygms3xhhs3yv487phx3dw4a95jn7t7lpm470r",
+ "accountNumber":"5"
+ },
+ "name":"not_bonded_tokens_pool",
+ "permissions":[
+ "burner",
+ "staking"
+ ]
+ },
+ {
+ "@type":"/cosmos.auth.v1beta1.ModuleAccount",
+ "baseAccount":{
+ "address":"cosmos10d07y265gmmuvt4z0w9aw880jnsr700j6zn9kn",
+ "accountNumber":"6"
+ },
+ "name":"gov",
+ "permissions":[
+ "burner"
+ ]
+ },
+ {
+ "@type":"/cosmos.auth.v1beta1.ModuleAccount",
+ "baseAccount":{
+ "address":"cosmos1jv65s3grqf6v6jl3dp4t6c9t9rk99cd88lyufl",
+ "accountNumber":"3"
+ },
+ "name":"distribution"
+ },
+ {
+ "@type":"/cosmos.auth.v1beta1.BaseAccount",
+ "accountNumber":"1",
+ "address":"cosmos147k3r7v2tvwqhcmaxcfql7j8rmkrlsemxshd3j"
+ },
+ {
+ "@type":"/cosmos.auth.v1beta1.ModuleAccount",
+ "baseAccount":{
+ "address":"cosmos1m3h30wlvsf8llruxtpukdvsy0km2kum8g38c8q",
+ "accountNumber":"7"
+ },
+ "name":"mint",
+ "permissions":[
+ "minter"
+ ]
+ },
+ {
+ "@type":"/cosmos.auth.v1beta1.ModuleAccount",
+ "baseAccount":{
+ "address":"cosmos17xpfvakm2amg962yls6f84z3kell8c5lserqta",
+ "accountNumber":"2"
+ },
+ "name":"fee_collector"
+ }
+ ],
+ "pagination":{
+ "total":"9"
+ }
+}
+```
+
+#### Params
+
+The `params` endpoint allows users to query the current auth parameters.
+
+```bash
+cosmos.auth.v1beta1.Query/Params
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ localhost:9090 \
+ cosmos.auth.v1beta1.Query/Params
+```
+
+Example Output:
+
+```bash
+{
+ "params": {
+ "maxMemoCharacters": "256",
+ "txSigLimit": "7",
+ "txSizeCostPerByte": "10",
+ "sigVerifyCostEd25519": "590",
+ "sigVerifyCostSecp256k1": "1000"
+ }
+}
+```
+
+### REST
+
+A user can query the `auth` module using REST endpoints.
+
+#### Account
+
+The `account` endpoint allows users to query for an account by its address.
+
+```bash
+/cosmos/auth/v1beta1/account?address={address}
+```
+
+#### Accounts
+
+The `accounts` endpoint allows users to query all the available accounts.
+
+```bash
+/cosmos/auth/v1beta1/accounts
+```
+
+#### Params
+
+The `params` endpoint allows users to query the current auth parameters.
+
+```bash
+/cosmos/auth/v1beta1/params
+```
diff --git a/sdk/next/build/modules/auth/tx.mdx b/sdk/next/build/modules/auth/tx.mdx
new file mode 100644
index 000000000..4958d0145
--- /dev/null
+++ b/sdk/next/build/modules/auth/tx.mdx
@@ -0,0 +1,272 @@
+---
+title: 'x/auth/tx'
+---
+
+
+**Prerequisite Readings**
+
+* [Transactions](/sdk/v0.53/learn/beginner/tx-lifecycle#transaction-generation)
+* [Encoding](/sdk/v0.53/learn/advanced/encoding#transaction-encoding)
+
+
+
+## Abstract
+
+This document specifies the `x/auth/tx` package of the Cosmos SDK.
+
+This package represents the Cosmos SDK implementation of the `client.TxConfig`, `client.TxBuilder`, `client.TxEncoder` and `client.TxDecoder` interfaces.
+
+## Contents
+
+* [Transactions](#transactions)
+ * [`TxConfig`](#txconfig)
+ * [`TxBuilder`](#txbuilder)
+ * [`TxEncoder`/ `TxDecoder`](#txencoder-txdecoder)
+* [Client](#client)
+ * [CLI](#cli)
+ * [gRPC](#grpc)
+
+## Transactions
+
+### `TxConfig`
+
+`client.TxConfig` defines an interface a client can utilize to generate an application-defined concrete transaction type.
+The interface defines a set of methods for creating a `client.TxBuilder`.
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/client/tx_config.go#L25-L31
+```
+
+The default implementation of `client.TxConfig` is instantiated by `NewTxConfig` in `x/auth/tx` module.
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/x/auth/tx/config.go#L22-L28
+```
+
+### `TxBuilder`
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/client/tx_config.go#L33-L50
+```
+
+The [`client.TxBuilder`](/sdk/v0.53/learn/beginner/tx-lifecycle#transaction-generation) interface is as well implemented by `x/auth/tx`.
+A `client.TxBuilder` can be accessed with `TxConfig.NewTxBuilder()`.
+
+### `TxEncoder`/ `TxDecoder`
+
+More information about `TxEncoder` and `TxDecoder` can be found [here](/sdk/v0.53/learn/advanced/encoding#transaction-encoding).
+
+## Client
+
+### CLI
+
+#### Query
+
+The `x/auth/tx` module provides a CLI command to query any transaction, given its hash, transaction sequence or signature.
+
+Without any argument, the command will query the transaction using the transaction hash.
+
+```shell
+simd query tx DFE87B78A630C0EFDF76C80CD24C997E252792E0317502AE1A02B9809F0D8685
+```
+
+When querying a transaction from an account given its sequence, use the `--type=acc_seq` flag:
+
+```shell
+simd query tx --type=acc_seq cosmos1u69uyr6v9qwe6zaaeaqly2h6wnedac0xpxq325/1
+```
+
+When querying a transaction given its signature, use the `--type=signature` flag:
+
+```shell
+simd query tx --type=signature Ofjvgrqi8twZfqVDmYIhqwRLQjZZ40XbxEamk/veH3gQpRF0hL2PH4ejRaDzAX+2WChnaWNQJQ41ekToIi5Wqw==
+```
+
+When querying a transaction given its events, use the `--type=events` flag:
+
+```shell
+simd query txs --events 'message.sender=cosmos...' --page 1 --limit 30
+```
+
+The `x/auth/tx` module also provides a CLI command to query any block, given its hash, height, or events.
+
+When querying a block by its hash, use the `--type=hash` flag:
+
+```shell
+simd query block --type=hash DFE87B78A630C0EFDF76C80CD24C997E252792E0317502AE1A02B9809F0D8685
+```
+
+When querying a block by its height, use the `--type=height` flag:
+
+```shell
+simd query block --type=height 1357
+```
+
+When querying a block by its events, use the `--query` flag:
+
+```shell
+simd query blocks --query 'message.sender=cosmos...' --page 1 --limit 30
+```
+
+#### Transactions
+
+The `x/auth/tx` module provides a convenient CLI command for decoding and encoding transactions.
+
+#### `encode`
+
+The `encode` command encodes a transaction created with the `--generate-only` flag or signed with the sign command.
+The transaction is serialized to Protobuf and returned as base64.
+
+```bash
+$ simd tx encode tx.json
+Co8BCowBChwvY29zbW9zLmJhbmsudjFiZXRhMS5Nc2dTZW5kEmwKLWNvc21vczFsNnZzcWhoN3Jud3N5cjJreXozampnM3FkdWF6OGd3Z3lsODI3NRItY29zbW9zMTU4c2FsZHlnOHBteHU3Znd2dDBkNng3amVzd3A0Z3d5a2xrNnkzGgwKBXN0YWtlEgMxMDASBhIEEMCaDA==
+$ simd tx encode tx.signed.json
+```
+
+More information about the `encode` command can be found running `simd tx encode --help`.
+
+#### `decode`
+
+The `decode` command decodes a transaction encoded with the `encode` command.
+
+```bash
+simd tx decode Co8BCowBChwvY29zbW9zLmJhbmsudjFiZXRhMS5Nc2dTZW5kEmwKLWNvc21vczFsNnZzcWhoN3Jud3N5cjJreXozampnM3FkdWF6OGd3Z3lsODI3NRItY29zbW9zMTU4c2FsZHlnOHBteHU3Znd2dDBkNng3amVzd3A0Z3d5a2xrNnkzGgwKBXN0YWtlEgMxMDASBhIEEMCaDA==
+```
+
+More information about the `decode` command can be found running `simd tx decode --help`.
+
+### gRPC
+
+A user can query the `x/auth/tx` module using gRPC endpoints.
+
+#### `TxDecode`
+
+The `TxDecode` endpoint allows users to decode a transaction.
+
+```shell
+cosmos.tx.v1beta1.Service/TxDecode
+```
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ -d '{"tx_bytes":"Co8BCowBChwvY29zbW9zLmJhbmsudjFiZXRhMS5Nc2dTZW5kEmwKLWNvc21vczFsNnZzcWhoN3Jud3N5cjJreXozampnM3FkdWF6OGd3Z3lsODI3NRItY29zbW9zMTU4c2FsZHlnOHBteHU3Znd2dDBkNng3amVzd3A0Z3d5a2xrNnkzGgwKBXN0YWtlEgMxMDASBhIEEMCaDA=="}' \
+ localhost:9090 \
+ cosmos.tx.v1beta1.Service/TxDecode
+```
+
+Example Output:
+
+```json expandable
+{
+ "tx": {
+ "body": {
+ "messages": [
+ {
+ "@type": "/cosmos.bank.v1beta1.MsgSend",
+ "amount": [
+ {
+ "denom": "stake",
+ "amount": "100"
+ }
+ ],
+ "fromAddress": "cosmos1l6vsqhh7rnwsyr2kyz3jjg3qduaz8gwgyl8275",
+ "toAddress": "cosmos158saldyg8pmxu7fwvt0d6x7jeswp4gwyklk6y3"
+ }
+ ]
+ },
+ "authInfo": {
+ "fee": {
+ "gasLimit": "200000"
+ }
+ }
+ }
+}
+```
+
+#### `TxEncode`
+
+The `TxEncode` endpoint allows users to encode a transaction.
+
+```shell
+cosmos.tx.v1beta1.Service/TxEncode
+```
+
+Example:
+
+```shell expandable
+grpcurl -plaintext \
+ -d '{"tx": {
+ "body": {
+ "messages": [
+ {"@type":"/cosmos.bank.v1beta1.MsgSend","amount":[{"denom":"stake","amount":"100"}],"fromAddress":"cosmos1l6vsqhh7rnwsyr2kyz3jjg3qduaz8gwgyl8275","toAddress":"cosmos158saldyg8pmxu7fwvt0d6x7jeswp4gwyklk6y3"}
+ ]
+ },
+ "authInfo": {
+ "fee": {
+ "gasLimit": "200000"
+ }
+ }
+ }}' \
+ localhost:9090 \
+ cosmos.tx.v1beta1.Service/TxEncode
+```
+
+Example Output:
+
+```json
+{
+ "txBytes": "Co8BCowBChwvY29zbW9zLmJhbmsudjFiZXRhMS5Nc2dTZW5kEmwKLWNvc21vczFsNnZzcWhoN3Jud3N5cjJreXozampnM3FkdWF6OGd3Z3lsODI3NRItY29zbW9zMTU4c2FsZHlnOHBteHU3Znd2dDBkNng3amVzd3A0Z3d5a2xrNnkzGgwKBXN0YWtlEgMxMDASBhIEEMCaDA=="
+}
+```
+
+#### `TxDecodeAmino`
+
+The `TxDecodeAmino` endpoint allows users to decode an amino transaction.
+
+```shell
+cosmos.tx.v1beta1.Service/TxDecodeAmino
+```
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ -d '{"amino_binary": "KCgWqQpvqKNhmgotY29zbW9zMXRzeno3cDJ6Z2Q3dnZrYWh5ZnJlNHduNXh5dTgwcnB0ZzZ2OWg1Ei1jb3Ntb3MxdHN6ejdwMnpnZDd2dmthaHlmcmU0d241eHl1ODBycHRnNnY5aDUaCwoFc3Rha2USAjEwEhEKCwoFc3Rha2USAjEwEMCaDCIGZm9vYmFy"}' \
+ localhost:9090 \
+ cosmos.tx.v1beta1.Service/TxDecodeAmino
+```
+
+Example Output:
+
+```json
+{
+ "aminoJson": "{\"type\":\"cosmos-sdk/StdTx\",\"value\":{\"msg\":[{\"type\":\"cosmos-sdk/MsgSend\",\"value\":{\"from_address\":\"cosmos1tszz7p2zgd7vvkahyfre4wn5xyu80rptg6v9h5\",\"to_address\":\"cosmos1tszz7p2zgd7vvkahyfre4wn5xyu80rptg6v9h5\",\"amount\":[{\"denom\":\"stake\",\"amount\":\"10\"}]}}],\"fee\":{\"amount\":[{\"denom\":\"stake\",\"amount\":\"10\"}],\"gas\":\"200000\"},\"signatures\":null,\"memo\":\"foobar\",\"timeout_height\":\"0\"}}"
+}
+```
+
+#### `TxEncodeAmino`
+
+The `TxEncodeAmino` endpoint allows users to encode an amino transaction.
+
+```shell
+cosmos.tx.v1beta1.Service/TxEncodeAmino
+```
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ -d '{"amino_json":"{\"type\":\"cosmos-sdk/StdTx\",\"value\":{\"msg\":[{\"type\":\"cosmos-sdk/MsgSend\",\"value\":{\"from_address\":\"cosmos1tszz7p2zgd7vvkahyfre4wn5xyu80rptg6v9h5\",\"to_address\":\"cosmos1tszz7p2zgd7vvkahyfre4wn5xyu80rptg6v9h5\",\"amount\":[{\"denom\":\"stake\",\"amount\":\"10\"}]}}],\"fee\":{\"amount\":[{\"denom\":\"stake\",\"amount\":\"10\"}],\"gas\":\"200000\"},\"signatures\":null,\"memo\":\"foobar\",\"timeout_height\":\"0\"}}"}' \
+ localhost:9090 \
+ cosmos.tx.v1beta1.Service/TxEncodeAmino
+```
+
+Example Output:
+
+```json
+{
+ "amino_binary": "KCgWqQpvqKNhmgotY29zbW9zMXRzeno3cDJ6Z2Q3dnZrYWh5ZnJlNHduNXh5dTgwcnB0ZzZ2OWg1Ei1jb3Ntb3MxdHN6ejdwMnpnZDd2dmthaHlmcmU0d241eHl1ODBycHRnNnY5aDUaCwoFc3Rha2USAjEwEhEKCwoFc3Rha2USAjEwEMCaDCIGZm9vYmFy"
+}
+```
diff --git a/sdk/next/build/modules/auth/vesting.mdx b/sdk/next/build/modules/auth/vesting.mdx
new file mode 100644
index 000000000..a1d8ee62d
--- /dev/null
+++ b/sdk/next/build/modules/auth/vesting.mdx
@@ -0,0 +1,679 @@
+---
+title: 'x/auth/vesting'
+---
+
+* [Intro and Requirements](#intro-and-requirements)
+* [Note](#note)
+* [Vesting Account Types](#vesting-account-types)
+ * [BaseVestingAccount](#basevestingaccount)
+ * [ContinuousVestingAccount](#continuousvestingaccount)
+ * [DelayedVestingAccount](#delayedvestingaccount)
+ * [Period](#period)
+ * [PeriodicVestingAccount](#periodicvestingaccount)
+ * [PermanentLockedAccount](#permanentlockedaccount)
+* [Vesting Account Specification](#vesting-account-specification)
+ * [Determining Vesting & Vested Amounts](#determining-vesting--vested-amounts)
+ * [Periodic Vesting Accounts](#periodic-vesting-accounts)
+ * [Transferring/Sending](#transferringsending)
+ * [Delegating](#delegating)
+ * [Undelegating](#undelegating)
+* [Keepers & Handlers](#keepers--handlers)
+* [Genesis Initialization](#genesis-initialization)
+* [Examples](#examples)
+ * [Simple](#simple)
+ * [Slashing](#slashing)
+ * [Periodic Vesting](#periodic-vesting)
+* [Glossary](#glossary)
+
+## Intro and Requirements
+
+This specification defines the vesting account implementation that is used by the Cosmos Hub. The requirements for this vesting account are that it should be initialized during genesis with a starting balance `X` and a vesting end time `ET`. A vesting account may be initialized with a vesting start time `ST` and a number of vesting periods `P`. If a vesting start time is included, the vesting period does not begin until start time is reached. If vesting periods are included, the vesting occurs over the specified number of periods.
+
+For all vesting accounts, the owner of the vesting account is able to delegate and undelegate from validators, however they cannot transfer coins to another account until those coins are vested. This specification allows for four different kinds of vesting:
+
+* Delayed vesting, where all coins are vested once `ET` is reached.
+* Continuous vesting, where coins begin to vest at `ST` and vest linearly with respect to time until `ET` is reached
+* Periodic vesting, where coins begin to vest at `ST` and vest periodically according to number of periods and the vesting amount per period. The number of periods, length per period, and amount per period are configurable. A periodic vesting account is distinguished from a continuous vesting account in that coins can be released in staggered tranches. For example, a periodic vesting account could be used for vesting arrangements where coins are released quarterly, yearly, or over any other function of tokens over time.
+* Permanent locked vesting, where coins are locked forever. Coins in this account can still be used for delegating and for governance votes even while locked.
+
+## Note
+
+Vesting accounts can be initialized with some vesting and non-vesting coins. The non-vesting coins would be immediately transferable. DelayedVesting, ContinuousVesting, PeriodicVesting and PermanentVesting accounts can be created with normal messages after genesis. Other types of vesting accounts must be created at genesis, or as part of a manual network upgrade. The current specification only allows for *unconditional* vesting (i.e. there is no possibility of reaching `ET` and
+having coins fail to vest).
+
+## Vesting Account Types
+
+```go expandable
+// VestingAccount defines an interface that any vesting account type must
+// implement.
+type VestingAccount interface {
+ Account
+
+ GetVestedCoins(Time)
+
+Coins
+ GetVestingCoins(Time)
+
+Coins
+
+ // TrackDelegation performs internal vesting accounting necessary when
+ // delegating from a vesting account. It accepts the current block time, the
+ // delegation amount and balance of all coins whose denomination exists in
+ // the account's original vesting balance.
+ TrackDelegation(Time, Coins, Coins)
+
+ // TrackUndelegation performs internal vesting accounting necessary when a
+ // vesting account performs an undelegation.
+ TrackUndelegation(Coins)
+
+GetStartTime()
+
+int64
+ GetEndTime()
+
+int64
+}
+```
+
+### BaseVestingAccount
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/proto/cosmos/vesting/v1beta1/vesting.proto#L11-L35
+```
+
+### ContinuousVestingAccount
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/proto/cosmos/vesting/v1beta1/vesting.proto#L37-L46
+```
+
+### DelayedVestingAccount
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/proto/cosmos/vesting/v1beta1/vesting.proto#L48-L57
+```
+
+### Period
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/proto/cosmos/vesting/v1beta1/vesting.proto#L59-L69
+```
+
+```go
+// Stores all vesting periods passed as part of a PeriodicVestingAccount
+type Periods []Period
+```
+
+### PeriodicVestingAccount
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/proto/cosmos/vesting/v1beta1/vesting.proto#L71-L81
+```
+
+In order to facilitate less ad-hoc type checking and assertions and to support flexibility in account balance usage, the existing `x/bank` `ViewKeeper` interface is updated to contain the following:
+
+```go
+type ViewKeeper interface {
+ // ...
+
+ // Calculates the total locked account balance.
+ LockedCoins(ctx sdk.Context, addr sdk.AccAddress)
+
+sdk.Coins
+
+ // Calculates the total spendable balance that can be sent to other accounts.
+ SpendableCoins(ctx sdk.Context, addr sdk.AccAddress)
+
+sdk.Coins
+}
+```
+
+### PermanentLockedAccount
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/proto/cosmos/vesting/v1beta1/vesting.proto#L83-L94
+```
+
+## Vesting Account Specification
+
+Given a vesting account, we define the following in the proceeding operations:
+
+* `OV`: The original vesting coin amount. It is a constant value.
+* `V`: The number of `OV` coins that are still *vesting*. It is derived from
+  `OV`, `StartTime` and `EndTime`. This value is computed on demand and not on a per-block basis.
+* `V'`: The number of `OV` coins that are *vested* (unlocked). This value is computed on demand and not on a per-block basis.
+* `DV`: The number of delegated *vesting* coins. It is a variable value. It is stored and modified directly in the vesting account.
+* `DF`: The number of delegated *vested* (unlocked) coins. It is a variable value. It is stored and modified directly in the vesting account.
+* `BC`: The number of `OV` coins less any coins that are transferred
+ (which can be negative or delegated). It is considered to be balance of the embedded base account. It is stored and modified directly in the vesting account.
+
+### Determining Vesting & Vested Amounts
+
+It is important to note that these values are computed on demand and not on a mandatory per-block basis (e.g. `BeginBlocker` or `EndBlocker`).
+
+#### Continuously Vesting Accounts
+
+To determine the amount of coins that are vested for a given block time `T`, the
+following is performed:
+
+1. Compute `X := T - StartTime`
+2. Compute `Y := EndTime - StartTime`
+3. Compute `V' := OV * (X / Y)`
+4. Compute `V := OV - V'`
+
+Thus, the total amount of *vested* coins is `V'` and the remaining amount, `V`,
+is *vesting*.
+
+```go expandable
+func (cva ContinuousVestingAccount)
+
+GetVestedCoins(t Time)
+
+Coins {
+ if t <= cva.StartTime {
+ // We must handle the case where the start time for a vesting account has
+ // been set into the future or when the start of the chain is not exactly
+ // known.
+ return ZeroCoins
+}
+
+else if t >= cva.EndTime {
+ return cva.OriginalVesting
+}
+ x := t - cva.StartTime
+ y := cva.EndTime - cva.StartTime
+
+ return cva.OriginalVesting * (x / y)
+}
+
+func (cva ContinuousVestingAccount)
+
+GetVestingCoins(t Time)
+
+Coins {
+ return cva.OriginalVesting - cva.GetVestedCoins(t)
+}
+```
+
+### Periodic Vesting Accounts
+
+Periodic vesting accounts require calculating the coins released during each period for a given block time `T`. Note that multiple periods could have passed when calling `GetVestedCoins`, so we must iterate over each period until the end of that period is after `T`.
+
+1. Set `CT := StartTime`
+2. Set `V' := 0`
+
+For each Period P:
+
+1. Compute `X := T - CT`
+2. IF `X >= P.Length`
+ 1. Compute `V' += P.Amount`
+ 2. Compute `CT += P.Length`
+ 3. ELSE break
+3. Compute `V := OV - V'`
+
+```go expandable
+func (pva PeriodicVestingAccount)
+
+GetVestedCoins(t Time)
+
+Coins {
+ if t < pva.StartTime {
+ return ZeroCoins
+}
+ ct := pva.StartTime // The start of the vesting schedule
+ vested := 0
+ periods = pva.GetPeriods()
+ for _, period := range periods {
+ if t - ct < period.Length {
+ break
+}
+
+vested += period.Amount
+ ct += period.Length // increment ct to the start of the next vesting period
+}
+
+return vested
+}
+
+func (pva PeriodicVestingAccount)
+
+GetVestingCoins(t Time)
+
+Coins {
+ return pva.OriginalVesting - pva.GetVestedCoins(t)
+}
+```
+
+#### Delayed/Discrete Vesting Accounts
+
+Delayed vesting accounts are easier to reason about as they only have the full amount vesting up until a certain time, then all the coins become vested (unlocked). This does not include any unlocked coins the account may have initially.
+
+```go expandable
+func (dva DelayedVestingAccount)
+
+GetVestedCoins(t Time)
+
+Coins {
+ if t >= dva.EndTime {
+ return dva.OriginalVesting
+}
+
+return ZeroCoins
+}
+
+func (dva DelayedVestingAccount)
+
+GetVestingCoins(t Time)
+
+Coins {
+ return dva.OriginalVesting - dva.GetVestedCoins(t)
+}
+```
+
+### Transferring/Sending
+
+At any given time, a vesting account may transfer: `min((BC + DV) - V, BC)`.
+
+In other words, a vesting account may transfer the minimum of the base account balance and the base account balance plus the number of currently delegated vesting coins less the number of coins vested so far.
+
+However, given that account balances are tracked via the `x/bank` module and that we want to avoid loading the entire account balance, we can instead determine the locked balance, which can be defined as `max(V - DV, 0)`, and infer the spendable balance from that.
+
+```go
+func (va VestingAccount)
+
+LockedCoins(t Time)
+
+Coins {
+ return max(va.GetVestingCoins(t) - va.DelegatedVesting, 0)
+}
+```
+
+The `x/bank` `ViewKeeper` can then provide APIs to determine locked and spendable coins for any account:
+
+```go expandable
+func (k Keeper)
+
+LockedCoins(ctx Context, addr AccAddress)
+
+Coins {
+ acc := k.GetAccount(ctx, addr)
+ if acc != nil {
+ if acc.IsVesting() {
+ return acc.LockedCoins(ctx.BlockTime())
+}
+
+}
+
+ // non-vesting accounts do not have any locked coins
+ return NewCoins()
+}
+```
+
+#### Keepers/Handlers
+
+The corresponding `x/bank` keeper should appropriately handle sending coins based on if the account is a vesting account or not.
+
+```go expandable
+func (k Keeper)
+
+SendCoins(ctx Context, from Account, to Account, amount Coins) {
+ bc := k.GetBalances(ctx, from)
+ v := k.LockedCoins(ctx, from)
+ spendable := bc - v
+ newCoins := spendable - amount
+ assert(newCoins >= 0)
+
+from.SetBalance(newCoins)
+
+to.AddBalance(amount)
+
+ // save balances...
+}
+```
+
+### Delegating
+
+For a vesting account attempting to delegate `D` coins, the following is performed:
+
+1. Verify `BC >= D > 0`
+2. Compute `X := min(max(V - DV, 0), D)` (portion of `D` that is vesting)
+3. Compute `Y := D - X` (portion of `D` that is free)
+4. Set `DV += X`
+5. Set `DF += Y`
+
+```go
+func (va VestingAccount)
+
+TrackDelegation(t Time, balance Coins, amount Coins) {
+ assert(balance <= amount)
+ x := min(max(va.GetVestingCoins(t) - va.DelegatedVesting, 0), amount)
+ y := amount - x
+
+ va.DelegatedVesting += x
+ va.DelegatedFree += y
+}
+```
+
+**Note** `TrackDelegation` only modifies the `DelegatedVesting` and `DelegatedFree` fields, so upstream callers MUST modify the `Coins` field by subtracting `amount`.
+
+#### Keepers/Handlers
+
+```go
+func DelegateCoins(t Time, from Account, amount Coins) {
+ if isVesting(from) {
+ from.TrackDelegation(t, amount)
+}
+
+else {
+ from.SetBalance(sc - amount)
+}
+
+ // save account...
+}
+```
+
+### Undelegating
+
+For a vesting account attempting to undelegate `D` coins, the following is performed:
+
+> NOTE: `DV < D` and `(DV + DF) < D` may be possible due to quirks in the rounding of delegation/undelegation logic.
+
+1. Verify `D > 0`
+2. Compute `X := min(DF, D)` (portion of `D` that should become free, prioritizing free coins)
+3. Compute `Y := min(DV, D - X)` (portion of `D` that should remain vesting)
+4. Set `DF -= X`
+5. Set `DV -= Y`
+
+```go
+func (cva ContinuousVestingAccount)
+
+TrackUndelegation(amount Coins) {
+ x := min(cva.DelegatedFree, amount)
+ y := amount - x
+
+ cva.DelegatedFree -= x
+ cva.DelegatedVesting -= y
+}
+```
+
+**Note** `TrackUnDelegation` only modifies the `DelegatedVesting` and `DelegatedFree` fields, so upstream callers MUST modify the `Coins` field by adding `amount`.
+
+**Note**: If a delegation is slashed, the continuous vesting account ends up with an excess `DV` amount, even after all its coins have vested. This is because undelegating free coins are prioritized.
+
+**Note**: The undelegation (bond refund) amount may exceed the delegated vesting (bond) amount due to the way undelegation truncates the bond refund, which can increase the validator's exchange rate (tokens/shares) slightly if the undelegated tokens are non-integral.
+
+#### Keepers/Handlers
+
+```go expandable
+func UndelegateCoins(to Account, amount Coins) {
+ if isVesting(to) {
+ if to.DelegatedFree + to.DelegatedVesting >= amount {
+ to.TrackUndelegation(amount)
+ // save account ...
+}
+
+}
+
+else {
+ AddBalance(to, amount)
+ // save account...
+}
+}
+```
+
+## Keepers & Handlers
+
+The `VestingAccount` implementations reside in `x/auth`. However, any keeper in a module (e.g. staking in `x/staking`) wishing to potentially utilize any vesting coins, must call explicit methods on the `x/bank` keeper (e.g. `DelegateCoins`) opposed to `SendCoins` and `SubtractCoins`.
+
+In addition, the vesting account should also be able to spend any coins it receives from other users. Thus, the bank module's `MsgSend` handler should error if a vesting account is trying to send an amount that exceeds their unlocked coin amount.
+
+See the above specification for full implementation details.
+
+## Genesis Initialization
+
+To initialize both vesting and non-vesting accounts, the `GenesisAccount` struct includes new fields: `Vesting`, `StartTime`, and `EndTime`. Accounts meant to be of type `BaseAccount` or any non-vesting type have `Vesting = false`. The genesis initialization logic (e.g. `initFromGenesisState`) must parse and return the correct accounts accordingly based off of these fields.
+
+```go expandable
+type GenesisAccount struct {
+ // ...
+
+ // vesting account fields
+ OriginalVesting sdk.Coins `json:"original_vesting"`
+ DelegatedFree sdk.Coins `json:"delegated_free"`
+ DelegatedVesting sdk.Coins `json:"delegated_vesting"`
+ StartTime int64 `json:"start_time"`
+ EndTime int64 `json:"end_time"`
+}
+
+func ToAccount(gacc GenesisAccount)
+
+Account {
+ bacc := NewBaseAccount(gacc)
+ if gacc.OriginalVesting > 0 {
+ if ga.StartTime != 0 && ga.EndTime != 0 {
+ // return a continuous vesting account
+}
+
+else if ga.EndTime != 0 {
+ // return a delayed vesting account
+}
+
+else {
+ // invalid genesis vesting account provided
+ panic()
+}
+
+}
+
+return bacc
+}
+```
+
+## Examples
+
+### Simple
+
+Given a continuous vesting account with 10 vesting coins.
+
+```text
+OV = 10
+DF = 0
+DV = 0
+BC = 10
+V = 10
+V' = 0
+```
+
+1. Immediately receives 1 coin
+
+ ```text
+ BC = 11
+ ```
+
+2. Time passes, 2 coins vest
+
+ ```text
+ V = 8
+ V' = 2
+ ```
+
+3. Delegates 4 coins to validator A
+
+ ```text
+ DV = 4
+ BC = 7
+ ```
+
+4. Sends 3 coins
+
+ ```text
+ BC = 4
+ ```
+
+5. More time passes, 2 more coins vest
+
+ ```text
+ V = 6
+ V' = 4
+ ```
+
+6. Sends 2 coins. At this point the account cannot send anymore until further
+ coins vest or it receives additional coins. It can still however, delegate.
+
+ ```text
+ BC = 2
+ ```
+
+### Slashing
+
+Same initial starting conditions as the simple example.
+
+1. Time passes, 5 coins vest
+
+ ```text
+ V = 5
+ V' = 5
+ ```
+
+2. Delegate 5 coins to validator A
+
+ ```text
+ DV = 5
+ BC = 5
+ ```
+
+3. Delegate 5 coins to validator B
+
+ ```text
+ DF = 5
+ BC = 0
+ ```
+
+4. Validator A gets slashed by 50%, making the delegation to A now worth 2.5 coins
+
+5. Undelegate from validator A (2.5 coins)
+
+ ```text
+ DF = 5 - 2.5 = 2.5
+ BC = 0 + 2.5 = 2.5
+ ```
+
+6. Undelegate from validator B (5 coins). The account at this point can only
+ send 2.5 coins unless it receives more coins or until more coins vest.
+ It can still however, delegate.
+
+ ```text
+ DV = 5 - 2.5 = 2.5
+ DF = 2.5 - 2.5 = 0
+ BC = 2.5 + 5 = 7.5
+ ```
+
+ Notice how we have an excess amount of `DV`.
+
+### Periodic Vesting
+
+A vesting account is created where 100 tokens will be released over 1 year, with
+1/4 of tokens vesting each quarter. The vesting schedule would be as follows:
+
+```yaml
+Periods:
+- amount: 25stake, length: 7884000
+- amount: 25stake, length: 7884000
+- amount: 25stake, length: 7884000
+- amount: 25stake, length: 7884000
+```
+
+```text
+OV = 100
+DF = 0
+DV = 0
+BC = 100
+V = 100
+V' = 0
+```
+
+1. Immediately receives 1 coin
+
+ ```text
+ BC = 101
+ ```
+
+2. Vesting period 1 passes, 25 coins vest
+
+ ```text
+ V = 75
+ V' = 25
+ ```
+
+3. During vesting period 2, 5 coins are transferred and 5 coins are delegated
+
+ ```text
+ DV = 5
+ BC = 91
+ ```
+
+4. Vesting period 2 passes, 25 coins vest
+
+ ```text
+ V = 50
+ V' = 50
+ ```
+
+## Glossary
+
+* OriginalVesting: The amount of coins (per denomination) that are initially
+ part of a vesting account. These coins are set at genesis.
+* StartTime: The BFT time at which a vesting account starts to vest.
+* EndTime: The BFT time at which a vesting account is fully vested.
+* DelegatedFree: The tracked amount of coins (per denomination) that are
+ delegated from a vesting account that have been fully vested at time of delegation.
+* DelegatedVesting: The tracked amount of coins (per denomination) that are
+ delegated from a vesting account that were vesting at time of delegation.
+* ContinuousVestingAccount: A vesting account implementation that vests coins
+ linearly over time.
+* DelayedVestingAccount: A vesting account implementation that only fully vests
+ all coins at a given time.
+* PeriodicVestingAccount: A vesting account implementation that vests coins
+ according to a custom vesting schedule.
+* PermanentLockedAccount: It does not ever release coins, locking them indefinitely.
+ Coins in this account can still be used for delegating and for governance votes even while locked.
+
+## CLI
+
+A user can query and interact with the `vesting` module using the CLI.
+
+### Transactions
+
+The `tx` commands allow users to interact with the `vesting` module.
+
+```bash
+simd tx vesting --help
+```
+
+#### create-periodic-vesting-account
+
+The `create-periodic-vesting-account` command creates a new vesting account funded with an allocation of tokens that vest according to a sequence of coin amounts and period lengths in seconds. Periods are sequential, in that the duration of a period only starts at the end of the previous period. The duration of the first period starts upon account creation.
+
+```bash
+simd tx vesting create-periodic-vesting-account [to_address] [periods_json_file] [flags]
+```
+
+Example:
+
+```bash
+simd tx vesting create-periodic-vesting-account cosmos1.. periods.json
+```
+
+#### create-vesting-account
+
+The `create-vesting-account` command creates a new vesting account funded with an allocation of tokens. The account can either be a delayed or continuous vesting account, which is determined by the '--delayed' flag. All vesting accounts created will have their start time set by the committed block's time. The end\_time must be provided as a UNIX epoch timestamp.
+
+```bash
+simd tx vesting create-vesting-account [to_address] [amount] [end_time] [flags]
+```
+
+Example:
+
+```bash
+simd tx vesting create-vesting-account cosmos1.. 100stake 2592000
+```
diff --git a/sdk/next/build/modules/authz/README.mdx b/sdk/next/build/modules/authz/README.mdx
new file mode 100644
index 000000000..bbff45963
--- /dev/null
+++ b/sdk/next/build/modules/authz/README.mdx
@@ -0,0 +1,1341 @@
+---
+title: 'x/authz'
+---
+
+## Abstract
+
+`x/authz` is an implementation of a Cosmos SDK module, per [ADR 30](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-030-authz-module.md), that allows
+granting arbitrary privileges from one account (the granter) to another account (the grantee). Authorizations must be granted for a particular Msg service method one by one using an implementation of the `Authorization` interface.
+
+## Contents
+
+* [Concepts](#concepts)
+ * [Authorization and Grant](#authorization-and-grant)
+ * [Built-in Authorizations](#built-in-authorizations)
+ * [Gas](#gas)
+* [State](#state)
+ * [Grant](#grant)
+ * [GrantQueue](#grantqueue)
+* [Messages](#messages)
+ * [MsgGrant](#msggrant)
+ * [MsgRevoke](#msgrevoke)
+ * [MsgExec](#msgexec)
+* [Events](#events)
+* [Client](#client)
+ * [CLI](#cli)
+ * [gRPC](#grpc)
+ * [REST](#rest)
+
+## Concepts
+
+### Authorization and Grant
+
+The `x/authz` module defines interfaces and messages to grant authorizations to perform actions
+on behalf of one account to other accounts. The design is defined in [ADR 030](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-030-authz-module.md).
+
+A *grant* is an allowance to execute a Msg by the grantee on behalf of the granter.
+Authorization is an interface that must be implemented by a concrete authorization logic to validate and execute grants. Authorizations are extensible and can be defined for any Msg service method even outside of the module where the Msg method is defined. See the `SendAuthorization` example in the next section for more details.
+
+**Note:** The authz module is different from the [auth (authentication)](/sdk/v0.53/build/modules/auth/auth/) module that is responsible for specifying the base transaction and account types.
+
+```go expandable
+package authz
+
+import (
+
+ "github.com/cosmos/gogoproto/proto"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+// Authorization represents the interface of various Authorization types implemented
+// by other modules.
+type Authorization interface {
+ proto.Message
+
+ // MsgTypeURL returns the fully-qualified Msg service method URL (as described in ADR 031),
+ // which will process and accept or reject a request.
+ MsgTypeURL()
+
+string
+
+ // Accept determines whether this grant permits the provided sdk.Msg to be performed,
+ // and if so provides an upgraded authorization instance.
+ Accept(ctx sdk.Context, msg sdk.Msg) (AcceptResponse, error)
+
+ // ValidateBasic does a simple validation check that
+ // doesn't require access to any other information.
+ ValidateBasic()
+
+error
+}
+
+// AcceptResponse instruments the controller of an authz message if the request is accepted
+// and if it should be updated or deleted.
+type AcceptResponse struct {
+ // If Accept=true, the controller can accept and authorization and handle the update.
+ Accept bool
+ // If Delete=true, the controller must delete the authorization object and release
+ // storage resources.
+ Delete bool
+ // Controller, who is calling Authorization.Accept must check if `Updated != nil`. If yes,
+ // it must use the updated version and handle the update on the storage level.
+ Updated Authorization
+}
+```
+
+### Built-in Authorizations
+
+The Cosmos SDK `x/authz` module comes with the following authorization types:
+
+#### GenericAuthorization
+
+`GenericAuthorization` implements the `Authorization` interface that gives unrestricted permission to execute the provided Msg on behalf of granter's account.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/authz/v1beta1/authz.proto#L14-L22
+```
+
+```go expandable
+package authz
+
+import (
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+var _ Authorization = &GenericAuthorization{
+}
+
+// NewGenericAuthorization creates a new GenericAuthorization object.
+func NewGenericAuthorization(msgTypeURL string) *GenericAuthorization {
+ return &GenericAuthorization{
+ Msg: msgTypeURL,
+}
+}
+
+// MsgTypeURL implements Authorization.MsgTypeURL.
+func (a GenericAuthorization)
+
+MsgTypeURL()
+
+string {
+ return a.Msg
+}
+
+// Accept implements Authorization.Accept.
+func (a GenericAuthorization)
+
+Accept(ctx sdk.Context, msg sdk.Msg) (AcceptResponse, error) {
+ return AcceptResponse{
+ Accept: true
+}, nil
+}
+
+// ValidateBasic implements Authorization.ValidateBasic.
+func (a GenericAuthorization)
+
+ValidateBasic()
+
+error {
+ return nil
+}
+```
+
+* `msg` stores Msg type URL.
+
+#### SendAuthorization
+
+`SendAuthorization` implements the `Authorization` interface for the `cosmos.bank.v1beta1.MsgSend` Msg.
+
+* It takes a (positive) `SpendLimit` that specifies the maximum amount of tokens the grantee can spend. The `SpendLimit` is updated as the tokens are spent.
+* It takes an (optional) `AllowList` that specifies to which addresses a grantee can send token.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/bank/v1beta1/authz.proto#L11-L30
+```
+
+```go expandable
+package types
+
+import (
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/x/authz"
+)
+
+// TODO: Revisit this once we have proper gas fee framework.
+// Ref: https://github.com/cosmos/cosmos-sdk/issues/9054
+// Ref: https://github.com/cosmos/cosmos-sdk/discussions/9072
+const gasCostPerIteration = uint64(10)
+
+var _ authz.Authorization = &SendAuthorization{
+}
+
+// NewSendAuthorization creates a new SendAuthorization object.
+func NewSendAuthorization(spendLimit sdk.Coins, allowed []sdk.AccAddress) *SendAuthorization {
+ return &SendAuthorization{
+ AllowList: toBech32Addresses(allowed),
+ SpendLimit: spendLimit,
+}
+}
+
+// MsgTypeURL implements Authorization.MsgTypeURL.
+func (a SendAuthorization)
+
+MsgTypeURL()
+
+string {
+ return sdk.MsgTypeURL(&MsgSend{
+})
+}
+
+// Accept implements Authorization.Accept.
+func (a SendAuthorization)
+
+Accept(ctx sdk.Context, msg sdk.Msg) (authz.AcceptResponse, error) {
+ mSend, ok := msg.(*MsgSend)
+ if !ok {
+ return authz.AcceptResponse{
+}, sdkerrors.ErrInvalidType.Wrap("type mismatch")
+}
+ toAddr := mSend.ToAddress
+
+ limitLeft, isNegative := a.SpendLimit.SafeSub(mSend.Amount...)
+ if isNegative {
+ return authz.AcceptResponse{
+}, sdkerrors.ErrInsufficientFunds.Wrapf("requested amount is more than spend limit")
+}
+ if limitLeft.IsZero() {
+ return authz.AcceptResponse{
+ Accept: true,
+ Delete: true
+}, nil
+}
+ isAddrExists := false
+ allowedList := a.GetAllowList()
+ for _, addr := range allowedList {
+ ctx.GasMeter().ConsumeGas(gasCostPerIteration, "send authorization")
+ if addr == toAddr {
+ isAddrExists = true
+ break
+}
+
+}
+ if len(allowedList) > 0 && !isAddrExists {
+ return authz.AcceptResponse{
+}, sdkerrors.ErrUnauthorized.Wrapf("cannot send to %s address", toAddr)
+}
+
+return authz.AcceptResponse{
+ Accept: true,
+ Delete: false,
+ Updated: &SendAuthorization{
+ SpendLimit: limitLeft,
+ AllowList: allowedList
+}}, nil
+}
+
+// ValidateBasic implements Authorization.ValidateBasic.
+func (a SendAuthorization)
+
+ValidateBasic()
+
+error {
+ if a.SpendLimit == nil {
+ return sdkerrors.ErrInvalidCoins.Wrap("spend limit cannot be nil")
+}
+ if !a.SpendLimit.IsAllPositive() {
+ return sdkerrors.ErrInvalidCoins.Wrapf("spend limit must be positive")
+}
+ found := make(map[string]bool, 0)
+ for i := 0; i < len(a.AllowList); i++ {
+ if found[a.AllowList[i]] {
+ return ErrDuplicateEntry
+}
+
+found[a.AllowList[i]] = true
+}
+
+return nil
+}
+
+func toBech32Addresses(allowed []sdk.AccAddress) []string {
+ if len(allowed) == 0 {
+ return nil
+}
+ allowedAddrs := make([]string, len(allowed))
+ for i, addr := range allowed {
+ allowedAddrs[i] = addr.String()
+}
+
+return allowedAddrs
+}
+```
+
+* `spend_limit` keeps track of how many coins are left in the authorization.
+* `allow_list` specifies an optional list of addresses to whom the grantee can send tokens on behalf of the granter.
+
+#### StakeAuthorization
+
+`StakeAuthorization` implements the `Authorization` interface for messages in the [staking module](/sdk/v0.53/build/modules/staking). It takes an `AuthorizationType` to specify whether you want to authorize delegating, undelegating or redelegating (i.e. these have to be authorized separately). It also takes an optional `MaxTokens` that keeps track of a limit to the amount of tokens that can be delegated/undelegated/redelegated. If left empty, the amount is unlimited. Additionally, this Msg takes an `AllowList` or a `DenyList`, which allows you to select which validators you allow or deny grantees to stake with.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/authz.proto#L11-L35
+```
+
+```go expandable
+package types
+
+import (
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/x/authz"
+)
+
+// TODO: Revisit this once we have proper gas fee framework.
+// Tracking issues https://github.com/cosmos/cosmos-sdk/issues/9054, https://github.com/cosmos/cosmos-sdk/discussions/9072
+const gasCostPerIteration = uint64(10)
+
+var _ authz.Authorization = &StakeAuthorization{
+}
+
+// NewStakeAuthorization creates a new StakeAuthorization object.
+func NewStakeAuthorization(allowed []sdk.ValAddress, denied []sdk.ValAddress, authzType AuthorizationType, amount *sdk.Coin) (*StakeAuthorization, error) {
+ allowedValidators, deniedValidators, err := validateAllowAndDenyValidators(allowed, denied)
+ if err != nil {
+ return nil, err
+}
+ a := StakeAuthorization{
+}
+ if allowedValidators != nil {
+ a.Validators = &StakeAuthorization_AllowList{
+ AllowList: &StakeAuthorization_Validators{
+ Address: allowedValidators
+}}
+
+}
+
+else {
+ a.Validators = &StakeAuthorization_DenyList{
+ DenyList: &StakeAuthorization_Validators{
+ Address: deniedValidators
+}}
+
+}
+ if amount != nil {
+ a.MaxTokens = amount
+}
+
+a.AuthorizationType = authzType
+
+ return &a, nil
+}
+
+// MsgTypeURL implements Authorization.MsgTypeURL.
+func (a StakeAuthorization)
+
+MsgTypeURL()
+
+string {
+ authzType, err := normalizeAuthzType(a.AuthorizationType)
+ if err != nil {
+ panic(err)
+}
+
+return authzType
+}
+
+func (a StakeAuthorization)
+
+ValidateBasic()
+
+error {
+ if a.MaxTokens != nil && a.MaxTokens.IsNegative() {
+ return sdkerrors.Wrapf(authz.ErrNegativeMaxTokens, "negative coin amount: %v", a.MaxTokens)
+}
+ if a.AuthorizationType == AuthorizationType_AUTHORIZATION_TYPE_UNSPECIFIED {
+ return authz.ErrUnknownAuthorizationType
+}
+
+return nil
+}
+
+// Accept implements Authorization.Accept.
+func (a StakeAuthorization)
+
+Accept(ctx sdk.Context, msg sdk.Msg) (authz.AcceptResponse, error) {
+ var validatorAddress string
+ var amount sdk.Coin
+ switch msg := msg.(type) {
+ case *MsgDelegate:
+ validatorAddress = msg.ValidatorAddress
+ amount = msg.Amount
+ case *MsgUndelegate:
+ validatorAddress = msg.ValidatorAddress
+ amount = msg.Amount
+ case *MsgBeginRedelegate:
+ validatorAddress = msg.ValidatorDstAddress
+ amount = msg.Amount
+ default:
+ return authz.AcceptResponse{
+}, sdkerrors.ErrInvalidRequest.Wrap("unknown msg type")
+}
+ isValidatorExists := false
+ allowedList := a.GetAllowList().GetAddress()
+ for _, validator := range allowedList {
+ ctx.GasMeter().ConsumeGas(gasCostPerIteration, "stake authorization")
+ if validator == validatorAddress {
+ isValidatorExists = true
+ break
+}
+
+}
+ denyList := a.GetDenyList().GetAddress()
+ for _, validator := range denyList {
+ ctx.GasMeter().ConsumeGas(gasCostPerIteration, "stake authorization")
+ if validator == validatorAddress {
+ return authz.AcceptResponse{
+}, sdkerrors.ErrUnauthorized.Wrapf("cannot delegate/undelegate to %s validator", validator)
+}
+
+}
+ if len(allowedList) > 0 && !isValidatorExists {
+ return authz.AcceptResponse{
+}, sdkerrors.ErrUnauthorized.Wrapf("cannot delegate/undelegate to %s validator", validatorAddress)
+}
+ if a.MaxTokens == nil {
+ return authz.AcceptResponse{
+ Accept: true,
+ Delete: false,
+ Updated: &StakeAuthorization{
+ Validators: a.GetValidators(),
+ AuthorizationType: a.GetAuthorizationType()
+},
+}, nil
+}
+
+limitLeft, err := a.MaxTokens.SafeSub(amount)
+ if err != nil {
+ return authz.AcceptResponse{
+}, err
+}
+ if limitLeft.IsZero() {
+ return authz.AcceptResponse{
+ Accept: true,
+ Delete: true
+}, nil
+}
+
+return authz.AcceptResponse{
+ Accept: true,
+ Delete: false,
+ Updated: &StakeAuthorization{
+ Validators: a.GetValidators(),
+ AuthorizationType: a.GetAuthorizationType(),
+ MaxTokens: &limitLeft
+},
+}, nil
+}
+
+func validateAllowAndDenyValidators(allowed []sdk.ValAddress, denied []sdk.ValAddress) ([]string, []string, error) {
+ if len(allowed) == 0 && len(denied) == 0 {
+ return nil, nil, sdkerrors.ErrInvalidRequest.Wrap("both allowed & deny list cannot be empty")
+}
+ if len(allowed) > 0 && len(denied) > 0 {
+ return nil, nil, sdkerrors.ErrInvalidRequest.Wrap("cannot set both allowed & deny list")
+}
+ allowedValidators := make([]string, len(allowed))
+ if len(allowed) > 0 {
+ for i, validator := range allowed {
+ allowedValidators[i] = validator.String()
+}
+
+return allowedValidators, nil, nil
+}
+ deniedValidators := make([]string, len(denied))
+ for i, validator := range denied {
+ deniedValidators[i] = validator.String()
+}
+
+return nil, deniedValidators, nil
+}
+
+// Normalized Msg type URLs
+func normalizeAuthzType(authzType AuthorizationType) (string, error) {
+ switch authzType {
+ case AuthorizationType_AUTHORIZATION_TYPE_DELEGATE:
+ return sdk.MsgTypeURL(&MsgDelegate{
+}), nil
+ case AuthorizationType_AUTHORIZATION_TYPE_UNDELEGATE:
+ return sdk.MsgTypeURL(&MsgUndelegate{
+}), nil
+ case AuthorizationType_AUTHORIZATION_TYPE_REDELEGATE:
+ return sdk.MsgTypeURL(&MsgBeginRedelegate{
+}), nil
+ default:
+ return "", sdkerrors.Wrapf(authz.ErrUnknownAuthorizationType, "cannot normalize authz type with %T", authzType)
+}
+}
+```
+
+### Gas
+
+In order to prevent DoS attacks, granting `StakeAuthorization`s with `x/authz` incurs gas. `StakeAuthorization` allows you to authorize another account to delegate, undelegate, or redelegate to validators. The authorizer can define a list of validators they allow or deny delegations to. The Cosmos SDK iterates over these lists and charges 10 gas for each validator in both of the lists.
+
+Since the state maintains a list for granter, grantee pair with the same expiration, we are iterating over the list to remove the grant (in case of any revoke of a particular `msgType`) from the list and we are charging 20 gas per iteration.
+
+## State
+
+### Grant
+
+Grants are identified by combining granter address (the address bytes of the granter), grantee address (the address bytes of the grantee) and Authorization type (its type URL). Hence we only allow one grant for the (granter, grantee, Authorization) triple.
+
+* Grant: `0x01 | granter_address_len (1 byte) | granter_address_bytes | grantee_address_len (1 byte) | grantee_address_bytes | msgType_bytes -> ProtocolBuffer(AuthorizationGrant)`
+
+The grant object encapsulates an `Authorization` type and an expiration timestamp:
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/authz/v1beta1/authz.proto#L24-L32
+```
+
+### GrantQueue
+
+We are maintaining a queue for authz pruning. Whenever a grant is created, an item will be added to `GrantQueue` with a key of expiration, granter, grantee.
+
+In `EndBlock` (which runs for every block), expired grants are continuously checked and pruned by forming a prefix key with the current block time that passed the stored expiration in `GrantQueue`. All matched records from `GrantQueue` are iterated through and deleted from both the `GrantQueue` and the `Grant`s store.
+
+```go expandable
+package keeper
+
+import (
+
+ "fmt"
+ "strconv"
+ "time"
+ "github.com/cosmos/gogoproto/proto"
+ abci "github.com/tendermint/tendermint/abci/types"
+ "github.com/tendermint/tendermint/libs/log"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ storetypes "github.com/cosmos/cosmos-sdk/store/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/x/authz"
+)
+
+// TODO: Revisit this once we have proper gas fee framework.
+// Tracking issues https://github.com/cosmos/cosmos-sdk/issues/9054,
+// https://github.com/cosmos/cosmos-sdk/discussions/9072
+const gasCostPerIteration = uint64(20)
+
+type Keeper struct {
+ storeKey storetypes.StoreKey
+ cdc codec.BinaryCodec
+ router *baseapp.MsgServiceRouter
+ authKeeper authz.AccountKeeper
+}
+
+// NewKeeper constructs a message authorization Keeper
+func NewKeeper(storeKey storetypes.StoreKey, cdc codec.BinaryCodec, router *baseapp.MsgServiceRouter, ak authz.AccountKeeper)
+
+Keeper {
+ return Keeper{
+ storeKey: storeKey,
+ cdc: cdc,
+ router: router,
+ authKeeper: ak,
+}
+}
+
+// Logger returns a module-specific logger.
+func (k Keeper)
+
+Logger(ctx sdk.Context)
+
+log.Logger {
+ return ctx.Logger().With("module", fmt.Sprintf("x/%s", authz.ModuleName))
+}
+
+// getGrant returns grant stored at skey.
+func (k Keeper)
+
+getGrant(ctx sdk.Context, skey []byte) (grant authz.Grant, found bool) {
+ store := ctx.KVStore(k.storeKey)
+ bz := store.Get(skey)
+ if bz == nil {
+ return grant, false
+}
+
+k.cdc.MustUnmarshal(bz, &grant)
+
+return grant, true
+}
+
+func (k Keeper)
+
+update(ctx sdk.Context, grantee sdk.AccAddress, granter sdk.AccAddress, updated authz.Authorization)
+
+error {
+ skey := grantStoreKey(grantee, granter, updated.MsgTypeURL())
+
+grant, found := k.getGrant(ctx, skey)
+ if !found {
+ return authz.ErrNoAuthorizationFound
+}
+
+msg, ok := updated.(proto.Message)
+ if !ok {
+ return sdkerrors.ErrPackAny.Wrapf("cannot proto marshal %T", updated)
+}
+
+any, err := codectypes.NewAnyWithValue(msg)
+ if err != nil {
+ return err
+}
+
+grant.Authorization = any
+ store := ctx.KVStore(k.storeKey)
+
+store.Set(skey, k.cdc.MustMarshal(&grant))
+
+return nil
+}
+
+// DispatchActions attempts to execute the provided messages via authorization
+// grants from the message signer to the grantee.
+func (k Keeper)
+
+DispatchActions(ctx sdk.Context, grantee sdk.AccAddress, msgs []sdk.Msg) ([][]byte, error) {
+ results := make([][]byte, len(msgs))
+ now := ctx.BlockTime()
+ for i, msg := range msgs {
+ signers := msg.GetSigners()
+ if len(signers) != 1 {
+ return nil, authz.ErrAuthorizationNumOfSigners
+}
+ granter := signers[0]
+
+ // If granter != grantee then check authorization.Accept, otherwise we
+ // implicitly accept.
+ if !granter.Equals(grantee) {
+ skey := grantStoreKey(grantee, granter, sdk.MsgTypeURL(msg))
+
+grant, found := k.getGrant(ctx, skey)
+ if !found {
+ return nil, sdkerrors.Wrapf(authz.ErrNoAuthorizationFound, "failed to update grant with key %s", string(skey))
+}
+ if grant.Expiration != nil && grant.Expiration.Before(now) {
+ return nil, authz.ErrAuthorizationExpired
+}
+
+authorization, err := grant.GetAuthorization()
+ if err != nil {
+ return nil, err
+}
+
+resp, err := authorization.Accept(ctx, msg)
+ if err != nil {
+ return nil, err
+}
+ if resp.Delete {
+ err = k.DeleteGrant(ctx, grantee, granter, sdk.MsgTypeURL(msg))
+}
+
+else if resp.Updated != nil {
+ err = k.update(ctx, grantee, granter, resp.Updated)
+}
+ if err != nil {
+ return nil, err
+}
+ if !resp.Accept {
+ return nil, sdkerrors.ErrUnauthorized
+}
+
+}
+ handler := k.router.Handler(msg)
+ if handler == nil {
+ return nil, sdkerrors.ErrUnknownRequest.Wrapf("unrecognized message route: %s", sdk.MsgTypeURL(msg))
+}
+
+msgResp, err := handler(ctx, msg)
+ if err != nil {
+ return nil, sdkerrors.Wrapf(err, "failed to execute message; message %v", msg)
+}
+
+results[i] = msgResp.Data
+
+ // emit the events from the dispatched actions
+ events := msgResp.Events
+ sdkEvents := make([]sdk.Event, 0, len(events))
+ for _, event := range events {
+ e := event
+ e.Attributes = append(e.Attributes, abci.EventAttribute{
+ Key: "authz_msg_index",
+ Value: strconv.Itoa(i)
+})
+
+sdkEvents = append(sdkEvents, sdk.Event(e))
+}
+
+ctx.EventManager().EmitEvents(sdkEvents)
+}
+
+return results, nil
+}
+
+// SaveGrant method grants the provided authorization to the grantee on the granter's account
+// with the provided expiration time and insert authorization key into the grants queue. If there is an existing authorization grant for the
+// same `sdk.Msg` type, this grant overwrites that.
+func (k Keeper)
+
+SaveGrant(ctx sdk.Context, grantee, granter sdk.AccAddress, authorization authz.Authorization, expiration *time.Time)
+
+error {
+ store := ctx.KVStore(k.storeKey)
+ msgType := authorization.MsgTypeURL()
+ skey := grantStoreKey(grantee, granter, msgType)
+
+grant, err := authz.NewGrant(ctx.BlockTime(), authorization, expiration)
+ if err != nil {
+ return err
+}
+
+var oldExp *time.Time
+ if oldGrant, found := k.getGrant(ctx, skey); found {
+ oldExp = oldGrant.Expiration
+}
+ if oldExp != nil && (expiration == nil || !oldExp.Equal(*expiration)) {
+ if err = k.removeFromGrantQueue(ctx, skey, granter, grantee, *oldExp); err != nil {
+ return err
+}
+
+}
+
+ // If the expiration didn't change, then we don't remove it and we should not insert again
+ if expiration != nil && (oldExp == nil || !oldExp.Equal(*expiration)) {
+ if err = k.insertIntoGrantQueue(ctx, granter, grantee, msgType, *expiration); err != nil {
+ return err
+}
+
+}
+ bz := k.cdc.MustMarshal(&grant)
+
+store.Set(skey, bz)
+
+return ctx.EventManager().EmitTypedEvent(&authz.EventGrant{
+ MsgTypeUrl: authorization.MsgTypeURL(),
+ Granter: granter.String(),
+ Grantee: grantee.String(),
+})
+}
+
+// DeleteGrant revokes any authorization for the provided message type granted to the grantee
+// by the granter.
+func (k Keeper)
+
+DeleteGrant(ctx sdk.Context, grantee sdk.AccAddress, granter sdk.AccAddress, msgType string)
+
+error {
+ store := ctx.KVStore(k.storeKey)
+ skey := grantStoreKey(grantee, granter, msgType)
+
+grant, found := k.getGrant(ctx, skey)
+ if !found {
+ return sdkerrors.Wrapf(authz.ErrNoAuthorizationFound, "failed to delete grant with key %s", string(skey))
+}
+ if grant.Expiration != nil {
+ err := k.removeFromGrantQueue(ctx, skey, granter, grantee, *grant.Expiration)
+ if err != nil {
+ return err
+}
+
+}
+
+store.Delete(skey)
+
+return ctx.EventManager().EmitTypedEvent(&authz.EventRevoke{
+ MsgTypeUrl: msgType,
+ Granter: granter.String(),
+ Grantee: grantee.String(),
+})
+}
+
+// GetAuthorizations Returns list of `Authorizations` granted to the grantee by the granter.
+func (k Keeper)
+
+GetAuthorizations(ctx sdk.Context, grantee sdk.AccAddress, granter sdk.AccAddress) ([]authz.Authorization, error) {
+ store := ctx.KVStore(k.storeKey)
+ key := grantStoreKey(grantee, granter, "")
+ iter := sdk.KVStorePrefixIterator(store, key)
+
+defer iter.Close()
+
+var authorization authz.Grant
+ var authorizations []authz.Authorization
+ for ; iter.Valid(); iter.Next() {
+ if err := k.cdc.Unmarshal(iter.Value(), &authorization); err != nil {
+ return nil, err
+}
+
+a, err := authorization.GetAuthorization()
+ if err != nil {
+ return nil, err
+}
+
+authorizations = append(authorizations, a)
+}
+
+return authorizations, nil
+}
+
+// GetAuthorization returns an Authorization and it's expiration time.
+// A nil Authorization is returned under the following circumstances:
+// - No grant is found.
+// - A grant is found, but it is expired.
+// - There was an error getting the authorization from the grant.
+func (k Keeper)
+
+GetAuthorization(ctx sdk.Context, grantee sdk.AccAddress, granter sdk.AccAddress, msgType string) (authz.Authorization, *time.Time) {
+ grant, found := k.getGrant(ctx, grantStoreKey(grantee, granter, msgType))
+ if !found || (grant.Expiration != nil && grant.Expiration.Before(ctx.BlockHeader().Time)) {
+ return nil, nil
+}
+
+auth, err := grant.GetAuthorization()
+ if err != nil {
+ return nil, nil
+}
+
+return auth, grant.Expiration
+}
+
+// IterateGrants iterates over all authorization grants
+// This function should be used with caution because it can involve significant IO operations.
+// It should not be used in query or msg services without charging additional gas.
+// The iteration stops when the handler function returns true or the iterator exhaust.
+func (k Keeper)
+
+IterateGrants(ctx sdk.Context,
+ handler func(granterAddr sdk.AccAddress, granteeAddr sdk.AccAddress, grant authz.Grant)
+
+bool,
+) {
+ store := ctx.KVStore(k.storeKey)
+ iter := sdk.KVStorePrefixIterator(store, GrantKey)
+
+defer iter.Close()
+ for ; iter.Valid(); iter.Next() {
+ var grant authz.Grant
+ granterAddr, granteeAddr, _ := parseGrantStoreKey(iter.Key())
+
+k.cdc.MustUnmarshal(iter.Value(), &grant)
+ if handler(granterAddr, granteeAddr, grant) {
+ break
+}
+
+}
+}
+
+func (k Keeper)
+
+getGrantQueueItem(ctx sdk.Context, expiration time.Time, granter, grantee sdk.AccAddress) (*authz.GrantQueueItem, error) {
+ store := ctx.KVStore(k.storeKey)
+ bz := store.Get(GrantQueueKey(expiration, granter, grantee))
+ if bz == nil {
+ return &authz.GrantQueueItem{
+}, nil
+}
+
+var queueItems authz.GrantQueueItem
+ if err := k.cdc.Unmarshal(bz, &queueItems); err != nil {
+ return nil, err
+}
+
+return &queueItems, nil
+}
+
+func (k Keeper)
+
+setGrantQueueItem(ctx sdk.Context, expiration time.Time,
+ granter sdk.AccAddress, grantee sdk.AccAddress, queueItems *authz.GrantQueueItem,
+)
+
+error {
+ store := ctx.KVStore(k.storeKey)
+
+bz, err := k.cdc.Marshal(queueItems)
+ if err != nil {
+ return err
+}
+
+store.Set(GrantQueueKey(expiration, granter, grantee), bz)
+
+return nil
+}
+
+// insertIntoGrantQueue inserts a grant key into the grant queue
+func (k Keeper)
+
+insertIntoGrantQueue(ctx sdk.Context, granter, grantee sdk.AccAddress, msgType string, expiration time.Time)
+
+error {
+ queueItems, err := k.getGrantQueueItem(ctx, expiration, granter, grantee)
+ if err != nil {
+ return err
+}
+ if len(queueItems.MsgTypeUrls) == 0 {
+ k.setGrantQueueItem(ctx, expiration, granter, grantee, &authz.GrantQueueItem{
+ MsgTypeUrls: []string{
+ msgType
+},
+})
+}
+
+else {
+ queueItems.MsgTypeUrls = append(queueItems.MsgTypeUrls, msgType)
+
+k.setGrantQueueItem(ctx, expiration, granter, grantee, queueItems)
+}
+
+return nil
+}
+
+// removeFromGrantQueue removes a grant key from the grant queue
+func (k Keeper)
+
+removeFromGrantQueue(ctx sdk.Context, grantKey []byte, granter, grantee sdk.AccAddress, expiration time.Time)
+
+error {
+ store := ctx.KVStore(k.storeKey)
+ key := GrantQueueKey(expiration, granter, grantee)
+ bz := store.Get(key)
+ if bz == nil {
+ return sdkerrors.Wrap(authz.ErrNoGrantKeyFound, "can't remove grant from the expire queue, grant key not found")
+}
+
+var queueItem authz.GrantQueueItem
+ if err := k.cdc.Unmarshal(bz, &queueItem); err != nil {
+ return err
+}
+
+ _, _, msgType := parseGrantStoreKey(grantKey)
+ queueItems := queueItem.MsgTypeUrls
+ for index, typeURL := range queueItems {
+ ctx.GasMeter().ConsumeGas(gasCostPerIteration, "grant queue")
+ if typeURL == msgType {
+ end := len(queueItem.MsgTypeUrls) - 1
+ queueItems[index] = queueItems[end]
+ queueItems = queueItems[:end]
+ if err := k.setGrantQueueItem(ctx, expiration, granter, grantee, &authz.GrantQueueItem{
+ MsgTypeUrls: queueItems,
+}); err != nil {
+ return err
+}
+
+break
+}
+
+}
+
+return nil
+}
+
+// DequeueAndDeleteExpiredGrants deletes expired grants from the state and grant queue.
+func (k Keeper)
+
+DequeueAndDeleteExpiredGrants(ctx sdk.Context)
+
+error {
+ store := ctx.KVStore(k.storeKey)
+ iterator := store.Iterator(GrantQueuePrefix, sdk.InclusiveEndBytes(GrantQueueTimePrefix(ctx.BlockTime())))
+
+defer iterator.Close()
+ for ; iterator.Valid(); iterator.Next() {
+ var queueItem authz.GrantQueueItem
+ if err := k.cdc.Unmarshal(iterator.Value(), &queueItem); err != nil {
+ return err
+}
+
+ _, granter, grantee, err := parseGrantQueueKey(iterator.Key())
+ if err != nil {
+ return err
+}
+
+store.Delete(iterator.Key())
+ for _, typeURL := range queueItem.MsgTypeUrls {
+ store.Delete(grantStoreKey(grantee, granter, typeURL))
+}
+
+}
+
+return nil
+}
+```
+
+* GrantQueue: `0x02 | expiration_bytes | granter_address_len (1 byte) | granter_address_bytes | grantee_address_len (1 byte) | grantee_address_bytes -> ProtocolBuffer(GrantQueueItem)`
+
+The `expiration_bytes` are the expiration date in UTC with the format `"2006-01-02T15:04:05.000000000"`.
+
+```go expandable
+package keeper
+
+import (
+
+ "time"
+ "github.com/cosmos/cosmos-sdk/internal/conv"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/address"
+ "github.com/cosmos/cosmos-sdk/types/kv"
+ "github.com/cosmos/cosmos-sdk/x/authz"
+)
+
+// Keys for store prefixes
+// Items are stored with the following key: values
+//
+// - 0x01: Grant
+// - 0x02: GrantQueueItem
+var (
+ GrantKey = []byte{0x01
+} // prefix for each key
+ GrantQueuePrefix = []byte{0x02
+}
+)
+
+var lenTime = len(sdk.FormatTimeBytes(time.Now()))
+
+// StoreKey is the store key string for authz
+const StoreKey = authz.ModuleName
+
+// grantStoreKey - return authorization store key
+// Items are stored with the following key: values
+//
+// - 0x01: Grant
+func grantStoreKey(grantee sdk.AccAddress, granter sdk.AccAddress, msgType string) []byte {
+ m := conv.UnsafeStrToBytes(msgType)
+
+granter = address.MustLengthPrefix(granter)
+
+grantee = address.MustLengthPrefix(grantee)
+ key := sdk.AppendLengthPrefixedBytes(GrantKey, granter, grantee, m)
+
+return key
+}
+
+// parseGrantStoreKey - split granter, grantee address and msg type from the authorization key
+func parseGrantStoreKey(key []byte) (granterAddr, granteeAddr sdk.AccAddress, msgType string) {
+ // key is of format:
+ // 0x01
+
+ granterAddrLen, granterAddrLenEndIndex := sdk.ParseLengthPrefixedBytes(key, 1, 1) // ignore key[0] since it is a prefix key
+ granterAddr, granterAddrEndIndex := sdk.ParseLengthPrefixedBytes(key, granterAddrLenEndIndex+1, int(granterAddrLen[0]))
+
+granteeAddrLen, granteeAddrLenEndIndex := sdk.ParseLengthPrefixedBytes(key, granterAddrEndIndex+1, 1)
+
+granteeAddr, granteeAddrEndIndex := sdk.ParseLengthPrefixedBytes(key, granteeAddrLenEndIndex+1, int(granteeAddrLen[0]))
+
+kv.AssertKeyAtLeastLength(key, granteeAddrEndIndex+1)
+
+return granterAddr, granteeAddr, conv.UnsafeBytesToStr(key[(granteeAddrEndIndex + 1):])
+}
+
+// parseGrantQueueKey split expiration time, granter and grantee from the grant queue key
+func parseGrantQueueKey(key []byte) (time.Time, sdk.AccAddress, sdk.AccAddress, error) {
+ // key is of format:
+ // 0x02
+
+ expBytes, expEndIndex := sdk.ParseLengthPrefixedBytes(key, 1, lenTime)
+
+exp, err := sdk.ParseTimeBytes(expBytes)
+ if err != nil {
+ return exp, nil, nil, err
+}
+
+granterAddrLen, granterAddrLenEndIndex := sdk.ParseLengthPrefixedBytes(key, expEndIndex+1, 1)
+
+granter, granterEndIndex := sdk.ParseLengthPrefixedBytes(key, granterAddrLenEndIndex+1, int(granterAddrLen[0]))
+
+granteeAddrLen, granteeAddrLenEndIndex := sdk.ParseLengthPrefixedBytes(key, granterEndIndex+1, 1)
+
+grantee, _ := sdk.ParseLengthPrefixedBytes(key, granteeAddrLenEndIndex+1, int(granteeAddrLen[0]))
+
+return exp, granter, grantee, nil
+}
+
+// GrantQueueKey - return grant queue store key. If a given grant doesn't have a defined
+// expiration, then it should not be used in the pruning queue.
+// Key format is:
+//
+// 0x02: GrantQueueItem
+func GrantQueueKey(expiration time.Time, granter sdk.AccAddress, grantee sdk.AccAddress) []byte {
+ exp := sdk.FormatTimeBytes(expiration)
+
+granter = address.MustLengthPrefix(granter)
+
+grantee = address.MustLengthPrefix(grantee)
+
+return sdk.AppendLengthPrefixedBytes(GrantQueuePrefix, exp, granter, grantee)
+}
+
+// GrantQueueTimePrefix - return grant queue time prefix
+func GrantQueueTimePrefix(expiration time.Time) []byte {
+ return append(GrantQueuePrefix, sdk.FormatTimeBytes(expiration)...)
+}
+
+// firstAddressFromGrantStoreKey parses the first address only
+func firstAddressFromGrantStoreKey(key []byte) sdk.AccAddress {
+	// The first byte is the length prefix of the address.
+	addrLen := key[0]
+	return sdk.AccAddress(key[1 : 1+addrLen])
+}
+
+```
+
+The `GrantQueueItem` object contains the list of type urls between granter and grantee that expire at the time indicated in the key.
+
+## Messages
+
+In this section we describe the processing of messages for the authz module.
+
+### MsgGrant
+
+An authorization grant is created using the `MsgGrant` message.
+If there is already a grant for the `(granter, grantee, Authorization)` triple, then the new grant overwrites the previous one. To update or extend an existing grant, a new grant with the same `(granter, grantee, Authorization)` triple should be created.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/authz/v1beta1/tx.proto#L35-L45
+```
+
+The message handling should fail if:
+
+* both granter and grantee have the same address.
+* provided `Expiration` time is less than current unix timestamp (but a grant will be created if no `expiration` time is provided since `expiration` is optional).
+* provided `Grant.Authorization` is not implemented.
+* `Authorization.MsgTypeURL()` is not defined in the router (there is no defined handler in the app router to handle that Msg type).
+
+### MsgRevoke
+
+A grant can be removed with the `MsgRevoke` message.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/authz/v1beta1/tx.proto#L69-L78
+```
+
+The message handling should fail if:
+
+* both granter and grantee have the same address.
+* provided `MsgTypeUrl` is empty.
+
+NOTE: The `MsgExec` message removes a grant if the grant has expired.
+
+### MsgExec
+
+When a grantee wants to execute a transaction on behalf of a granter, they must send `MsgExec`.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/authz/v1beta1/tx.proto#L52-L63
+```
+
+The message handling should fail if:
+
+* provided `Authorization` is not implemented.
+* grantee doesn't have permission to run the transaction.
+* the granted authorization has expired.
+
+## Events
+
+The authz module emits proto events defined in [the Protobuf reference](https://buf.build/cosmos/cosmos-sdk/docs/main/cosmos.authz.v1beta1#cosmos.authz.v1beta1.EventGrant).
+
+## Client
+
+### CLI
+
+A user can query and interact with the `authz` module using the CLI.
+
+#### Query
+
+The `query` commands allow users to query `authz` state.
+
+```bash
+simd query authz --help
+```
+
+##### grants
+
+The `grants` command allows users to query grants for a granter-grantee pair. If the message type URL is set, it selects grants only for that message type.
+
+```bash
+simd query authz grants [granter-addr] [grantee-addr] [msg-type-url]? [flags]
+```
+
+Example:
+
+```bash
+simd query authz grants cosmos1.. cosmos1.. /cosmos.bank.v1beta1.MsgSend
+```
+
+Example Output:
+
+```bash
+grants:
+- authorization:
+ '@type': /cosmos.bank.v1beta1.SendAuthorization
+ spend_limit:
+ - amount: "100"
+ denom: stake
+ expiration: "2022-01-01T00:00:00Z"
+pagination: null
+```
+
+#### Transactions
+
+The `tx` commands allow users to interact with the `authz` module.
+
+```bash
+simd tx authz --help
+```
+
+##### exec
+
+The `exec` command allows a grantee to execute a transaction on behalf of the granter.
+
+```bash
+ simd tx authz exec [tx-json-file] --from [grantee] [flags]
+```
+
+Example:
+
+```bash
+simd tx authz exec tx.json --from=cosmos1..
+```
+
+##### grant
+
+The `grant` command allows a granter to grant an authorization to a grantee.
+
+```bash
+simd tx authz grant --from [flags]
+```
+
+* The `send` authorization\_type refers to the built-in `SendAuthorization` type. The custom flags available are `spend-limit` (required) and `allow-list` (optional), documented [here](#SendAuthorization).
+
+Example:
+
+```bash
+ simd tx authz grant cosmos1.. send --spend-limit=100stake --allow-list=cosmos1...,cosmos2... --from=cosmos1..
+```
+
+* The `generic` authorization\_type refers to the built-in `GenericAuthorization` type. The custom flag available is `msg-type` (required), documented [here](#GenericAuthorization).
+
+> Note: `msg-type` is any valid Cosmos SDK `Msg` type url.
+
+Example:
+
+```bash
+ simd tx authz grant cosmos1.. generic --msg-type=/cosmos.bank.v1beta1.MsgSend --from=cosmos1..
+```
+
+* The `delegate`,`unbond`,`redelegate` authorization\_types refer to the built-in `StakeAuthorization` type. The custom flags available are `spend-limit` (optional), `allowed-validators` (optional) and `deny-validators` (optional) documented [here](#StakeAuthorization).
+
+> Note: `allowed-validators` and `deny-validators` cannot both be empty. `spend-limit` represents the `MaxTokens`
+
+Example:
+
+```bash
+simd tx authz grant cosmos1.. delegate --spend-limit=100stake --allowed-validators=cosmos...,cosmos... --deny-validators=cosmos... --from=cosmos1..
+```
+
+##### revoke
+
+The `revoke` command allows a granter to revoke an authorization from a grantee.
+
+```bash
+simd tx authz revoke [grantee] [msg-type-url] --from=[granter] [flags]
+```
+
+Example:
+
+```bash
+simd tx authz revoke cosmos1.. /cosmos.bank.v1beta1.MsgSend --from=cosmos1..
+```
+
+### gRPC
+
+A user can query the `authz` module using gRPC endpoints.
+
+#### Grants
+
+The `Grants` endpoint allows users to query grants for a granter-grantee pair. If the message type URL is set, it selects grants only for that message type.
+
+```bash
+cosmos.authz.v1beta1.Query/Grants
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"granter":"cosmos1..","grantee":"cosmos1..","msg_type_url":"/cosmos.bank.v1beta1.MsgSend"}' \
+ localhost:9090 \
+ cosmos.authz.v1beta1.Query/Grants
+```
+
+Example Output:
+
+```bash expandable
+{
+ "grants": [
+ {
+ "authorization": {
+ "@type": "/cosmos.bank.v1beta1.SendAuthorization",
+ "spendLimit": [
+ {
+ "denom":"stake",
+ "amount":"100"
+ }
+ ]
+ },
+ "expiration": "2022-01-01T00:00:00Z"
+ }
+ ]
+}
+```
+
+### REST
+
+A user can query the `authz` module using REST endpoints.
+
+```bash
+/cosmos/authz/v1beta1/grants
+```
+
+Example:
+
+```bash
+curl "localhost:1317/cosmos/authz/v1beta1/grants?granter=cosmos1..&grantee=cosmos1..&msg_type_url=/cosmos.bank.v1beta1.MsgSend"
+```
+
+Example Output:
+
+```bash expandable
+{
+ "grants": [
+ {
+ "authorization": {
+ "@type": "/cosmos.bank.v1beta1.SendAuthorization",
+ "spend_limit": [
+ {
+ "denom": "stake",
+ "amount": "100"
+ }
+ ]
+ },
+ "expiration": "2022-01-01T00:00:00Z"
+ }
+ ],
+ "pagination": null
+}
+```
diff --git a/sdk/next/build/modules/bank/README.mdx b/sdk/next/build/modules/bank/README.mdx
new file mode 100644
index 000000000..8803705c3
--- /dev/null
+++ b/sdk/next/build/modules/bank/README.mdx
@@ -0,0 +1,1137 @@
+---
+title: 'x/bank'
+description: This document specifies the bank module of the Cosmos SDK.
+---
+
+## Abstract
+
+This document specifies the bank module of the Cosmos SDK.
+
+The bank module is responsible for handling multi-asset coin transfers between
+accounts and tracking special-case pseudo-transfers which must work differently
+with particular kinds of accounts (notably delegating/undelegating for vesting
+accounts). It exposes several interfaces with varying capabilities for secure
+interaction with other modules which must alter user balances.
+
+In addition, the bank module tracks and provides query support for the total
+supply of all assets used in the application.
+
+This module is used in the Cosmos Hub.
+
+## Contents
+
+* [Supply](#supply)
+ * [Total Supply](#total-supply)
+* [Module Accounts](#module-accounts)
+ * [Permissions](#permissions)
+* [State](#state)
+* [Params](#params)
+* [Keepers](#keepers)
+* [Messages](#messages)
+* [Events](#events)
+ * [Message Events](#message-events)
+ * [Keeper Events](#keeper-events)
+* [Parameters](#parameters)
+ * [SendEnabled](#sendenabled)
+ * [DefaultSendEnabled](#defaultsendenabled)
+* [Client](#client)
+ * [CLI](#cli)
+ * [Query](#query)
+ * [Transactions](#transactions)
+* [gRPC](#grpc)
+
+## Supply
+
+The `supply` functionality:
+
+* passively tracks the total supply of coins within a chain,
+* provides a pattern for modules to hold/interact with `Coins`, and
+* introduces the invariant check to verify a chain's total supply.
+
+### Total Supply
+
+The total `Supply` of the network is equal to the sum of all coins across all
+accounts. The total supply is updated every time a `Coin` is minted (eg: as part
+of the inflation mechanism) or burned (eg: due to slashing or if a governance
+proposal is vetoed).
+
+## Module Accounts
+
+The supply functionality introduces a new type of `auth.Account` which can be used by
+modules to allocate tokens and in special cases mint or burn tokens. At a base
+level these module accounts are capable of sending/receiving tokens to and from
+`auth.Account`s and other module accounts. This design replaces previous
+alternative designs where, to hold tokens, modules would burn the incoming
+tokens from the sender account, and then track those tokens internally. Later,
+in order to send tokens, the module would need to effectively mint tokens
+within a destination account. The new design removes duplicate logic between
+modules to perform this accounting.
+
+The `ModuleAccount` interface is defined as follows:
+
+```go
+type ModuleAccount interface {
+ auth.Account // same methods as the Account interface
+
+ GetName()
+
+string // name of the module; used to obtain the address
+ GetPermissions() []string // permissions of module account
+ HasPermission(string)
+
+bool
+}
+```
+
+> **WARNING!**
+> Any module or message handler that allows either direct or indirect sending of funds must explicitly guarantee those funds cannot be sent to module accounts (unless allowed).
+
+The supply `Keeper` also introduces new wrapper functions for the auth `Keeper`
+and the bank `Keeper` that are related to `ModuleAccount`s in order to be able
+to:
+
+* Get and set `ModuleAccount`s by providing the `Name`.
+* Send coins from and to other `ModuleAccount`s or standard `Account`s
+ (`BaseAccount` or `VestingAccount`) by passing only the `Name`.
+* `Mint` or `Burn` coins for a `ModuleAccount` (restricted to its permissions).
+
+### Permissions
+
+Each `ModuleAccount` has a different set of permissions that provide different
+object capabilities to perform certain actions. Permissions need to be
+registered upon the creation of the supply `Keeper` so that every time a
+`ModuleAccount` calls the allowed functions, the `Keeper` can lookup the
+permissions to that specific account and perform or not perform the action.
+
+The available permissions are:
+
+* `Minter`: allows for a module to mint a specific amount of coins.
+* `Burner`: allows for a module to burn a specific amount of coins.
+* `Staking`: allows for a module to delegate and undelegate a specific amount of coins.
+
+## State
+
+The `x/bank` module keeps state of the following primary objects:
+
+1. Account balances
+2. Denomination metadata
+3. The total supply of all balances
+4. Information on which denominations are allowed to be sent.
+
+In addition, the `x/bank` module keeps the following indexes to manage the
+aforementioned state:
+
+* Supply Index: `0x0 | byte(denom) -> byte(amount)`
+* Denom Metadata Index: `0x1 | byte(denom) -> ProtocolBuffer(Metadata)`
+* Balances Index: `0x2 | byte(address length) | []byte(address) | []byte(balance.Denom) -> ProtocolBuffer(balance)`
+* Reverse Denomination to Address Index: `0x03 | byte(denom) | 0x00 | []byte(address) -> 0`
+
+## Params
+
+The bank module stores its params in state with the prefix of `0x05`,
+it can be updated with governance or the address with authority.
+
+* Params: `0x05 | ProtocolBuffer(Params)`
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/bank/v1beta1/bank.proto#L12-L23
+```
+
+## Keepers
+
+The bank module provides these exported keeper interfaces that can be
+passed to other modules that read or update account balances. Modules
+should use the least-permissive interface that provides the functionality they
+require.
+
+Best practices dictate careful review of `bank` module code to ensure that
+permissions are limited in the way that you expect.
+
+### Denied Addresses
+
+The `x/bank` module accepts a map of addresses that are considered blocklisted
+from directly and explicitly receiving funds through means such as `MsgSend` and
+`MsgMultiSend` and direct API calls like `SendCoinsFromModuleToAccount`.
+
+Typically, these addresses are module accounts. If these addresses receive funds
+outside the expected rules of the state machine, invariants are likely to be
+broken and could result in a halted network.
+
+By providing the `x/bank` module with a blocklisted set of addresses, an error occurs for the operation if a user or client attempts to directly or indirectly send funds to a blocklisted account, for example, by using [IBC](/ibc/next/intro).
+
+### Common Types
+
+#### Input
+
+An input of a multiparty transfer
+
+```protobuf
+// Input models transaction input.
+message Input {
+ string address = 1;
+ repeated cosmos.base.v1beta1.Coin coins = 2;
+}
+```
+
+#### Output
+
+An output of a multiparty transfer.
+
+```protobuf
+// Output models transaction outputs.
+message Output {
+ string address = 1;
+ repeated cosmos.base.v1beta1.Coin coins = 2;
+}
+```
+
+### BaseKeeper
+
+The base keeper provides full-permission access: the ability to arbitrarily modify any account's balance and mint or burn coins.
+
+Restricted permission to mint per module could be achieved by using baseKeeper with `WithMintCoinsRestriction` to give specific restrictions to mint (e.g. only minting certain denom).
+
+```go expandable
+// Keeper defines a module interface that facilitates the transfer of coins
+// between accounts.
+type Keeper interface {
+ SendKeeper
+ WithMintCoinsRestriction(MintingRestrictionFn)
+
+BaseKeeper
+
+ InitGenesis(context.Context, *types.GenesisState)
+
+ExportGenesis(context.Context) *types.GenesisState
+
+ GetSupply(ctx context.Context, denom string)
+
+sdk.Coin
+ HasSupply(ctx context.Context, denom string)
+
+bool
+ GetPaginatedTotalSupply(ctx context.Context, pagination *query.PageRequest) (sdk.Coins, *query.PageResponse, error)
+
+IterateTotalSupply(ctx context.Context, cb func(sdk.Coin)
+
+bool)
+
+GetDenomMetaData(ctx context.Context, denom string) (types.Metadata, bool)
+
+HasDenomMetaData(ctx context.Context, denom string)
+
+bool
+ SetDenomMetaData(ctx context.Context, denomMetaData types.Metadata)
+
+IterateAllDenomMetaData(ctx context.Context, cb func(types.Metadata)
+
+bool)
+
+SendCoinsFromModuleToAccount(ctx context.Context, senderModule string, recipientAddr sdk.AccAddress, amt sdk.Coins)
+
+error
+ SendCoinsFromModuleToModule(ctx context.Context, senderModule, recipientModule string, amt sdk.Coins)
+
+error
+ SendCoinsFromAccountToModule(ctx context.Context, senderAddr sdk.AccAddress, recipientModule string, amt sdk.Coins)
+
+error
+ DelegateCoinsFromAccountToModule(ctx context.Context, senderAddr sdk.AccAddress, recipientModule string, amt sdk.Coins)
+
+error
+ UndelegateCoinsFromModuleToAccount(ctx context.Context, senderModule string, recipientAddr sdk.AccAddress, amt sdk.Coins)
+
+error
+ MintCoins(ctx context.Context, moduleName string, amt sdk.Coins)
+
+error
+ BurnCoins(ctx context.Context, moduleName string, amt sdk.Coins)
+
+error
+
+ DelegateCoins(ctx context.Context, delegatorAddr, moduleAccAddr sdk.AccAddress, amt sdk.Coins)
+
+error
+ UndelegateCoins(ctx context.Context, moduleAccAddr, delegatorAddr sdk.AccAddress, amt sdk.Coins)
+
+error
+
+ // GetAuthority gets the address capable of executing governance proposal messages. Usually the gov module account.
+ GetAuthority()
+
+string
+
+ types.QueryServer
+}
+```
+
+### SendKeeper
+
+The send keeper provides access to account balances and the ability to transfer coins between
+accounts. The send keeper does not alter the total supply (mint or burn coins).
+
+```go expandable
+// SendKeeper defines a module interface that facilitates the transfer of coins
+// between accounts without the possibility of creating coins.
+type SendKeeper interface {
+ ViewKeeper
+
+ AppendSendRestriction(restriction SendRestrictionFn)
+
+PrependSendRestriction(restriction SendRestrictionFn)
+
+ClearSendRestriction()
+
+InputOutputCoins(ctx context.Context, input types.Input, outputs []types.Output)
+
+error
+ SendCoins(ctx context.Context, fromAddr, toAddr sdk.AccAddress, amt sdk.Coins)
+
+error
+
+ GetParams(ctx context.Context)
+
+types.Params
+ SetParams(ctx context.Context, params types.Params)
+
+error
+
+ IsSendEnabledDenom(ctx context.Context, denom string)
+
+bool
+ SetSendEnabled(ctx context.Context, denom string, value bool)
+
+SetAllSendEnabled(ctx context.Context, sendEnableds []*types.SendEnabled)
+
+DeleteSendEnabled(ctx context.Context, denom string)
+
+IterateSendEnabledEntries(ctx context.Context, cb func(denom string, sendEnabled bool) (stop bool))
+
+GetAllSendEnabledEntries(ctx context.Context) []types.SendEnabled
+
+ IsSendEnabledCoin(ctx context.Context, coin sdk.Coin)
+
+bool
+ IsSendEnabledCoins(ctx context.Context, coins ...sdk.Coin)
+
+error
+
+ BlockedAddr(addr sdk.AccAddress)
+
+bool
+}
+```
+
+#### Send Restrictions
+
+The `SendKeeper` applies a `SendRestrictionFn` before each transfer of funds.
+
+```golang
+// A SendRestrictionFn can restrict sends and/or provide a new receiver address.
+type SendRestrictionFn func(ctx context.Context, fromAddr, toAddr sdk.AccAddress, amt sdk.Coins) (newToAddr sdk.AccAddress, err error)
+```
+
+After the `SendKeeper` (or `BaseKeeper`) has been created, send restrictions can be added to it using the `AppendSendRestriction` or `PrependSendRestriction` functions.
+Both functions compose the provided restriction with any previously provided restrictions.
+`AppendSendRestriction` adds the provided restriction to be run after any previously provided send restrictions.
+`PrependSendRestriction` adds the restriction to be run before any previously provided send restrictions.
+The composition will short-circuit when an error is encountered. I.e. if the first one returns an error, the second is not run.
+
+During `SendCoins`, the send restriction is applied before coins are removed from the from address and added to the to address.
+During `InputOutputCoins`, the send restriction is applied after the input coins are removed and once for each output before the funds are added.
+
+A send restriction function should make use of a custom value in the context to allow bypassing that specific restriction.
+
+Send Restrictions are not placed on `ModuleToAccount` or `ModuleToModule` transfers. This is done due to modules needing to move funds to user accounts and other module accounts. This is a design decision to allow for more flexibility in the state machine. The state machine should be able to move funds between module accounts and user accounts without restrictions.
+
+Secondly, restricting these transfers would limit the usage of the state machine even for itself: users would not be able to receive rewards or move funds between module accounts. In the case that a user sends funds from a user account to the community pool and a governance proposal is then used to return those tokens to the user's account, the handling falls under the discretion of the app chain developer. Strong assumptions cannot be made here.
+Thirdly, such a restriction could lead to a chain halt if a token is disabled and then moved in the begin/end block. For these reasons, applying send restrictions to module transfers is seen as more damaging than beneficial for users.
+
+For example, in your module's keeper package, you'd define the send restriction function:
+
+```golang expandable
+var _ banktypes.SendRestrictionFn = Keeper{
+}.SendRestrictionFn
+
+func (k Keeper)
+
+SendRestrictionFn(ctx context.Context, fromAddr, toAddr sdk.AccAddress, amt sdk.Coins) (sdk.AccAddress, error) {
+ // Bypass if the context says to.
+ if mymodule.HasBypass(ctx) {
+ return toAddr, nil
+}
+
+ // Your custom send restriction logic goes here.
+ return nil, errors.New("not implemented")
+}
+```
+
+The bank keeper should be provided to your keeper's constructor so the send restriction can be added to it:
+
+```golang
+func NewKeeper(cdc codec.BinaryCodec, storeKey storetypes.StoreKey, bankKeeper mymodule.BankKeeper)
+
+Keeper {
+ rv := Keeper{/*...*/
+}
+
+bankKeeper.AppendSendRestriction(rv.SendRestrictionFn)
+
+return rv
+}
+```
+
+Then, in the `mymodule` package, define the context helpers:
+
+```golang expandable
+const bypassKey = "bypass-mymodule-restriction"
+
+// WithBypass returns a new context that will cause the mymodule bank send restriction to be skipped.
+func WithBypass(ctx context.Context)
+
+context.Context {
+ return sdk.UnwrapSDKContext(ctx).WithValue(bypassKey, true)
+}
+
+// WithoutBypass returns a new context that will cause the mymodule bank send restriction to not be skipped.
+func WithoutBypass(ctx context.Context)
+
+context.Context {
+ return sdk.UnwrapSDKContext(ctx).WithValue(bypassKey, false)
+}
+
+// HasBypass checks the context to see if the mymodule bank send restriction should be skipped.
+func HasBypass(ctx context.Context)
+
+bool {
+ bypassValue := ctx.Value(bypassKey)
+ if bypassValue == nil {
+ return false
+}
+
+bypass, isBool := bypassValue.(bool)
+
+return isBool && bypass
+}
+```
+
+Now, anywhere where you want to use `SendCoins` or `InputOutputCoins`, but you don't want your send restriction applied:
+
+```golang
+func (k Keeper)
+
+DoThing(ctx context.Context, fromAddr, toAddr sdk.AccAddress, amt sdk.Coins)
+
+error {
+ return k.bankKeeper.SendCoins(mymodule.WithBypass(ctx), fromAddr, toAddr, amt)
+}
+```
+
+### ViewKeeper
+
+The view keeper provides read-only access to account balances. The view keeper does not have balance alteration functionality. All balance lookups are `O(1)`.
+
+```go expandable
+// ViewKeeper defines a module interface that facilitates read only access to
+// account balances.
+type ViewKeeper interface {
+ ValidateBalance(ctx context.Context, addr sdk.AccAddress)
+
+error
+ HasBalance(ctx context.Context, addr sdk.AccAddress, amt sdk.Coin)
+
+bool
+
+ GetAllBalances(ctx context.Context, addr sdk.AccAddress)
+
+sdk.Coins
+ GetAccountsBalances(ctx context.Context) []types.Balance
+ GetBalance(ctx context.Context, addr sdk.AccAddress, denom string)
+
+sdk.Coin
+ LockedCoins(ctx context.Context, addr sdk.AccAddress)
+
+sdk.Coins
+ SpendableCoins(ctx context.Context, addr sdk.AccAddress)
+
+sdk.Coins
+ SpendableCoin(ctx context.Context, addr sdk.AccAddress, denom string)
+
+sdk.Coin
+
+ IterateAccountBalances(ctx context.Context, addr sdk.AccAddress, cb func(coin sdk.Coin) (stop bool))
+
+IterateAllBalances(ctx context.Context, cb func(address sdk.AccAddress, coin sdk.Coin) (stop bool))
+}
+```
+
+## Messages
+
+### MsgSend
+
+Send coins from one address to another.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/bank/v1beta1/tx.proto#L38-L53
+```
+
+The message will fail under the following conditions:
+
+* The coins do not have sending enabled
+* The `to` address is restricted
+
+### MsgMultiSend
+
+Send coins from one sender to a series of different addresses. If any of the receiving addresses do not correspond to an existing account, a new account is created.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/bank/v1beta1/tx.proto#L58-L69
+```
+
+The message will fail under the following conditions:
+
+* Any of the coins do not have sending enabled
+* Any of the `to` addresses are restricted
+* Any of the coins are locked
+* The inputs and outputs do not correctly correspond to one another
+
+### MsgUpdateParams
+
+The `bank` module params can be updated through `MsgUpdateParams`, which can be done using governance proposal. The signer will always be the `gov` module account address.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/bank/v1beta1/tx.proto#L74-L88
+```
+
+The message handling can fail if:
+
+* signer is not the gov module account address.
+
+### MsgSetSendEnabled
+
+Used with the x/gov module to create or edit SendEnabled entries.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/bank/v1beta1/tx.proto#L96-L117
+```
+
+The message will fail under the following conditions:
+
+* The authority is not a bech32 address.
+* The authority is not x/gov module's address.
+* There are multiple SendEnabled entries with the same Denom.
+* One or more SendEnabled entries has an invalid Denom.
+
+## Events
+
+The bank module emits the following events:
+
+### Message Events
+
+#### MsgSend
+
+| Type | Attribute Key | Attribute Value |
+| -------- | ------------- | ------------------ |
+| transfer | recipient | `{recipientAddress}` |
+| transfer | amount | `{amount}` |
+| message | module | bank |
+| message | action | send |
+| message | sender | `{senderAddress}` |
+
+#### MsgMultiSend
+
+| Type | Attribute Key | Attribute Value |
+| -------- | ------------- | ------------------ |
+| transfer | recipient | `{recipientAddress}` |
+| transfer | amount | `{amount}` |
+| message | module | bank |
+| message | action | multisend |
+| message | sender | `{senderAddress}` |
+
+### Keeper Events
+
+In addition to message events, the bank keeper will produce events when the following methods are called (or any method which ends up calling them).
+
+#### MintCoins
+
+```json expandable
+{
+ "type": "coinbase",
+ "attributes": [
+ {
+ "key": "minter",
+ "value": "{{sdk.AccAddress of the module minting coins}}",
+ "index": true
+ },
+ {
+ "key": "amount",
+ "value": "{{sdk.Coins being minted}}",
+ "index": true
+ }
+ ]
+}
+```
+
+```json expandable
+{
+ "type": "coin_received",
+ "attributes": [
+ {
+ "key": "receiver",
+ "value": "{{sdk.AccAddress of the module minting coins}}",
+ "index": true
+ },
+ {
+ "key": "amount",
+ "value": "{{sdk.Coins being received}}",
+ "index": true
+ }
+ ]
+}
+```
+
+#### BurnCoins
+
+```json expandable
+{
+ "type": "burn",
+ "attributes": [
+ {
+ "key": "burner",
+ "value": "{{sdk.AccAddress of the module burning coins}}",
+ "index": true
+ },
+ {
+ "key": "amount",
+ "value": "{{sdk.Coins being burned}}",
+ "index": true
+ }
+ ]
+}
+```
+
+```json expandable
+{
+ "type": "coin_spent",
+ "attributes": [
+ {
+ "key": "spender",
+ "value": "{{sdk.AccAddress of the module burning coins}}",
+ "index": true
+ },
+ {
+ "key": "amount",
+ "value": "{{sdk.Coins being burned}}",
+ "index": true
+ }
+ ]
+}
+```
+
+#### addCoins
+
+```json expandable
+{
+ "type": "coin_received",
+ "attributes": [
+ {
+ "key": "receiver",
+ "value": "{{sdk.AccAddress of the address beneficiary of the coins}}",
+ "index": true
+ },
+ {
+ "key": "amount",
+ "value": "{{sdk.Coins being received}}",
+ "index": true
+ }
+ ]
+}
+```
+
+#### subUnlockedCoins/DelegateCoins
+
+```json expandable
+{
+ "type": "coin_spent",
+ "attributes": [
+ {
+ "key": "spender",
+ "value": "{{sdk.AccAddress of the address which is spending coins}}",
+ "index": true
+ },
+ {
+ "key": "amount",
+ "value": "{{sdk.Coins being spent}}",
+ "index": true
+ }
+ ]
+}
+```
+
+## Parameters
+
+The bank module contains the following parameters
+
+### SendEnabled
+
+The SendEnabled parameter is now deprecated and not to be used. It is replaced
+with state store records.
+
+### DefaultSendEnabled
+
+The default send enabled value controls send transfer capability for all
+coin denominations unless specifically included in the array of `SendEnabled`
+parameters.
+
+## Client
+
+### CLI
+
+A user can query and interact with the `bank` module using the CLI.
+
+#### Query
+
+The `query` commands allow users to query `bank` state.
+
+```shell
+simd query bank --help
+```
+
+##### balances
+
+The `balances` command allows users to query account balances by address.
+
+```shell
+simd query bank balances [address] [flags]
+```
+
+Example:
+
+```shell
+simd query bank balances cosmos1..
+```
+
+Example Output:
+
+```yml
+balances:
+- amount: "1000000000"
+ denom: stake
+pagination:
+ next_key: null
+ total: "0"
+```
+
+##### denom-metadata
+
+The `denom-metadata` command allows users to query metadata for coin denominations. A user can query metadata for a single denomination using the `--denom` flag or all denominations without it.
+
+```shell
+simd query bank denom-metadata [flags]
+```
+
+Example:
+
+```shell
+simd query bank denom-metadata --denom stake
+```
+
+Example Output:
+
+```yml
+metadata:
+ base: stake
+ denom_units:
+ - aliases:
+ - STAKE
+ denom: stake
+ description: native staking token of simulation app
+ display: stake
+ name: SimApp Token
+ symbol: STK
+```
+
+##### total
+
+The `total` command allows users to query the total supply of coins. A user can query the total supply for a single coin using the `--denom` flag or all coins without it.
+
+```shell
+simd query bank total [flags]
+```
+
+Example:
+
+```shell
+simd query bank total --denom stake
+```
+
+Example Output:
+
+```yml
+amount: "10000000000"
+denom: stake
+```
+
+##### send-enabled
+
+The `send-enabled` command allows users to query for all or some SendEnabled entries.
+
+```shell
+simd query bank send-enabled [denom1 ...] [flags]
+```
+
+Example:
+
+```shell
+simd query bank send-enabled
+```
+
+Example output:
+
+```yml
+send_enabled:
+- denom: foocoin
+ enabled: true
+- denom: barcoin
+pagination:
+ next-key: null
+ total: 2
+```
+
+#### Transactions
+
+The `tx` commands allow users to interact with the `bank` module.
+
+```shell
+simd tx bank --help
+```
+
+##### send
+
+The `send` command allows users to send funds from one account to another.
+
+```shell
+simd tx bank send [from_key_or_address] [to_address] [amount] [flags]
+```
+
+Example:
+
+```shell
+simd tx bank send cosmos1.. cosmos1.. 100stake
+```
+
+## gRPC
+
+A user can query the `bank` module using gRPC endpoints.
+
+### Balance
+
+The `Balance` endpoint allows users to query account balance by address for a given denomination.
+
+```shell
+cosmos.bank.v1beta1.Query/Balance
+```
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ -d '{"address":"cosmos1..","denom":"stake"}' \
+ localhost:9090 \
+ cosmos.bank.v1beta1.Query/Balance
+```
+
+Example Output:
+
+```json
+{
+ "balance": {
+ "denom": "stake",
+ "amount": "1000000000"
+ }
+}
+```
+
+### AllBalances
+
+The `AllBalances` endpoint allows users to query account balance by address for all denominations.
+
+```shell
+cosmos.bank.v1beta1.Query/AllBalances
+```
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ -d '{"address":"cosmos1.."}' \
+ localhost:9090 \
+ cosmos.bank.v1beta1.Query/AllBalances
+```
+
+Example Output:
+
+```json expandable
+{
+ "balances": [
+ {
+ "denom": "stake",
+ "amount": "1000000000"
+ }
+ ],
+ "pagination": {
+ "total": "1"
+ }
+}
+```
+
+### DenomMetadata
+
+The `DenomMetadata` endpoint allows users to query metadata for a single coin denomination.
+
+```shell
+cosmos.bank.v1beta1.Query/DenomMetadata
+```
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ -d '{"denom":"stake"}' \
+ localhost:9090 \
+ cosmos.bank.v1beta1.Query/DenomMetadata
+```
+
+Example Output:
+
+```json expandable
+{
+ "metadata": {
+ "description": "native staking token of simulation app",
+ "denomUnits": [
+ {
+ "denom": "stake",
+ "aliases": [
+ "STAKE"
+ ]
+ }
+ ],
+ "base": "stake",
+ "display": "stake",
+ "name": "SimApp Token",
+ "symbol": "STK"
+ }
+}
+```
+
+### DenomsMetadata
+
+The `DenomsMetadata` endpoint allows users to query metadata for all coin denominations.
+
+```shell
+cosmos.bank.v1beta1.Query/DenomsMetadata
+```
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ localhost:9090 \
+ cosmos.bank.v1beta1.Query/DenomsMetadata
+```
+
+Example Output:
+
+```json expandable
+{
+ "metadatas": [
+ {
+ "description": "native staking token of simulation app",
+ "denomUnits": [
+ {
+ "denom": "stake",
+ "aliases": [
+ "STAKE"
+ ]
+ }
+ ],
+ "base": "stake",
+ "display": "stake",
+ "name": "SimApp Token",
+ "symbol": "STK"
+ }
+ ],
+ "pagination": {
+ "total": "1"
+ }
+}
+```
+
+### DenomOwners
+
+The `DenomOwners` endpoint allows users to query all account addresses that hold a particular coin denomination.
+
+```shell
+cosmos.bank.v1beta1.Query/DenomOwners
+```
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ -d '{"denom":"stake"}' \
+ localhost:9090 \
+ cosmos.bank.v1beta1.Query/DenomOwners
+```
+
+Example Output:
+
+```json expandable
+{
+  "denomOwners": [
+    {
+      "address": "cosmos1..",
+      "balance": {
+        "denom": "stake",
+        "amount": "5000000000"
+      }
+    },
+    {
+      "address": "cosmos1..",
+      "balance": {
+        "denom": "stake",
+        "amount": "5000000000"
+      }
+    }
+  ],
+  "pagination": {
+    "total": "2"
+  }
+}
+```
+
+### TotalSupply
+
+The `TotalSupply` endpoint allows users to query the total supply of all coins.
+
+```shell
+cosmos.bank.v1beta1.Query/TotalSupply
+```
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ localhost:9090 \
+ cosmos.bank.v1beta1.Query/TotalSupply
+```
+
+Example Output:
+
+```json expandable
+{
+ "supply": [
+ {
+ "denom": "stake",
+ "amount": "10000000000"
+ }
+ ],
+ "pagination": {
+ "total": "1"
+ }
+}
+```
+
+### SupplyOf
+
+The `SupplyOf` endpoint allows users to query the total supply of a single coin.
+
+```shell
+cosmos.bank.v1beta1.Query/SupplyOf
+```
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ -d '{"denom":"stake"}' \
+ localhost:9090 \
+ cosmos.bank.v1beta1.Query/SupplyOf
+```
+
+Example Output:
+
+```json
+{
+ "amount": {
+ "denom": "stake",
+ "amount": "10000000000"
+ }
+}
+```
+
+### Params
+
+The `Params` endpoint allows users to query the parameters of the `bank` module.
+
+```shell
+cosmos.bank.v1beta1.Query/Params
+```
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ localhost:9090 \
+ cosmos.bank.v1beta1.Query/Params
+```
+
+Example Output:
+
+```json
+{
+ "params": {
+ "defaultSendEnabled": true
+ }
+}
+```
+
+### SendEnabled
+
+The `SendEnabled` endpoint allows users to query the SendEnabled entries of the `bank` module.
+
+Any denominations NOT returned use the `Params.DefaultSendEnabled` value.
+
+```shell
+cosmos.bank.v1beta1.Query/SendEnabled
+```
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ localhost:9090 \
+ cosmos.bank.v1beta1.Query/SendEnabled
+```
+
+Example Output:
+
+```json expandable
+{
+ "send_enabled": [
+ {
+ "denom": "foocoin",
+ "enabled": true
+ },
+ {
+ "denom": "barcoin"
+ }
+ ],
+ "pagination": {
+ "next-key": null,
+ "total": 2
+ }
+}
+```
diff --git a/sdk/next/build/modules/circuit/README.mdx b/sdk/next/build/modules/circuit/README.mdx
new file mode 100644
index 000000000..b4d28b32b
--- /dev/null
+++ b/sdk/next/build/modules/circuit/README.mdx
@@ -0,0 +1,595 @@
+---
+title: 'x/circuit'
+---
+
+
+This module has been moved to [contrib/x/circuit](https://github.com/cosmos/cosmos-sdk/tree/main/contrib/x/circuit) and is no longer actively maintained. For continued maintenance, users should fork the module.
+
+
+## Concepts
+
+Circuit Breaker is a module that is meant to avoid a chain needing to halt/shut down in the presence of a vulnerability, instead the module will allow specific messages or all messages to be disabled. When operating a chain, if it is app specific then a halt of the chain is less detrimental, but if there are applications built on top of the chain then halting is expensive due to the disturbance to applications.
+
+Circuit Breaker works with the idea that an address or set of addresses have the right to block messages from being executed and/or included in the mempool. Any address with a permission is able to reset the circuit breaker for the message.
+
+The transactions are checked and can be rejected at two points:
+
+* In `CircuitBreakerDecorator` [ante handler](/sdk/v0.53/learn/advanced/baseapp#antehandler):
+
+```go expandable
+package ante
+
+import (
+
+ "context"
+ "github.com/cockroachdb/errors"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+// CircuitBreaker is an interface that defines the methods for a circuit breaker.
+type CircuitBreaker interface {
+ IsAllowed(ctx context.Context, typeURL string) (bool, error)
+}
+
+// CircuitBreakerDecorator is an AnteDecorator that checks if the transaction type is allowed to enter the mempool or be executed
+type CircuitBreakerDecorator struct {
+ circuitKeeper CircuitBreaker
+}
+
+func NewCircuitBreakerDecorator(ck CircuitBreaker)
+
+CircuitBreakerDecorator {
+ return CircuitBreakerDecorator{
+ circuitKeeper: ck,
+}
+}
+
+func (cbd CircuitBreakerDecorator)
+
+AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bool, next sdk.AnteHandler) (sdk.Context, error) {
+ // loop through all the messages and check if the message type is allowed
+ for _, msg := range tx.GetMsgs() {
+ isAllowed, err := cbd.circuitKeeper.IsAllowed(ctx, sdk.MsgTypeURL(msg))
+ if err != nil {
+ return ctx, err
+}
+ if !isAllowed {
+ return ctx, errors.New("tx type not allowed")
+}
+
+}
+
+return next(ctx, tx, simulate)
+}
+```
+
+* With a [message router check](/sdk/v0.53/learn/advanced/baseapp#msg-service-router):
+
+```go expandable
+package baseapp
+
+import (
+
+ "context"
+ "fmt"
+
+ gogogrpc "github.com/cosmos/gogoproto/grpc"
+ "github.com/cosmos/gogoproto/proto"
+ "google.golang.org/grpc"
+ "google.golang.org/protobuf/runtime/protoiface"
+
+ errorsmod "cosmossdk.io/errors"
+ "github.com/cosmos/cosmos-sdk/baseapp/internal/protocompat"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// MessageRouter ADR 031 request type routing
+// https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-031-msg-service.md
+type MessageRouter interface {
+ Handler(msg sdk.Msg)
+
+MsgServiceHandler
+ HandlerByTypeURL(typeURL string)
+
+MsgServiceHandler
+}
+
+// MsgServiceRouter routes fully-qualified Msg service methods to their handler.
+type MsgServiceRouter struct {
+ interfaceRegistry codectypes.InterfaceRegistry
+ routes map[string]MsgServiceHandler
+ hybridHandlers map[string]func(ctx context.Context, req, resp protoiface.MessageV1)
+
+error
+ circuitBreaker CircuitBreaker
+}
+
+var _ gogogrpc.Server = &MsgServiceRouter{
+}
+
+// NewMsgServiceRouter creates a new MsgServiceRouter.
+func NewMsgServiceRouter() *MsgServiceRouter {
+ return &MsgServiceRouter{
+ routes: map[string]MsgServiceHandler{
+},
+ hybridHandlers: map[string]func(ctx context.Context, req, resp protoiface.MessageV1)
+
+error{
+},
+}
+}
+
+func (msr *MsgServiceRouter)
+
+SetCircuit(cb CircuitBreaker) {
+ msr.circuitBreaker = cb
+}
+
+// MsgServiceHandler defines a function type which handles Msg service message.
+type MsgServiceHandler = func(ctx sdk.Context, req sdk.Msg) (*sdk.Result, error)
+
+// Handler returns the MsgServiceHandler for a given msg or nil if not found.
+func (msr *MsgServiceRouter)
+
+Handler(msg sdk.Msg)
+
+MsgServiceHandler {
+ return msr.routes[sdk.MsgTypeURL(msg)]
+}
+
+// HandlerByTypeURL returns the MsgServiceHandler for a given query route path or nil
+// if not found.
+func (msr *MsgServiceRouter)
+
+HandlerByTypeURL(typeURL string)
+
+MsgServiceHandler {
+ return msr.routes[typeURL]
+}
+
+// RegisterService implements the gRPC Server.RegisterService method. sd is a gRPC
+// service description, handler is an object which implements that gRPC service.
+//
+// This function PANICs:
+// - if it is called before the service `Msg`s have been registered using
+// RegisterInterfaces,
+// - or if a service is being registered twice.
+func (msr *MsgServiceRouter)
+
+RegisterService(sd *grpc.ServiceDesc, handler interface{
+}) {
+ // Adds a top-level query handler based on the gRPC service name.
+ for _, method := range sd.Methods {
+ err := msr.registerMsgServiceHandler(sd, method, handler)
+ if err != nil {
+ panic(err)
+}
+
+err = msr.registerHybridHandler(sd, method, handler)
+ if err != nil {
+ panic(err)
+}
+
+}
+}
+
+func (msr *MsgServiceRouter)
+
+HybridHandlerByMsgName(msgName string)
+
+func(ctx context.Context, req, resp protoiface.MessageV1)
+
+error {
+ return msr.hybridHandlers[msgName]
+}
+
+func (msr *MsgServiceRouter)
+
+registerHybridHandler(sd *grpc.ServiceDesc, method grpc.MethodDesc, handler interface{
+})
+
+error {
+ inputName, err := protocompat.RequestFullNameFromMethodDesc(sd, method)
+ if err != nil {
+ return err
+}
+ cdc := codec.NewProtoCodec(msr.interfaceRegistry)
+
+hybridHandler, err := protocompat.MakeHybridHandler(cdc, sd, method, handler)
+ if err != nil {
+ return err
+}
+ // if circuit breaker is not nil, then we decorate the hybrid handler with the circuit breaker
+ if msr.circuitBreaker == nil {
+ msr.hybridHandlers[string(inputName)] = hybridHandler
+ return nil
+}
+ // decorate the hybrid handler with the circuit breaker
+ circuitBreakerHybridHandler := func(ctx context.Context, req, resp protoiface.MessageV1)
+
+error {
+ messageName := codectypes.MsgTypeURL(req)
+
+allowed, err := msr.circuitBreaker.IsAllowed(ctx, messageName)
+ if err != nil {
+ return err
+}
+ if !allowed {
+ return fmt.Errorf("circuit breaker disallows execution of message %s", messageName)
+}
+
+return hybridHandler(ctx, req, resp)
+}
+
+msr.hybridHandlers[string(inputName)] = circuitBreakerHybridHandler
+ return nil
+}
+
+func (msr *MsgServiceRouter)
+
+registerMsgServiceHandler(sd *grpc.ServiceDesc, method grpc.MethodDesc, handler interface{
+})
+
+error {
+ fqMethod := fmt.Sprintf("/%s/%s", sd.ServiceName, method.MethodName)
+ methodHandler := method.Handler
+
+ var requestTypeName string
+
+ // NOTE: This is how we pull the concrete request type for each handler for registering in the InterfaceRegistry.
+ // This approach is maybe a bit hacky, but less hacky than reflecting on the handler object itself.
+ // We use a no-op interceptor to avoid actually calling into the handler itself.
+ _, _ = methodHandler(nil, context.Background(), func(i interface{
+})
+
+error {
+ msg, ok := i.(sdk.Msg)
+ if !ok {
+ // We panic here because there is no other alternative and the app cannot be initialized correctly
+ // this should only happen if there is a problem with code generation in which case the app won't
+ // work correctly anyway.
+ panic(fmt.Errorf("unable to register service method %s: %T does not implement sdk.Msg", fqMethod, i))
+}
+
+requestTypeName = sdk.MsgTypeURL(msg)
+
+return nil
+}, noopInterceptor)
+
+ // Check that the service Msg fully-qualified method name has already
+ // been registered (via RegisterInterfaces). If the user registers a
+ // service without registering according service Msg type, there might be
+ // some unexpected behavior down the road. Since we can't return an error
+ // (`Server.RegisterService` interface restriction)
+
+we panic (at startup).
+ reqType, err := msr.interfaceRegistry.Resolve(requestTypeName)
+ if err != nil || reqType == nil {
+ return fmt.Errorf(
+ "type_url %s has not been registered yet. "+
+ "Before calling RegisterService, you must register all interfaces by calling the `RegisterInterfaces` "+
+ "method on module.BasicManager. Each module should call `msgservice.RegisterMsgServiceDesc` inside its "+
+ "`RegisterInterfaces` method with the `_Msg_serviceDesc` generated by proto-gen",
+ requestTypeName,
+ )
+}
+
+ // Check that each service is only registered once. If a service is
+ // registered more than once, then we should error. Since we can't
+ // return an error (`Server.RegisterService` interface restriction)
+
+we
+ // panic (at startup).
+ _, found := msr.routes[requestTypeName]
+ if found {
+ return fmt.Errorf(
+ "msg service %s has already been registered. Please make sure to only register each service once. "+
+ "This usually means that there are conflicting modules registering the same msg service",
+ fqMethod,
+ )
+}
+
+msr.routes[requestTypeName] = func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+ interceptor := func(goCtx context.Context, _ interface{
+}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{
+}, error) {
+ goCtx = context.WithValue(goCtx, sdk.SdkContextKey, ctx)
+
+return handler(goCtx, msg)
+}
+ if m, ok := msg.(sdk.HasValidateBasic); ok {
+ if err := m.ValidateBasic(); err != nil {
+ return nil, err
+}
+
+}
+ if msr.circuitBreaker != nil {
+ msgURL := sdk.MsgTypeURL(msg)
+
+isAllowed, err := msr.circuitBreaker.IsAllowed(ctx, msgURL)
+ if err != nil {
+ return nil, err
+}
+ if !isAllowed {
+ return nil, fmt.Errorf("circuit breaker disables execution of this message: %s", msgURL)
+}
+
+}
+
+ // Call the method handler from the service description with the handler object.
+ // We don't do any decoding here because the decoding was already done.
+ res, err := methodHandler(handler, ctx, noopDecoder, interceptor)
+ if err != nil {
+ return nil, err
+}
+
+resMsg, ok := res.(proto.Message)
+ if !ok {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidType, "Expecting proto.Message, got %T", resMsg)
+}
+
+return sdk.WrapServiceResult(ctx, resMsg, err)
+}
+
+return nil
+}
+
+// SetInterfaceRegistry sets the interface registry for the router.
+func (msr *MsgServiceRouter)
+
+SetInterfaceRegistry(interfaceRegistry codectypes.InterfaceRegistry) {
+ msr.interfaceRegistry = interfaceRegistry
+}
+
+func noopDecoder(_ interface{
+})
+
+error {
+ return nil
+}
+
+func noopInterceptor(_ context.Context, _ interface{
+}, _ *grpc.UnaryServerInfo, _ grpc.UnaryHandler) (interface{
+}, error) {
+ return nil, nil
+}
+```
+
+
+The `CircuitBreakerDecorator` works for most use cases, but [does not check the inner messages of a transaction](/sdk/v0.53/learn/beginner/tx-lifecycle#antehandler). This means some transactions (such as `x/authz` transactions or some `x/gov` transactions) may pass the ante handler. This does not affect the circuit breaker, as the message router check will still fail the transaction.
+This tradeoff is to avoid introducing more dependencies in the `x/circuit` module. Chains can re-define the `CircuitBreakerDecorator` to check for inner messages if they wish to do so.
+
+
+## State
+
+### Accounts
+
+* AccountPermissions `0x1 | account_address -> ProtocolBuffer(CircuitBreakerPermissions)`
+
+```go expandable
+type level int32
+
+const (
+ // LEVEL_NONE_UNSPECIFIED indicates that the account will have no circuit
+ // breaker permissions.
+ LEVEL_NONE_UNSPECIFIED = iota
+ // LEVEL_SOME_MSGS indicates that the account will have permission to
+ // trip or reset the circuit breaker for some Msg type URLs. If this level
+ // is chosen, a non-empty list of Msg type URLs must be provided in
+ // limit_type_urls.
+ LEVEL_SOME_MSGS
+ // LEVEL_ALL_MSGS indicates that the account can trip or reset the circuit
+ // breaker for Msg's of all type URLs.
+ LEVEL_ALL_MSGS
+ // LEVEL_SUPER_ADMIN indicates that the account can take all circuit breaker
+ // actions and can grant permissions to other accounts.
+ LEVEL_SUPER_ADMIN
+)
+
+type Access struct {
+ level int32
+ msgs []string // if full permission, msgs can be empty
+}
+```
+
+### Disable List
+
+List of type urls that are disabled.
+
+* DisableList `0x2 | msg_type_url -> []byte{}` {/* - should this be stored in json to skip encoding and decoding each block, does it matter? */}
+
+## State Transitions
+
+### Authorize
+
+Authorize is called by the module authority (default governance module account) or any account with `LEVEL_SUPER_ADMIN` to give permission to disable/enable messages to another account. There are three levels of permissions that can be granted. `LEVEL_SOME_MSGS` limits the number of messages that can be disabled. `LEVEL_ALL_MSGS` permits all messages to be disabled. `LEVEL_SUPER_ADMIN` allows an account to take all circuit breaker actions including authorizing and deauthorizing other accounts.
+
+```protobuf
+ // AuthorizeCircuitBreaker allows a super-admin to grant (or revoke) another
+ // account's circuit breaker permissions.
+ rpc AuthorizeCircuitBreaker(MsgAuthorizeCircuitBreaker) returns (MsgAuthorizeCircuitBreakerResponse);
+```
+
+### Trip
+
+Trip is called by an authorized account to disable message execution for a specific msgURL. If empty, all the msgs will be disabled.
+
+```protobuf
+ // TripCircuitBreaker pauses processing of Msg's in the state machine.
+ rpc TripCircuitBreaker(MsgTripCircuitBreaker) returns (MsgTripCircuitBreakerResponse);
+```
+
+### Reset
+
+Reset is called by an authorized account to enable execution for a specific msgURL of previously disabled message. If empty, all the disabled messages will be enabled.
+
+```protobuf
+ // ResetCircuitBreaker resumes processing of Msg's in the state machine that
+ // have been paused using TripCircuitBreaker.
+ rpc ResetCircuitBreaker(MsgResetCircuitBreaker) returns (MsgResetCircuitBreakerResponse);
+```
+
+## Messages
+
+### MsgAuthorizeCircuitBreaker
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/main/proto/cosmos/circuit/v1/tx.proto#L25-L75
+```
+
+This message is expected to fail if:
+
+* the granter is not an account with permission level `LEVEL_SUPER_ADMIN` or the module authority
+
+### MsgTripCircuitBreaker
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/main/proto/cosmos/circuit/v1/tx.proto#L77-L93
+```
+
+This message is expected to fail if:
+
+* if the signer does not have a permission level with the ability to disable the specified type url message
+
+### MsgResetCircuitBreaker
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/main/proto/cosmos/circuit/v1/tx.proto#L95-L109
+```
+
+This message is expected to fail if:
+
+* if the type url is not disabled
+
+## Events
+
+The circuit module emits the following events:
+
+### Message Events
+
+#### MsgAuthorizeCircuitBreaker
+
+| Type | Attribute Key | Attribute Value |
+| ------- | ------------- | --------------------------- |
+| string | granter | `{granterAddress}` |
+| string | grantee | `{granteeAddress}` |
+| string | permission | `{granteePermissions}` |
+| message | module | circuit |
+| message | action | authorize\_circuit\_breaker |
+
+#### MsgTripCircuitBreaker
+
+| Type | Attribute Key | Attribute Value |
+| --------- | ------------- | ---------------------- |
+| string | authority | `{authorityAddress}` |
+| \[]string | msg\_urls | \[]string`{msg\_urls}` |
+| message | module | circuit |
+| message | action | trip\_circuit\_breaker |
+
+#### ResetCircuitBreaker
+
+| Type | Attribute Key | Attribute Value |
+| --------- | ------------- | ----------------------- |
+| string | authority | `{authorityAddress}` |
+| \[]string | msg\_urls | \[]string`{msg\_urls}` |
+| message | module | circuit |
+| message | action | reset\_circuit\_breaker |
+
+## Keys
+
+* `AccountPermissionPrefix` - `0x01`
+* `DisableListPrefix` - `0x02`
+
+## Client
+
+## Examples: Using Circuit Breaker CLI Commands
+
+This section provides practical examples for using the Circuit Breaker module through the command-line interface (CLI). These examples demonstrate how to authorize accounts, disable (trip) specific message types, and re-enable (reset) them when needed.
+
+### Querying Circuit Breaker Permissions
+
+Check an account's current circuit breaker permissions:
+
+```bash
+# Query permissions for a specific account
+simd query circuit account-permissions [address]
+
+# Example:
+simd query circuit account-permissions cosmos1...
+```
+
+Check which message types are currently disabled:
+
+```bash
+# Query all disabled message types
+simd query circuit disabled-list
+
+# Example:
+simd query circuit disabled-list
+```
+
+### Authorizing an Account as Circuit Breaker
+
+Only a super-admin or the module authority (typically the governance module account) can grant circuit breaker permissions to other accounts:
+
+```bash
+# Grant LEVEL_ALL_MSGS permission (can disable any message type)
+simd tx circuit authorize [grantee-address] --level=ALL_MSGS --from=[granter-key] --gas=auto --gas-adjustment=1.5
+
+# Grant LEVEL_SOME_MSGS permission (can only disable specific message types)
+simd tx circuit authorize [grantee-address] --level=SOME_MSGS --limit-type-urls="/cosmos.bank.v1beta1.MsgSend,/cosmos.staking.v1beta1.MsgDelegate" --from=[granter-key] --gas=auto --gas-adjustment=1.5
+
+# Grant LEVEL_SUPER_ADMIN permission (can disable messages and authorize other accounts)
+simd tx circuit authorize [grantee-address] --level=SUPER_ADMIN --from=[granter-key] --gas=auto --gas-adjustment=1.5
+```
+
+### Disabling Message Processing (Trip)
+
+Disable specific message types to prevent their execution (requires authorization):
+
+```bash
+# Disable a single message type
+simd tx circuit trip --type-urls="/cosmos.bank.v1beta1.MsgSend" --from=[keyname or address] --gas=auto --gas-adjustment=1.5
+
+# Disable multiple message types
+simd tx circuit trip --type-urls="/cosmos.bank.v1beta1.MsgSend,/cosmos.staking.v1beta1.MsgDelegate" --from=[keyname or address] --gas=auto --gas-adjustment=1.5
+
+# Disable all message types (emergency measure)
+simd tx circuit trip --from=[keyname or address] --gas=auto --gas-adjustment=1.5
+```
+
+### Re-enabling Message Processing (Reset)
+
+Re-enable previously disabled message types (requires authorization):
+
+```bash
+# Re-enable a single message type
+simd tx circuit reset --type-urls="/cosmos.bank.v1beta1.MsgSend" --from=[keyname or address] --gas=auto --gas-adjustment=1.5
+
+# Re-enable multiple message types
+simd tx circuit reset --type-urls="/cosmos.bank.v1beta1.MsgSend,/cosmos.staking.v1beta1.MsgDelegate" --from=[keyname or address] --gas=auto --gas-adjustment=1.5
+
+# Re-enable all disabled message types
+simd tx circuit reset --from=[keyname or address] --gas=auto --gas-adjustment=1.5
+```
+
+### Usage in Emergency Scenarios
+
+In case of a critical vulnerability in a specific message type:
+
+1. Quickly disable the vulnerable message type:
+ ```bash
+ simd tx circuit trip --type-urls="/cosmos.vulnerable.v1beta1.MsgVulnerable" --from=[keyname or address] --gas=auto --gas-adjustment=1.5
+ ```
+
+2. After a fix is deployed, re-enable the message type:
+ ```bash
+ simd tx circuit reset --type-urls="/cosmos.vulnerable.v1beta1.MsgVulnerable" --from=[keyname or address] --gas=auto --gas-adjustment=1.5
+ ```
+
+This allows chains to surgically disable problematic functionality without halting the entire chain, providing time for developers to implement and deploy fixes.
diff --git a/sdk/next/build/modules/consensus/README.mdx b/sdk/next/build/modules/consensus/README.mdx
new file mode 100644
index 000000000..d79cb8b0a
--- /dev/null
+++ b/sdk/next/build/modules/consensus/README.mdx
@@ -0,0 +1,6 @@
+---
+title: 'x/consensus'
+description: Functionality to modify CometBFT's ABCI consensus params.
+---
+
+Functionality to modify CometBFT's ABCI consensus params.
diff --git a/sdk/next/build/modules/crisis/README.mdx b/sdk/next/build/modules/crisis/README.mdx
new file mode 100644
index 000000000..e67c8a4f7
--- /dev/null
+++ b/sdk/next/build/modules/crisis/README.mdx
@@ -0,0 +1,114 @@
+---
+title: 'x/crisis'
+description: >-
+ NOTE: x/crisis has been moved to contrib and is no longer actively maintained.
+---
+
+
+This module has been moved to [contrib/x/crisis](https://github.com/cosmos/cosmos-sdk/tree/main/contrib/x/crisis) and is no longer actively maintained. For continued maintenance, users should fork the module.
+
+
+## Overview
+
+The crisis module halts the blockchain under the circumstance that a blockchain
+invariant is broken. Invariants can be registered with the application during the
+application initialization process.
+
+## Contents
+
+* [State](#state)
+* [Messages](#messages)
+* [Events](#events)
+* [Parameters](#parameters)
+* [Client](#client)
+ * [CLI](#cli)
+
+## State
+
+### ConstantFee
+
+Due to the anticipated large gas cost requirement to verify an invariant (and
+potential to exceed the maximum allowable block gas limit) a constant fee is
+used instead of the standard gas consumption method. The constant fee is
+intended to be larger than the anticipated gas cost of running the invariant
+with the standard gas consumption method.
+
+The ConstantFee param is stored in the module params state with the prefix of `0x01`,
+it can be updated with governance or the address with authority.
+
+* Params: `mint/params -> legacy_amino(sdk.Coin)`
+
+## Messages
+
+This section describes the processing of the crisis messages and the
+corresponding updates to the state.
+
+### MsgVerifyInvariant
+
+Blockchain invariants can be checked using the `MsgVerifyInvariant` message.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/crisis/v1beta1/tx.proto#L26-L42
+```
+
+This message is expected to fail if:
+
+* the sender does not have enough coins for the constant fee
+* the invariant route is not registered
+
+This message checks the invariant provided, and if the invariant is broken it
+panics, halting the blockchain. If the invariant is broken, the constant fee is
+never deducted as the transaction is never committed to a block (equivalent to
+being refunded). However, if the invariant is not broken, the constant fee will
+not be refunded.
+
+## Events
+
+The crisis module emits the following events:
+
+### Handlers
+
+#### MsgVerifyInvariant
+
+| Type | Attribute Key | Attribute Value |
+| --------- | ------------- | ----------------- |
+| invariant | route | `{invariantRoute}` |
+| message | module | crisis |
+| message | action | verify\_invariant |
+| message | sender | `{senderAddress}` |
+
+## Parameters
+
+The crisis module contains the following parameters:
+
+| Key | Type | Example |
+| ----------- | ------------- | --------------------------------- |
+| ConstantFee | object (coin) | `{"denom":"uatom","amount":"1000"}` |
+
+## Client
+
+### CLI
+
+A user can query and interact with the `crisis` module using the CLI.
+
+#### Transactions
+
+The `tx` commands allow users to interact with the `crisis` module.
+
+```bash
+simd tx crisis --help
+```
+
+##### invariant-broken
+
+The `invariant-broken` command submits proof that an invariant was broken in order to halt the chain.
+
+```bash
+simd tx crisis invariant-broken [module-name] [invariant-route] [flags]
+```
+
+Example:
+
+```bash
+simd tx crisis invariant-broken bank total-supply --from=[keyname or address]
+```
diff --git a/sdk/next/build/modules/distribution/README.mdx b/sdk/next/build/modules/distribution/README.mdx
new file mode 100644
index 000000000..f2a206e57
--- /dev/null
+++ b/sdk/next/build/modules/distribution/README.mdx
@@ -0,0 +1,1236 @@
+---
+title: 'x/distribution'
+---
+
+## Overview
+
+This *simple* distribution mechanism describes a functional way to passively
+distribute rewards between validators and delegators. Note that this mechanism does
+not distribute funds as precisely as active reward distribution mechanisms and
+will therefore be upgraded in the future.
+
+The mechanism operates as follows. Collected rewards are pooled globally and
+divided out passively to validators and delegators. Each validator has the
+opportunity to charge commission to the delegators on the rewards collected on
+behalf of the delegators. Fees are collected directly into a global reward pool
+and validator proposer-reward pool. Due to the nature of passive accounting,
+whenever changes to parameters which affect the rate of reward distribution
+occurs, withdrawal of rewards must also occur.
+
+* Whenever withdrawing, one must withdraw the maximum amount they are entitled
+ to, leaving nothing in the pool.
+* Whenever bonding, unbonding, or re-delegating tokens to an existing account, a
+ full withdrawal of the rewards must occur (as the rules for lazy accounting
+ change).
+* Whenever a validator chooses to change the commission on rewards, all accumulated
+ commission rewards must be simultaneously withdrawn.
+
+The above scenarios are covered in `hooks.md`.
+
+The distribution mechanism outlined herein is used to lazily distribute the
+following rewards between validators and associated delegators:
+
+* multi-token fees to be socially distributed
+* inflated staked asset provisions
+* validator commission on all rewards earned by their delegators stake
+
+Fees are pooled within a global pool. The mechanisms used allow for validators
+and delegators to independently and lazily withdraw their rewards.
+
+## Shortcomings
+
+As a part of the lazy computations, each delegator holds an accumulation term
+specific to each validator which is used to estimate what their approximate
+fair portion of tokens held in the global fee pool is owed to them.
+
+```text
+entitlement = delegator-accumulation / all-delegators-accumulation
+```
+
+Under the circumstance that there was constant and equal flow of incoming
+reward tokens every block, this distribution mechanism would be equal to the
+active distribution (distribute individually to all delegators each block).
+However, this is unrealistic so deviations from the active distribution will
+occur based on fluctuations of incoming reward tokens as well as timing of
+reward withdrawal by other delegators.
+
+If you happen to know that incoming rewards are about to significantly increase,
+you are incentivized to not withdraw until after this event, increasing the
+worth of your existing *accum*. See [#2764](https://github.com/cosmos/cosmos-sdk/issues/2764)
+for further details.
+
+## Effect on Staking
+
+Charging commission on Atom provisions while also allowing for Atom-provisions
+to be auto-bonded (distributed directly to the validators bonded stake) is
+problematic within BPoS. Fundamentally, these two mechanisms are mutually
+exclusive. If both commission and auto-bonding mechanisms are simultaneously
+applied to the staking-token then the distribution of staking-tokens between
+any validator and its delegators will change with each block. This then
+necessitates a calculation for each delegation record for each block -
+which is considered computationally expensive.
+
+In conclusion, we can only have Atom commission and unbonded atoms
+provisions or bonded atom provisions with no Atom commission, and we elect to
+implement the former. Stakeholders wishing to rebond their provisions may elect
+to set up a script to periodically withdraw and rebond rewards.
+
+## Contents
+
+* [Concepts](#concepts)
+* [State](#state)
+ * [FeePool](#feepool)
+ * [Validator Distribution](#validator-distribution)
+ * [Delegation Distribution](#delegation-distribution)
+ * [Params](#params)
+* [Begin Block](#begin-block)
+* [Messages](#messages)
+* [Hooks](#hooks)
+* [Events](#events)
+* [Parameters](#parameters)
+* [Client](#client)
+ * [CLI](#cli)
+ * [gRPC](#grpc)
+
+## Concepts
+
+In Proof of Stake (PoS) blockchains, rewards gained from transaction fees are paid to validators. The fee distribution module fairly distributes the rewards to the validators' constituent delegators.
+
+Rewards are calculated per period. The period is updated each time a validator's delegation changes, for example, when the validator receives a new delegation.
+The rewards for a single validator can then be calculated by taking the total rewards for the period before the delegation started, minus the current total rewards.
+To learn more, see the [F1 Fee Distribution paper](https://github.com/cosmos/cosmos-sdk/tree/main/docs/spec/fee_distribution/f1_fee_distr.pdf).
+
+The commission to the validator is paid when the validator is removed or when the validator requests a withdrawal.
+The commission is calculated and incremented at every `BeginBlock` operation to update accumulated fee amounts.
+
+The rewards to a delegator are distributed when the delegation is changed or removed, or a withdrawal is requested.
+Before rewards are distributed, all slashes to the validator that occurred during the current delegation are applied.
+
+### Reference Counting in F1 Fee Distribution
+
+In F1 fee distribution, the rewards a delegator receives are calculated when their delegation is withdrawn. This calculation must read the terms of the summation of rewards divided by the share of tokens from the period which they ended when they delegated, and the final period that was created for the withdrawal.
+
+Additionally, as slashes change the amount of tokens a delegation will have (but we calculate this lazily,
+only when a delegator un-delegates), we must calculate rewards in separate periods before / after any slashes
+which occurred in between when a delegator delegated and when they withdrew their rewards. Thus slashes, like
+delegations, reference the period which was ended by the slash event.
+
+All stored historical rewards records for periods which are no longer referenced by any delegations
+or any slashes can thus be safely removed, as they will never be read (future delegations and future
+slashes will always reference future periods). This is implemented by tracking a `ReferenceCount`
+along with each historical reward storage entry. Each time a new object (delegation or slash)
+is created which might need to reference the historical record, the reference count is incremented.
+Each time one object which previously needed to reference the historical record is deleted, the reference
+count is decremented. If the reference count hits zero, the historical record is deleted.
+
+### External Community Pool Keepers
+
+An external pool community keeper is defined as:
+
+```go expandable
+// ExternalCommunityPoolKeeper is the interface that an external community pool module keeper must fulfill
+// for x/distribution to properly accept it as a community pool fund destination.
+type ExternalCommunityPoolKeeper interface {
+  // GetCommunityPoolModule gets the module name that funds should be sent to for the community pool.
+  // This is the address that x/distribution will send funds to for external management.
+  GetCommunityPoolModule() string
+  // FundCommunityPool allows an account to directly fund the community fund pool.
+  FundCommunityPool(ctx sdk.Context, amount sdk.Coins, senderAddr sdk.AccAddress) error
+  // DistributeFromCommunityPool distributes funds from the community pool module account to
+  // a receiver address.
+  DistributeFromCommunityPool(ctx sdk.Context, amount sdk.Coins, receiveAddr sdk.AccAddress) error
+}
+```
+
+By default, the distribution module will use a community pool implementation that is internal. An external community pool
+can be provided to the module which will have funds be diverted to it instead of the internal implementation. The reference
+external community pool maintained by the Cosmos SDK is [`x/protocolpool`](/sdk/v0.53/build/modules/protocolpool/README).
+
+## State
+
+### FeePool
+
+All globally tracked parameters for distribution are stored within
+`FeePool`. Rewards are collected and added to the reward pool and
+distributed to validators/delegators from here.
+
+Note that the reward pool holds decimal coins (`DecCoins`) to allow
+for fractions of coins to be received from operations like inflation.
+When coins are distributed from the pool they are truncated back to
+`sdk.Coins` which are non-decimal.
+
+* FeePool: `0x00 -> ProtocolBuffer(FeePool)`
+
+```go
+// coins with decimal
+type DecCoins []DecCoin
+
+type DecCoin struct {
+ Amount math.LegacyDec
+ Denom string
+}
+```
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/distribution/v1beta1/distribution.proto#L116-L123
+```
+
+### Validator Distribution
+
+Validator distribution information for the relevant validator is updated each time:
+
+1. delegation amount to a validator is updated,
+2. any delegator withdraws from a validator, or
+3. the validator withdraws its commission.
+
+* ValidatorDistInfo: `0x02 | ValOperatorAddrLen (1 byte) | ValOperatorAddr -> ProtocolBuffer(validatorDistribution)`
+
+```go
+type ValidatorDistInfo struct {
+ OperatorAddress sdk.AccAddress
+ SelfBondRewards sdkmath.DecCoins
+ ValidatorCommission types.ValidatorAccumulatedCommission
+}
+```
+
+### Delegation Distribution
+
+Each delegation distribution only needs to record the height at which it last
+withdrew fees. Because a delegation must withdraw fees each time its
+properties change (aka bonded tokens etc.) its properties will remain constant
+and the delegator's *accumulation* factor can be calculated passively knowing
+only the height of the last withdrawal and its current properties.
+
+* DelegationDistInfo: `0x02 | DelegatorAddrLen (1 byte) | DelegatorAddr | ValOperatorAddrLen (1 byte) | ValOperatorAddr -> ProtocolBuffer(delegatorDist)`
+
+```go
+type DelegationDistInfo struct {
+ WithdrawalHeight int64 // last time this delegation withdrew rewards
+}
+```
+
+### Params
+
+The distribution module stores its params in state with the prefix of `0x09`,
+it can be updated with governance or the address with authority.
+
+* Params: `0x09 | ProtocolBuffer(Params)`
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/distribution/v1beta1/distribution.proto#L12-L42
+```
+
+## Begin Block
+
+At each `BeginBlock`, all fees received in the previous block are transferred to
+the distribution `ModuleAccount` account. When a delegator or validator
+withdraws their rewards, they are taken out of the `ModuleAccount`. During begin
+block, the different claims on the fees collected are updated as follows:
+
+* The reserve community tax is charged.
+* The remainder is distributed proportionally by voting power to all bonded validators
+
+### The Distribution Scheme
+
+See [params](#params) for description of parameters.
+
+Let `fees` be the total fees collected in the previous block, including
+inflationary rewards to the stake. All fees are collected in a specific module
+account during the block. During `BeginBlock`, they are sent to the
+`"distribution"` `ModuleAccount`. No other sending of tokens occurs. Instead, the
+rewards each account is entitled to are stored, and withdrawals can be triggered
+through the messages `FundCommunityPool`, `WithdrawValidatorCommission` and
+`WithdrawDelegatorReward`.
+
+#### Reward to the Community Pool
+
+The community pool gets `community_tax * fees`, plus any remaining dust after
+validators get their rewards that are always rounded down to the nearest
+integer value.
+
+#### Using an External Community Pool
+
+Starting with Cosmos SDK v0.53.0, an external community pool, such as `x/protocolpool`, can be used in place of the `x/distribution` managed community pool.
+
+Please view the warning in the next section before deciding to use an external community pool.
+
+```go expandable
+// ExternalCommunityPoolKeeper is the interface that an external community pool module keeper must fulfill
+// for x/distribution to properly accept it as a community pool fund destination.
+type ExternalCommunityPoolKeeper interface {
+  // GetCommunityPoolModule gets the module name that funds should be sent to for the community pool.
+  // This is the address that x/distribution will send funds to for external management.
+  GetCommunityPoolModule() string
+  // FundCommunityPool allows an account to directly fund the community fund pool.
+  FundCommunityPool(ctx sdk.Context, amount sdk.Coins, senderAddr sdk.AccAddress) error
+  // DistributeFromCommunityPool distributes funds from the community pool module account to
+  // a receiver address.
+  DistributeFromCommunityPool(ctx sdk.Context, amount sdk.Coins, receiveAddr sdk.AccAddress) error
+}
+```
+
+```go
+app.DistrKeeper = distrkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[distrtypes.StoreKey]),
+ app.AccountKeeper,
+ app.BankKeeper,
+ app.StakingKeeper,
+ authtypes.FeeCollectorName,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ distrkeeper.WithExternalCommunityPool(app.ProtocolPoolKeeper), // New option.
+)
+```
+
+#### External Community Pool Usage Warning
+
+When using an external community pool with `x/distribution`, the following handlers will return an error:
+
+**QueryService**
+
+* `CommunityPool`
+
+**MsgService**
+
+* `CommunityPoolSpend`
+* `FundCommunityPool`
+
+If you have services that rely on this functionality from `x/distribution`, please update them to use the `x/protocolpool` equivalents.
+
+#### Reward To the Validators
+
+The proposer receives no extra rewards. All fees are distributed among all the
+bonded validators, including the proposer, in proportion to their consensus power.
+
+```text
+powFrac = validator power / total bonded validator power
+voteMul = 1 - community_tax
+```
+
+All validators receive `fees * voteMul * powFrac`.
+
+#### Rewards to Delegators
+
+Each validator's rewards are distributed to its delegators. The validator also
+has a self-delegation that is treated like a regular delegation in
+distribution calculations.
+
+The validator sets a commission rate. The commission rate is flexible, but each
+validator sets a maximum rate and a maximum daily increase. These maximums cannot be exceeded and protect delegators from sudden increases of validator commission rates to prevent validators from taking all of the rewards.
+
+The outstanding rewards that the operator is entitled to are stored in
+`ValidatorAccumulatedCommission`, while the rewards the delegators are entitled
+to are stored in `ValidatorCurrentRewards`. The [F1 fee distribution scheme](#concepts) is used to calculate the rewards per delegator as they
+withdraw or update their delegation, and is thus not handled in `BeginBlock`.
+
+#### Example Distribution
+
+For this example distribution, the underlying consensus engine selects block proposers in
+proportion to their power relative to the entire bonded power.
+
+All validators are equally performant at including pre-commits in their proposed
+blocks. Then hold `(pre_commits included) / (total bonded validator power)`
+constant so that the amortized block reward for the validator is `( validator power / total bonded power) * (1 - community tax rate)` of
+the total rewards. Consequently, the reward for a single delegator is:
+
+```text
+(delegator proportion of the validator power / validator power) * (validator power / total bonded power)
+ * (1 - community tax rate) * (1 - validator commission rate)
+= (delegator proportion of the validator power / total bonded power) * (1 -
+community tax rate) * (1 - validator commission rate)
+```
+
+## Messages
+
+### MsgSetWithdrawAddress
+
+By default, the withdraw address is the delegator address. To change its withdraw address, a delegator must send a `MsgSetWithdrawAddress` message.
+Changing the withdraw address is possible only if the parameter `WithdrawAddrEnabled` is set to `true`.
+
+The withdraw address cannot be any of the module accounts. These accounts are blocked from being withdraw addresses by being added to the distribution keeper's `blockedAddrs` array at initialization.
+
+Response:
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/distribution/v1beta1/tx.proto#L49-L60
+```
+
+```go
+func (k Keeper) SetWithdrawAddr(ctx context.Context, delegatorAddr sdk.AccAddress, withdrawAddr sdk.AccAddress) error {
+    if k.blockedAddrs[withdrawAddr.String()] {
+        fail with "`{withdrawAddr}` is not allowed to receive external funds"
+    }
+    if !k.GetWithdrawAddrEnabled(ctx) {
+        fail with `ErrSetWithdrawAddrDisabled`
+    }
+
+    k.SetDelegatorWithdrawAddr(ctx, delegatorAddr, withdrawAddr)
+}
+```
+
+### MsgWithdrawDelegatorReward
+
+A delegator can withdraw its rewards.
+Internally in the distribution module, this transaction simultaneously removes the previous delegation with associated rewards, the same as if the delegator simply started a new delegation of the same value.
+The rewards are sent immediately from the distribution `ModuleAccount` to the withdraw address.
+Any remainder (truncated decimals) are sent to the community pool.
+The starting height of the delegation is set to the current validator period, and the reference count for the previous period is decremented.
+The amount withdrawn is deducted from the `ValidatorOutstandingRewards` variable for the validator.
+
+In the F1 distribution, the total rewards are calculated per validator period, and a delegator receives a piece of those rewards in proportion to their stake in the validator.
+In basic F1, the total rewards that all the delegators are entitled to between two periods is calculated in the following way.
+Let `R(X)` be the total accumulated rewards up to period `X` divided by the tokens staked at that time. The delegator allocation is `R(X) * delegator_stake`.
+Then the rewards for all the delegators for staking between periods `A` and `B` are `(R(B) - R(A)) * total stake`.
+However, these calculated rewards don't account for slashing.
+
+Taking the slashes into account requires iteration.
+Let `F(X)` be the fraction a validator is to be slashed for a slashing event that happened at period `X`.
+If the validator was slashed at periods `P1, ..., PN`, where `A < P1`, `PN < B`, the distribution module calculates the individual delegator's rewards, `T(A, B)`, as follows:
+
+```go
+stake := initial stake
+rewards := 0
+previous := A
+for P in P1, ..., PN:
+    rewards = rewards + (R(P) - R(previous)) * stake
+    stake = stake * F(P)
+    previous = P
+rewards = rewards + (R(B) - R(PN)) * stake
+```
+
+The historical rewards are calculated retroactively by playing back all the slashes and then attenuating the delegator's stake at each step.
+The final calculated stake is equivalent to the actual staked coins in the delegation with a margin of error due to rounding errors.
+
+Response:
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/distribution/v1beta1/tx.proto#L66-L77
+```
+
+### WithdrawValidatorCommission
+
+The validator can send the WithdrawValidatorCommission message to withdraw their accumulated commission.
+The commission is calculated in every block during `BeginBlock`, so no iteration is required to withdraw.
+The amount withdrawn is deducted from the `ValidatorOutstandingRewards` variable for the validator.
+Only integer amounts can be sent. If the accumulated rewards have decimals, the amount is truncated before the withdrawal is sent, and the remainder is left to be withdrawn later.
+
+### FundCommunityPool
+
+<Warning>
+This handler will return an error if an `ExternalCommunityPool` is used.
+</Warning>
+
+
+This message sends coins directly from the sender to the community pool.
+
+The transaction fails if the amount cannot be transferred from the sender to the distribution module account.
+
+```go expandable
+func (k Keeper) FundCommunityPool(ctx context.Context, amount sdk.Coins, sender sdk.AccAddress) error {
+ if err := k.bankKeeper.SendCoinsFromAccountToModule(ctx, sender, types.ModuleName, amount); err != nil {
+ return err
+}
+
+feePool, err := k.FeePool.Get(ctx)
+ if err != nil {
+ return err
+}
+
+feePool.CommunityPool = feePool.CommunityPool.Add(sdk.NewDecCoinsFromCoins(amount...)...)
+ if err := k.FeePool.Set(ctx, feePool); err != nil {
+ return err
+}
+
+return nil
+}
+```
+
+### Common distribution operations
+
+These operations take place during many different messages.
+
+#### Initialize delegation
+
+Each time a delegation is changed, the rewards are withdrawn and the delegation is reinitialized.
+Initializing a delegation increments the validator period and keeps track of the starting period of the delegation.
+
+```go expandable
+// initialize starting info for a new delegation
+func (k Keeper) initializeDelegation(ctx context.Context, val sdk.ValAddress, del sdk.AccAddress) {
+ // period has already been incremented - we want to store the period ended by this delegation action
+ previousPeriod := k.GetValidatorCurrentRewards(ctx, val).Period - 1
+
+ // increment reference count for the period we're going to track
+ k.incrementReferenceCount(ctx, val, previousPeriod)
+ validator := k.stakingKeeper.Validator(ctx, val)
+ delegation := k.stakingKeeper.Delegation(ctx, del, val)
+
+ // calculate delegation stake in tokens
+ // we don't store directly, so multiply delegation shares * (tokens per share)
+ // note: necessary to truncate so we don't allow withdrawing more rewards than owed
+ stake := validator.TokensFromSharesTruncated(delegation.GetShares())
+
+k.SetDelegatorStartingInfo(ctx, val, del, types.NewDelegatorStartingInfo(previousPeriod, stake, uint64(ctx.BlockHeight())))
+}
+```
+
+### MsgUpdateParams
+
+Distribution module params can be updated through `MsgUpdateParams`, which can be done using governance proposal and the signer will always be gov module account address.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/distribution/v1beta1/tx.proto#L133-L147
+```
+
+The message handling can fail if:
+
+* signer is not the gov module account address.
+
+## Hooks
+
+Available hooks that can be called by and from this module.
+
+### Create or modify delegation distribution
+
+* triggered-by: `staking.MsgDelegate`, `staking.MsgBeginRedelegate`, `staking.MsgUndelegate`
+
+#### Before
+
+* The delegation rewards are withdrawn to the withdraw address of the delegator.
+ The rewards include the current period and exclude the starting period.
+* The validator period is incremented.
+ The validator period is incremented because the validator's power and share distribution might have changed.
+* The reference count for the delegator's starting period is decremented.
+
+#### After
+
+The starting height of the delegation is set to the previous period.
+Because of the `Before`-hook, this period is the last period for which the delegator was rewarded.
+
+### Validator created
+
+* triggered-by: `staking.MsgCreateValidator`
+
+When a validator is created, the following validator variables are initialized:
+
+* Historical rewards
+* Current accumulated rewards
+* Accumulated commission
+* Total outstanding rewards
+* Period
+
+By default, all values are set to `0`, except period, which is set to `1`.
+
+### Validator removed
+
+* triggered-by: `staking.RemoveValidator`
+
+Outstanding commission is sent to the validator's self-delegation withdrawal address.
+Remaining delegator rewards get sent to the community fee pool.
+
+Note: The validator gets removed only when it has no remaining delegations.
+At that time, all outstanding delegator rewards will have been withdrawn.
+Any remaining rewards are dust amounts.
+
+### Validator is slashed
+
+* triggered-by: `staking.Slash`
+* The current validator period reference count is incremented.
+ The reference count is incremented because the slash event has created a reference to it.
+* The validator period is incremented.
+* The slash event is stored for later use.
+ The slash event will be referenced when calculating delegator rewards.
+
+## Events
+
+The distribution module emits the following events:
+
+### BeginBlocker
+
+| Type | Attribute Key | Attribute Value |
+| ---------------- | ------------- | ------------------ |
+| proposer\_reward | validator | `{validatorAddress}` |
+| proposer\_reward | reward | `{proposerReward}` |
+| commission | amount | `{commissionAmount}` |
+| commission | validator | `{validatorAddress}` |
+| rewards | amount | `{rewardAmount}` |
+| rewards | validator | `{validatorAddress}` |
+
+### Handlers
+
+#### MsgSetWithdrawAddress
+
+| Type | Attribute Key | Attribute Value |
+| ---------------------- | ----------------- | ---------------------- |
+| set\_withdraw\_address | withdraw\_address | `{withdrawAddress}` |
+| message | module | distribution |
+| message | action | set\_withdraw\_address |
+| message | sender | `{senderAddress}` |
+
+#### MsgWithdrawDelegatorReward
+
+| Type | Attribute Key | Attribute Value |
+| ----------------- | ------------- | --------------------------- |
+| withdraw\_rewards | amount | `{rewardAmount}` |
+| withdraw\_rewards | validator | `{validatorAddress}` |
+| message | module | distribution |
+| message | action | withdraw\_delegator\_reward |
+| message | sender | `{senderAddress}` |
+
+#### MsgWithdrawValidatorCommission
+
+| Type | Attribute Key | Attribute Value |
+| -------------------- | ------------- | ------------------------------- |
+| withdraw\_commission | amount | `{commissionAmount}` |
+| message | module | distribution |
+| message | action | withdraw\_validator\_commission |
+| message | sender | `{senderAddress}` |
+
+## Parameters
+
+The distribution module contains the following parameters:
+
+| Key | Type | Example |
+| ------------------- | ------------ | --------------------------- |
+| communitytax | string (dec) | "0.020000000000000000" \[0] |
+| withdrawaddrenabled | bool | true |
+
+* \[0] `communitytax` must be positive and cannot exceed 1.00.
+* `baseproposerreward` and `bonusproposerreward` were parameters that are deprecated in v0.47 and are not used.
+
+<Note>
+The reserve pool is the pool of collected funds for use by governance taken via the `CommunityTax`.
+Currently with the Cosmos SDK, tokens collected by the CommunityTax are accounted for but unspendable.
+</Note>
+
+
+## Client
+
+### CLI
+
+A user can query and interact with the `distribution` module using the CLI.
+
+#### Query
+
+The `query` commands allow users to query `distribution` state.
+
+```shell
+simd query distribution --help
+```
+
+##### commission
+
+The `commission` command allows users to query validator commission rewards by address.
+
+```shell
+simd query distribution commission [address] [flags]
+```
+
+Example:
+
+```shell
+simd query distribution commission cosmosvaloper1...
+```
+
+Example Output:
+
+```yml
+commission:
+- amount: "1000000.000000000000000000"
+ denom: stake
+```
+
+##### community-pool
+
+The `community-pool` command allows users to query all coin balances within the community pool.
+
+```shell
+simd query distribution community-pool [flags]
+```
+
+Example:
+
+```shell
+simd query distribution community-pool
+```
+
+Example Output:
+
+```yml
+pool:
+- amount: "1000000.000000000000000000"
+ denom: stake
+```
+
+##### params
+
+The `params` command allows users to query the parameters of the `distribution` module.
+
+```shell
+simd query distribution params [flags]
+```
+
+Example:
+
+```shell
+simd query distribution params
+```
+
+Example Output:
+
+```yml
+base_proposer_reward: "0.000000000000000000"
+bonus_proposer_reward: "0.000000000000000000"
+community_tax: "0.020000000000000000"
+withdraw_addr_enabled: true
+```
+
+##### rewards
+
+The `rewards` command allows users to query delegator rewards. Users can optionally include the validator address to query rewards earned from a specific validator.
+
+```shell
+simd query distribution rewards [delegator-addr] [validator-addr] [flags]
+```
+
+Example:
+
+```shell
+simd query distribution rewards cosmos1...
+```
+
+Example Output:
+
+```yml
+rewards:
+- reward:
+ - amount: "1000000.000000000000000000"
+ denom: stake
+ validator_address: cosmosvaloper1..
+total:
+- amount: "1000000.000000000000000000"
+ denom: stake
+```
+
+##### slashes
+
+The `slashes` command allows users to query all slashes for a given block range.
+
+```shell
+simd query distribution slashes [validator] [start-height] [end-height] [flags]
+```
+
+Example:
+
+```shell
+simd query distribution slashes cosmosvaloper1... 1 1000
+```
+
+Example Output:
+
+```yml
+pagination:
+ next_key: null
+ total: "0"
+slashes:
+- validator_period: 20
+ fraction: "0.009999999999999999"
+```
+
+##### validator-outstanding-rewards
+
+The `validator-outstanding-rewards` command allows users to query all outstanding (un-withdrawn) rewards for a validator and all their delegations.
+
+```shell
+simd query distribution validator-outstanding-rewards [validator] [flags]
+```
+
+Example:
+
+```shell
+simd query distribution validator-outstanding-rewards cosmosvaloper1...
+```
+
+Example Output:
+
+```yml
+rewards:
+- amount: "1000000.000000000000000000"
+ denom: stake
+```
+
+##### validator-distribution-info
+
+The `validator-distribution-info` command allows users to query validator commission and self-delegation rewards for a validator.
+
+```shell expandable
+simd query distribution validator-distribution-info cosmosvaloper1...
+```
+
+Example Output:
+
+```yml
+commission:
+- amount: "100000.000000000000000000"
+ denom: stake
+operator_address: cosmosvaloper1...
+self_bond_rewards:
+- amount: "100000.000000000000000000"
+ denom: stake
+```
+
+#### Transactions
+
+The `tx` commands allow users to interact with the `distribution` module.
+
+```shell
+simd tx distribution --help
+```
+
+##### fund-community-pool
+
+The `fund-community-pool` command allows users to send funds to the community pool.
+
+```shell
+simd tx distribution fund-community-pool [amount] [flags]
+```
+
+Example:
+
+```shell
+simd tx distribution fund-community-pool 100stake --from cosmos1...
+```
+
+##### set-withdraw-addr
+
+The `set-withdraw-addr` command allows users to set the withdraw address for rewards associated with a delegator address.
+
+```shell
+simd tx distribution set-withdraw-addr [withdraw-addr] [flags]
+```
+
+Example:
+
+```shell
+simd tx distribution set-withdraw-addr cosmos1... --from cosmos1...
+```
+
+##### withdraw-all-rewards
+
+The `withdraw-all-rewards` command allows users to withdraw all rewards for a delegator.
+
+```shell
+simd tx distribution withdraw-all-rewards [flags]
+```
+
+Example:
+
+```shell
+simd tx distribution withdraw-all-rewards --from cosmos1...
+```
+
+##### withdraw-rewards
+
+The `withdraw-rewards` command allows users to withdraw all rewards from a given delegation address,
+and optionally withdraw validator commission if the delegation address given is a validator operator and the user provides the `--commission` flag.
+
+```shell
+simd tx distribution withdraw-rewards [validator-addr] [flags]
+```
+
+Example:
+
+```shell
+simd tx distribution withdraw-rewards cosmosvaloper1... --from cosmos1... --commission
+```
+
+### gRPC
+
+A user can query the `distribution` module using gRPC endpoints.
+
+#### Params
+
+The `Params` endpoint allows users to query parameters of the `distribution` module.
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ localhost:9090 \
+ cosmos.distribution.v1beta1.Query/Params
+```
+
+Example Output:
+
+```json
+{
+ "params": {
+ "communityTax": "20000000000000000",
+ "baseProposerReward": "00000000000000000",
+ "bonusProposerReward": "00000000000000000",
+ "withdrawAddrEnabled": true
+ }
+}
+```
+
+#### ValidatorDistributionInfo
+
+The `ValidatorDistributionInfo` endpoint queries validator commission and self-delegation rewards for a validator.
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ -d '{"validator_address":"cosmosvaloper1..."}' \
+ localhost:9090 \
+ cosmos.distribution.v1beta1.Query/ValidatorDistributionInfo
+```
+
+Example Output:
+
+```json
+{
+ "commission": {
+ "commission": [
+ {
+ "denom": "stake",
+ "amount": "1000000000000000"
+ }
+ ]
+ },
+ "self_bond_rewards": [
+ {
+ "denom": "stake",
+ "amount": "1000000000000000"
+ }
+ ],
+ "validator_address": "cosmosvaloper1..."
+}
+```
+
+#### ValidatorOutstandingRewards
+
+The `ValidatorOutstandingRewards` endpoint allows users to query rewards of a validator address.
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ -d '{"validator_address":"cosmosvaloper1..."}' \
+ localhost:9090 \
+ cosmos.distribution.v1beta1.Query/ValidatorOutstandingRewards
+```
+
+Example Output:
+
+```json
+{
+ "rewards": {
+ "rewards": [
+ {
+ "denom": "stake",
+ "amount": "1000000000000000"
+ }
+ ]
+ }
+}
+```
+
+#### ValidatorCommission
+
+The `ValidatorCommission` endpoint allows users to query accumulated commission for a validator.
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ -d '{"validator_address":"cosmosvaloper1..."}' \
+ localhost:9090 \
+ cosmos.distribution.v1beta1.Query/ValidatorCommission
+```
+
+Example Output:
+
+```json
+{
+ "commission": {
+ "commission": [
+ {
+ "denom": "stake",
+ "amount": "1000000000000000"
+ }
+ ]
+ }
+}
+```
+
+#### ValidatorSlashes
+
+The `ValidatorSlashes` endpoint allows users to query slash events of a validator.
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ -d '{"validator_address":"cosmosvaloper1..."}' \
+ localhost:9090 \
+ cosmos.distribution.v1beta1.Query/ValidatorSlashes
+```
+
+Example Output:
+
+```json
+{
+ "slashes": [
+ {
+ "validator_period": "20",
+ "fraction": "0.009999999999999999"
+ }
+ ],
+ "pagination": {
+ "total": "1"
+ }
+}
+```
+
+#### DelegationRewards
+
+The `DelegationRewards` endpoint allows users to query the total rewards accrued by a delegation.
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ -d '{"delegator_address":"cosmos1...","validator_address":"cosmosvaloper1..."}' \
+ localhost:9090 \
+ cosmos.distribution.v1beta1.Query/DelegationRewards
+```
+
+Example Output:
+
+```json
+{
+ "rewards": [
+ {
+ "denom": "stake",
+ "amount": "1000000000000000"
+ }
+ ]
+}
+```
+
+#### DelegationTotalRewards
+
+The `DelegationTotalRewards` endpoint allows users to query the total rewards accrued by each validator.
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ -d '{"delegator_address":"cosmos1..."}' \
+ localhost:9090 \
+ cosmos.distribution.v1beta1.Query/DelegationTotalRewards
+```
+
+Example Output:
+
+```json
+{
+ "rewards": [
+ {
+ "validatorAddress": "cosmosvaloper1...",
+ "reward": [
+ {
+ "denom": "stake",
+ "amount": "1000000000000000"
+ }
+ ]
+ }
+ ],
+ "total": [
+ {
+ "denom": "stake",
+ "amount": "1000000000000000"
+ }
+ ]
+}
+```
+
+#### DelegatorValidators
+
+The `DelegatorValidators` endpoint allows users to query all validators for a given delegator.
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ -d '{"delegator_address":"cosmos1..."}' \
+ localhost:9090 \
+ cosmos.distribution.v1beta1.Query/DelegatorValidators
+```
+
+Example Output:
+
+```json
+{
+ "validators": ["cosmosvaloper1..."]
+}
+```
+
+#### DelegatorWithdrawAddress
+
+The `DelegatorWithdrawAddress` endpoint allows users to query the withdraw address of a delegator.
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ -d '{"delegator_address":"cosmos1..."}' \
+ localhost:9090 \
+ cosmos.distribution.v1beta1.Query/DelegatorWithdrawAddress
+```
+
+Example Output:
+
+```json
+{
+ "withdrawAddress": "cosmos1..."
+}
+```
+
+#### CommunityPool
+
+The `CommunityPool` endpoint allows users to query the community pool coins.
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ localhost:9090 \
+ cosmos.distribution.v1beta1.Query/CommunityPool
+```
+
+Example Output:
+
+```json
+{
+ "pool": [
+ {
+ "denom": "stake",
+ "amount": "1000000000000000000"
+ }
+ ]
+}
+```
+
+
+The following query endpoints were added in v0.54 to provide more granular access to distribution state.
+
+
+#### ValidatorHistoricalRewards
+
+The `ValidatorHistoricalRewards` endpoint allows users to query historical rewards for a validator at a specific period.
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ -d '{"validator_address":"cosmosvaloper1...","period":"5"}' \
+ localhost:9090 \
+ cosmos.distribution.v1beta1.Query/ValidatorHistoricalRewards
+```
+
+Example Output:
+
+```json
+{
+ "rewards": {
+ "cumulativeRewardRatio": [
+ {
+ "denom": "stake",
+ "amount": "1000000000000000"
+ }
+ ],
+ "referenceCount": 1
+ }
+}
+```
+
+#### ValidatorCurrentRewards
+
+The `ValidatorCurrentRewards` endpoint allows users to query current rewards for a validator.
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ -d '{"validator_address":"cosmosvaloper1..."}' \
+ localhost:9090 \
+ cosmos.distribution.v1beta1.Query/ValidatorCurrentRewards
+```
+
+Example Output:
+
+```json
+{
+ "rewards": {
+ "rewards": [
+ {
+ "denom": "stake",
+ "amount": "1000000000000000"
+ }
+ ],
+ "period": "5"
+ }
+}
+```
+
+#### DelegatorStartingInfo
+
+The `DelegatorStartingInfo` endpoint allows users to query the starting info for a delegator.
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ -d '{"delegator_address":"cosmos1...","validator_address":"cosmosvaloper1..."}' \
+ localhost:9090 \
+ cosmos.distribution.v1beta1.Query/DelegatorStartingInfo
+```
+
+Example Output:
+
+```json
+{
+ "startingInfo": {
+ "previousPeriod": "4",
+ "stake": "1000000000000000000",
+ "height": "12345"
+ }
+}
+```
+````
diff --git a/sdk/next/build/modules/epochs/README.mdx b/sdk/next/build/modules/epochs/README.mdx
new file mode 100644
index 000000000..bc43a1c6a
--- /dev/null
+++ b/sdk/next/build/modules/epochs/README.mdx
@@ -0,0 +1,179 @@
+---
+title: 'x/epochs'
+---
+
+## Abstract
+
+Often in the SDK, we would like to run certain code every so often. The
+purpose of `epochs` module is to allow other modules to set that they
+would like to be signaled once every period. So another module can
+specify it wants to execute code once a week, starting at UTC-time = x.
+`epochs` creates a generalized epoch interface to other modules so that
+they can easily be signaled upon such events.
+
+## Contents
+
+1. **[Concepts](#concepts)**
+2. **[State](#state)**
+3. **[Events](#events)**
+4. **[Keeper](#keepers)**
+5. **[Hooks](#hooks)**
+6. **[Queries](#queries)**
+
+## Concepts
+
+The epochs module defines on-chain timers that execute at fixed time intervals.
+Other SDK modules can then register logic to be executed at the timer ticks.
+We refer to the period in between two timer ticks as an "epoch".
+
+Every timer has a unique identifier.
+Every epoch will have a start time, and an end time, where `end time = start time + timer interval`.
+On mainnet, we only utilize one identifier, with a time interval of `one day`.
+
+The timer will tick at the first block whose block time is greater than the timer end time,
+and set the start as the prior timer end time. (Notably, it's not set to the block time!)
+This means that if the chain has been down for a while, you will get one timer tick per block,
+until the timer has caught up.
+
+## State
+
+The Epochs module keeps a single `EpochInfo` per identifier.
+This contains the current state of the timer with the corresponding identifier.
+Its fields are modified at every timer tick.
+EpochInfos are initialized as part of genesis initialization or upgrade logic,
+and are only modified on begin blockers.
+
+## Events
+
+The `epochs` module emits the following events:
+
+### BeginBlocker
+
+| Type | Attribute Key | Attribute Value |
+| ------------ | ------------- | --------------- |
+| epoch\_start | epoch\_number | `{epoch_number}` |
+| epoch\_start | start\_time | `{start_time}` |
+
+### EndBlocker
+
+| Type | Attribute Key | Attribute Value |
+| ---------- | ------------- | --------------- |
+| epoch\_end | epoch\_number | `{epoch_number}` |
+
+## Keepers
+
+### Keeper functions
+
+Epochs keeper module provides utility functions to manage epochs.
+
+## Hooks
+
+```go
+// the first block whose timestamp is after the duration is counted as the end of the epoch
+ AfterEpochEnd(ctx sdk.Context, epochIdentifier string, epochNumber int64)
+ // new epoch is next block of epoch end block
+ BeforeEpochStart(ctx sdk.Context, epochIdentifier string, epochNumber int64)
+```
+
+### How modules receive hooks
+
+In the hook receiver functions of other modules, they need to filter
+by `epochIdentifier` and only execute their logic for a specific
+epochIdentifier. The epochIdentifier to filter on could be in `Params` of other
+modules so that it can be modified by governance.
+
+This is the standard dev UX of this:
+
+```golang
+func (k MyModuleKeeper) AfterEpochEnd(ctx sdk.Context, epochIdentifier string, epochNumber int64) {
+    params := k.GetParams(ctx)
+    if epochIdentifier == params.DistrEpochIdentifier {
+        // my logic
+    }
+}
+
+
+```
+
+### Panic isolation
+
+If a given epoch hook panics, its state update is reverted, but we keep
+proceeding through the remaining hooks. This allows more advanced epoch
+logic to be used, without concern over state machine halting, or halting
+subsequent modules.
+
+This does mean that if there is behavior you expect from a prior epoch
+hook, and that epoch hook reverted, your hook may also have an issue. So
+do keep in mind "what if a prior hook didn't get executed" in the safety
+checks you consider for a new epoch hook.
+
+## Queries
+
+The Epochs module provides the following queries to check the module's state.
+
+```protobuf
+service Query {
+ // EpochInfos provide running epochInfos
+ rpc EpochInfos(QueryEpochsInfoRequest) returns (QueryEpochsInfoResponse) {}
+ // CurrentEpoch provide current epoch of specified identifier
+ rpc CurrentEpoch(QueryCurrentEpochRequest) returns (QueryCurrentEpochResponse) {}
+}
+```
+
+### Epoch Infos
+
+Query the currently running epochInfos
+
+```sh
+ query epochs epoch-infos
+```
+
+
+**Example**
+
+An example output:
+
+```sh expandable
+epochs:
+- current_epoch: "183"
+ current_epoch_start_height: "2438409"
+ current_epoch_start_time: "2021-12-18T17:16:09.898160996Z"
+ duration: 86400s
+ epoch_counting_started: true
+ identifier: day
+ start_time: "2021-06-18T17:00:00Z"
+- current_epoch: "26"
+ current_epoch_start_height: "2424854"
+ current_epoch_start_time: "2021-12-17T17:02:07.229632445Z"
+ duration: 604800s
+ epoch_counting_started: true
+ identifier: week
+ start_time: "2021-06-18T17:00:00Z"
+```
+
+
+
+### Current Epoch
+
+Query the current epoch by the specified identifier
+
+```sh
+ query epochs current-epoch [identifier]
+```
+
+
+**Example**
+
+Query the current `day` epoch:
+
+```sh
+ query epochs current-epoch day
+```
+
+Which in this example outputs:
+
+```sh
+current_epoch: "183"
+```
+
+
diff --git a/sdk/next/build/modules/evidence/README.mdx b/sdk/next/build/modules/evidence/README.mdx
new file mode 100644
index 000000000..f48be3589
--- /dev/null
+++ b/sdk/next/build/modules/evidence/README.mdx
@@ -0,0 +1,461 @@
+---
+title: 'x/evidence'
+description: Overview of the evidence module concepts, state, messages, events, parameters, BeginBlock logic, and client usage (CLI, REST, gRPC).
+---
+
+* [Concepts](#concepts)
+* [State](#state)
+* [Messages](#messages)
+* [Events](#events)
+* [Parameters](#parameters)
+* [BeginBlock](#beginblock)
+* [Client](#client)
+ * [CLI](#cli)
+ * [REST](#rest)
+ * [gRPC](#grpc)
+
+## Abstract
+
+`x/evidence` is an implementation of a Cosmos SDK module, per [ADR 009](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-009-evidence-module.md),
+that allows for the submission and handling of arbitrary evidence of misbehavior such
+as equivocation and counterfactual signing.
+
+The evidence module differs from standard evidence handling which typically expects the
+underlying consensus engine, e.g. CometBFT, to automatically submit evidence when
+it is discovered, by allowing clients and foreign chains to submit more complex evidence
+directly.
+
+All concrete evidence types must implement the `Evidence` interface contract. Submitted
+`Evidence` is first routed through the evidence module's `Router` in which it attempts
+to find a corresponding registered `Handler` for that specific `Evidence` type.
+Each `Evidence` type must have a `Handler` registered with the evidence module's
+keeper in order for it to be successfully routed and executed.
+
+Each corresponding handler must also fulfill the `Handler` interface contract. The
+`Handler` for a given `Evidence` type can perform any arbitrary state transitions
+such as slashing, jailing, and tombstoning.
+
+## Concepts
+
+### Evidence
+
+Any concrete type of evidence submitted to the `x/evidence` module must fulfill the
+`Evidence` contract outlined below. Not all concrete types of evidence will fulfill
+this contract in the same way and some data may be entirely irrelevant to certain
+types of evidence. An additional `ValidatorEvidence`, which extends `Evidence`,
+has also been created to define a contract for evidence against malicious validators.
+
+```go expandable
+// Evidence defines the contract which concrete evidence types of misbehavior
+// must implement.
+type Evidence interface {
+    proto.Message
+
+    Route() string
+    String() string
+    Hash() []byte
+    ValidateBasic() error
+
+    // Height at which the infraction occurred
+    GetHeight() int64
+}
+
+// ValidatorEvidence extends Evidence interface to define contract
+// for evidence against malicious validators
+type ValidatorEvidence interface {
+    Evidence
+
+    // The consensus address of the malicious validator at time of infraction
+    GetConsensusAddress() sdk.ConsAddress
+
+    // The total power of the malicious validator at time of infraction
+    GetValidatorPower() int64
+
+    // The total validator set power at time of infraction
+    GetTotalPower() int64
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+```
+
+### Registration & Handling
+
+The `x/evidence` module must first know about all types of evidence it is expected
+to handle. This is accomplished by registering the `Route` method in the `Evidence`
+contract with what is known as a `Router` (defined below). The `Router` accepts
+`Evidence` and attempts to find the corresponding `Handler` for the `Evidence`
+via the `Route` method.
+
+```go
+type Router interface {
+    AddRoute(r string, h Handler) Router
+    HasRoute(r string) bool
+    GetRoute(path string) Handler
+    Seal()
+    Sealed() bool
+}
+
+
+
+
+
+
+
+
+
+```
+
+The `Handler` (defined below) is responsible for executing the entirety of the
+business logic for handling `Evidence`. This typically includes validating the
+evidence, both stateless checks via `ValidateBasic` and stateful checks via any
+keepers provided to the `Handler`. In addition, the `Handler` may also perform
+capabilities such as slashing and jailing a validator. All `Evidence` handled
+by the `Handler` should be persisted.
+
+```go
+// Handler defines an agnostic Evidence handler. The handler is responsible
+// for executing all corresponding business logic necessary for verifying the
+// evidence as valid. In addition, the Handler may execute any necessary
+// slashing and potential jailing.
+type Handler func(context.Context, Evidence) error
+
+
+```
+
+## State
+
+Currently the `x/evidence` module only stores valid submitted `Evidence` in state.
+The evidence state is also stored and exported in the `x/evidence` module's `GenesisState`.
+
+```protobuf
+// GenesisState defines the evidence module's genesis state.
+message GenesisState {
+ // evidence defines all the evidence at genesis.
+ repeated google.protobuf.Any evidence = 1;
+}
+
+```
+
+All `Evidence` is retrieved and stored via a prefix `KVStore` using prefix `0x00` (`KeyPrefixEvidence`).
+
+## Messages
+
+### MsgSubmitEvidence
+
+Evidence is submitted through a `MsgSubmitEvidence` message:
+
+```protobuf
+// MsgSubmitEvidence represents a message that supports submitting arbitrary
+// Evidence of misbehavior such as equivocation or counterfactual signing.
+message MsgSubmitEvidence {
+ string submitter = 1;
+ google.protobuf.Any evidence = 2;
+}
+```
+
+Note, the `Evidence` of a `MsgSubmitEvidence` message must have a corresponding
+`Handler` registered with the `x/evidence` module's `Router` in order to be processed
+and routed correctly.
+
+Given the `Evidence` is registered with a corresponding `Handler`, it is processed
+as follows:
+
+```go expandable
+func SubmitEvidence(ctx Context, evidence Evidence) error {
+    if _, err := GetEvidence(ctx, evidence.Hash()); err == nil {
+        return errorsmod.Wrap(types.ErrEvidenceExists, strings.ToUpper(hex.EncodeToString(evidence.Hash())))
+    }
+    if !router.HasRoute(evidence.Route()) {
+        return errorsmod.Wrap(types.ErrNoEvidenceHandlerExists, evidence.Route())
+    }
+    handler := router.GetRoute(evidence.Route())
+    if err := handler(ctx, evidence); err != nil {
+        return errorsmod.Wrap(types.ErrInvalidEvidence, err.Error())
+    }
+
+    ctx.EventManager().EmitEvent(
+        sdk.NewEvent(
+            types.EventTypeSubmitEvidence,
+            sdk.NewAttribute(types.AttributeKeyEvidenceHash, strings.ToUpper(hex.EncodeToString(evidence.Hash()))),
+        ),
+    )
+
+    SetEvidence(ctx, evidence)
+
+    return nil
+}
+
+
+```
+
+First, there must not already exist valid submitted `Evidence` of the exact same
+type. Secondly, the `Evidence` is routed to the `Handler` and executed. Finally,
+if there is no error in handling the `Evidence`, an event is emitted and it is persisted to state.
+
+## Events
+
+The `x/evidence` module emits the following events:
+
+### Handlers
+
+#### MsgSubmitEvidence
+
+| Type | Attribute Key | Attribute Value |
+| ---------------- | -------------- | ---------------- |
+| submit\_evidence | evidence\_hash | `{evidenceHash}` |
+| message | module | evidence |
+| message | sender | `{senderAddress}` |
+| message | action | submit\_evidence |
+
+## Parameters
+
+The evidence module does not contain any parameters.
+
+## BeginBlock
+
+### Evidence Handling
+
+CometBFT blocks can include
+[Evidence](https://github.com/cometbft/cometbft/blob/main/spec/abci/abci%2B%2B_basic_concepts.md#evidence) that indicates if a validator committed malicious behavior. The relevant information is forwarded to the application as ABCI Evidence in `abci.RequestBeginBlock` so that the validator can be punished accordingly.
+
+#### Equivocation
+
+The Cosmos SDK handles two types of evidence inside the ABCI `BeginBlock`:
+
+* `DuplicateVoteEvidence`,
+* `LightClientAttackEvidence`.
+
+The evidence module handles these two evidence types the same way. First, the Cosmos SDK converts the CometBFT concrete evidence type to an SDK `Evidence` interface using `Equivocation` as the concrete type.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/proto/cosmos/evidence/v1beta1/evidence.proto#L12-L32
+```
+
+For some `Equivocation` submitted in `block` to be valid, it must satisfy:
+
+`Evidence.Timestamp >= block.Timestamp - MaxEvidenceAge`
+
+Where:
+
+* `Evidence.Timestamp` is the timestamp in the block at height `Evidence.Height`
+* `block.Timestamp` is the current block timestamp.
+
+If valid `Equivocation` evidence is included in a block, the validator's stake is
+reduced (slashed) by `SlashFractionDoubleSign` as defined by the `x/slashing` module
+of what their stake was when the infraction occurred, rather than when the evidence was discovered.
+We want to "follow the stake", i.e., the stake that contributed to the infraction
+should be slashed, even if it has since been redelegated or started unbonding.
+
+In addition, the validator is permanently jailed and tombstoned to make it impossible for that
+validator to ever re-enter the validator set.
+
+The `Equivocation` evidence is handled as follows:
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/x/evidence/keeper/infraction.go#L26-L140
+```
+
+**Note:** The slashing, jailing, and tombstoning calls are delegated through the `x/slashing` module
+that emits informative events and finally delegates calls to the `x/staking` module. See documentation
+on slashing and jailing in [State Transitions](/sdk/v0.53/build/modules/staking/README#state-transitions).
+
+## Client
+
+### CLI
+
+A user can query and interact with the `evidence` module using the CLI.
+
+#### Query
+
+The `query` command allows users to query `evidence` state.
+
+```bash
+simd query evidence --help
+```
+
+#### evidence
+
+The `evidence` command allows users to list all evidence or evidence by hash.
+
+Usage:
+
+```bash
+simd query evidence evidence [flags]
+```
+
+To query evidence by hash
+
+Example:
+
+```bash
+simd query evidence evidence "DF0C23E8634E480F84B9D5674A7CDC9816466DEC28A3358F73260F68D28D7660"
+```
+
+Example Output:
+
+```bash
+evidence:
+ consensus_address: cosmosvalcons1ntk8eualewuprz0gamh8hnvcem2nrcdsgz563h
+ height: 11
+ power: 100
+ time: "2021-10-20T16:08:38.194017624Z"
+```
+
+To get all evidence
+
+Example:
+
+```bash
+simd query evidence list
+```
+
+Example Output:
+
+```bash
+evidence:
+ consensus_address: cosmosvalcons1ntk8eualewuprz0gamh8hnvcem2nrcdsgz563h
+ height: 11
+ power: 100
+ time: "2021-10-20T16:08:38.194017624Z"
+pagination:
+ next_key: null
+ total: "1"
+```
+
+### REST
+
+A user can query the `evidence` module using REST endpoints.
+
+#### Evidence
+
+Get evidence by hash
+
+```bash
+/cosmos/evidence/v1beta1/evidence/{hash}
+```
+
+Example:
+
+```bash
+curl -X GET "http://localhost:1317/cosmos/evidence/v1beta1/evidence/DF0C23E8634E480F84B9D5674A7CDC9816466DEC28A3358F73260F68D28D7660"
+```
+
+Example Output:
+
+```bash
+{
+ "evidence": {
+ "consensus_address": "cosmosvalcons1ntk8eualewuprz0gamh8hnvcem2nrcdsgz563h",
+ "height": "11",
+ "power": "100",
+ "time": "2021-10-20T16:08:38.194017624Z"
+ }
+}
+```
+
+#### All evidence
+
+Get all evidence
+
+```bash
+/cosmos/evidence/v1beta1/evidence
+```
+
+Example:
+
+```bash
+curl -X GET "http://localhost:1317/cosmos/evidence/v1beta1/evidence"
+```
+
+Example Output:
+
+```bash expandable
+{
+ "evidence": [
+ {
+ "consensus_address": "cosmosvalcons1ntk8eualewuprz0gamh8hnvcem2nrcdsgz563h",
+ "height": "11",
+ "power": "100",
+ "time": "2021-10-20T16:08:38.194017624Z"
+ }
+ ],
+ "pagination": {
+ "total": "1"
+ }
+}
+```
+
+### gRPC
+
+A user can query the `evidence` module using gRPC endpoints.
+
+#### Evidence
+
+Get evidence by hash
+
+```bash
+cosmos.evidence.v1beta1.Query/Evidence
+```
+
+Example:
+
+```bash
+grpcurl -plaintext -d '{"evidence_hash":"DF0C23E8634E480F84B9D5674A7CDC9816466DEC28A3358F73260F68D28D7660"}' localhost:9090 cosmos.evidence.v1beta1.Query/Evidence
+```
+
+Example Output:
+
+```bash
+{
+ "evidence": {
+ "consensus_address": "cosmosvalcons1ntk8eualewuprz0gamh8hnvcem2nrcdsgz563h",
+ "height": "11",
+ "power": "100",
+ "time": "2021-10-20T16:08:38.194017624Z"
+ }
+}
+```
+
+#### All evidence
+
+Get all evidence
+
+```bash
+cosmos.evidence.v1beta1.Query/AllEvidence
+```
+
+Example:
+
+```bash
+grpcurl -plaintext localhost:9090 cosmos.evidence.v1beta1.Query/AllEvidence
+```
+
+Example Output:
+
+```bash expandable
+{
+ "evidence": [
+ {
+ "consensus_address": "cosmosvalcons1ntk8eualewuprz0gamh8hnvcem2nrcdsgz563h",
+ "height": "11",
+ "power": "100",
+ "time": "2021-10-20T16:08:38.194017624Z"
+ }
+ ],
+ "pagination": {
+ "total": "1"
+ }
+}
+```
diff --git a/sdk/next/build/modules/feegrant/README.mdx b/sdk/next/build/modules/feegrant/README.mdx
new file mode 100644
index 000000000..3016cfe5f
--- /dev/null
+++ b/sdk/next/build/modules/feegrant/README.mdx
@@ -0,0 +1,3655 @@
+---
+title: 'x/feegrant'
+description: >-
+ This document specifies the fee grant module. For the full ADR, please see Fee
+ Grant ADR-029.
+---
+
+## Abstract
+
+This document specifies the fee grant module. For the full ADR, please see [Fee Grant ADR-029](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-029-fee-grant-module.md).
+
+This module allows accounts to grant fee allowances and to use fees from their accounts. Grantees can execute any transaction without the need to maintain sufficient fees.
+
+## Contents
+
+* [Concepts](#concepts)
+* [State](#state)
+ * [FeeAllowance](#feeallowance)
+ * [FeeAllowanceQueue](#feeallowancequeue)
+* [Messages](#messages)
+ * [Msg/GrantAllowance](#msggrantallowance)
+ * [Msg/RevokeAllowance](#msgrevokeallowance)
+* [Events](#events)
+* [Msg Server](#msg-server)
+ * [MsgGrantAllowance](#msggrantallowance-1)
+ * [MsgRevokeAllowance](#msgrevokeallowance-1)
+ * [Exec fee allowance](#exec-fee-allowance)
+* [Client](#client)
+ * [CLI](#cli)
+ * [gRPC](#grpc)
+
+## Concepts
+
+### Grant
+
+`Grant` is stored in the KVStore to record a grant with full context. Every grant will contain `granter`, `grantee` and what kind of `allowance` is granted. `granter` is an account address who is giving permission to `grantee` (the beneficiary account address) to pay for some or all of `grantee`'s transaction fees. `allowance` defines what kind of fee allowance (`BasicAllowance` or `PeriodicAllowance`, see below) is granted to `grantee`. `allowance` accepts an interface which implements `FeeAllowanceI`, encoded as `Any` type. There can be only one existing fee grant allowed for a `grantee` and `granter`, self grants are not allowed.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/feegrant/v1beta1/feegrant.proto#L83-L93
+```
+
+`FeeAllowanceI` looks like:
+
+```go expandable
+package feegrant
+
+import (
+
+ "time"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+// FeeAllowance implementations are tied to a given fee delegator and delegatee,
+// and are used to enforce fee grant limits.
+type FeeAllowanceI interface {
+    // Accept can use fee payment requested as well as timestamp of the current block
+    // to determine whether or not to process this. This is checked in
+    // Keeper.UseGrantedFees and the return values should match how it is handled there.
+    //
+    // If it returns an error, the fee payment is rejected, otherwise it is accepted.
+    // The FeeAllowance implementation is expected to update its internal state
+    // and will be saved again after an acceptance.
+    //
+    // If remove is true (regardless of the error), the FeeAllowance will be deleted from storage
+    // (eg. when it is used up). (See call to RevokeAllowance in Keeper.UseGrantedFees)
+    Accept(ctx sdk.Context, fee sdk.Coins, msgs []sdk.Msg) (remove bool, err error)
+
+    // ValidateBasic should evaluate this FeeAllowance for internal consistency.
+    // Don't allow negative amounts, or negative periods for example.
+    ValidateBasic() error
+
+    // ExpiresAt returns the expiry time of the allowance.
+    ExpiresAt() (*time.Time, error)
+}
+
+
+
+```
+
+### Fee Allowance types
+
+There are three types of fee allowances present at the moment:
+
+* `BasicAllowance`
+* `PeriodicAllowance`
+* `AllowedMsgAllowance`
+
+### BasicAllowance
+
+`BasicAllowance` is permission for `grantee` to use fee from a `granter`'s account. If any of the `spend_limit` or `expiration` reaches its limit, the grant will be removed from the state.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/feegrant/v1beta1/feegrant.proto#L15-L28
+```
+
+* `spend_limit` is the limit of coins that are allowed to be used from the `granter` account. If it is empty, it assumes there's no spend limit, `grantee` can use any number of available coins from `granter` account address before the expiration.
+
+* `expiration` specifies an optional time when this allowance expires. If the value is left empty, there is no expiry for the grant.
+
+* When a grant is created with empty values for `spend_limit` and `expiration`, it is still a valid grant. It won't restrict the `grantee` to use any number of coins from `granter` and it won't have any expiration. The only way to restrict the `grantee` is by revoking the grant.
+
+### PeriodicAllowance
+
+`PeriodicAllowance` is a repeating fee allowance for the mentioned period, we can mention when the grant can expire as well as when a period can reset. We can also define the maximum number of coins that can be used in a mentioned period of time.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/feegrant/v1beta1/feegrant.proto#L34-L68
+```
+
+* `basic` is the instance of `BasicAllowance` which is optional for periodic fee allowance. If empty, the grant will have no `expiration` and no `spend_limit`.
+
+* `period` is the specific period of time, after each period passes, `period_can_spend` will be reset.
+
+* `period_spend_limit` specifies the maximum number of coins that can be spent in the period.
+
+* `period_can_spend` is the number of coins left to be spent before the period\_reset time.
+
+* `period_reset` keeps track of when a next period reset should happen.
+
+### AllowedMsgAllowance
+
+`AllowedMsgAllowance` is a fee allowance, it can be any of `BasicFeeAllowance`, `PeriodicAllowance` but restricted only to the allowed messages mentioned by the granter.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/feegrant/v1beta1/feegrant.proto#L70-L81
+```
+
+* `allowance` is either `BasicAllowance` or `PeriodicAllowance`.
+
+* `allowed_messages` is an array of messages allowed to execute the given allowance.
+
+### FeeGranter flag
+
+`feegrant` module introduces a `FeeGranter` flag for CLI for the sake of executing transactions with fee granter. When this flag is set, `clientCtx` will append the granter account address for transactions generated through CLI.
+
+```go expandable
+package client
+
+import (
+
+ "crypto/tls"
+ "fmt"
+ "strings"
+ "github.com/pkg/errors"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+ "github.com/tendermint/tendermint/libs/cli"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/insecure"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/crypto/keyring"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+// ClientContextKey defines the context key used to retrieve a client.Context from
+// a command's Context.
+const ClientContextKey = sdk.ContextKey("client.context")
+
+// SetCmdClientContextHandler is to be used in a command pre-hook execution to
+// read flags that populate a Context and sets that to the command's Context.
+func SetCmdClientContextHandler(clientCtx Context, cmd *cobra.Command) (err error) {
+ clientCtx, err = ReadPersistentCommandFlags(clientCtx, cmd.Flags())
+ if err != nil {
+ return err
+}
+
+return SetCmdClientContext(cmd, clientCtx)
+}
+
+// ValidateCmd returns unknown command error or Help display if help flag set
+func ValidateCmd(cmd *cobra.Command, args []string)
+
+error {
+ var unknownCmd string
+ var skipNext bool
+ for _, arg := range args {
+ // search for help flag
+ if arg == "--help" || arg == "-h" {
+ return cmd.Help()
+}
+
+ // check if the current arg is a flag
+ switch {
+ case len(arg) > 0 && (arg[0] == '-'):
+ // the next arg should be skipped if the current arg is a
+ // flag and does not use "=" to assign the flag's value
+ if !strings.Contains(arg, "=") {
+ skipNext = true
+}
+
+else {
+ skipNext = false
+}
+ case skipNext:
+ // skip current arg
+ skipNext = false
+ case unknownCmd == "":
+ // unknown command found
+ // continue searching for help flag
+ unknownCmd = arg
+}
+
+}
+
+ // return the help screen if no unknown command is found
+ if unknownCmd != "" {
+ err := fmt.Sprintf("unknown command \"%s\" for \"%s\"", unknownCmd, cmd.CalledAs())
+
+ // build suggestions for unknown argument
+ if suggestions := cmd.SuggestionsFor(unknownCmd); len(suggestions) > 0 {
+ err += "\n\nDid you mean this?\n"
+ for _, s := range suggestions {
+ err += fmt.Sprintf("\t%v\n", s)
+}
+
+}
+
+return errors.New(err)
+}
+
+return cmd.Help()
+}
+
+// ReadPersistentCommandFlags returns a Context with fields set for "persistent"
+// or common flags that do not necessarily change with context.
+//
+// Note, the provided clientCtx may have field pre-populated. The following order
+// of precedence occurs:
+//
+// - client.Context field not pre-populated & flag not set: uses default flag value
+// - client.Context field not pre-populated & flag set: uses set flag value
+// - client.Context field pre-populated & flag not set: uses pre-populated value
+// - client.Context field pre-populated & flag set: uses set flag value
+func ReadPersistentCommandFlags(clientCtx Context, flagSet *pflag.FlagSet) (Context, error) {
+  if clientCtx.OutputFormat == "" || flagSet.Changed(cli.OutputFlag) {
+    output, _ := flagSet.GetString(cli.OutputFlag)
+    clientCtx = clientCtx.WithOutputFormat(output)
+  }
+
+  if clientCtx.HomeDir == "" || flagSet.Changed(flags.FlagHome) {
+    homeDir, _ := flagSet.GetString(flags.FlagHome)
+    clientCtx = clientCtx.WithHomeDir(homeDir)
+  }
+
+  if !clientCtx.Simulate || flagSet.Changed(flags.FlagDryRun) {
+    dryRun, _ := flagSet.GetBool(flags.FlagDryRun)
+    clientCtx = clientCtx.WithSimulation(dryRun)
+  }
+
+  if clientCtx.KeyringDir == "" || flagSet.Changed(flags.FlagKeyringDir) {
+    keyringDir, _ := flagSet.GetString(flags.FlagKeyringDir)
+
+    // The keyring directory is optional and falls back to the home directory
+    // if omitted.
+    if keyringDir == "" {
+      keyringDir = clientCtx.HomeDir
+    }
+
+    clientCtx = clientCtx.WithKeyringDir(keyringDir)
+  }
+
+  if clientCtx.ChainID == "" || flagSet.Changed(flags.FlagChainID) {
+    chainID, _ := flagSet.GetString(flags.FlagChainID)
+    clientCtx = clientCtx.WithChainID(chainID)
+  }
+
+  if clientCtx.Keyring == nil || flagSet.Changed(flags.FlagKeyringBackend) {
+    keyringBackend, _ := flagSet.GetString(flags.FlagKeyringBackend)
+    if keyringBackend != "" {
+      kr, err := NewKeyringFromBackend(clientCtx, keyringBackend)
+      if err != nil {
+        return clientCtx, err
+      }
+
+      clientCtx = clientCtx.WithKeyring(kr)
+    }
+  }
+
+  if clientCtx.Client == nil || flagSet.Changed(flags.FlagNode) {
+    rpcURI, _ := flagSet.GetString(flags.FlagNode)
+    if rpcURI != "" {
+      clientCtx = clientCtx.WithNodeURI(rpcURI)
+
+      client, err := NewClientFromNode(rpcURI)
+      if err != nil {
+        return clientCtx, err
+      }
+
+      clientCtx = clientCtx.WithClient(client)
+    }
+  }
+
+  if clientCtx.GRPCClient == nil || flagSet.Changed(flags.FlagGRPC) {
+    grpcURI, _ := flagSet.GetString(flags.FlagGRPC)
+    if grpcURI != "" {
+      var dialOpts []grpc.DialOption
+
+      useInsecure, _ := flagSet.GetBool(flags.FlagGRPCInsecure)
+      if useInsecure {
+        dialOpts = append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials()))
+      } else {
+        dialOpts = append(dialOpts, grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{
+          MinVersion: tls.VersionTLS12,
+        })))
+      }
+
+      grpcClient, err := grpc.Dial(grpcURI, dialOpts...)
+      if err != nil {
+        return Context{}, err
+      }
+
+      clientCtx = clientCtx.WithGRPCClient(grpcClient)
+    }
+  }
+
+  return clientCtx, nil
+}
+
+// readQueryCommandFlags returns an updated Context with fields set based on flags
+// defined in AddQueryFlagsToCmd. An error is returned if any flag query fails.
+//
+// Note, the provided clientCtx may have field pre-populated. The following order
+// of precedence occurs:
+//
+// - client.Context field not pre-populated & flag not set: uses default flag value
+// - client.Context field not pre-populated & flag set: uses set flag value
+// - client.Context field pre-populated & flag not set: uses pre-populated value
+// - client.Context field pre-populated & flag set: uses set flag value
+func readQueryCommandFlags(clientCtx Context, flagSet *pflag.FlagSet) (Context, error) {
+  if clientCtx.Height == 0 || flagSet.Changed(flags.FlagHeight) {
+    height, _ := flagSet.GetInt64(flags.FlagHeight)
+    clientCtx = clientCtx.WithHeight(height)
+  }
+
+  if !clientCtx.UseLedger || flagSet.Changed(flags.FlagUseLedger) {
+    useLedger, _ := flagSet.GetBool(flags.FlagUseLedger)
+    clientCtx = clientCtx.WithUseLedger(useLedger)
+  }
+
+  return ReadPersistentCommandFlags(clientCtx, flagSet)
+}
+
+// readTxCommandFlags returns an updated Context with fields set based on flags
+// defined in AddTxFlagsToCmd. An error is returned if any flag query fails.
+//
+// Note, the provided clientCtx may have field pre-populated. The following order
+// of precedence occurs:
+//
+// - client.Context field not pre-populated & flag not set: uses default flag value
+// - client.Context field not pre-populated & flag set: uses set flag value
+// - client.Context field pre-populated & flag not set: uses pre-populated value
+// - client.Context field pre-populated & flag set: uses set flag value
+func readTxCommandFlags(clientCtx Context, flagSet *pflag.FlagSet) (Context, error) {
+  clientCtx, err := ReadPersistentCommandFlags(clientCtx, flagSet)
+  if err != nil {
+    return clientCtx, err
+  }
+
+  if !clientCtx.GenerateOnly || flagSet.Changed(flags.FlagGenerateOnly) {
+    genOnly, _ := flagSet.GetBool(flags.FlagGenerateOnly)
+    clientCtx = clientCtx.WithGenerateOnly(genOnly)
+  }
+
+  if !clientCtx.Offline || flagSet.Changed(flags.FlagOffline) {
+    offline, _ := flagSet.GetBool(flags.FlagOffline)
+    clientCtx = clientCtx.WithOffline(offline)
+  }
+
+  if !clientCtx.UseLedger || flagSet.Changed(flags.FlagUseLedger) {
+    useLedger, _ := flagSet.GetBool(flags.FlagUseLedger)
+    clientCtx = clientCtx.WithUseLedger(useLedger)
+  }
+
+  if clientCtx.BroadcastMode == "" || flagSet.Changed(flags.FlagBroadcastMode) {
+    bMode, _ := flagSet.GetString(flags.FlagBroadcastMode)
+    clientCtx = clientCtx.WithBroadcastMode(bMode)
+  }
+
+  if !clientCtx.SkipConfirm || flagSet.Changed(flags.FlagSkipConfirmation) {
+    skipConfirm, _ := flagSet.GetBool(flags.FlagSkipConfirmation)
+    clientCtx = clientCtx.WithSkipConfirmation(skipConfirm)
+  }
+
+  if clientCtx.SignModeStr == "" || flagSet.Changed(flags.FlagSignMode) {
+    signModeStr, _ := flagSet.GetString(flags.FlagSignMode)
+    clientCtx = clientCtx.WithSignModeStr(signModeStr)
+  }
+
+  if clientCtx.FeePayer == nil || flagSet.Changed(flags.FlagFeePayer) {
+    payer, _ := flagSet.GetString(flags.FlagFeePayer)
+    if payer != "" {
+      payerAcc, err := sdk.AccAddressFromBech32(payer)
+      if err != nil {
+        return clientCtx, err
+      }
+
+      clientCtx = clientCtx.WithFeePayerAddress(payerAcc)
+    }
+  }
+
+  if clientCtx.FeeGranter == nil || flagSet.Changed(flags.FlagFeeGranter) {
+    granter, _ := flagSet.GetString(flags.FlagFeeGranter)
+    if granter != "" {
+      granterAcc, err := sdk.AccAddressFromBech32(granter)
+      if err != nil {
+        return clientCtx, err
+      }
+
+      clientCtx = clientCtx.WithFeeGranterAddress(granterAcc)
+    }
+  }
+
+  if clientCtx.From == "" || flagSet.Changed(flags.FlagFrom) {
+    from, _ := flagSet.GetString(flags.FlagFrom)
+    fromAddr, fromName, keyType, err := GetFromFields(clientCtx, clientCtx.Keyring, from)
+    if err != nil {
+      return clientCtx, err
+    }
+
+    clientCtx = clientCtx.WithFrom(from).WithFromAddress(fromAddr).WithFromName(fromName)
+
+    // If the `from` signer account is a ledger key, we need to use
+    // SIGN_MODE_AMINO_JSON, because ledger doesn't support proto yet.
+    // ref: https://github.com/cosmos/cosmos-sdk/issues/8109
+    if keyType == keyring.TypeLedger && clientCtx.SignModeStr != flags.SignModeLegacyAminoJSON && !clientCtx.LedgerHasProtobuf {
+      fmt.Println("Default sign-mode 'direct' not supported by Ledger, using sign-mode 'amino-json'.")
+      clientCtx = clientCtx.WithSignModeStr(flags.SignModeLegacyAminoJSON)
+    }
+  }
+
+  if !clientCtx.IsAux || flagSet.Changed(flags.FlagAux) {
+    isAux, _ := flagSet.GetBool(flags.FlagAux)
+    clientCtx = clientCtx.WithAux(isAux)
+    if isAux {
+      // If the user didn't explicitly set an --output flag, use JSON by
+      // default.
+      if clientCtx.OutputFormat == "" || !flagSet.Changed(cli.OutputFlag) {
+        clientCtx = clientCtx.WithOutputFormat("json")
+      }
+
+      // If the user didn't explicitly set a --sign-mode flag, use
+      // DIRECT_AUX by default.
+      if clientCtx.SignModeStr == "" || !flagSet.Changed(flags.FlagSignMode) {
+        clientCtx = clientCtx.WithSignModeStr(flags.SignModeDirectAux)
+      }
+    }
+  }
+
+  return clientCtx, nil
+}
+
+// GetClientQueryContext returns a Context from a command with fields set based on flags
+// defined in AddQueryFlagsToCmd. An error is returned if any flag query fails.
+//
+// - client.Context field not pre-populated & flag not set: uses default flag value
+// - client.Context field not pre-populated & flag set: uses set flag value
+// - client.Context field pre-populated & flag not set: uses pre-populated value
+// - client.Context field pre-populated & flag set: uses set flag value
+func GetClientQueryContext(cmd *cobra.Command) (Context, error) {
+  ctx := GetClientContextFromCmd(cmd)
+  return readQueryCommandFlags(ctx, cmd.Flags())
+}
+
+// GetClientTxContext returns a Context from a command with fields set based on flags
+// defined in AddTxFlagsToCmd. An error is returned if any flag query fails.
+//
+// - client.Context field not pre-populated & flag not set: uses default flag value
+// - client.Context field not pre-populated & flag set: uses set flag value
+// - client.Context field pre-populated & flag not set: uses pre-populated value
+// - client.Context field pre-populated & flag set: uses set flag value
+func GetClientTxContext(cmd *cobra.Command) (Context, error) {
+  ctx := GetClientContextFromCmd(cmd)
+  return readTxCommandFlags(ctx, cmd.Flags())
+}
+
+// GetClientContextFromCmd returns a Context from a command or an empty Context
+// if it has not been set.
+func GetClientContextFromCmd(cmd *cobra.Command) Context {
+  if v := cmd.Context().Value(ClientContextKey); v != nil {
+    clientCtxPtr := v.(*Context)
+    return *clientCtxPtr
+  }
+
+  return Context{}
+}
+
+// SetCmdClientContext sets a command's Context value to the provided argument.
+// An error is returned if the command's context does not already carry a
+// *Context pointer to write into.
+func SetCmdClientContext(cmd *cobra.Command, clientCtx Context) error {
+  v := cmd.Context().Value(ClientContextKey)
+  if v == nil {
+    return errors.New("client context not set")
+  }
+
+  clientCtxPtr := v.(*Context)
+  *clientCtxPtr = clientCtx
+
+  return nil
+}
+```
+
+```go expandable
+package tx
+
+import (
+
+ "bufio"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+
+ gogogrpc "github.com/cosmos/gogoproto/grpc"
+ "github.com/spf13/pflag"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/input"
+ cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/types/tx"
+ "github.com/cosmos/cosmos-sdk/types/tx/signing"
+ authsigning "github.com/cosmos/cosmos-sdk/x/auth/signing"
+)
+
+// GenerateOrBroadcastTxCLI will either generate and print an unsigned transaction
+// or sign it and broadcast it, returning an error upon failure.
+func GenerateOrBroadcastTxCLI(clientCtx client.Context, flagSet *pflag.FlagSet, msgs ...sdk.Msg) error {
+  txf := NewFactoryCLI(clientCtx, flagSet)
+  return GenerateOrBroadcastTxWithFactory(clientCtx, txf, msgs...)
+}
+
+// GenerateOrBroadcastTxWithFactory will either generate and print an unsigned transaction
+// or sign it and broadcast it, returning an error upon failure.
+func GenerateOrBroadcastTxWithFactory(clientCtx client.Context, txf Factory, msgs ...sdk.Msg) error {
+  // Validate all msgs before generating or broadcasting the tx.
+  // We were calling ValidateBasic separately in each CLI handler before.
+  // Right now, we're factorizing that call inside this function.
+  // ref: https://github.com/cosmos/cosmos-sdk/pull/9236#discussion_r623803504
+  for _, msg := range msgs {
+    if err := msg.ValidateBasic(); err != nil {
+      return err
+    }
+  }
+
+  // If the --aux flag is set, we simply generate and print the AuxSignerData.
+  if clientCtx.IsAux {
+    auxSignerData, err := makeAuxSignerData(clientCtx, txf, msgs...)
+    if err != nil {
+      return err
+    }
+
+    return clientCtx.PrintProto(&auxSignerData)
+  }
+
+  if clientCtx.GenerateOnly {
+    return txf.PrintUnsignedTx(clientCtx, msgs...)
+  }
+
+  return BroadcastTx(clientCtx, txf, msgs...)
+}
+
+// BroadcastTx attempts to generate, sign and broadcast a transaction with the
+// given set of messages. It will also simulate gas requirements if necessary.
+// It will return an error upon failure.
+func BroadcastTx(clientCtx client.Context, txf Factory, msgs ...sdk.Msg) error {
+  txf, err := txf.Prepare(clientCtx)
+  if err != nil {
+    return err
+  }
+
+  if txf.SimulateAndExecute() || clientCtx.Simulate {
+    _, adjusted, err := CalculateGas(clientCtx, txf, msgs...)
+    if err != nil {
+      return err
+    }
+
+    txf = txf.WithGas(adjusted)
+    _, _ = fmt.Fprintf(os.Stderr, "%s\n", GasEstimateResponse{GasEstimate: txf.Gas()})
+  }
+
+  if clientCtx.Simulate {
+    return nil
+  }
+
+  tx, err := txf.BuildUnsignedTx(msgs...)
+  if err != nil {
+    return err
+  }
+
+  if !clientCtx.SkipConfirm {
+    txBytes, err := clientCtx.TxConfig.TxJSONEncoder()(tx.GetTx())
+    if err != nil {
+      return err
+    }
+
+    // Fall back to raw stderr output if pretty-printing the JSON fails.
+    if err := clientCtx.PrintRaw(json.RawMessage(txBytes)); err != nil {
+      _, _ = fmt.Fprintf(os.Stderr, "%s\n", txBytes)
+    }
+
+    buf := bufio.NewReader(os.Stdin)
+    ok, err := input.GetConfirmation("confirm transaction before signing and broadcasting", buf, os.Stderr)
+    if err != nil || !ok {
+      _, _ = fmt.Fprintf(os.Stderr, "%s\n", "cancelled transaction")
+      return err
+    }
+  }
+
+  err = Sign(txf, clientCtx.GetFromName(), tx, true)
+  if err != nil {
+    return err
+  }
+
+  txBytes, err := clientCtx.TxConfig.TxEncoder()(tx.GetTx())
+  if err != nil {
+    return err
+  }
+
+  // broadcast to a Tendermint node
+  res, err := clientCtx.BroadcastTx(txBytes)
+  if err != nil {
+    return err
+  }
+
+  return clientCtx.PrintProto(res)
+}
+
+// CalculateGas simulates the execution of a transaction and returns the
+// simulation response obtained by the query and the adjusted gas amount.
+func CalculateGas(
+  clientCtx gogogrpc.ClientConn, txf Factory, msgs ...sdk.Msg,
+) (*tx.SimulateResponse, uint64, error) {
+  txBytes, err := txf.BuildSimTx(msgs...)
+  if err != nil {
+    return nil, 0, err
+  }
+
+  txSvcClient := tx.NewServiceClient(clientCtx)
+  simRes, err := txSvcClient.Simulate(context.Background(), &tx.SimulateRequest{
+    TxBytes: txBytes,
+  })
+  if err != nil {
+    return nil, 0, err
+  }
+
+  // Scale the simulated gas usage by the factory's gas adjustment factor.
+  return simRes, uint64(txf.GasAdjustment() * float64(simRes.GasInfo.GasUsed)), nil
+}
+
+// SignWithPrivKey signs a given tx with the given private key, and returns the
+// corresponding SignatureV2 if the signing is successful.
+func SignWithPrivKey(
+  signMode signing.SignMode, signerData authsigning.SignerData,
+  txBuilder client.TxBuilder, priv cryptotypes.PrivKey, txConfig client.TxConfig,
+  accSeq uint64,
+) (signing.SignatureV2, error) {
+  var sigV2 signing.SignatureV2
+
+  // Generate the bytes to be signed.
+  signBytes, err := txConfig.SignModeHandler().GetSignBytes(signMode, signerData, txBuilder.GetTx())
+  if err != nil {
+    return sigV2, err
+  }
+
+  // Sign those bytes
+  signature, err := priv.Sign(signBytes)
+  if err != nil {
+    return sigV2, err
+  }
+
+  // Construct the SignatureV2 struct
+  sigData := signing.SingleSignatureData{
+    SignMode:  signMode,
+    Signature: signature,
+  }
+
+  sigV2 = signing.SignatureV2{
+    PubKey:   priv.PubKey(),
+    Data:     &sigData,
+    Sequence: accSeq,
+  }
+
+  return sigV2, nil
+}
+
+// countDirectSigners counts the number of DIRECT signers in a signature data,
+// recursing into multisig signature data.
+func countDirectSigners(data signing.SignatureData) int {
+  switch data := data.(type) {
+  case *signing.SingleSignatureData:
+    if data.SignMode == signing.SignMode_SIGN_MODE_DIRECT {
+      return 1
+    }
+
+    return 0
+  case *signing.MultiSignatureData:
+    directSigners := 0
+    for _, d := range data.Signatures {
+      directSigners += countDirectSigners(d)
+    }
+
+    return directSigners
+  default:
+    panic("unreachable case")
+  }
+}
+
+// checkMultipleSigners checks that there can be maximum one DIRECT signer in
+// a tx.
+func checkMultipleSigners(tx authsigning.Tx) error {
+  directSigners := 0
+
+  sigsV2, err := tx.GetSignaturesV2()
+  if err != nil {
+    return err
+  }
+
+  for _, sig := range sigsV2 {
+    directSigners += countDirectSigners(sig.Data)
+    if directSigners > 1 {
+      return sdkerrors.ErrNotSupported.Wrap("txs signed with CLI can have maximum 1 DIRECT signer")
+    }
+  }
+
+  return nil
+}
+
+// Sign signs a given tx with a named key. The bytes signed over are canonical.
+// The resulting signature will be added to the transaction builder overwriting the previous
+// ones if overwrite=true (otherwise, the signature will be appended).
+// Signing a transaction with multiple signers in the DIRECT mode is not supported and will
+// return an error.
+// An error is returned upon failure.
+func Sign(txf Factory, name string, txBuilder client.TxBuilder, overwriteSig bool) error {
+  if txf.keybase == nil {
+    return errors.New("keybase must be set prior to signing a transaction")
+  }
+
+  signMode := txf.signMode
+  if signMode == signing.SignMode_SIGN_MODE_UNSPECIFIED {
+    // use the SignModeHandler's default mode if unspecified
+    signMode = txf.txConfig.SignModeHandler().DefaultMode()
+  }
+
+  k, err := txf.keybase.Key(name)
+  if err != nil {
+    return err
+  }
+
+  pubKey, err := k.GetPubKey()
+  if err != nil {
+    return err
+  }
+
+  signerData := authsigning.SignerData{
+    ChainID:       txf.chainID,
+    AccountNumber: txf.accountNumber,
+    Sequence:      txf.sequence,
+    PubKey:        pubKey,
+    Address:       sdk.AccAddress(pubKey.Address()).String(),
+  }
+
+  // For SIGN_MODE_DIRECT, calling SetSignatures calls setSignerInfos on
+  // TxBuilder under the hood, and SignerInfos is needed to generate the
+  // sign bytes. This is the reason for setting SetSignatures here, with a
+  // nil signature.
+  //
+  // Note: this line is not needed for SIGN_MODE_LEGACY_AMINO, but putting it
+  // also doesn't affect its generated sign bytes, so for code's simplicity
+  // sake, we put it here.
+  sigData := signing.SingleSignatureData{
+    SignMode:  signMode,
+    Signature: nil,
+  }
+  sig := signing.SignatureV2{
+    PubKey:   pubKey,
+    Data:     &sigData,
+    Sequence: txf.Sequence(),
+  }
+
+  var prevSignatures []signing.SignatureV2
+  if !overwriteSig {
+    prevSignatures, err = txBuilder.GetTx().GetSignaturesV2()
+    if err != nil {
+      return err
+    }
+  }
+
+  // Overwrite or append signer infos.
+  var sigs []signing.SignatureV2
+  if overwriteSig {
+    sigs = []signing.SignatureV2{sig}
+  } else {
+    sigs = append(sigs, prevSignatures...)
+    sigs = append(sigs, sig)
+  }
+
+  if err := txBuilder.SetSignatures(sigs...); err != nil {
+    return err
+  }
+
+  if err := checkMultipleSigners(txBuilder.GetTx()); err != nil {
+    return err
+  }
+
+  // Generate the bytes to be signed.
+  bytesToSign, err := txf.txConfig.SignModeHandler().GetSignBytes(signMode, signerData, txBuilder.GetTx())
+  if err != nil {
+    return err
+  }
+
+  // Sign those bytes
+  sigBytes, _, err := txf.keybase.Sign(name, bytesToSign)
+  if err != nil {
+    return err
+  }
+
+  // Construct the SignatureV2 struct
+  sigData = signing.SingleSignatureData{
+    SignMode:  signMode,
+    Signature: sigBytes,
+  }
+  sig = signing.SignatureV2{
+    PubKey:   pubKey,
+    Data:     &sigData,
+    Sequence: txf.Sequence(),
+  }
+
+  if overwriteSig {
+    err = txBuilder.SetSignatures(sig)
+  } else {
+    prevSignatures = append(prevSignatures, sig)
+    err = txBuilder.SetSignatures(prevSignatures...)
+  }
+
+  if err != nil {
+    return fmt.Errorf("unable to set signatures on payload: %w", err)
+  }
+
+  // Run optional preprocessing if specified. By default, this is unset
+  // and will return nil.
+  return txf.PreprocessTx(name, txBuilder)
+}
+
+// GasEstimateResponse defines a response definition for tx gas estimation.
+// BroadcastTx prints this (via its Stringer) to stderr when simulating gas.
+type GasEstimateResponse struct {
+ GasEstimate uint64 `json:"gas_estimate" yaml:"gas_estimate"`
+}
+
+// String implements fmt.Stringer for GasEstimateResponse.
+func (gr GasEstimateResponse) String() string {
+  return fmt.Sprintf("gas estimate: %d", gr.GasEstimate)
+}
+
+// makeAuxSignerData generates an AuxSignerData from the client inputs.
+func makeAuxSignerData(clientCtx client.Context, f Factory, msgs ...sdk.Msg) (tx.AuxSignerData, error) {
+  b := NewAuxTxBuilder()
+
+  fromAddress, name, _, err := client.GetFromFields(clientCtx, clientCtx.Keyring, clientCtx.From)
+  if err != nil {
+    return tx.AuxSignerData{}, err
+  }
+
+  b.SetAddress(fromAddress.String())
+
+  if clientCtx.Offline {
+    // Offline mode: use the account number and sequence from the factory.
+    b.SetAccountNumber(f.accountNumber)
+    b.SetSequence(f.sequence)
+  } else {
+    accNum, seq, err := clientCtx.AccountRetriever.GetAccountNumberSequence(clientCtx, fromAddress)
+    if err != nil {
+      return tx.AuxSignerData{}, err
+    }
+
+    b.SetAccountNumber(accNum)
+    b.SetSequence(seq)
+  }
+
+  err = b.SetMsgs(msgs...)
+  if err != nil {
+    return tx.AuxSignerData{}, err
+  }
+
+  if f.tip != nil {
+    if _, err := sdk.AccAddressFromBech32(f.tip.Tipper); err != nil {
+      return tx.AuxSignerData{}, sdkerrors.ErrInvalidAddress.Wrap("tipper must be a bech32 address")
+    }
+
+    b.SetTip(f.tip)
+  }
+
+  err = b.SetSignMode(f.SignMode())
+  if err != nil {
+    return tx.AuxSignerData{}, err
+  }
+
+  key, err := clientCtx.Keyring.Key(name)
+  if err != nil {
+    return tx.AuxSignerData{}, err
+  }
+
+  pub, err := key.GetPubKey()
+  if err != nil {
+    return tx.AuxSignerData{}, err
+  }
+
+  err = b.SetPubKey(pub)
+  if err != nil {
+    return tx.AuxSignerData{}, err
+  }
+
+  b.SetChainID(clientCtx.ChainID)
+
+  signBz, err := b.GetSignBytes()
+  if err != nil {
+    return tx.AuxSignerData{}, err
+  }
+
+  sig, _, err := clientCtx.Keyring.Sign(name, signBz)
+  if err != nil {
+    return tx.AuxSignerData{}, err
+  }
+
+  b.SetSignature(sig)
+
+  return b.GetAuxSignerData()
+}
+```
+
+```go expandable
+package tx
+
+import (
+
+ "github.com/cosmos/gogoproto/proto"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/types/tx"
+ "github.com/cosmos/cosmos-sdk/types/tx/signing"
+ "github.com/cosmos/cosmos-sdk/x/auth/ante"
+ authsigning "github.com/cosmos/cosmos-sdk/x/auth/signing"
+)
+
+// wrapper is a wrapper around the tx.Tx proto.Message which retains the raw
+// body and auth_info bytes.
+type wrapper struct {
+ cdc codec.Codec
+
+ tx *tx.Tx
+
+ // bodyBz represents the protobuf encoding of TxBody. This should be encoding
+ // from the client using TxRaw if the tx was decoded from the wire
+ bodyBz []byte
+
+ // authInfoBz represents the protobuf encoding of AuthInfo. This should be encoding
+ // from the client using TxRaw if the tx was decoded from the wire
+ authInfoBz []byte
+
+ // NOTE(review): presumably set during decoding when the TxBody carried
+ // unknown non-critical fields — confirm against the decoder.
+ txBodyHasUnknownNonCriticals bool
+}
+
+// Compile-time interface assertions for wrapper. The original listed
+// tx.TipTx twice; the duplicate is removed.
+var (
+  _ authsigning.Tx             = &wrapper{}
+  _ client.TxBuilder           = &wrapper{}
+  _ tx.TipTx                   = &wrapper{}
+  _ ante.HasExtensionOptionsTx = &wrapper{}
+  _ ExtensionOptionsTxBuilder  = &wrapper{}
+)
+
+// ExtensionOptionsTxBuilder defines a TxBuilder that can also set extensions.
+type ExtensionOptionsTxBuilder interface {
+  client.TxBuilder
+
+  SetExtensionOptions(...*codectypes.Any)
+  SetNonCriticalExtensionOptions(...*codectypes.Any)
+}
+
+// newBuilder returns a wrapper with an empty body, auth info and fee
+// pre-allocated so setters can write into them directly.
+func newBuilder(cdc codec.Codec) *wrapper {
+  return &wrapper{
+    cdc: cdc,
+    tx: &tx.Tx{
+      Body: &tx.TxBody{},
+      AuthInfo: &tx.AuthInfo{
+        Fee: &tx.Fee{},
+      },
+    },
+  }
+}
+
+// GetMsgs returns the transaction's messages.
+func (w *wrapper) GetMsgs() []sdk.Msg {
+  return w.tx.GetMsgs()
+}
+
+// ValidateBasic performs stateless validation of the underlying transaction.
+func (w *wrapper) ValidateBasic() error {
+  return w.tx.ValidateBasic()
+}
+
+// getBodyBytes lazily marshals and caches the protobuf encoding of the tx body.
+func (w *wrapper) getBodyBytes() []byte {
+  if len(w.bodyBz) == 0 {
+    // if bodyBz is empty, then marshal the body. bodyBz will generally
+    // be set to nil whenever SetBody is called so the result of calling
+    // this method should always return the correct bytes. Note that after
+    // decoding bodyBz is derived from TxRaw so that it matches what was
+    // transmitted over the wire
+    var err error
+    w.bodyBz, err = proto.Marshal(w.tx.Body)
+    if err != nil {
+      panic(err)
+    }
+  }
+
+  return w.bodyBz
+}
+
+// getAuthInfoBytes lazily marshals and caches the protobuf encoding of the
+// tx auth info.
+func (w *wrapper) getAuthInfoBytes() []byte {
+  if len(w.authInfoBz) == 0 {
+    // if authInfoBz is empty, then marshal the body. authInfoBz will generally
+    // be set to nil whenever SetAuthInfo is called so the result of calling
+    // this method should always return the correct bytes. Note that after
+    // decoding authInfoBz is derived from TxRaw so that it matches what was
+    // transmitted over the wire
+    var err error
+    w.authInfoBz, err = proto.Marshal(w.tx.AuthInfo)
+    if err != nil {
+      panic(err)
+    }
+  }
+
+  return w.authInfoBz
+}
+
+// GetSigners returns the transaction's signer addresses.
+func (w *wrapper) GetSigners() []sdk.AccAddress {
+  return w.tx.GetSigners()
+}
+
+// GetPubKeys returns one public key per signer info; entries stay nil when the
+// signer info has no public key set.
+func (w *wrapper) GetPubKeys() ([]cryptotypes.PubKey, error) {
+  signerInfos := w.tx.AuthInfo.SignerInfos
+  pks := make([]cryptotypes.PubKey, len(signerInfos))
+
+  for i, si := range signerInfos {
+    // NOTE: it is okay to leave this nil if there is no PubKey in the SignerInfo.
+    // PubKey's can be left unset in SignerInfo.
+    if si.PublicKey == nil {
+      continue
+    }
+
+    pkAny := si.PublicKey.GetCachedValue()
+    pk, ok := pkAny.(cryptotypes.PubKey)
+    if !ok {
+      return nil, sdkerrors.Wrapf(sdkerrors.ErrLogic, "Expecting PubKey, got: %T", pkAny)
+    }
+
+    pks[i] = pk
+  }
+
+  return pks, nil
+}
+
+// GetGas returns the transaction's gas limit.
+func (w *wrapper) GetGas() uint64 {
+  return w.tx.AuthInfo.Fee.GasLimit
+}
+
+// GetFee returns the transaction's fee amount.
+func (w *wrapper) GetFee() sdk.Coins {
+  return w.tx.AuthInfo.Fee.Amount
+}
+
+// FeePayer returns the explicitly set fee payer, falling back to the first
+// signer when none is specified.
+func (w *wrapper) FeePayer() sdk.AccAddress {
+  feePayer := w.tx.AuthInfo.Fee.Payer
+  if feePayer != "" {
+    return sdk.MustAccAddressFromBech32(feePayer)
+  }
+
+  // use first signer as default if no payer specified
+  return w.GetSigners()[0]
+}
+
+// FeeGranter returns the fee granter address, or nil when none is set.
+func (w *wrapper) FeeGranter() sdk.AccAddress {
+  feeGranter := w.tx.AuthInfo.Fee.Granter
+  if feeGranter != "" {
+    return sdk.MustAccAddressFromBech32(feeGranter)
+  }
+
+  return nil
+}
+
+// GetTip returns the transaction's tip (may be nil).
+func (w *wrapper) GetTip() *tx.Tip {
+  return w.tx.AuthInfo.Tip
+}
+
+// GetMemo returns the transaction memo.
+func (w *wrapper) GetMemo() string {
+  return w.tx.Body.Memo
+}
+
+// GetTimeoutHeight returns the transaction's timeout height (if set).
+func (w *wrapper) GetTimeoutHeight() uint64 {
+  return w.tx.Body.TimeoutHeight
+}
+
+func (w *wrapper)
+
+GetSignaturesV2() ([]signing.SignatureV2, error) {
+ signerInfos := w.tx.AuthInfo.SignerInfos
+ sigs := w.tx.Signatures
+ pubKeys, err := w.GetPubKeys()
+ if err != nil {
+ return nil, err
+}
+ n := len(signerInfos)
+ res := make([]signing.SignatureV2, n)
+ for i, si := range signerInfos {
+ // handle nil signatures (in case of simulation)
+ if si.ModeInfo == nil {
+ res[i] = signing.SignatureV2{
+ PubKey: pubKeys[i],
+}
+
+}
+
+else {
+ var err error
+ sigData, err := ModeInfoAndSigToSignatureData(si.ModeInfo, sigs[i])
+ if err != nil {
+ return nil, err
+}
+ // sequence number is functionally a transaction nonce and referred to as such in the SDK
+ nonce := si.GetSequence()
+
+res[i] = signing.SignatureV2{
+ PubKey: pubKeys[i],
+ Data: sigData,
+ Sequence: nonce,
+}
+
+
+}
+
+}
+
+return res, nil
+}
+
+// SetMsgs sets the transaction's messages and invalidates the body cache.
+func (w *wrapper) SetMsgs(msgs ...sdk.Msg) error {
+  anys, err := tx.SetMsgs(msgs)
+  if err != nil {
+    return err
+  }
+
+  w.tx.Body.Messages = anys
+
+  // set bodyBz to nil because the cached bodyBz no longer matches tx.Body
+  w.bodyBz = nil
+
+  return nil
+}
+
+// SetTimeoutHeight sets the transaction's height timeout.
+func (w *wrapper) SetTimeoutHeight(height uint64) {
+  w.tx.Body.TimeoutHeight = height
+
+  // set bodyBz to nil because the cached bodyBz no longer matches tx.Body
+  w.bodyBz = nil
+}
+
+// SetMemo sets the transaction memo.
+func (w *wrapper) SetMemo(memo string) {
+  w.tx.Body.Memo = memo
+
+  // set bodyBz to nil because the cached bodyBz no longer matches tx.Body
+  w.bodyBz = nil
+}
+
+// SetGasLimit sets the fee gas limit, allocating the Fee struct if needed.
+func (w *wrapper) SetGasLimit(limit uint64) {
+  if w.tx.AuthInfo.Fee == nil {
+    w.tx.AuthInfo.Fee = &tx.Fee{}
+  }
+
+  w.tx.AuthInfo.Fee.GasLimit = limit
+
+  // set authInfoBz to nil because the cached authInfoBz no longer matches tx.AuthInfo
+  w.authInfoBz = nil
+}
+
+// SetFeeAmount sets the fee coins, allocating the Fee struct if needed.
+func (w *wrapper) SetFeeAmount(coins sdk.Coins) {
+  if w.tx.AuthInfo.Fee == nil {
+    w.tx.AuthInfo.Fee = &tx.Fee{}
+  }
+
+  w.tx.AuthInfo.Fee.Amount = coins
+
+  // set authInfoBz to nil because the cached authInfoBz no longer matches tx.AuthInfo
+  w.authInfoBz = nil
+}
+
+// SetTip sets the transaction tip.
+func (w *wrapper) SetTip(tip *tx.Tip) {
+  w.tx.AuthInfo.Tip = tip
+
+  // set authInfoBz to nil because the cached authInfoBz no longer matches tx.AuthInfo
+  w.authInfoBz = nil
+}
+
+// SetFeePayer sets the fee payer address, allocating the Fee struct if needed.
+func (w *wrapper) SetFeePayer(feePayer sdk.AccAddress) {
+  if w.tx.AuthInfo.Fee == nil {
+    w.tx.AuthInfo.Fee = &tx.Fee{}
+  }
+
+  w.tx.AuthInfo.Fee.Payer = feePayer.String()
+
+  // set authInfoBz to nil because the cached authInfoBz no longer matches tx.AuthInfo
+  w.authInfoBz = nil
+}
+
+// SetFeeGranter sets the fee granter address, allocating the Fee struct if needed.
+func (w *wrapper) SetFeeGranter(feeGranter sdk.AccAddress) {
+  if w.tx.AuthInfo.Fee == nil {
+    w.tx.AuthInfo.Fee = &tx.Fee{}
+  }
+
+  w.tx.AuthInfo.Fee.Granter = feeGranter.String()
+
+  // set authInfoBz to nil because the cached authInfoBz no longer matches tx.AuthInfo
+  w.authInfoBz = nil
+}
+
+// SetSignatures converts SignatureV2 values into signer infos plus raw
+// signature bytes and stores both on the transaction.
+func (w *wrapper) SetSignatures(signatures ...signing.SignatureV2) error {
+  n := len(signatures)
+  signerInfos := make([]*tx.SignerInfo, n)
+  rawSigs := make([][]byte, n)
+
+  for i, sig := range signatures {
+    var modeInfo *tx.ModeInfo
+    modeInfo, rawSigs[i] = SignatureDataToModeInfoAndSig(sig.Data)
+
+    any, err := codectypes.NewAnyWithValue(sig.PubKey)
+    if err != nil {
+      return err
+    }
+
+    signerInfos[i] = &tx.SignerInfo{
+      PublicKey: any,
+      ModeInfo:  modeInfo,
+      Sequence:  sig.Sequence,
+    }
+  }
+
+  w.setSignerInfos(signerInfos)
+  w.setSignatures(rawSigs)
+
+  return nil
+}
+
+// setSignerInfos replaces all signer infos and invalidates the auth info cache.
+func (w *wrapper) setSignerInfos(infos []*tx.SignerInfo) {
+  w.tx.AuthInfo.SignerInfos = infos
+  // set authInfoBz to nil because the cached authInfoBz no longer matches tx.AuthInfo
+  w.authInfoBz = nil
+}
+
+// setSignerInfoAtIndex sets one signer info, allocating the slice if needed.
+func (w *wrapper) setSignerInfoAtIndex(index int, info *tx.SignerInfo) {
+  if w.tx.AuthInfo.SignerInfos == nil {
+    w.tx.AuthInfo.SignerInfos = make([]*tx.SignerInfo, len(w.GetSigners()))
+  }
+
+  w.tx.AuthInfo.SignerInfos[index] = info
+  // set authInfoBz to nil because the cached authInfoBz no longer matches tx.AuthInfo
+  w.authInfoBz = nil
+}
+
+// setSignatures replaces the raw signature bytes.
+func (w *wrapper) setSignatures(sigs [][]byte) {
+  w.tx.Signatures = sigs
+}
+
+// setSignatureAtIndex sets one raw signature, allocating the slice if needed.
+func (w *wrapper) setSignatureAtIndex(index int, sig []byte) {
+  if w.tx.Signatures == nil {
+    w.tx.Signatures = make([][]byte, len(w.GetSigners()))
+  }
+
+  w.tx.Signatures[index] = sig
+}
+
+// GetTx returns the wrapper as an authsigning.Tx.
+func (w *wrapper) GetTx() authsigning.Tx {
+  return w
+}
+
+// GetProtoTx returns the underlying tx.Tx proto message.
+func (w *wrapper) GetProtoTx() *tx.Tx {
+  return w.tx
+}
+
+// Deprecated: AsAny extracts proto Tx and wraps it into Any.
+// NOTE: You should probably use `GetProtoTx` if you want to serialize the transaction.
+func (w *wrapper) AsAny() *codectypes.Any {
+  return codectypes.UnsafePackAny(w.tx)
+}
+
+// WrapTx creates a TxBuilder wrapper around a tx.Tx proto message.
+func WrapTx(protoTx *tx.Tx) client.TxBuilder {
+  return &wrapper{
+    tx: protoTx,
+  }
+}
+
+// GetExtensionOptions returns the tx body's extension options.
+func (w *wrapper) GetExtensionOptions() []*codectypes.Any {
+  return w.tx.Body.ExtensionOptions
+}
+
+// GetNonCriticalExtensionOptions returns the tx body's non-critical extension options.
+func (w *wrapper) GetNonCriticalExtensionOptions() []*codectypes.Any {
+  return w.tx.Body.NonCriticalExtensionOptions
+}
+
+// SetExtensionOptions sets the tx body's extension options and invalidates the
+// body cache.
+func (w *wrapper) SetExtensionOptions(extOpts ...*codectypes.Any) {
+  w.tx.Body.ExtensionOptions = extOpts
+  w.bodyBz = nil
+}
+
+// SetNonCriticalExtensionOptions sets the tx body's non-critical extension
+// options and invalidates the body cache.
+func (w *wrapper) SetNonCriticalExtensionOptions(extOpts ...*codectypes.Any) {
+  w.tx.Body.NonCriticalExtensionOptions = extOpts
+  w.bodyBz = nil
+}
+
+func (w *wrapper)
+
+AddAuxSignerData(data tx.AuxSignerData)
+
+error {
+ err := data.ValidateBasic()
+ if err != nil {
+ return err
+}
+
+w.bodyBz = data.SignDoc.BodyBytes
+
+ var body tx.TxBody
+ err = w.cdc.Unmarshal(w.bodyBz, &body)
+ if err != nil {
+ return err
+}
+ if w.tx.Body.Memo != "" && w.tx.Body.Memo != body.Memo {
+ return sdkerrors.ErrInvalidRequest.Wrapf("TxBuilder has memo %s, got %s in AuxSignerData", w.tx.Body.Memo, body.Memo)
+}
+ if w.tx.Body.TimeoutHeight != 0 && w.tx.Body.TimeoutHeight != body.TimeoutHeight {
+ return sdkerrors.ErrInvalidRequest.Wrapf("TxBuilder has timeout height %d, got %d in AuxSignerData", w.tx.Body.TimeoutHeight, body.TimeoutHeight)
+}
+ if len(w.tx.Body.ExtensionOptions) != 0 {
+ if len(w.tx.Body.ExtensionOptions) != len(body.ExtensionOptions) {
+ return sdkerrors.ErrInvalidRequest.Wrapf("TxBuilder has %d extension options, got %d in AuxSignerData", len(w.tx.Body.ExtensionOptions), len(body.ExtensionOptions))
+}
+ for i, o := range w.tx.Body.ExtensionOptions {
+ if !o.Equal(body.ExtensionOptions[i]) {
+ return sdkerrors.ErrInvalidRequest.Wrapf("TxBuilder has extension option %+v at index %d, got %+v in AuxSignerData", o, i, body.ExtensionOptions[i])
+}
+
+}
+
+}
+ if len(w.tx.Body.NonCriticalExtensionOptions) != 0 {
+ if len(w.tx.Body.NonCriticalExtensionOptions) != len(body.NonCriticalExtensionOptions) {
+ return sdkerrors.ErrInvalidRequest.Wrapf("TxBuilder has %d non-critical extension options, got %d in AuxSignerData", len(w.tx.Body.NonCriticalExtensionOptions), len(body.NonCriticalExtensionOptions))
+}
+ for i, o := range w.tx.Body.NonCriticalExtensionOptions {
+ if !o.Equal(body.NonCriticalExtensionOptions[i]) {
+ return sdkerrors.ErrInvalidRequest.Wrapf("TxBuilder has non-critical extension option %+v at index %d, got %+v in AuxSignerData", o, i, body.NonCriticalExtensionOptions[i])
+}
+
+}
+
+}
+ if len(w.tx.Body.Messages) != 0 {
+ if len(w.tx.Body.Messages) != len(body.Messages) {
+ return sdkerrors.ErrInvalidRequest.Wrapf("TxBuilder has %d Msgs, got %d in AuxSignerData", len(w.tx.Body.Messages), len(body.Messages))
+}
+ for i, o := range w.tx.Body.Messages {
+ if !o.Equal(body.Messages[i]) {
+ return sdkerrors.ErrInvalidRequest.Wrapf("TxBuilder has Msg %+v at index %d, got %+v in AuxSignerData", o, i, body.Messages[i])
+}
+
+}
+
+}
+ if w.tx.AuthInfo.Tip != nil && data.SignDoc.Tip != nil {
+ if !w.tx.AuthInfo.Tip.Amount.IsEqual(data.SignDoc.Tip.Amount) {
+ return sdkerrors.ErrInvalidRequest.Wrapf("TxBuilder has tip %+v, got %+v in AuxSignerData", w.tx.AuthInfo.Tip.Amount, data.SignDoc.Tip.Amount)
+}
+ if w.tx.AuthInfo.Tip.Tipper != data.SignDoc.Tip.Tipper {
+ return sdkerrors.ErrInvalidRequest.Wrapf("TxBuilder has tipper %s, got %s in AuxSignerData", w.tx.AuthInfo.Tip.Tipper, data.SignDoc.Tip.Tipper)
+}
+
+}
+
+w.SetMemo(body.Memo)
+
+w.SetTimeoutHeight(body.TimeoutHeight)
+
+w.SetExtensionOptions(body.ExtensionOptions...)
+
+w.SetNonCriticalExtensionOptions(body.NonCriticalExtensionOptions...)
+ msgs := make([]sdk.Msg, len(body.Messages))
+ for i, msgAny := range body.Messages {
+ msgs[i] = msgAny.GetCachedValue().(sdk.Msg)
+}
+
+w.SetMsgs(msgs...)
+
+w.SetTip(data.GetSignDoc().GetTip())
+
+ // Get the aux signer's index in GetSigners.
+ signerIndex := -1
+ for i, signer := range w.GetSigners() {
+ if signer.String() == data.Address {
+ signerIndex = i
+}
+
+}
+ if signerIndex < 0 {
+ return sdkerrors.ErrLogic.Wrapf("address %s is not a signer", data.Address)
+}
+
+w.setSignerInfoAtIndex(signerIndex, &tx.SignerInfo{
+ PublicKey: data.SignDoc.PublicKey,
+ ModeInfo: &tx.ModeInfo{
+ Sum: &tx.ModeInfo_Single_{
+ Single: &tx.ModeInfo_Single{
+ Mode: data.Mode
+}}},
+ Sequence: data.SignDoc.Sequence,
+})
+
+w.setSignatureAtIndex(signerIndex, data.Sig)
+
+return nil
+}
+```
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/tx/v1beta1/tx.proto#L203-L224
+```
+
+Example command:
+
+```bash
+./simd tx gov submit-proposal --title="Test Proposal" --description="My awesome proposal" --type="Text" --from validator-key --fee-granter=cosmos1xh44hxt7spr67hqaa7nyx5gnutrz5fraw6grxn --chain-id=testnet --fees="10stake"
+```
+
+### Granted Fee Deductions
+
+Fees are deducted from grants in the `x/auth` ante handler. To learn more about how ante handlers work, read the [Auth Module AnteHandlers Guide](/sdk/v0.53/build/modules/auth/auth#antehandlers).
+
+### Gas
+
+To prevent DoS attacks, using a filtered `x/feegrant` incurs gas. The SDK must ensure that the `grantee`'s transactions all conform to the filter set by the `granter`. The SDK does this by iterating over the allowed messages in the filter and charging 10 gas per filtered message. The SDK will then iterate over the messages being sent by the `grantee` to ensure the messages adhere to the filter, also charging 10 gas per message. The SDK will stop iterating and fail the transaction if it finds a message that does not conform to the filter.
+
+Warning: The gas is charged against the granted allowance. Ensure your messages conform to the filter, if any, before sending transactions using your allowance.
+
+### Pruning
+
+A queue is maintained in state, keyed by the expiration time of each grant. On every EndBlock, the queue is checked against the current block time and expired grants are pruned.
+
+## State
+
+### FeeAllowance
+
+Fee Allowances are identified by combining `Grantee` (the account address of fee allowance grantee) with the `Granter` (the account address of fee allowance granter).
+
+Fee allowance grants are stored in the state as follows:
+
+* Grant: `0x00 | grantee_addr_len (1 byte) | grantee_addr_bytes | granter_addr_len (1 byte) | granter_addr_bytes -> ProtocolBuffer(Grant)`
+
+```go expandable
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: cosmos/feegrant/v1beta1/feegrant.proto
+
+package feegrant
+
+import (
+
+ fmt "fmt"
+ _ "github.com/cosmos/cosmos-proto"
+ types1 "github.com/cosmos/cosmos-sdk/codec/types"
+ github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types"
+ types "github.com/cosmos/cosmos-sdk/types"
+ _ "github.com/cosmos/cosmos-sdk/types/tx/amino"
+ _ "github.com/cosmos/gogoproto/gogoproto"
+ proto "github.com/cosmos/gogoproto/proto"
+ github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types"
+ _ "google.golang.org/protobuf/types/known/durationpb"
+ _ "google.golang.org/protobuf/types/known/timestamppb"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+ time "time"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+var _ = time.Kitchen
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// BasicAllowance implements Allowance with a one-time grant of coins
+// that optionally expires. The grantee can use up to SpendLimit to cover fees.
+type BasicAllowance struct {
+ // spend_limit specifies the maximum amount of coins that can be spent
+ // by this allowance and will be updated as coins are spent. If it is
+ // empty, there is no spend limit and any amount of coins can be spent.
+ SpendLimit github_com_cosmos_cosmos_sdk_types.Coins `protobuf:"bytes,1,rep,name=spend_limit,json=spendLimit,proto3,castrepeated=github.com/cosmos/cosmos-sdk/types.Coins" json:"spend_limit"`
+ // expiration specifies an optional time when this allowance expires
+ Expiration *time.Time `protobuf:"bytes,2,opt,name=expiration,proto3,stdtime" json:"expiration,omitempty"`
+}
+
+func (m *BasicAllowance)
+
+Reset() { *m = BasicAllowance{
+}
+}
+
+func (m *BasicAllowance)
+
+String()
+
+string {
+ return proto.CompactTextString(m)
+}
+
+func (*BasicAllowance)
+
+ProtoMessage() {
+}
+
+func (*BasicAllowance)
+
+Descriptor() ([]byte, []int) {
+ return fileDescriptor_7279582900c30aea, []int{0
+}
+}
+
+func (m *BasicAllowance)
+
+XXX_Unmarshal(b []byte)
+
+error {
+ return m.Unmarshal(b)
+}
+
+func (m *BasicAllowance)
+
+XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_BasicAllowance.Marshal(b, m, deterministic)
+}
+
+else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+}
+
+return b[:n], nil
+}
+}
+
+func (m *BasicAllowance)
+
+XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BasicAllowance.Merge(m, src)
+}
+
+func (m *BasicAllowance)
+
+XXX_Size()
+
+int {
+ return m.Size()
+}
+
+func (m *BasicAllowance)
+
+XXX_DiscardUnknown() {
+ xxx_messageInfo_BasicAllowance.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BasicAllowance proto.InternalMessageInfo
+
+func (m *BasicAllowance)
+
+GetSpendLimit()
+
+github_com_cosmos_cosmos_sdk_types.Coins {
+ if m != nil {
+ return m.SpendLimit
+}
+
+return nil
+}
+
+func (m *BasicAllowance)
+
+GetExpiration() *time.Time {
+ if m != nil {
+ return m.Expiration
+}
+
+return nil
+}
+
+// PeriodicAllowance extends Allowance to allow for both a maximum cap,
+// as well as a limit per time period.
+type PeriodicAllowance struct {
+ // basic specifies a struct of `BasicAllowance`
+ Basic BasicAllowance `protobuf:"bytes,1,opt,name=basic,proto3" json:"basic"`
+ // period specifies the time duration in which period_spend_limit coins can
+ // be spent before that allowance is reset
+ Period time.Duration `protobuf:"bytes,2,opt,name=period,proto3,stdduration" json:"period"`
+ // period_spend_limit specifies the maximum number of coins that can be spent
+ // in the period
+ PeriodSpendLimit github_com_cosmos_cosmos_sdk_types.Coins `protobuf:"bytes,3,rep,name=period_spend_limit,json=periodSpendLimit,proto3,castrepeated=github.com/cosmos/cosmos-sdk/types.Coins" json:"period_spend_limit"`
+ // period_can_spend is the number of coins left to be spent before the period_reset time
+ PeriodCanSpend github_com_cosmos_cosmos_sdk_types.Coins `protobuf:"bytes,4,rep,name=period_can_spend,json=periodCanSpend,proto3,castrepeated=github.com/cosmos/cosmos-sdk/types.Coins" json:"period_can_spend"`
+ // period_reset is the time at which this period resets and a new one begins,
+ // it is calculated from the start time of the first transaction after the
+ // last period ended
+ PeriodReset time.Time `protobuf:"bytes,5,opt,name=period_reset,json=periodReset,proto3,stdtime" json:"period_reset"`
+}
+
+func (m *PeriodicAllowance)
+
+Reset() { *m = PeriodicAllowance{
+}
+}
+
+func (m *PeriodicAllowance)
+
+String()
+
+string {
+ return proto.CompactTextString(m)
+}
+
+func (*PeriodicAllowance)
+
+ProtoMessage() {
+}
+
+func (*PeriodicAllowance)
+
+Descriptor() ([]byte, []int) {
+ return fileDescriptor_7279582900c30aea, []int{1
+}
+}
+
+func (m *PeriodicAllowance)
+
+XXX_Unmarshal(b []byte)
+
+error {
+ return m.Unmarshal(b)
+}
+
+func (m *PeriodicAllowance)
+
+XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_PeriodicAllowance.Marshal(b, m, deterministic)
+}
+
+else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+}
+
+return b[:n], nil
+}
+}
+
+func (m *PeriodicAllowance)
+
+XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PeriodicAllowance.Merge(m, src)
+}
+
+func (m *PeriodicAllowance)
+
+XXX_Size()
+
+int {
+ return m.Size()
+}
+
+func (m *PeriodicAllowance)
+
+XXX_DiscardUnknown() {
+ xxx_messageInfo_PeriodicAllowance.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PeriodicAllowance proto.InternalMessageInfo
+
+func (m *PeriodicAllowance)
+
+GetBasic()
+
+BasicAllowance {
+ if m != nil {
+ return m.Basic
+}
+
+return BasicAllowance{
+}
+}
+
+func (m *PeriodicAllowance)
+
+GetPeriod()
+
+time.Duration {
+ if m != nil {
+ return m.Period
+}
+
+return 0
+}
+
+func (m *PeriodicAllowance)
+
+GetPeriodSpendLimit()
+
+github_com_cosmos_cosmos_sdk_types.Coins {
+ if m != nil {
+ return m.PeriodSpendLimit
+}
+
+return nil
+}
+
+func (m *PeriodicAllowance)
+
+GetPeriodCanSpend()
+
+github_com_cosmos_cosmos_sdk_types.Coins {
+ if m != nil {
+ return m.PeriodCanSpend
+}
+
+return nil
+}
+
+func (m *PeriodicAllowance)
+
+GetPeriodReset()
+
+time.Time {
+ if m != nil {
+ return m.PeriodReset
+}
+
+return time.Time{
+}
+}
+
+// AllowedMsgAllowance creates allowance only for specified message types.
+type AllowedMsgAllowance struct {
+ // allowance can be any of basic and periodic fee allowance.
+ Allowance *types1.Any `protobuf:"bytes,1,opt,name=allowance,proto3" json:"allowance,omitempty"`
+ // allowed_messages are the messages for which the grantee has the access.
+ AllowedMessages []string `protobuf:"bytes,2,rep,name=allowed_messages,json=allowedMessages,proto3" json:"allowed_messages,omitempty"`
+}
+
+func (m *AllowedMsgAllowance)
+
+Reset() { *m = AllowedMsgAllowance{
+}
+}
+
+func (m *AllowedMsgAllowance)
+
+String()
+
+string {
+ return proto.CompactTextString(m)
+}
+
+func (*AllowedMsgAllowance)
+
+ProtoMessage() {
+}
+
+func (*AllowedMsgAllowance)
+
+Descriptor() ([]byte, []int) {
+ return fileDescriptor_7279582900c30aea, []int{2
+}
+}
+
+func (m *AllowedMsgAllowance)
+
+XXX_Unmarshal(b []byte)
+
+error {
+ return m.Unmarshal(b)
+}
+
+func (m *AllowedMsgAllowance)
+
+XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AllowedMsgAllowance.Marshal(b, m, deterministic)
+}
+
+else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+}
+
+return b[:n], nil
+}
+}
+
+func (m *AllowedMsgAllowance)
+
+XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AllowedMsgAllowance.Merge(m, src)
+}
+
+func (m *AllowedMsgAllowance)
+
+XXX_Size()
+
+int {
+ return m.Size()
+}
+
+func (m *AllowedMsgAllowance)
+
+XXX_DiscardUnknown() {
+ xxx_messageInfo_AllowedMsgAllowance.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AllowedMsgAllowance proto.InternalMessageInfo
+
+// Grant is stored in the KVStore to record a grant with full context
+type Grant struct {
+ // granter is the address of the user granting an allowance of their funds.
+ Granter string `protobuf:"bytes,1,opt,name=granter,proto3" json:"granter,omitempty"`
+ // grantee is the address of the user being granted an allowance of another user's funds.
+ Grantee string `protobuf:"bytes,2,opt,name=grantee,proto3" json:"grantee,omitempty"`
+ // allowance can be any of basic, periodic, allowed fee allowance.
+ Allowance *types1.Any `protobuf:"bytes,3,opt,name=allowance,proto3" json:"allowance,omitempty"`
+}
+
+func (m *Grant)
+
+Reset() { *m = Grant{
+}
+}
+
+func (m *Grant)
+
+String()
+
+string {
+ return proto.CompactTextString(m)
+}
+
+func (*Grant)
+
+ProtoMessage() {
+}
+
+func (*Grant)
+
+Descriptor() ([]byte, []int) {
+ return fileDescriptor_7279582900c30aea, []int{3
+}
+}
+
+func (m *Grant)
+
+XXX_Unmarshal(b []byte)
+
+error {
+ return m.Unmarshal(b)
+}
+
+func (m *Grant)
+
+XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Grant.Marshal(b, m, deterministic)
+}
+
+else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+}
+
+return b[:n], nil
+}
+}
+
+func (m *Grant)
+
+XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Grant.Merge(m, src)
+}
+
+func (m *Grant)
+
+XXX_Size()
+
+int {
+ return m.Size()
+}
+
+func (m *Grant)
+
+XXX_DiscardUnknown() {
+ xxx_messageInfo_Grant.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Grant proto.InternalMessageInfo
+
+func (m *Grant)
+
+GetGranter()
+
+string {
+ if m != nil {
+ return m.Granter
+}
+
+return ""
+}
+
+func (m *Grant)
+
+GetGrantee()
+
+string {
+ if m != nil {
+ return m.Grantee
+}
+
+return ""
+}
+
+func (m *Grant)
+
+GetAllowance() *types1.Any {
+ if m != nil {
+ return m.Allowance
+}
+
+return nil
+}
+
+func init() {
+ proto.RegisterType((*BasicAllowance)(nil), "cosmos.feegrant.v1beta1.BasicAllowance")
+
+proto.RegisterType((*PeriodicAllowance)(nil), "cosmos.feegrant.v1beta1.PeriodicAllowance")
+
+proto.RegisterType((*AllowedMsgAllowance)(nil), "cosmos.feegrant.v1beta1.AllowedMsgAllowance")
+
+proto.RegisterType((*Grant)(nil), "cosmos.feegrant.v1beta1.Grant")
+}
+
+func init() {
+ proto.RegisterFile("cosmos/feegrant/v1beta1/feegrant.proto", fileDescriptor_7279582900c30aea)
+}
+
+var fileDescriptor_7279582900c30aea = []byte{
+ // 639 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x3f, 0x6f, 0xd3, 0x40,
+ 0x14, 0x8f, 0x9b, 0xb6, 0x28, 0x17, 0x28, 0xad, 0xa9, 0x84, 0x53, 0x21, 0xbb, 0x8a, 0x04, 0x4d,
+ 0x2b, 0xd5, 0x56, 0x8b, 0x58, 0x3a, 0x35, 0x2e, 0xa2, 0x80, 0x5a, 0xa9, 0x72, 0x99, 0x90, 0x50,
+ 0x74, 0xb6, 0xaf, 0xe6, 0x44, 0xec, 0x33, 0x3e, 0x17, 0x1a, 0x06, 0x66, 0xc4, 0x80, 0x32, 0x32,
+ 0x32, 0x22, 0xa6, 0x0e, 0xe5, 0x3b, 0x54, 0x0c, 0xa8, 0x62, 0x62, 0x22, 0x28, 0x19, 0x3a, 0xf3,
+ 0x0d, 0x90, 0xef, 0xce, 0x8e, 0x9b, 0x50, 0x68, 0x25, 0xba, 0x24, 0x77, 0xef, 0xde, 0xfb, 0xfd,
+ 0x79, 0xef, 0x45, 0x01, 0xb7, 0x1c, 0x42, 0x7d, 0x42, 0x8d, 0x1d, 0x84, 0xbc, 0x08, 0x06, 0xb1,
+ 0xf1, 0x62, 0xc9, 0x46, 0x31, 0x5c, 0xca, 0x02, 0x7a, 0x18, 0x91, 0x98, 0xc8, 0xd7, 0x79, 0x9e,
+ 0x9e, 0x85, 0x45, 0xde, 0xcc, 0xb4, 0x47, 0x3c, 0xc2, 0x72, 0x8c, 0xe4, 0xc4, 0xd3, 0x67, 0x2a,
+ 0x1e, 0x21, 0x5e, 0x13, 0x19, 0xec, 0x66, 0xef, 0xee, 0x18, 0x30, 0x68, 0xa5, 0x4f, 0x1c, 0xa9,
+ 0xc1, 0x6b, 0x04, 0x2c, 0x7f, 0x52, 0x85, 0x18, 0x1b, 0x52, 0x94, 0x09, 0x71, 0x08, 0x0e, 0xc4,
+ 0xfb, 0x14, 0xf4, 0x71, 0x40, 0x0c, 0xf6, 0x29, 0x42, 0xda, 0x20, 0x51, 0x8c, 0x7d, 0x44, 0x63,
+ 0xe8, 0x87, 0x29, 0xe6, 0x60, 0x82, 0xbb, 0x1b, 0xc1, 0x18, 0x13, 0x81, 0x59, 0x7d, 0x37, 0x02,
+ 0x26, 0x4c, 0x48, 0xb1, 0x53, 0x6f, 0x36, 0xc9, 0x4b, 0x18, 0x38, 0x48, 0x7e, 0x0e, 0xca, 0x34,
+ 0x44, 0x81, 0xdb, 0x68, 0x62, 0x1f, 0xc7, 0x8a, 0x34, 0x5b, 0xac, 0x95, 0x97, 0x2b, 0xba, 0x90,
+ 0x9a, 0x88, 0x4b, 0xdd, 0xeb, 0x6b, 0x04, 0x07, 0xe6, 0x9d, 0xc3, 0x1f, 0x5a, 0xe1, 0x53, 0x47,
+ 0xab, 0x79, 0x38, 0x7e, 0xba, 0x6b, 0xeb, 0x0e, 0xf1, 0x85, 0x2f, 0xf1, 0xb5, 0x48, 0xdd, 0x67,
+ 0x46, 0xdc, 0x0a, 0x11, 0x65, 0x05, 0xf4, 0xe3, 0xf1, 0xfe, 0x82, 0x64, 0x01, 0x46, 0xb2, 0x91,
+ 0x70, 0xc8, 0xab, 0x00, 0xa0, 0xbd, 0x10, 0x73, 0x65, 0xca, 0xc8, 0xac, 0x54, 0x2b, 0x2f, 0xcf,
+ 0xe8, 0x5c, 0xba, 0x9e, 0x4a, 0xd7, 0x1f, 0xa5, 0xde, 0xcc, 0xd1, 0x76, 0x47, 0x93, 0xac, 0x5c,
+ 0xcd, 0xca, 0xfa, 0x97, 0x83, 0xc5, 0x9b, 0xa7, 0x0c, 0x49, 0xbf, 0x87, 0x50, 0x66, 0xef, 0xc1,
+ 0xdb, 0xe3, 0xfd, 0x85, 0x4a, 0x4e, 0xd8, 0x49, 0xf7, 0xd5, 0xcf, 0xa3, 0x60, 0x6a, 0x0b, 0x45,
+ 0x98, 0xb8, 0xf9, 0x9e, 0xdc, 0x07, 0x63, 0x76, 0x92, 0xa7, 0x48, 0x4c, 0xdb, 0x9c, 0x7e, 0x1a,
+ 0xd5, 0x49, 0x34, 0xb3, 0x94, 0xf4, 0x86, 0xfb, 0xe5, 0x00, 0xf2, 0x2a, 0x18, 0x0f, 0x19, 0xbc,
+ 0xb0, 0x59, 0x19, 0xb2, 0x79, 0x57, 0x4c, 0xc8, 0xbc, 0x92, 0x14, 0xbf, 0xef, 0x68, 0x12, 0x07,
+ 0x10, 0x75, 0xf2, 0x6b, 0x20, 0xf3, 0x53, 0x23, 0x3f, 0xa6, 0xe2, 0x05, 0x8d, 0x69, 0x92, 0x73,
+ 0x6d, 0xf7, 0x87, 0xf5, 0x0a, 0x88, 0x58, 0xc3, 0x81, 0x01, 0xd7, 0xa0, 0x8c, 0x5e, 0x10, 0xfb,
+ 0x04, 0x67, 0x5a, 0x83, 0x01, 0x13, 0x20, 0x6f, 0x80, 0xcb, 0x82, 0x3b, 0x42, 0x14, 0xc5, 0xca,
+ 0xd8, 0x3f, 0x57, 0x85, 0x35, 0xb1, 0x9d, 0x35, 0xb1, 0xcc, 0xcb, 0xad, 0xa4, 0x7a, 0xe5, 0xe1,
+ 0xb9, 0x96, 0xe6, 0x46, 0x4e, 0xe8, 0xd0, 0x86, 0x54, 0x7f, 0x49, 0xe0, 0x1a, 0xbb, 0x21, 0x77,
+ 0x93, 0x7a, 0xfd, 0xcd, 0x79, 0x02, 0x4a, 0x30, 0xbd, 0x88, 0xed, 0x99, 0x1e, 0x92, 0x5b, 0x0f,
+ 0x5a, 0xe6, 0xfc, 0x99, 0xc5, 0x58, 0x7d, 0x44, 0x79, 0x1e, 0x4c, 0x42, 0xce, 0xda, 0xf0, 0x11,
+ 0xa5, 0xd0, 0x43, 0x54, 0x19, 0x99, 0x2d, 0xd6, 0x4a, 0xd6, 0x55, 0x11, 0xdf, 0x14, 0xe1, 0x95,
+ 0xad, 0x37, 0x1f, 0xb4, 0xc2, 0xb9, 0x1c, 0xab, 0x39, 0xc7, 0x7f, 0xf0, 0x56, 0xfd, 0x2a, 0x81,
+ 0xb1, 0xf5, 0x04, 0x42, 0x5e, 0x06, 0x97, 0x18, 0x16, 0x8a, 0x98, 0xc7, 0x92, 0xa9, 0x7c, 0x3b,
+ 0x58, 0x9c, 0x16, 0x44, 0x75, 0xd7, 0x8d, 0x10, 0xa5, 0xdb, 0x71, 0x84, 0x03, 0xcf, 0x4a, 0x13,
+ 0xfb, 0x35, 0x88, 0xfd, 0x14, 0xce, 0x50, 0x33, 0xd0, 0xcd, 0xe2, 0xff, 0xee, 0xa6, 0x59, 0x3f,
+ 0xec, 0xaa, 0xd2, 0x51, 0x57, 0x95, 0x7e, 0x76, 0x55, 0xa9, 0xdd, 0x53, 0x0b, 0x47, 0x3d, 0xb5,
+ 0xf0, 0xbd, 0xa7, 0x16, 0x1e, 0xcf, 0xfd, 0x75, 0x6f, 0xf7, 0xb2, 0xff, 0x0b, 0x7b, 0x9c, 0xc9,
+ 0xb8, 0xfd, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xe4, 0x3d, 0x09, 0x1d, 0x5a, 0x06, 0x00, 0x00,
+}
+
+func (m *BasicAllowance)
+
+Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+
+dAtA = make([]byte, size)
+
+n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+}
+
+return dAtA[:n], nil
+}
+
+func (m *BasicAllowance)
+
+MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+
+return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BasicAllowance)
+
+MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Expiration != nil {
+ n1, err1 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(*m.Expiration, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(*m.Expiration):])
+ if err1 != nil {
+ return 0, err1
+}
+
+i -= n1
+ i = encodeVarintFeegrant(dAtA, i, uint64(n1))
+
+i--
+ dAtA[i] = 0x12
+}
+ if len(m.SpendLimit) > 0 {
+ for iNdEx := len(m.SpendLimit) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.SpendLimit[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+}
+
+i -= size
+ i = encodeVarintFeegrant(dAtA, i, uint64(size))
+}
+
+i--
+ dAtA[i] = 0xa
+}
+
+}
+
+return len(dAtA) - i, nil
+}
+
+func (m *PeriodicAllowance)
+
+Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+
+dAtA = make([]byte, size)
+
+n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+}
+
+return dAtA[:n], nil
+}
+
+func (m *PeriodicAllowance)
+
+MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+
+return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PeriodicAllowance)
+
+MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ n2, err2 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.PeriodReset, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.PeriodReset):])
+ if err2 != nil {
+ return 0, err2
+}
+
+i -= n2
+ i = encodeVarintFeegrant(dAtA, i, uint64(n2))
+
+i--
+ dAtA[i] = 0x2a
+ if len(m.PeriodCanSpend) > 0 {
+ for iNdEx := len(m.PeriodCanSpend) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.PeriodCanSpend[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+}
+
+i -= size
+ i = encodeVarintFeegrant(dAtA, i, uint64(size))
+}
+
+i--
+ dAtA[i] = 0x22
+}
+
+}
+ if len(m.PeriodSpendLimit) > 0 {
+ for iNdEx := len(m.PeriodSpendLimit) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.PeriodSpendLimit[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+}
+
+i -= size
+ i = encodeVarintFeegrant(dAtA, i, uint64(size))
+}
+
+i--
+ dAtA[i] = 0x1a
+}
+
+}
+
+n3, err3 := github_com_cosmos_gogoproto_types.StdDurationMarshalTo(m.Period, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.Period):])
+ if err3 != nil {
+ return 0, err3
+}
+
+i -= n3
+ i = encodeVarintFeegrant(dAtA, i, uint64(n3))
+
+i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.Basic.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+}
+
+i -= size
+ i = encodeVarintFeegrant(dAtA, i, uint64(size))
+}
+
+i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *AllowedMsgAllowance)
+
+Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+
+dAtA = make([]byte, size)
+
+n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+}
+
+return dAtA[:n], nil
+}
+
+func (m *AllowedMsgAllowance)
+
+MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+
+return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AllowedMsgAllowance)
+
+MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.AllowedMessages) > 0 {
+ for iNdEx := len(m.AllowedMessages) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.AllowedMessages[iNdEx])
+
+copy(dAtA[i:], m.AllowedMessages[iNdEx])
+
+i = encodeVarintFeegrant(dAtA, i, uint64(len(m.AllowedMessages[iNdEx])))
+
+i--
+ dAtA[i] = 0x12
+}
+
+}
+ if m.Allowance != nil {
+ {
+ size, err := m.Allowance.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+}
+
+i -= size
+ i = encodeVarintFeegrant(dAtA, i, uint64(size))
+}
+
+i--
+ dAtA[i] = 0xa
+}
+
+return len(dAtA) - i, nil
+}
+
+func (m *Grant)
+
+Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+
+dAtA = make([]byte, size)
+
+n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+}
+
+return dAtA[:n], nil
+}
+
+func (m *Grant)
+
+MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+
+return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Grant)
+
+MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Allowance != nil {
+ {
+ size, err := m.Allowance.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+}
+
+i -= size
+ i = encodeVarintFeegrant(dAtA, i, uint64(size))
+}
+
+i--
+ dAtA[i] = 0x1a
+}
+ if len(m.Grantee) > 0 {
+ i -= len(m.Grantee)
+
+copy(dAtA[i:], m.Grantee)
+
+i = encodeVarintFeegrant(dAtA, i, uint64(len(m.Grantee)))
+
+i--
+ dAtA[i] = 0x12
+}
+ if len(m.Granter) > 0 {
+ i -= len(m.Granter)
+
+copy(dAtA[i:], m.Granter)
+
+i = encodeVarintFeegrant(dAtA, i, uint64(len(m.Granter)))
+
+i--
+ dAtA[i] = 0xa
+}
+
+return len(dAtA) - i, nil
+}
+
+func encodeVarintFeegrant(dAtA []byte, offset int, v uint64)
+
+int {
+ offset -= sovFeegrant(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+
+v >>= 7
+ offset++
+}
+
+dAtA[offset] = uint8(v)
+
+return base
+}
+
+func (m *BasicAllowance)
+
+Size() (n int) {
+ if m == nil {
+ return 0
+}
+
+var l int
+ _ = l
+ if len(m.SpendLimit) > 0 {
+ for _, e := range m.SpendLimit {
+ l = e.Size()
+
+n += 1 + l + sovFeegrant(uint64(l))
+}
+
+}
+ if m.Expiration != nil {
+ l = github_com_cosmos_gogoproto_types.SizeOfStdTime(*m.Expiration)
+
+n += 1 + l + sovFeegrant(uint64(l))
+}
+
+return n
+}
+
+func (m *PeriodicAllowance)
+
+Size() (n int) {
+ if m == nil {
+ return 0
+}
+
+var l int
+ _ = l
+ l = m.Basic.Size()
+
+n += 1 + l + sovFeegrant(uint64(l))
+
+l = github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.Period)
+
+n += 1 + l + sovFeegrant(uint64(l))
+ if len(m.PeriodSpendLimit) > 0 {
+ for _, e := range m.PeriodSpendLimit {
+ l = e.Size()
+
+n += 1 + l + sovFeegrant(uint64(l))
+}
+
+}
+ if len(m.PeriodCanSpend) > 0 {
+ for _, e := range m.PeriodCanSpend {
+ l = e.Size()
+
+n += 1 + l + sovFeegrant(uint64(l))
+}
+
+}
+
+l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.PeriodReset)
+
+n += 1 + l + sovFeegrant(uint64(l))
+
+return n
+}
+
+func (m *AllowedMsgAllowance)
+
+Size() (n int) {
+ if m == nil {
+ return 0
+}
+
+var l int
+ _ = l
+ if m.Allowance != nil {
+ l = m.Allowance.Size()
+
+n += 1 + l + sovFeegrant(uint64(l))
+}
+ if len(m.AllowedMessages) > 0 {
+ for _, s := range m.AllowedMessages {
+ l = len(s)
+
+n += 1 + l + sovFeegrant(uint64(l))
+}
+
+}
+
+return n
+}
+
+func (m *Grant)
+
+Size() (n int) {
+ if m == nil {
+ return 0
+}
+
+var l int
+ _ = l
+ l = len(m.Granter)
+ if l > 0 {
+ n += 1 + l + sovFeegrant(uint64(l))
+}
+
+l = len(m.Grantee)
+ if l > 0 {
+ n += 1 + l + sovFeegrant(uint64(l))
+}
+ if m.Allowance != nil {
+ l = m.Allowance.Size()
+
+n += 1 + l + sovFeegrant(uint64(l))
+}
+
+return n
+}
+
+func sovFeegrant(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+
+func sozFeegrant(x uint64) (n int) {
+ return sovFeegrant(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+func (m *BasicAllowance)
+
+Unmarshal(dAtA []byte)
+
+error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowFeegrant
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BasicAllowance: wiretype end group for non-group")
+}
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BasicAllowance: illegal tag %d (wire type %d)", fieldNum, wire)
+}
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SpendLimit", wireType)
+}
+
+var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowFeegrant
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ if msglen < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+}
+
+m.SpendLimit = append(m.SpendLimit, types.Coin{
+})
+ if err := m.SpendLimit[len(m.SpendLimit)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+}
+
+iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Expiration", wireType)
+}
+
+var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowFeegrant
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ if msglen < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+}
+ if m.Expiration == nil {
+ m.Expiration = new(time.Time)
+}
+ if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(m.Expiration, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+}
+
+iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipFeegrant(dAtA[iNdEx:])
+ if err != nil {
+ return err
+}
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+}
+
+iNdEx += skippy
+}
+
+}
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+}
+
+return nil
+}
+
+func (m *PeriodicAllowance)
+
+Unmarshal(dAtA []byte)
+
+error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowFeegrant
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PeriodicAllowance: wiretype end group for non-group")
+}
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PeriodicAllowance: illegal tag %d (wire type %d)", fieldNum, wire)
+}
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Basic", wireType)
+}
+
+var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowFeegrant
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ if msglen < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+}
+ if err := m.Basic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+}
+
+iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType)
+}
+
+var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowFeegrant
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ if msglen < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+}
+ if err := github_com_cosmos_gogoproto_types.StdDurationUnmarshal(&m.Period, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+}
+
+iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PeriodSpendLimit", wireType)
+}
+
+var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowFeegrant
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ if msglen < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+}
+
+m.PeriodSpendLimit = append(m.PeriodSpendLimit, types.Coin{
+})
+ if err := m.PeriodSpendLimit[len(m.PeriodSpendLimit)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+}
+
+iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PeriodCanSpend", wireType)
+}
+
+var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowFeegrant
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ if msglen < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+}
+
+m.PeriodCanSpend = append(m.PeriodCanSpend, types.Coin{
+})
+ if err := m.PeriodCanSpend[len(m.PeriodCanSpend)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+}
+
+iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PeriodReset", wireType)
+}
+
+var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowFeegrant
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ if msglen < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+}
+ if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.PeriodReset, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+}
+
+iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipFeegrant(dAtA[iNdEx:])
+ if err != nil {
+ return err
+}
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+}
+
+iNdEx += skippy
+}
+
+}
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+}
+
+return nil
+}
+
+func (m *AllowedMsgAllowance)
+
+Unmarshal(dAtA []byte)
+
+error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowFeegrant
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AllowedMsgAllowance: wiretype end group for non-group")
+}
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AllowedMsgAllowance: illegal tag %d (wire type %d)", fieldNum, wire)
+}
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Allowance", wireType)
+}
+
+var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowFeegrant
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ if msglen < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+}
+ if m.Allowance == nil {
+ m.Allowance = &types1.Any{
+}
+
+}
+ if err := m.Allowance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+}
+
+iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AllowedMessages", wireType)
+}
+
+var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowFeegrant
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+}
+
+m.AllowedMessages = append(m.AllowedMessages, string(dAtA[iNdEx:postIndex]))
+
+iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipFeegrant(dAtA[iNdEx:])
+ if err != nil {
+ return err
+}
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+}
+
+iNdEx += skippy
+}
+
+}
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+}
+
+return nil
+}
+
+func (m *Grant)
+
+Unmarshal(dAtA []byte)
+
+error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowFeegrant
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Grant: wiretype end group for non-group")
+}
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Grant: illegal tag %d (wire type %d)", fieldNum, wire)
+}
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Granter", wireType)
+}
+
+var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowFeegrant
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+}
+
+m.Granter = string(dAtA[iNdEx:postIndex])
+
+iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Grantee", wireType)
+}
+
+var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowFeegrant
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+}
+
+m.Grantee = string(dAtA[iNdEx:postIndex])
+
+iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Allowance", wireType)
+}
+
+var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowFeegrant
+}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ if msglen < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+}
+ if m.Allowance == nil {
+ m.Allowance = &types1.Any{
+}
+
+}
+ if err := m.Allowance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+}
+
+iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipFeegrant(dAtA[iNdEx:])
+ if err != nil {
+ return err
+}
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthFeegrant
+}
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+}
+
+iNdEx += skippy
+}
+
+}
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+}
+
+return nil
+}
+
+func skipFeegrant(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowFeegrant
+}
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowFeegrant
+}
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+}
+
+iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+}
+
+}
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowFeegrant
+}
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+}
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+}
+
+}
+ if length < 0 {
+ return 0, ErrInvalidLengthFeegrant
+}
+
+iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupFeegrant
+}
+
+depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+}
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthFeegrant
+}
+ if depth == 0 {
+ return iNdEx, nil
+}
+
+}
+
+return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthFeegrant = fmt.Errorf("proto: negative length found during unmarshaling")
+
+ErrIntOverflowFeegrant = fmt.Errorf("proto: integer overflow")
+
+ErrUnexpectedEndOfGroupFeegrant = fmt.Errorf("proto: unexpected end of group")
+)
+```
+
+### FeeAllowanceQueue
+
+Fee allowance queue items are identified by combining the `FeeAllowancePrefixQueue` (i.e., 0x01), `expiration`, `grantee` (the account address of the fee allowance grantee), and `granter` (the account address of the fee allowance granter). The EndBlocker checks the `FeeAllowanceQueue` state for expired grants and prunes them from `FeeAllowance` if any are found.
+
+Fee allowance queue keys are stored in the state as follows:
+
+* Grant: `0x01 | expiration_bytes | grantee_addr_len (1 byte) | grantee_addr_bytes | granter_addr_len (1 byte) | granter_addr_bytes -> EmptyBytes`
+
+## Messages
+
+### Msg/GrantAllowance
+
+A fee allowance grant will be created with the `MsgGrantAllowance` message.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/feegrant/v1beta1/tx.proto#L25-L39
+```
+
+### Msg/RevokeAllowance
+
+An allowed grant fee allowance can be removed with the `MsgRevokeAllowance` message.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/feegrant/v1beta1/tx.proto#L41-L54
+```
+
+## Events
+
+The feegrant module emits the following events:
+
+## Msg Server
+
+### MsgGrantAllowance
+
+| Type | Attribute Key | Attribute Value |
+| ------- | ------------- | ---------------- |
+| message | action | set\_feegrant |
+| message | granter | `{granterAddress}` |
+| message | grantee | `{granteeAddress}` |
+
+### MsgRevokeAllowance
+
+| Type | Attribute Key | Attribute Value |
+| ------- | ------------- | ---------------- |
+| message | action | revoke\_feegrant |
+| message | granter | `{granterAddress}` |
+| message | grantee | `{granteeAddress}` |
+
+### Exec fee allowance
+
+| Type | Attribute Key | Attribute Value |
+| ------- | ------------- | ---------------- |
+| message | action | use\_feegrant |
+| message | granter | `{granterAddress}` |
+| message | grantee | `{granteeAddress}` |
+
+### Prune fee allowances
+
+| Type | Attribute Key | Attribute Value |
+| ------- | ------------- | --------------- |
+| message | action | prune\_feegrant |
+| message | pruner | `{prunerAddress}` |
+
+## Client
+
+### CLI
+
+A user can query and interact with the `feegrant` module using the CLI.
+
+#### Query
+
+The `query` commands allow users to query `feegrant` state.
+
+```shell
+simd query feegrant --help
+```
+
+##### grant
+
+The `grant` command allows users to query a grant for a given granter-grantee pair.
+
+```shell
+simd query feegrant grant [granter] [grantee] [flags]
+```
+
+Example:
+
+```shell
+simd query feegrant grant cosmos1.. cosmos1..
+```
+
+Example Output:
+
+```yml
+allowance:
+ '@type': /cosmos.feegrant.v1beta1.BasicAllowance
+ expiration: null
+ spend_limit:
+ - amount: "100"
+ denom: stake
+grantee: cosmos1..
+granter: cosmos1..
+```
+
+##### grants
+
+The `grants` command allows users to query all grants for a given grantee.
+
+```shell
+simd query feegrant grants [grantee] [flags]
+```
+
+Example:
+
+```shell
+simd query feegrant grants cosmos1..
+```
+
+Example Output:
+
+```yml expandable
+allowances:
+- allowance:
+ '@type': /cosmos.feegrant.v1beta1.BasicAllowance
+ expiration: null
+ spend_limit:
+ - amount: "100"
+ denom: stake
+ grantee: cosmos1..
+ granter: cosmos1..
+pagination:
+ next_key: null
+ total: "0"
+```
+
+#### Transactions
+
+The `tx` commands allow users to interact with the `feegrant` module.
+
+```shell
+simd tx feegrant --help
+```
+
+##### grant
+
+The `grant` command allows users to grant fee allowances to another account. The fee allowance can have an expiration date, a total spend limit, and/or a periodic spend limit.
+
+```shell
+simd tx feegrant grant [granter] [grantee] [flags]
+```
+
+Example (one-time spend limit):
+
+```shell
+simd tx feegrant grant cosmos1.. cosmos1.. --spend-limit 100stake
+```
+
+Example (periodic spend limit):
+
+```shell
+simd tx feegrant grant cosmos1.. cosmos1.. --period 3600 --period-limit 10stake
+```
+
+##### revoke
+
+The `revoke` command allows users to revoke a granted fee allowance.
+
+```shell
+simd tx feegrant revoke [granter] [grantee] [flags]
+```
+
+Example:
+
+```shell
+simd tx feegrant revoke cosmos1.. cosmos1..
+```
+
+### gRPC
+
+A user can query the `feegrant` module using gRPC endpoints.
+
+#### Allowance
+
+The `Allowance` endpoint allows users to query a granted fee allowance.
+
+```shell
+cosmos.feegrant.v1beta1.Query/Allowance
+```
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ -d '{"grantee":"cosmos1..","granter":"cosmos1.."}' \
+ localhost:9090 \
+ cosmos.feegrant.v1beta1.Query/Allowance
+```
+
+Example Output:
+
+```json
+{
+ "allowance": {
+ "granter": "cosmos1..",
+ "grantee": "cosmos1..",
+ "allowance": {
+ "@type": "/cosmos.feegrant.v1beta1.BasicAllowance",
+ "spendLimit": [
+ {
+ "denom": "stake",
+ "amount": "100"
+ }
+ ]
+ }
+ }
+}
+```
+
+#### Allowances
+
+The `Allowances` endpoint allows users to query all granted fee allowances for a given grantee.
+
+```shell
+cosmos.feegrant.v1beta1.Query/Allowances
+```
+
+Example:
+
+```shell
+grpcurl -plaintext \
+ -d '{"address":"cosmos1.."}' \
+ localhost:9090 \
+ cosmos.feegrant.v1beta1.Query/Allowances
+```
+
+Example Output:
+
+```json expandable
+{
+ "allowances": [
+ {
+ "granter": "cosmos1..",
+ "grantee": "cosmos1..",
+ "allowance": {
+ "@type": "/cosmos.feegrant.v1beta1.BasicAllowance",
+ "spendLimit": [
+ {
+ "denom": "stake",
+ "amount": "100"
+ }
+ ]
+ }
+ }
+ ],
+ "pagination": {
+ "total": "1"
+ }
+}
+```
diff --git a/sdk/next/build/modules/genutil/README.mdx b/sdk/next/build/modules/genutil/README.mdx
new file mode 100644
index 000000000..929aa21c6
--- /dev/null
+++ b/sdk/next/build/modules/genutil/README.mdx
@@ -0,0 +1,1251 @@
+---
+title: 'x/genutil'
+description: >-
+ The genutil package contains a variety of genesis utility functionalities for
+ usage within a blockchain application. Namely:
+---
+
+## Concepts
+
+The `genutil` package contains a variety of genesis utility functionalities for usage within a blockchain application. Namely:
+
+* Genesis transactions related (gentx)
+* Commands for collection and creation of gentxs
+* `InitChain` processing of gentxs
+* Genesis file creation
+* Genesis file validation
+* Genesis file migration
+* CometBFT related initialization
+ * Translation of an app genesis to a CometBFT genesis
+
+## Genesis
+
+Genutil contains the data structure that defines an application genesis.
+An application genesis consists of a consensus genesis (e.g. CometBFT genesis) and application-related genesis data.
+
+```go expandable
+package types
+
+import (
+
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "time"
+
+ cmtjson "github.com/cometbft/cometbft/libs/json"
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ cmttypes "github.com/cometbft/cometbft/types"
+ cmttime "github.com/cometbft/cometbft/types/time"
+ "github.com/cosmos/cosmos-sdk/version"
+)
+
+const (
+ // MaxChainIDLen is the maximum length of a chain ID.
+ MaxChainIDLen = cmttypes.MaxChainIDLen
+)
+
+// AppGenesis defines the app's genesis.
+type AppGenesis struct {
+ AppName string `json:"app_name"`
+ AppVersion string `json:"app_version"`
+ GenesisTime time.Time `json:"genesis_time"`
+ ChainID string `json:"chain_id"`
+ InitialHeight int64 `json:"initial_height"`
+ AppHash []byte `json:"app_hash"`
+ AppState json.RawMessage `json:"app_state,omitempty"`
+ Consensus *ConsensusGenesis `json:"consensus,omitempty"`
+}
+
+// NewAppGenesisWithVersion returns a new AppGenesis with the app name and app version already.
+func NewAppGenesisWithVersion(chainID string, appState json.RawMessage) *AppGenesis {
+ return &AppGenesis{
+ AppName: version.AppName,
+ AppVersion: version.Version,
+ ChainID: chainID,
+ AppState: appState,
+ Consensus: &ConsensusGenesis{
+ Validators: nil,
+},
+}
+}
+
+// ValidateAndComplete performs validation and completes the AppGenesis.
+func (ag *AppGenesis)
+
+ValidateAndComplete()
+
+error {
+ if ag.ChainID == "" {
+ return errors.New("genesis doc must include non-empty chain_id")
+}
+ if len(ag.ChainID) > MaxChainIDLen {
+ return fmt.Errorf("chain_id in genesis doc is too long (max: %d)", MaxChainIDLen)
+}
+ if ag.InitialHeight < 0 {
+ return fmt.Errorf("initial_height cannot be negative (got %v)", ag.InitialHeight)
+}
+ if ag.InitialHeight == 0 {
+ ag.InitialHeight = 1
+}
+ if ag.GenesisTime.IsZero() {
+ ag.GenesisTime = cmttime.Now()
+}
+ if err := ag.Consensus.ValidateAndComplete(); err != nil {
+ return err
+}
+
+return nil
+}
+
+// SaveAs is a utility method for saving AppGenesis as a JSON file.
+func (ag *AppGenesis)
+
+SaveAs(file string)
+
+error {
+ appGenesisBytes, err := json.MarshalIndent(ag, "", "
+ ")
+ if err != nil {
+ return err
+}
+
+return os.WriteFile(file, appGenesisBytes, 0o600)
+}
+
+// AppGenesisFromFile reads the AppGenesis from the provided file.
+func AppGenesisFromFile(genFile string) (*AppGenesis, error) {
+ jsonBlob, err := os.ReadFile(genFile)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't read AppGenesis file (%s): %w", genFile, err)
+}
+
+var appGenesis AppGenesis
+ if err := json.Unmarshal(jsonBlob, &appGenesis); err != nil {
+ // fallback to CometBFT genesis
+ var ctmGenesis cmttypes.GenesisDoc
+ if err2 := cmtjson.Unmarshal(jsonBlob, &ctmGenesis); err2 != nil {
+ return nil, fmt.Errorf("error unmarshalling AppGenesis at %s: %w\n failed fallback to CometBFT GenDoc: %w", genFile, err, err2)
+}
+
+appGenesis = AppGenesis{
+ AppName: version.AppName,
+ // AppVersion is not filled as we do not know it from a CometBFT genesis
+ GenesisTime: ctmGenesis.GenesisTime,
+ ChainID: ctmGenesis.ChainID,
+ InitialHeight: ctmGenesis.InitialHeight,
+ AppHash: ctmGenesis.AppHash,
+ AppState: ctmGenesis.AppState,
+ Consensus: &ConsensusGenesis{
+ Validators: ctmGenesis.Validators,
+ Params: ctmGenesis.ConsensusParams,
+},
+}
+
+}
+
+return &appGenesis, nil
+}
+
+// --------------------------
+// CometBFT Genesis Handling
+// --------------------------
+
+// ToGenesisDoc converts the AppGenesis to a CometBFT GenesisDoc.
+func (ag *AppGenesis)
+
+ToGenesisDoc() (*cmttypes.GenesisDoc, error) {
+ return &cmttypes.GenesisDoc{
+ GenesisTime: ag.GenesisTime,
+ ChainID: ag.ChainID,
+ InitialHeight: ag.InitialHeight,
+ AppHash: ag.AppHash,
+ AppState: ag.AppState,
+ Validators: ag.Consensus.Validators,
+ ConsensusParams: ag.Consensus.Params,
+}, nil
+}
+
+// ConsensusGenesis defines the consensus layer's genesis.
+// TODO(@julienrbrt)
+
+eventually abstract from CometBFT types
+type ConsensusGenesis struct {
+ Validators []cmttypes.GenesisValidator `json:"validators,omitempty"`
+ Params *cmttypes.ConsensusParams `json:"params,omitempty"`
+}
+
+// NewConsensusGenesis returns a ConsensusGenesis with given values.
+// It takes a proto consensus params so it can called from server export command.
+func NewConsensusGenesis(params cmtproto.ConsensusParams, validators []cmttypes.GenesisValidator) *ConsensusGenesis {
+ return &ConsensusGenesis{
+ Params: &cmttypes.ConsensusParams{
+ Block: cmttypes.BlockParams{
+ MaxBytes: params.Block.MaxBytes,
+ MaxGas: params.Block.MaxGas,
+},
+ Evidence: cmttypes.EvidenceParams{
+ MaxAgeNumBlocks: params.Evidence.MaxAgeNumBlocks,
+ MaxAgeDuration: params.Evidence.MaxAgeDuration,
+ MaxBytes: params.Evidence.MaxBytes,
+},
+ Validator: cmttypes.ValidatorParams{
+ PubKeyTypes: params.Validator.PubKeyTypes,
+},
+},
+ Validators: validators,
+}
+}
+
+func (cs *ConsensusGenesis)
+
+MarshalJSON() ([]byte, error) {
+ type Alias ConsensusGenesis
+ return cmtjson.Marshal(&Alias{
+ Validators: cs.Validators,
+ Params: cs.Params,
+})
+}
+
+func (cs *ConsensusGenesis)
+
+UnmarshalJSON(b []byte)
+
+error {
+ type Alias ConsensusGenesis
+ result := Alias{
+}
+ if err := cmtjson.Unmarshal(b, &result); err != nil {
+ return err
+}
+
+cs.Params = result.Params
+ cs.Validators = result.Validators
+
+ return nil
+}
+
+func (cs *ConsensusGenesis)
+
+ValidateAndComplete()
+
+error {
+ if cs == nil {
+ return fmt.Errorf("consensus genesis cannot be nil")
+}
+ if cs.Params == nil {
+ cs.Params = cmttypes.DefaultConsensusParams()
+}
+
+else if err := cs.Params.ValidateBasic(); err != nil {
+ return err
+}
+ for i, v := range cs.Validators {
+ if v.Power == 0 {
+ return fmt.Errorf("the genesis file cannot contain validators with no voting power: %v", v)
+}
+ if len(v.Address) > 0 && !bytes.Equal(v.PubKey.Address(), v.Address) {
+ return fmt.Errorf("incorrect address for validator %v in the genesis file, should be %v", v, v.PubKey.Address())
+}
+ if len(v.Address) == 0 {
+ cs.Validators[i].Address = v.PubKey.Address()
+}
+
+}
+
+return nil
+}
+```
+
+The application genesis can then be translated into the format expected by the consensus engine:
+
+```go expandable
+package types
+
+import (
+
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "time"
+
+ cmtjson "github.com/cometbft/cometbft/libs/json"
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ cmttypes "github.com/cometbft/cometbft/types"
+ cmttime "github.com/cometbft/cometbft/types/time"
+ "github.com/cosmos/cosmos-sdk/version"
+)
+
+const (
+ // MaxChainIDLen is the maximum length of a chain ID.
+ MaxChainIDLen = cmttypes.MaxChainIDLen
+)
+
+// AppGenesis defines the app's genesis.
+type AppGenesis struct {
+ AppName string `json:"app_name"`
+ AppVersion string `json:"app_version"`
+ GenesisTime time.Time `json:"genesis_time"`
+ ChainID string `json:"chain_id"`
+ InitialHeight int64 `json:"initial_height"`
+ AppHash []byte `json:"app_hash"`
+ AppState json.RawMessage `json:"app_state,omitempty"`
+ Consensus *ConsensusGenesis `json:"consensus,omitempty"`
+}
+
+// NewAppGenesisWithVersion returns a new AppGenesis with the app name and app version already.
+func NewAppGenesisWithVersion(chainID string, appState json.RawMessage) *AppGenesis {
+ return &AppGenesis{
+ AppName: version.AppName,
+ AppVersion: version.Version,
+ ChainID: chainID,
+ AppState: appState,
+ Consensus: &ConsensusGenesis{
+ Validators: nil,
+},
+}
+}
+
+// ValidateAndComplete performs validation and completes the AppGenesis.
+func (ag *AppGenesis)
+
+ValidateAndComplete()
+
+error {
+ if ag.ChainID == "" {
+ return errors.New("genesis doc must include non-empty chain_id")
+}
+ if len(ag.ChainID) > MaxChainIDLen {
+ return fmt.Errorf("chain_id in genesis doc is too long (max: %d)", MaxChainIDLen)
+}
+ if ag.InitialHeight < 0 {
+ return fmt.Errorf("initial_height cannot be negative (got %v)", ag.InitialHeight)
+}
+ if ag.InitialHeight == 0 {
+ ag.InitialHeight = 1
+}
+ if ag.GenesisTime.IsZero() {
+ ag.GenesisTime = cmttime.Now()
+}
+ if err := ag.Consensus.ValidateAndComplete(); err != nil {
+ return err
+}
+
+return nil
+}
+
+// SaveAs is a utility method for saving AppGenesis as a JSON file.
+func (ag *AppGenesis)
+
+SaveAs(file string)
+
+error {
+ appGenesisBytes, err := json.MarshalIndent(ag, "", "
+ ")
+ if err != nil {
+ return err
+}
+
+return os.WriteFile(file, appGenesisBytes, 0o600)
+}
+
+// AppGenesisFromFile reads the AppGenesis from the provided file.
+func AppGenesisFromFile(genFile string) (*AppGenesis, error) {
+ jsonBlob, err := os.ReadFile(genFile)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't read AppGenesis file (%s): %w", genFile, err)
+}
+
+var appGenesis AppGenesis
+ if err := json.Unmarshal(jsonBlob, &appGenesis); err != nil {
+ // fallback to CometBFT genesis
+ var ctmGenesis cmttypes.GenesisDoc
+ if err2 := cmtjson.Unmarshal(jsonBlob, &ctmGenesis); err2 != nil {
+ return nil, fmt.Errorf("error unmarshalling AppGenesis at %s: %w\n failed fallback to CometBFT GenDoc: %w", genFile, err, err2)
+}
+
+appGenesis = AppGenesis{
+ AppName: version.AppName,
+ // AppVersion is not filled as we do not know it from a CometBFT genesis
+ GenesisTime: ctmGenesis.GenesisTime,
+ ChainID: ctmGenesis.ChainID,
+ InitialHeight: ctmGenesis.InitialHeight,
+ AppHash: ctmGenesis.AppHash,
+ AppState: ctmGenesis.AppState,
+ Consensus: &ConsensusGenesis{
+ Validators: ctmGenesis.Validators,
+ Params: ctmGenesis.ConsensusParams,
+},
+}
+
+}
+
+return &appGenesis, nil
+}
+
+// --------------------------
+// CometBFT Genesis Handling
+// --------------------------
+
+// ToGenesisDoc converts the AppGenesis to a CometBFT GenesisDoc.
+func (ag *AppGenesis)
+
+ToGenesisDoc() (*cmttypes.GenesisDoc, error) {
+ return &cmttypes.GenesisDoc{
+ GenesisTime: ag.GenesisTime,
+ ChainID: ag.ChainID,
+ InitialHeight: ag.InitialHeight,
+ AppHash: ag.AppHash,
+ AppState: ag.AppState,
+ Validators: ag.Consensus.Validators,
+ ConsensusParams: ag.Consensus.Params,
+}, nil
+}
+
+// ConsensusGenesis defines the consensus layer's genesis.
+// TODO(@julienrbrt)
+
+eventually abstract from CometBFT types
+type ConsensusGenesis struct {
+ Validators []cmttypes.GenesisValidator `json:"validators,omitempty"`
+ Params *cmttypes.ConsensusParams `json:"params,omitempty"`
+}
+
+// NewConsensusGenesis returns a ConsensusGenesis with given values.
+// It takes a proto consensus params so it can called from server export command.
+func NewConsensusGenesis(params cmtproto.ConsensusParams, validators []cmttypes.GenesisValidator) *ConsensusGenesis {
+ return &ConsensusGenesis{
+ Params: &cmttypes.ConsensusParams{
+ Block: cmttypes.BlockParams{
+ MaxBytes: params.Block.MaxBytes,
+ MaxGas: params.Block.MaxGas,
+},
+ Evidence: cmttypes.EvidenceParams{
+ MaxAgeNumBlocks: params.Evidence.MaxAgeNumBlocks,
+ MaxAgeDuration: params.Evidence.MaxAgeDuration,
+ MaxBytes: params.Evidence.MaxBytes,
+},
+ Validator: cmttypes.ValidatorParams{
+ PubKeyTypes: params.Validator.PubKeyTypes,
+},
+},
+ Validators: validators,
+}
+}
+
+func (cs *ConsensusGenesis)
+
+MarshalJSON() ([]byte, error) {
+ type Alias ConsensusGenesis
+ return cmtjson.Marshal(&Alias{
+ Validators: cs.Validators,
+ Params: cs.Params,
+})
+}
+
+func (cs *ConsensusGenesis)
+
+UnmarshalJSON(b []byte)
+
+error {
+ type Alias ConsensusGenesis
+ result := Alias{
+}
+ if err := cmtjson.Unmarshal(b, &result); err != nil {
+ return err
+}
+
+cs.Params = result.Params
+ cs.Validators = result.Validators
+
+ return nil
+}
+
+func (cs *ConsensusGenesis)
+
+ValidateAndComplete()
+
+error {
+ if cs == nil {
+ return fmt.Errorf("consensus genesis cannot be nil")
+}
+ if cs.Params == nil {
+ cs.Params = cmttypes.DefaultConsensusParams()
+}
+
+else if err := cs.Params.ValidateBasic(); err != nil {
+ return err
+}
+ for i, v := range cs.Validators {
+ if v.Power == 0 {
+ return fmt.Errorf("the genesis file cannot contain validators with no voting power: %v", v)
+}
+ if len(v.Address) > 0 && !bytes.Equal(v.PubKey.Address(), v.Address) {
+ return fmt.Errorf("incorrect address for validator %v in the genesis file, should be %v", v, v.PubKey.Address())
+}
+ if len(v.Address) == 0 {
+ cs.Validators[i].Address = v.PubKey.Address()
+}
+
+}
+
+return nil
+}
+```
+
+```go expandable
+package server
+
+import (
+
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "runtime/pprof"
+ "github.com/cometbft/cometbft/abci/server"
+ cmtcmd "github.com/cometbft/cometbft/cmd/cometbft/commands"
+ cmtcfg "github.com/cometbft/cometbft/config"
+ "github.com/cometbft/cometbft/node"
+ "github.com/cometbft/cometbft/p2p"
+ pvm "github.com/cometbft/cometbft/privval"
+ "github.com/cometbft/cometbft/proxy"
+ "github.com/cometbft/cometbft/rpc/client/local"
+ cmttypes "github.com/cometbft/cometbft/types"
+ dbm "github.com/cosmos/cosmos-db"
+ "github.com/hashicorp/go-metrics"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+ "golang.org/x/sync/errgroup"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+
+ pruningtypes "cosmossdk.io/store/pruning/types"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/server/api"
+ serverconfig "github.com/cosmos/cosmos-sdk/server/config"
+ servergrpc "github.com/cosmos/cosmos-sdk/server/grpc"
+ servercmtlog "github.com/cosmos/cosmos-sdk/server/log"
+ "github.com/cosmos/cosmos-sdk/server/types"
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ "github.com/cosmos/cosmos-sdk/types/mempool"
+ "github.com/cosmos/cosmos-sdk/version"
+ genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
+)
+
+const (
+ // CometBFT full-node start flags
+ flagWithComet = "with-comet"
+ flagAddress = "address"
+ flagTransport = "transport"
+ flagTraceStore = "trace-store"
+ flagCPUProfile = "cpu-profile"
+ FlagMinGasPrices = "minimum-gas-prices"
+ FlagQueryGasLimit = "query-gas-limit"
+ FlagHaltHeight = "halt-height"
+ FlagHaltTime = "halt-time"
+ FlagInterBlockCache = "inter-block-cache"
+ FlagUnsafeSkipUpgrades = "unsafe-skip-upgrades"
+ FlagTrace = "trace"
+ FlagInvCheckPeriod = "inv-check-period"
+
+ FlagPruning = "pruning"
+ FlagPruningKeepRecent = "pruning-keep-recent"
+ FlagPruningInterval = "pruning-interval"
+ FlagIndexEvents = "index-events"
+ FlagMinRetainBlocks = "min-retain-blocks"
+ FlagIAVLCacheSize = "iavl-cache-size"
+ FlagDisableIAVLFastNode = "iavl-disable-fastnode"
+
+ // state sync-related flags
+ FlagStateSyncSnapshotInterval = "state-sync.snapshot-interval"
+ FlagStateSyncSnapshotKeepRecent = "state-sync.snapshot-keep-recent"
+
+ // api-related flags
+ FlagAPIEnable = "api.enable"
+ FlagAPISwagger = "api.swagger"
+ FlagAPIAddress = "api.address"
+ FlagAPIMaxOpenConnections = "api.max-open-connections"
+ FlagRPCReadTimeout = "api.rpc-read-timeout"
+ FlagRPCWriteTimeout = "api.rpc-write-timeout"
+ FlagRPCMaxBodyBytes = "api.rpc-max-body-bytes"
+ FlagAPIEnableUnsafeCORS = "api.enabled-unsafe-cors"
+
+ // gRPC-related flags
+ flagGRPCOnly = "grpc-only"
+ flagGRPCEnable = "grpc.enable"
+ flagGRPCAddress = "grpc.address"
+ flagGRPCWebEnable = "grpc-web.enable"
+
+ // mempool flags
+ FlagMempoolMaxTxs = "mempool.max-txs"
+)
+
+// StartCmdOptions defines options that can be customized in `StartCmdWithOptions`,
+type StartCmdOptions struct {
+ // DBOpener can be used to customize db opening, for example customize db options or support different db backends,
+ // default to the builtin db opener.
+ DBOpener func(rootDir string, backendType dbm.BackendType) (dbm.DB, error)
+ // PostSetup can be used to setup extra services under the same cancellable context,
+ // it's not called in stand-alone mode, only for in-process mode.
+ PostSetup func(svrCtx *Context, clientCtx client.Context, ctx context.Context, g *errgroup.Group)
+
+error
+ // AddFlags add custom flags to start cmd
+ AddFlags func(cmd *cobra.Command)
+}
+
+// StartCmd runs the service passed in, either stand-alone or in-process with
+// CometBFT.
+func StartCmd(appCreator types.AppCreator, defaultNodeHome string) *cobra.Command {
+ return StartCmdWithOptions(appCreator, defaultNodeHome, StartCmdOptions{
+})
+}
+
+// StartCmdWithOptions runs the service passed in, either stand-alone or in-process with
+// CometBFT.
+func StartCmdWithOptions(appCreator types.AppCreator, defaultNodeHome string, opts StartCmdOptions) *cobra.Command {
+ if opts.DBOpener == nil {
+ opts.DBOpener = openDB
+}
+ cmd := &cobra.Command{
+ Use: "start",
+ Short: "Run the full node",
+ Long: `Run the full node application with CometBFT in or out of process. By
+default, the application will run with CometBFT in process.
+
+Pruning options can be provided via the '--pruning' flag or alternatively with '--pruning-keep-recent', and
+'pruning-interval' together.
+
+For '--pruning' the options are as follows:
+
+default: the last 362880 states are kept, pruning at 10 block intervals
+nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node)
+
+everything: 2 latest states will be kept; pruning at 10 block intervals.
+custom: allow pruning options to be manually specified through 'pruning-keep-recent', and 'pruning-interval'
+
+Node halting configurations exist in the form of two flags: '--halt-height' and '--halt-time'. During
+the ABCI Commit phase, the node will check if the current block height is greater than or equal to
+the halt-height or if the current block time is greater than or equal to the halt-time. If so, the
+node will attempt to gracefully shutdown and the block will not be committed. In addition, the node
+will not be able to commit subsequent blocks.
+
+For profiling and benchmarking purposes, CPU profiling can be enabled via the '--cpu-profile' flag
+which accepts a path for the resulting pprof file.
+
+The node may be started in a 'query only' mode where only the gRPC and JSON HTTP
+API services are enabled via the 'grpc-only' flag. In this mode, CometBFT is
+bypassed and can be used when legacy queries are needed after an on-chain upgrade
+is performed. Note, when enabled, gRPC will also be automatically enabled.
+`,
+ PreRunE: func(cmd *cobra.Command, _ []string)
+
+error {
+ serverCtx := GetServerContextFromCmd(cmd)
+
+ // Bind flags to the Context's Viper so the app construction can set
+ // options accordingly.
+ if err := serverCtx.Viper.BindPFlags(cmd.Flags()); err != nil {
+ return err
+}
+
+ _, err := GetPruningOptionsFromFlags(serverCtx.Viper)
+
+return err
+},
+ RunE: func(cmd *cobra.Command, _ []string)
+
+error {
+ serverCtx := GetServerContextFromCmd(cmd)
+
+clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+}
+
+withCMT, _ := cmd.Flags().GetBool(flagWithComet)
+ if !withCMT {
+ serverCtx.Logger.Info("starting ABCI without CometBFT")
+}
+
+return wrapCPUProfile(serverCtx, func()
+
+error {
+ return start(serverCtx, clientCtx, appCreator, withCMT, opts)
+})
+},
+}
+
+cmd.Flags().String(flags.FlagHome, defaultNodeHome, "The application home directory")
+
+cmd.Flags().Bool(flagWithComet, true, "Run abci app embedded in-process with CometBFT")
+
+cmd.Flags().String(flagAddress, "tcp://0.0.0.0:26658", "Listen address")
+
+cmd.Flags().String(flagTransport, "socket", "Transport protocol: socket, grpc")
+
+cmd.Flags().String(flagTraceStore, "", "Enable KVStore tracing to an output file")
+
+cmd.Flags().String(FlagMinGasPrices, "", "Minimum gas prices to accept for transactions; Any fee in a tx must meet this minimum (e.g. 0.01photino;0.0001stake)")
+
+cmd.Flags().Uint64(FlagQueryGasLimit, 0, "Maximum gas a Rest/Grpc query can consume. Blank and 0 imply unbounded.")
+
+cmd.Flags().IntSlice(FlagUnsafeSkipUpgrades, []int{
+}, "Skip a set of upgrade heights to continue the old binary")
+
+cmd.Flags().Uint64(FlagHaltHeight, 0, "Block height at which to gracefully halt the chain and shutdown the node")
+
+cmd.Flags().Uint64(FlagHaltTime, 0, "Minimum block time (in Unix seconds)
+
+at which to gracefully halt the chain and shutdown the node")
+
+cmd.Flags().Bool(FlagInterBlockCache, true, "Enable inter-block caching")
+
+cmd.Flags().String(flagCPUProfile, "", "Enable CPU profiling and write to the provided file")
+
+cmd.Flags().Bool(FlagTrace, false, "Provide full stack traces for errors in ABCI Log")
+
+cmd.Flags().String(FlagPruning, pruningtypes.PruningOptionDefault, "Pruning strategy (default|nothing|everything|custom)")
+
+cmd.Flags().Uint64(FlagPruningKeepRecent, 0, "Number of recent heights to keep on disk (ignored if pruning is not 'custom')")
+
+cmd.Flags().Uint64(FlagPruningInterval, 0, "Height interval at which pruned heights are removed from disk (ignored if pruning is not 'custom')")
+
+cmd.Flags().Uint(FlagInvCheckPeriod, 0, "Assert registered invariants every N blocks")
+
+cmd.Flags().Uint64(FlagMinRetainBlocks, 0, "Minimum block height offset during ABCI commit to prune CometBFT blocks")
+
+cmd.Flags().Bool(FlagAPIEnable, false, "Define if the API server should be enabled")
+
+cmd.Flags().Bool(FlagAPISwagger, false, "Define if swagger documentation should automatically be registered (Note: the API must also be enabled)")
+
+cmd.Flags().String(FlagAPIAddress, serverconfig.DefaultAPIAddress, "the API server address to listen on")
+
+cmd.Flags().Uint(FlagAPIMaxOpenConnections, 1000, "Define the number of maximum open connections")
+
+cmd.Flags().Uint(FlagRPCReadTimeout, 10, "Define the CometBFT RPC read timeout (in seconds)")
+
+cmd.Flags().Uint(FlagRPCWriteTimeout, 0, "Define the CometBFT RPC write timeout (in seconds)")
+
+cmd.Flags().Uint(FlagRPCMaxBodyBytes, 1000000, "Define the CometBFT maximum request body (in bytes)")
+
+cmd.Flags().Bool(FlagAPIEnableUnsafeCORS, false, "Define if CORS should be enabled (unsafe - use it at your own risk)")
+
+cmd.Flags().Bool(flagGRPCOnly, false, "Start the node in gRPC query only mode (no CometBFT process is started)")
+
+cmd.Flags().Bool(flagGRPCEnable, true, "Define if the gRPC server should be enabled")
+
+cmd.Flags().String(flagGRPCAddress, serverconfig.DefaultGRPCAddress, "the gRPC server address to listen on")
+
+cmd.Flags().Bool(flagGRPCWebEnable, true, "Define if the gRPC-Web server should be enabled. (Note: gRPC must also be enabled)")
+
+cmd.Flags().Uint64(FlagStateSyncSnapshotInterval, 0, "State sync snapshot interval")
+
+cmd.Flags().Uint32(FlagStateSyncSnapshotKeepRecent, 2, "State sync snapshot to keep")
+
+cmd.Flags().Bool(FlagDisableIAVLFastNode, false, "Disable fast node for IAVL tree")
+
+cmd.Flags().Int(FlagMempoolMaxTxs, mempool.DefaultMaxTx, "Sets MaxTx value for the app-side mempool")
+
+ // support old flags name for backwards compatibility
+ cmd.Flags().SetNormalizeFunc(func(f *pflag.FlagSet, name string)
+
+pflag.NormalizedName {
+ if name == "with-tendermint" {
+ name = flagWithComet
+}
+
+return pflag.NormalizedName(name)
+})
+
+ // add support for all CometBFT-specific command line options
+ cmtcmd.AddNodeFlags(cmd)
+ if opts.AddFlags != nil {
+ opts.AddFlags(cmd)
+}
+
+return cmd
+}
+
+// start runs the node, either stand-alone (withCmt=false) or with CometBFT
+// in-process, after loading config, the application, and telemetry.
+func start(svrCtx *Context, clientCtx client.Context, appCreator types.AppCreator, withCmt bool, opts StartCmdOptions) error {
+    svrCfg, err := getAndValidateConfig(svrCtx)
+    if err != nil {
+        return err
+    }
+
+    app, appCleanupFn, err := startApp(svrCtx, appCreator, opts)
+    if err != nil {
+        return err
+    }
+    defer appCleanupFn()
+
+    metrics, err := startTelemetry(svrCfg)
+    if err != nil {
+        return err
+    }
+
+    emitServerInfoMetrics()
+
+    if !withCmt {
+        return startStandAlone(svrCtx, app, opts)
+    }
+
+    return startInProcess(svrCtx, svrCfg, clientCtx, app, metrics, opts)
+}
+
+// startStandAlone runs the ABCI application as a stand-alone server (no
+// in-process CometBFT), stopping it gracefully when the context is canceled.
+func startStandAlone(svrCtx *Context, app types.Application, opts StartCmdOptions) error {
+    addr := svrCtx.Viper.GetString(flagAddress)
+    transport := svrCtx.Viper.GetString(flagTransport)
+
+    cmtApp := NewCometABCIWrapper(app)
+    svr, err := server.NewServer(addr, transport, cmtApp)
+    if err != nil {
+        return fmt.Errorf("error creating listener: %v", err)
+    }
+
+    svr.SetLogger(servercmtlog.CometLoggerWrapper{Logger: svrCtx.Logger.With("module", "abci-server")})
+
+    g, ctx := getCtx(svrCtx, false)
+
+    g.Go(func() error {
+        if err := svr.Start(); err != nil {
+            svrCtx.Logger.Error("failed to start out-of-process ABCI server", "err", err)
+            return err
+        }
+
+        // Wait for the calling process to be canceled or close the provided context,
+        // so we can gracefully stop the ABCI server.
+        <-ctx.Done()
+        svrCtx.Logger.Info("stopping the ABCI server...")
+        return errors.Join(svr.Stop(), app.Close())
+    })
+
+    return g.Wait()
+}
+
+// startInProcess runs the node with CometBFT in-process (or in gRPC-only mode,
+// where CometBFT is bypassed), then starts the gRPC and API servers.
+func startInProcess(svrCtx *Context, svrCfg serverconfig.Config, clientCtx client.Context, app types.Application,
+    metrics *telemetry.Metrics, opts StartCmdOptions,
+) error {
+    cmtCfg := svrCtx.Config
+    home := cmtCfg.RootDir
+    gRPCOnly := svrCtx.Viper.GetBool(flagGRPCOnly)
+
+    g, ctx := getCtx(svrCtx, true)
+
+    if gRPCOnly {
+        // TODO: Generalize logic so that gRPC only is really in startStandAlone
+        svrCtx.Logger.Info("starting node in gRPC only mode; CometBFT is disabled")
+        svrCfg.GRPC.Enable = true
+    } else {
+        svrCtx.Logger.Info("starting node with ABCI CometBFT in-process")
+
+        tmNode, cleanupFn, err := startCmtNode(ctx, cmtCfg, app, svrCtx)
+        if err != nil {
+            return err
+        }
+        defer cleanupFn()
+
+        // Add the tx service to the gRPC router. We only need to register this
+        // service if API or gRPC is enabled, and avoid doing so in the general
+        // case, because it spawns a new local CometBFT RPC client.
+        if svrCfg.API.Enable || svrCfg.GRPC.Enable {
+            // Re-assign for making the client available below do not use := to avoid
+            // shadowing the clientCtx variable.
+            clientCtx = clientCtx.WithClient(local.New(tmNode))
+
+            app.RegisterTxService(clientCtx)
+            app.RegisterTendermintService(clientCtx)
+            app.RegisterNodeService(clientCtx, svrCfg)
+        }
+    }
+
+    grpcSrv, clientCtx, err := startGrpcServer(ctx, g, svrCfg.GRPC, clientCtx, svrCtx, app)
+    if err != nil {
+        return err
+    }
+
+    err = startAPIServer(ctx, g, cmtCfg, svrCfg, clientCtx, svrCtx, app, home, grpcSrv, metrics)
+    if err != nil {
+        return err
+    }
+
+    if opts.PostSetup != nil {
+        if err := opts.PostSetup(svrCtx, clientCtx, ctx, g); err != nil {
+            return err
+        }
+    }
+
+    // wait for signal capture and gracefully return
+    // we are guaranteed to be waiting for the "ListenForQuitSignals" goroutine.
+    return g.Wait()
+}
+
+// startCmtNode creates and starts an in-process CometBFT node wrapping the
+// application, returning the node and a cleanup function that stops it.
+// TODO: Move nodeKey into being created within the function.
+func startCmtNode(
+    ctx context.Context,
+    cfg *cmtcfg.Config,
+    app types.Application,
+    svrCtx *Context,
+) (tmNode *node.Node, cleanupFn func(), err error) {
+    nodeKey, err := p2p.LoadOrGenNodeKey(cfg.NodeKeyFile())
+    if err != nil {
+        return nil, cleanupFn, err
+    }
+
+    cmtApp := NewCometABCIWrapper(app)
+    tmNode, err = node.NewNodeWithContext(
+        ctx,
+        cfg,
+        pvm.LoadOrGenFilePV(cfg.PrivValidatorKeyFile(), cfg.PrivValidatorStateFile()),
+        nodeKey,
+        proxy.NewLocalClientCreator(cmtApp),
+        getGenDocProvider(cfg),
+        cmtcfg.DefaultDBProvider,
+        node.DefaultMetricsProvider(cfg.Instrumentation),
+        servercmtlog.CometLoggerWrapper{Logger: svrCtx.Logger},
+    )
+    if err != nil {
+        return tmNode, cleanupFn, err
+    }
+
+    if err := tmNode.Start(); err != nil {
+        return tmNode, cleanupFn, err
+    }
+
+    cleanupFn = func() {
+        if tmNode != nil && tmNode.IsRunning() {
+            _ = tmNode.Stop()
+            _ = app.Close()
+        }
+    }
+
+    return tmNode, cleanupFn, nil
+}
+
+// getAndValidateConfig loads the server config from Viper and validates it.
+func getAndValidateConfig(svrCtx *Context) (serverconfig.Config, error) {
+    config, err := serverconfig.GetConfig(svrCtx.Viper)
+    if err != nil {
+        return config, err
+    }
+
+    if err := config.ValidateBasic(); err != nil {
+        return config, err
+    }
+
+    return config, nil
+}
+
+// getGenDocProvider returns a function which returns the genesis doc from the genesis file.
+func getGenDocProvider(cfg *cmtcfg.Config) func() (*cmttypes.GenesisDoc, error) {
+    return func() (*cmttypes.GenesisDoc, error) {
+        appGenesis, err := genutiltypes.AppGenesisFromFile(cfg.GenesisFile())
+        if err != nil {
+            return nil, err
+        }
+
+        return appGenesis.ToGenesisDoc()
+    }
+}
+
+// setupTraceWriter opens the KVStore trace writer (if flagTraceStore is set)
+// and returns it along with a cleanup function that closes it.
+func setupTraceWriter(svrCtx *Context) (traceWriter io.WriteCloser, cleanup func(), err error) {
+    // clean up the traceWriter when the server is shutting down
+    cleanup = func() {}
+
+    traceWriterFile := svrCtx.Viper.GetString(flagTraceStore)
+    traceWriter, err = openTraceWriter(traceWriterFile)
+    if err != nil {
+        return traceWriter, cleanup, err
+    }
+
+    // if flagTraceStore is not used then traceWriter is nil
+    if traceWriter != nil {
+        cleanup = func() {
+            if err = traceWriter.Close(); err != nil {
+                svrCtx.Logger.Error("failed to close trace writer", "err", err)
+            }
+        }
+    }
+
+    return traceWriter, cleanup, nil
+}
+
+// startGrpcServer starts the gRPC server (if enabled) on the errgroup and
+// wires a local gRPC client into the returned client context for the gateway.
+func startGrpcServer(
+    ctx context.Context,
+    g *errgroup.Group,
+    config serverconfig.GRPCConfig,
+    clientCtx client.Context,
+    svrCtx *Context,
+    app types.Application,
+) (*grpc.Server, client.Context, error) {
+    if !config.Enable {
+        // return grpcServer as nil if gRPC is disabled
+        return nil, clientCtx, nil
+    }
+
+    _, port, err := net.SplitHostPort(config.Address)
+    if err != nil {
+        return nil, clientCtx, err
+    }
+
+    maxSendMsgSize := config.MaxSendMsgSize
+    if maxSendMsgSize == 0 {
+        maxSendMsgSize = serverconfig.DefaultGRPCMaxSendMsgSize
+    }
+
+    maxRecvMsgSize := config.MaxRecvMsgSize
+    if maxRecvMsgSize == 0 {
+        maxRecvMsgSize = serverconfig.DefaultGRPCMaxRecvMsgSize
+    }
+
+    grpcAddress := fmt.Sprintf("127.0.0.1:%s", port)
+
+    // if gRPC is enabled, configure gRPC client for gRPC gateway
+    grpcClient, err := grpc.Dial(
+        grpcAddress,
+        grpc.WithTransportCredentials(insecure.NewCredentials()),
+        grpc.WithDefaultCallOptions(
+            grpc.ForceCodec(codec.NewProtoCodec(clientCtx.InterfaceRegistry).GRPCCodec()),
+            grpc.MaxCallRecvMsgSize(maxRecvMsgSize),
+            grpc.MaxCallSendMsgSize(maxSendMsgSize),
+        ),
+    )
+    if err != nil {
+        return nil, clientCtx, err
+    }
+
+    clientCtx = clientCtx.WithGRPCClient(grpcClient)
+    svrCtx.Logger.Debug("gRPC client assigned to client context", "target", grpcAddress)
+
+    grpcSrv, err := servergrpc.NewGRPCServer(clientCtx, app, config)
+    if err != nil {
+        return nil, clientCtx, err
+    }
+
+    // Start the gRPC server in a goroutine. Note, the provided ctx will ensure
+    // that the server is gracefully shut down.
+    g.Go(func() error {
+        return servergrpc.StartGRPCServer(ctx, svrCtx.Logger.With("module", "grpc-server"), config, grpcSrv)
+    })
+
+    return grpcSrv, clientCtx, nil
+}
+
+// startAPIServer starts the REST API server (if enabled) on the errgroup,
+// registering the application's API routes and optional telemetry.
+func startAPIServer(
+    ctx context.Context,
+    g *errgroup.Group,
+    cmtCfg *cmtcfg.Config,
+    svrCfg serverconfig.Config,
+    clientCtx client.Context,
+    svrCtx *Context,
+    app types.Application,
+    home string,
+    grpcSrv *grpc.Server,
+    metrics *telemetry.Metrics,
+) error {
+    if !svrCfg.API.Enable {
+        return nil
+    }
+
+    clientCtx = clientCtx.WithHomeDir(home)
+
+    apiSrv := api.New(clientCtx, svrCtx.Logger.With("module", "api-server"), grpcSrv)
+    app.RegisterAPIRoutes(apiSrv, svrCfg.API)
+
+    if svrCfg.Telemetry.Enabled {
+        apiSrv.SetTelemetry(metrics)
+    }
+
+    g.Go(func() error {
+        return apiSrv.Start(ctx, svrCfg)
+    })
+
+    return nil
+}
+
+// startTelemetry creates the telemetry metrics sink when enabled; returns
+// (nil, nil) when telemetry is disabled.
+func startTelemetry(cfg serverconfig.Config) (*telemetry.Metrics, error) {
+    if !cfg.Telemetry.Enabled {
+        return nil, nil
+    }
+
+    return telemetry.New(cfg.Telemetry)
+}
+
+// wrapCPUProfile starts CPU profiling, if enabled, and executes the provided
+// callbackFn, stopping the profiler and closing the profile file when the
+// callback returns.
+//
+// NOTE: We expect the caller to handle graceful shutdown and signal handling.
+func wrapCPUProfile(svrCtx *Context, callbackFn func() error) error {
+    if cpuProfile := svrCtx.Viper.GetString(flagCPUProfile); cpuProfile != "" {
+        f, err := os.Create(cpuProfile)
+        if err != nil {
+            return err
+        }
+
+        svrCtx.Logger.Info("starting CPU profiler", "profile", cpuProfile)
+
+        if err := pprof.StartCPUProfile(f); err != nil {
+            return err
+        }
+
+        defer func() {
+            svrCtx.Logger.Info("stopping CPU profiler", "profile", cpuProfile)
+            pprof.StopCPUProfile()
+
+            if err := f.Close(); err != nil {
+                svrCtx.Logger.Info("failed to close cpu-profile file", "profile", cpuProfile, "err", err.Error())
+            }
+        }()
+    }
+
+    return callbackFn()
+}
+
+// emitServerInfoMetrics emits server info related metrics using application telemetry.
+func emitServerInfoMetrics() {
+    var ls []metrics.Label
+
+    versionInfo := version.NewInfo()
+    if len(versionInfo.GoVersion) > 0 {
+        ls = append(ls, telemetry.NewLabel("go", versionInfo.GoVersion))
+    }
+    if len(versionInfo.CosmosSdkVersion) > 0 {
+        ls = append(ls, telemetry.NewLabel("version", versionInfo.CosmosSdkVersion))
+    }
+
+    if len(ls) == 0 {
+        return
+    }
+
+    telemetry.SetGaugeWithLabels([]string{"server", "info"}, 1, ls)
+}
+
+// getCtx returns an errgroup and a cancellable context whose cancellation is
+// triggered by OS quit signals via ListenForQuitSignals.
+func getCtx(svrCtx *Context, block bool) (*errgroup.Group, context.Context) {
+    ctx, cancelFn := context.WithCancel(context.Background())
+    g, ctx := errgroup.WithContext(ctx)
+    // listen for quit signals so the calling parent process can gracefully exit
+    ListenForQuitSignals(g, block, cancelFn, svrCtx.Logger)
+    return g, ctx
+}
+
+// startApp opens the database and trace writer, constructs the application via
+// appCreator, and returns it with a cleanup function that releases resources.
+func startApp(svrCtx *Context, appCreator types.AppCreator, opts StartCmdOptions) (app types.Application, cleanupFn func(), err error) {
+    traceWriter, traceCleanupFn, err := setupTraceWriter(svrCtx)
+    if err != nil {
+        return app, traceCleanupFn, err
+    }
+
+    home := svrCtx.Config.RootDir
+    db, err := opts.DBOpener(home, GetAppDBBackend(svrCtx.Viper))
+    if err != nil {
+        return app, traceCleanupFn, err
+    }
+
+    app = appCreator(svrCtx.Logger, db, traceWriter, svrCtx.Viper)
+
+    cleanupFn = func() {
+        traceCleanupFn()
+        if localErr := app.Close(); localErr != nil {
+            svrCtx.Logger.Error(localErr.Error())
+        }
+    }
+
+    return app, cleanupFn, nil
+}
+```
+
+## Client
+
+### CLI
+
+The genutil commands are available under the `genesis` subcommand.
+
+#### add-genesis-account
+
+Add a genesis account to `genesis.json`. Learn more [here](/sdk/v0.53/user/run-node/run-node#adding-genesis-accounts).
+
+#### collect-gentxs
+
+Collect genesis txs and output a `genesis.json` file.
+
+```shell
+simd genesis collect-gentxs
+```
+
+This will create a new `genesis.json` file that includes data from all the validators (sometimes called the "super genesis file" to distinguish it from single-validator genesis files).
+
+#### gentx
+
+Generate a genesis tx carrying a self delegation.
+
+```shell
+simd genesis gentx [key_name] [amount] --chain-id [chain-id]
+```
+
+This will create the genesis transaction for your new chain. Here `amount` should be at least `1000000000stake`.
+If you provide too much or too little, you will encounter an error when starting a node.
+
+#### migrate
+
+Migrate genesis to a specified target (SDK) version.
+
+```shell
+simd genesis migrate [target-version]
+```
+
+
+The `migrate` command is extensible and takes a `MigrationMap`. This map is a mapping of target versions to genesis migrations functions.
+When not using the default `MigrationMap`, it is recommended to still call the default `MigrationMap` corresponding to the SDK version of the chain and prepend/append your own genesis migrations.
+
+
+#### validate-genesis
+
+Validates the genesis file at the default location or at the location passed as an argument.
+
+```shell
+simd genesis validate-genesis
+```
+
+
+Validate genesis only validates that the genesis file is valid for the current application binary. For validating a genesis from a previous version of the application, use the `migrate` command to migrate the genesis to the current version.
+
diff --git a/sdk/next/build/modules/gov/README.mdx b/sdk/next/build/modules/gov/README.mdx
new file mode 100644
index 000000000..49c2b1f21
--- /dev/null
+++ b/sdk/next/build/modules/gov/README.mdx
@@ -0,0 +1,2653 @@
+---
+title: 'x/gov'
+description: >-
+ This paper specifies the Governance module of the Cosmos SDK, which was first
+ described in the Cosmos Whitepaper in June 2016.
+---
+
+## Abstract
+
+This paper specifies the Governance module of the Cosmos SDK, which was first
+described in the [Cosmos Whitepaper](https://cosmos.network/about/whitepaper) in
+June 2016.
+
+The module enables Cosmos SDK based blockchain to support an on-chain governance
+system. In this system, holders of the native staking token of the chain can vote
+on proposals on a 1 token 1 vote basis. Next is a list of features the module
+currently supports:
+
+* **Proposal submission:** Users can submit proposals with a deposit. Once the
+ minimum deposit is reached, the proposal enters voting period. The minimum deposit can be reached by collecting deposits from different users (including proposer) within deposit period.
+* **Vote:** Participants can vote on proposals that reached MinDeposit and entered voting period.
+* **Inheritance and penalties:** Delegators inherit their validator's vote if
+ they don't vote themselves.
+* **Claiming deposit:** Users that deposited on proposals can recover their
+ deposits if the proposal was accepted or rejected. If the proposal was vetoed, or never entered voting period (minimum deposit not reached within deposit period), the deposit is burned.
+
+This module is in use on the Cosmos Hub (a.k.a [gaia](https://github.com/cosmos/gaia)).
+Features that may be added in the future are described in [Future Improvements](#future-improvements).
+
+## Contents
+
+The following specification uses *ATOM* as the native staking token. The module
+can be adapted to any Proof-Of-Stake blockchain by replacing *ATOM* with the native
+staking token of the chain.
+
+* [Concepts](#concepts)
+ * [Proposal submission](#proposal-submission)
+ * [Deposit](#deposit)
+ * [Vote](#vote)
+ * [Software Upgrade](#software-upgrade)
+* [State](#state)
+ * [Proposals](#proposals)
+ * [Parameters and base types](#parameters-and-base-types)
+ * [Deposit](#deposit-1)
+ * [ValidatorGovInfo](#validatorgovinfo)
+ * [Stores](#stores)
+ * [Proposal Processing Queue](#proposal-processing-queue)
+ * [Legacy Proposal](#legacy-proposal)
+* [Messages](#messages)
+ * [Proposal Submission](#proposal-submission-1)
+ * [Deposit](#deposit-2)
+ * [Vote](#vote-1)
+* [Events](#events)
+ * [EndBlocker](#endblocker)
+ * [Handlers](#handlers)
+* [Parameters](#parameters)
+* [Client](#client)
+ * [CLI](#cli)
+ * [gRPC](#grpc)
+ * [REST](#rest)
+* [Metadata](#metadata)
+ * [Proposal](#proposal-3)
+ * [Vote](#vote-5)
+* [Future Improvements](#future-improvements)
+
+## Concepts
+
+{/* *Disclaimer: This is work in progress. Mechanisms are susceptible to change.* */}
+
+The governance process is divided in a few steps that are outlined below:
+
+* **Proposal submission:** Proposal is submitted to the blockchain with a
+ deposit.
+* **Vote:** Once deposit reaches a certain value (`MinDeposit`), proposal is
+ confirmed and vote opens. Bonded Atom holders can then send `TxGovVote`
+ transactions to vote on the proposal.
+* **Execution:** After a period of time, the votes are tallied and depending
+  on the result, the messages in the proposal will be executed.
+
+### Proposal submission
+
+#### Right to submit a proposal
+
+Every account can submit proposals by sending a `MsgSubmitProposal` transaction.
+Once a proposal is submitted, it is identified by its unique `proposalID`.
+
+#### Proposal Messages
+
+A proposal includes an array of `sdk.Msg`s which are executed automatically if the
+proposal passes. The messages are executed by the governance `ModuleAccount` itself. Modules
+such as `x/upgrade`, that want to allow certain messages to be executed by governance
+only should add a whitelist within the respective msg server, granting the governance
+module the right to execute the message once a quorum has been reached. The governance
+module uses the `MsgServiceRouter` to check that these messages are correctly constructed
+and have a respective path to execute on but do not perform a full validity check.
+
+### Deposit
+
+To prevent spam, proposals must be submitted with a deposit in the coins defined by
+the `MinDeposit` param.
+
+When a proposal is submitted, it has to be accompanied with a deposit that must be
+strictly positive, but can be inferior to `MinDeposit`. The submitter doesn't need
+to pay for the entire deposit on their own. The newly created proposal is stored in
+an *inactive proposal queue* and stays there until its deposit passes the `MinDeposit`.
+Other token holders can increase the proposal's deposit by sending a `Deposit`
+transaction. If a proposal doesn't pass the `MinDeposit` before the deposit end time
+(the time when deposits are no longer accepted), the proposal will be destroyed: the
+proposal will be removed from state and the deposit will be burned (see x/gov `EndBlocker`).
+When a proposal deposit passes the `MinDeposit` threshold (even during the proposal
+submission) before the deposit end time, the proposal will be moved into the
+*active proposal queue* and the voting period will begin.
+
+The deposit is kept in escrow and held by the governance `ModuleAccount` until the
+proposal is finalized (passed or rejected).
+
+#### Deposit refund and burn
+
+When a proposal is finalized, the coins from the deposit are either refunded or burned
+according to the final tally of the proposal:
+
+* If the proposal is approved or rejected but *not* vetoed, each deposit will be
+ automatically refunded to its respective depositor (transferred from the governance
+ `ModuleAccount`).
+* When the proposal is vetoed with greater than 1/3, deposits will be burned from the
+ governance `ModuleAccount` and the proposal information along with its deposit
+ information will be removed from state.
+* All refunded or burned deposits are removed from the state. Events are issued when
+ burning or refunding a deposit.
+
+### Vote
+
+#### Participants
+
+*Participants* are users that have the right to vote on proposals. On the
+Cosmos Hub, participants are bonded Atom holders. Unbonded Atom holders and
+other users do not get the right to participate in governance. However, they
+can submit and deposit on proposals.
+
+Note that when *participants* have bonded and unbonded Atoms, their voting power is calculated from their bonded Atom holdings only.
+
+#### Voting period
+
+Once a proposal reaches `MinDeposit`, it immediately enters `Voting period`. We
+define `Voting period` as the interval between the moment the vote opens and
+the moment the vote closes. The initial value of `Voting period` is 2 weeks.
+
+#### Option set
+
+The option set of a proposal refers to the set of choices a participant can
+choose from when casting its vote.
+
+The initial option set includes the following options:
+
+* `Yes`
+* `No`
+* `NoWithVeto`
+* `Abstain`
+
+`NoWithVeto` counts as `No` but also adds a `Veto` vote. `Abstain` option
+allows voters to signal that they do not intend to vote in favor or against the
+proposal but accept the result of the vote.
+
+*Note: from the UI, for urgent proposals we should maybe add a ‘Not Urgent’ option that casts a `NoWithVeto` vote.*
+
+#### Weighted Votes
+
+[ADR-037](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-037-gov-split-vote.md) introduces the weighted vote feature which allows a staker to split their votes into several voting options. For example, it could use 70% of its voting power to vote Yes and 30% of its voting power to vote No.
+
+Oftentimes the entity owning that address might not be a single individual. For example, a company might have different stakeholders who want to vote differently, and so it makes sense to allow them to split their voting power. Currently, it is not possible for them to do "passthrough voting" and give their users voting rights over their tokens. However, with this system, exchanges can poll their users for voting preferences, and then vote on-chain proportionally to the results of the poll.
+
+To represent weighted vote on chain, we use the following Protobuf message.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/gov/v1beta1/gov.proto#L34-L47
+```
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/gov/v1beta1/gov.proto#L181-L201
+```
+
+For a weighted vote to be valid, the `options` field must not contain duplicate vote options, and the sum of weights of all options must be equal to 1.
+
+#### Custom Vote Calculation
+
+
+As of v0.54, the x/gov module has been decoupled from x/staking to allow for custom voting power calculations. The `CalculateVoteResultsAndVotingPowerFn` is now a required parameter when initializing the governance keeper.
+
+
+Developers can define a custom vote result and voting power calculation function. This function determines how votes are tallied and voting power is calculated.
+
+```go expandable
+package keeper
+
+import (
+ "context"
+ "fmt"
+
+ "cosmossdk.io/math"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1"
+)
+
+// CalculateVoteResultsAndVotingPowerFn is a function signature for calculating vote results and voting power
+// It can be overridden to customize the voting power calculation for proposals
+// It must fetch validators, calculate total validator power, and return vote results
+// totalVoterPower is the sum of voting power that actually voted
+// totalValPower is the sum of all active validator power (for quorum calculation)
+type CalculateVoteResultsAndVotingPowerFn func(
+ ctx context.Context,
+ k Keeper,
+ proposal v1.Proposal,
+) (totalVoterPower math.LegacyDec, totalValPower math.Int, results map[v1.VoteOption]math.LegacyDec, err error)
+```
+
+The SDK provides a default implementation (`NewDefaultCalculateVoteResultsAndVotingPower`) that uses the staking module to calculate voting power based on bonded tokens and delegations. This function:
+
+- Fetches all bonded validators and their voting power
+- Iterates through all votes on the proposal
+- Calculates voting power for each voter based on their delegations
+- Handles validator votes and delegator votes with inheritance
+- Returns the total voting power that voted, total validator power, and vote results by option
+
+Applications requiring custom voting mechanisms can implement their own calculation function. This gives developers flexibility to build governance systems with:
+
+* Quadratic Voting
+* Time-weighted Voting
+* Reputation-Based voting
+
+For applications using the default staking-based voting power, use `NewDefaultCalculateVoteResultsAndVotingPower`:
+
+```go expandable
+// Initialize with default staking-based voting power calculation
+govKeeper := govkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[govtypes.StoreKey]),
+ app.AccountKeeper,
+ app.BankKeeper,
+ app.DistrKeeper, // Optional: can be nil if not using distribution module features
+ app.MsgServiceRouter(),
+ govConfig,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ govkeeper.NewDefaultCalculateVoteResultsAndVotingPower(app.StakingKeeper),
+)
+```
+
+For applications requiring custom voting mechanisms, implement the `CalculateVoteResultsAndVotingPowerFn` interface:
+
+```go expandable
+// Custom voting function example
+func myCustomVotingFunction(
+ ctx context.Context,
+ k Keeper,
+ proposal v1.Proposal,
+) (totalVoterPower math.LegacyDec, totalValPower math.Int, results map[v1.VoteOption]math.LegacyDec, err error) {
+ // Custom tally logic
+ // Must return:
+ // - totalVoterPower: sum of voting power that voted
+ // - totalValPower: total validator power for quorum calculation
+ // - results: vote tallies by option
+ // - err: any error encountered
+ return totalVoterPower, totalValPower, results, nil
+}
+
+// Initialize with custom voting function
+govKeeper := govkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[govtypes.StoreKey]),
+ app.AccountKeeper,
+ app.BankKeeper,
+ app.DistrKeeper, // Optional: can be nil
+ app.MsgServiceRouter(),
+ govConfig,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ myCustomVotingFunction,
+)
+```
+
+
+As of v0.54, the distribution keeper (`DistrKeeper`) is now optional. If your application does not use distribution module features for governance (such as community pool funding for failed proposals), you can pass `nil`.
+
+
+### Quorum
+
+Quorum is defined as the minimum percentage of voting power that needs to be
+cast on a proposal for the result to be valid.
+
+### Expedited Proposals
+
+A proposal can be expedited, making the proposal use a shorter voting duration and a higher tally threshold by default. If an expedited proposal fails to meet the threshold within the scope of the shorter voting duration, the expedited proposal is then converted to a regular proposal and restarts voting under regular voting conditions.
+
+#### Threshold
+
+Threshold is defined as the minimum proportion of `Yes` votes (excluding
+`Abstain` votes) for the proposal to be accepted.
+
+Initially, the threshold is set at 50% of `Yes` votes, excluding `Abstain`
+votes. A possibility to veto exists if more than 1/3rd of all votes are
+`NoWithVeto` votes. Note, both of these values are derived from the `TallyParams`
+on-chain parameter, which is modifiable by governance.
+This means that proposals are accepted iff:
+
+* There exist bonded tokens.
+* Quorum has been achieved.
+* The proportion of `Abstain` votes is inferior to 1/1.
+* The proportion of `NoWithVeto` votes is inferior to 1/3, including
+ `Abstain` votes.
+* The proportion of `Yes` votes, excluding `Abstain` votes, at the end of
+ the voting period is superior to 1/2.
+
+For expedited proposals, by default, the threshold is higher than with a *normal proposal*, namely, 66.7%.
+
+#### Inheritance
+
+If a delegator does not vote, it will inherit its validator vote.
+
+* If the delegator votes before its validator, it will not inherit from the
+ validator's vote.
+* If the delegator votes after its validator, it will override its validator
+ vote with its own. If the proposal is urgent, it is possible
+ that the vote will close before delegators have a chance to react and
+ override their validator's vote. This is not a problem, as proposals require more than 2/3rd of the total voting power to pass, when tallied at the end of the voting period. Because as little as 1/3 + 1 validation power could collude to censor transactions, non-collusion is already assumed for ranges exceeding this threshold.
+
+#### Validator’s punishment for non-voting
+
+At present, validators are not punished for failing to vote.
+
+#### Governance address
+
+Later, we may add permissioned keys that could only sign txs from certain modules. For the MVP, the `Governance address` will be the main validator address generated at account creation. This address corresponds to a different PrivKey than the CometBFT PrivKey which is responsible for signing consensus messages. Validators thus do not have to sign governance transactions with the sensitive CometBFT PrivKey.
+
+#### Burnable Params
+
+There are three parameters that define if the deposit of a proposal should be burned or returned to the depositors.
+
+* `BurnVoteVeto` burns the proposal deposit if the proposal gets vetoed.
+* `BurnVoteQuorum` burns the proposal deposit if the vote does not reach quorum.
+* `BurnProposalDepositPrevote` burns the proposal deposit if it does not enter the voting phase.
+
+> Note: These parameters are modifiable via governance.
+
+## State
+
+### Constitution
+
+`Constitution` is found in the genesis state. It is a string field intended to be used to describe the purpose of a particular blockchain, and its expected norms. A few examples of how the constitution field can be used:
+
+* define the purpose of the chain, laying a foundation for its future development
+* set expectations for delegators
+* set expectations for validators
+* define the chain's relationship to "meatspace" entities, like a foundation or corporation
+
+Since this is more of a social feature than a technical feature, we'll now get into some items that may have been useful to have in a genesis constitution:
+
+* What limitations on governance exist, if any?
+ * is it okay for the community to slash the wallet of a whale that they no longer feel that they want around? (viz: Juno Proposal 4 and 16)
+ * can governance "socially slash" a validator who is using unapproved MEV? (viz: commonwealth.im/osmosis)
+ * In the event of an economic emergency, what should validators do?
+ * Terra crash of May, 2022, saw validators choose to run a new binary with code that had not been approved by governance, because the governance token had been inflated to nothing.
+* What is the purpose of the chain, specifically?
+  * best example of this is the Cosmos hub, where different founding groups have different interpretations of the purpose of the network.
+
+This genesis entry, "constitution", hasn't been designed for existing chains, which should likely just ratify a constitution using their governance system. Instead, this is for new chains. It will allow for validators to have a much clearer idea of purpose and the expectations placed on them while operating their nodes. Likewise, for community members, the constitution will give them some idea of what to expect from both the "chain team" and the validators, respectively.
+
+This constitution is designed to be immutable, and placed only in genesis, though that could change over time by a pull request to the cosmos-sdk that allows for the constitution to be changed by governance. Communities wishing to make amendments to their original constitution should use the governance mechanism and a "signaling proposal" to do exactly that.
+
+**Ideal use scenario for a cosmos chain constitution**
+
+As a chain developer, you decide that you'd like to provide clarity to your key user groups:
+
+* validators
+* token holders
+* developers (yourself)
+
+You use the constitution to immutably store some Markdown in genesis, so that when difficult questions come up, the constitution can provide guidance to the community.
+
+### Proposals
+
+`Proposal` objects are used to tally votes and generally track the proposal's state.
+They contain an array of arbitrary `sdk.Msg`'s which the governance module will attempt
+to resolve and then execute if the proposal passes. `Proposal`'s are identified by a
+unique id and contains a series of timestamps: `submit_time`, `deposit_end_time`,
+`voting_start_time`, `voting_end_time` which track the lifecycle of a proposal
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/gov/v1/gov.proto#L51-L99
+```
+
+A proposal will generally require more than just a set of messages to explain its
+purpose but need some greater justification and allow a means for interested participants
+to discuss and debate the proposal.
+In most cases, **it is encouraged to have an off-chain system that supports the on-chain governance process**.
+To accommodate for this, a proposal contains a special **`metadata`** field, a string,
+which can be used to add context to the proposal. The `metadata` field allows custom use for networks,
+however, it is expected that the field contains a URL or some form of CID using a system such as
+[IPFS](https://docs.ipfs.io/concepts/content-addressing/). To support the case of
+interoperability across networks, the SDK recommends that the `metadata` represents
+the following `JSON` template:
+
+```json
+{
+ "title": "...",
+ "description": "...",
+ "forum": "...", // a link to the discussion platform (i.e. Discord)
+ "other": "..." // any extra data that doesn't correspond to the other fields
+}
+```
+
+This makes it far easier for clients to support multiple networks.
+
+The metadata has a maximum length that is chosen by the app developer, and
+passed into the gov keeper as a config. The default maximum length in the SDK is 255 characters.
+
+#### Writing a module that uses governance
+
+There are many aspects of a chain, or of the individual modules that you may want to
+use governance to perform such as changing various parameters. This is very simple
+to do. First, write out your message types and `MsgServer` implementation. Add an
+`authority` field to the keeper which will be populated in the constructor with the
+governance module account: `govKeeper.GetGovernanceAccount().GetAddress()`. Then for
+the methods in the `msg_server.go`, perform a check on the message that the signer
+matches `authority`. This will prevent any user from executing that message.
+
+### Parameters and base types
+
+`Parameters` define the rules according to which votes are run. There can only
+be one active parameter set at any given time. If governance wants to change a
+parameter set, either to modify a value or add/remove a parameter field, a new
+parameter set has to be created and the previous one rendered inactive.
+
+#### DepositParams
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/gov/v1/gov.proto#L152-L162
+```
+
+#### VotingParams
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/gov/v1/gov.proto#L164-L168
+```
+
+#### TallyParams
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/gov/v1/gov.proto#L170-L182
+```
+
+Parameters are stored in a global `GlobalParams` KVStore.
+
+Additionally, we introduce some basic types:
+
+```go expandable
+type Vote byte
+
+const (
+ VoteYes = 0x1
+ VoteNo = 0x2
+ VoteNoWithVeto = 0x3
+ VoteAbstain = 0x4
+)
+
+type ProposalType string
+
+const (
+ ProposalTypePlainText = "Text"
+ ProposalTypeSoftwareUpgrade = "SoftwareUpgrade"
+)
+
+type ProposalStatus byte
+
+const (
+ StatusNil ProposalStatus = 0x00
+ StatusDepositPeriod ProposalStatus = 0x01 // Proposal is submitted. Participants can deposit on it but not vote
+ StatusVotingPeriod ProposalStatus = 0x02 // MinDeposit is reached, participants can vote
+ StatusPassed ProposalStatus = 0x03 // Proposal passed and successfully executed
+ StatusRejected ProposalStatus = 0x04 // Proposal has been rejected
+ StatusFailed ProposalStatus = 0x05 // Proposal passed but failed execution
+)
+```
+
+### Deposit
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/gov/v1/gov.proto#L38-L49
+```
+
+### ValidatorGovInfo
+
+This type is used in a temp map when tallying
+
+```go
+type ValidatorGovInfo struct {
+ Minus sdk.Dec
+ Vote Vote
+}
+```
+
+## Stores
+
+
+Stores are KVStores in the multi-store. The key to find the store is the first parameter in the list
+
+
+We will use one KVStore `Governance` to store four mappings:
+
+* A mapping from `proposalID|'proposal'` to `Proposal`.
+* A mapping from `proposalID|'addresses'|address` to `Vote`. This mapping allows
+ us to query all addresses that voted on the proposal along with their vote by
+ doing a range query on `proposalID:addresses`.
+* A mapping from `ParamsKey|'Params'` to `Params`. This map allows to query all
+ x/gov params.
+* A mapping from `VotingPeriodProposalKeyPrefix|proposalID` to a single byte. This allows
+ us to know if a proposal is in the voting period or not with very low gas cost.
+
+For pseudocode purposes, here are the two functions we will use to read or write in stores:
+
+* `load(StoreKey, Key)`: Retrieve item stored at key `Key` in store found at key `StoreKey` in the multistore
+* `store(StoreKey, Key, Value)`: Write value `Value` at key `Key` in store found at key `StoreKey` in the multistore
+
+### Proposal Processing Queue
+
+**Store:**
+
+* `ProposalProcessingQueue`: A queue `queue[proposalID]` containing all the
+ `ProposalIDs` of proposals that reached `MinDeposit`. During each `EndBlock`,
+ all the proposals that have reached the end of their voting period are processed.
+ To process a finished proposal, the application tallies the votes, computes the
+ votes of each validator and checks if every validator in the validator set has
+ voted. If the proposal is accepted, deposits are refunded. Finally, the proposal
+ content `Handler` is executed.
+
+And the pseudocode for the `ProposalProcessingQueue`:
+
+```go expandable
+in EndBlock do
+ for finishedProposalID in GetAllFinishedProposalIDs(block.Time)
+
+    proposal = load(Governance, <proposalID|'proposal'>) // proposal is a const key
+
+ validators = Keeper.getAllValidators()
+    tmpValMap := map(sdk.AccAddress)ValidatorGovInfo
+
+ // Initiate mapping at 0. This is the amount of shares of the validator's vote that will be overridden by their delegator's votes
+ for each validator in validators
+ tmpValMap(validator.OperatorAddr).Minus = 0
+
+ // Tally
+    voterIterator = rangeQuery(Governance, <proposalID|'addresses'>) //return all the addresses that voted on the proposal
+    for each (voterAddress, vote) in voterIterator
+ delegations = stakingKeeper.getDelegations(voterAddress) // get all delegations for current voter
+ for each delegation in delegations
+ // make sure delegation.Shares does NOT include shares being unbonded
+ tmpValMap(delegation.ValidatorAddr).Minus += delegation.Shares
+ proposal.updateTally(vote, delegation.Shares)
+
+ _, isVal = stakingKeeper.getValidator(voterAddress)
+ if (isVal)
+
+tmpValMap(voterAddress).Vote = vote
+
+ tallyingParam = load(GlobalParams, 'TallyingParam')
+
+ // Update tally if validator voted
+ for each validator in validators
+ if tmpValMap(validator).HasVoted
+ proposal.updateTally(tmpValMap(validator).Vote, (validator.TotalShares - tmpValMap(validator).Minus))
+
+ // Check if proposal is accepted or rejected
+ totalNonAbstain := proposal.YesVotes + proposal.NoVotes + proposal.NoWithVetoVotes
+ if (proposal.Votes.YesVotes/totalNonAbstain > tallyingParam.Threshold AND proposal.Votes.NoWithVetoVotes/totalNonAbstain < tallyingParam.Veto)
+ // proposal was accepted at the end of the voting period
+ // refund deposits (non-voters already punished)
+        for each (amount, depositor) in proposal.Deposits
+ depositor.AtomBalance += amount
+
+ stateWriter, err := proposal.Handler()
+ if err != nil
+ // proposal passed but failed during state execution
+ proposal.CurrentStatus = ProposalStatusFailed
+ else
+ // proposal pass and state is persisted
+ proposal.CurrentStatus = ProposalStatusAccepted
+ stateWriter.save()
+
+else
+ // proposal was rejected
+ proposal.CurrentStatus = ProposalStatusRejected
+
+  store(Governance, <proposalID|'proposal'>, proposal)
+```
+
+### Legacy Proposal
+
+
+Legacy proposals are deprecated. Use the new proposal flow by granting the governance module the right to execute the message.
+
+
+A legacy proposal is the old implementation of governance proposal.
+Contrary to proposals, which can contain any messages, a legacy proposal allows users to submit a set of pre-defined proposals.
+These proposals are defined by their types and handled by handlers that are registered in the gov v1beta1 router.
+
+More information on how to submit proposals in the [client section](#client).
+
+## Messages
+
+### Proposal Submission
+
+Proposals can be submitted by any account via a `MsgSubmitProposal` transaction.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/gov/v1/tx.proto#L42-L69
+```
+
+All `sdk.Msgs` passed into the `messages` field of a `MsgSubmitProposal` message
+must be registered in the app's `MsgServiceRouter`. Each of these messages must
+have one signer, namely the gov module account. And finally, the metadata length
+must not be larger than the `maxMetadataLen` config passed into the gov keeper.
+The `initialDeposit` must be strictly positive and conform to the accepted denom of the `MinDeposit` param.
+
+**State modifications:**
+
+* Generate new `proposalID`
+* Create new `Proposal`
+* Initialize `Proposal`'s attributes
+* Decrease balance of sender by `InitialDeposit`
+* If `MinDeposit` is reached:
+ * Push `proposalID` in `ProposalProcessingQueue`
+* Transfer `InitialDeposit` from the `Proposer` to the governance `ModuleAccount`
+
+### Deposit
+
+Once a proposal is submitted, if `Proposal.TotalDeposit < ActiveParam.MinDeposit`, Atom holders can send
+`MsgDeposit` transactions to increase the proposal's deposit.
+
+A deposit is accepted iff:
+
+* The proposal exists
+* The proposal is not in the voting period
+* The deposited coins conform to the accepted denom from the `MinDeposit` param
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/gov/v1/tx.proto#L134-L147
+```
+
+**State modifications:**
+
+* Decrease balance of sender by `deposit`
+* Add `deposit` of sender in `proposal.Deposits`
+* Increase `proposal.TotalDeposit` by sender's `deposit`
+* If `MinDeposit` is reached:
+  * Push `proposalID` in `ProposalProcessingQueue`
+* Transfer `Deposit` from the `proposer` to the governance `ModuleAccount`
+
+### Vote
+
+Once `ActiveParam.MinDeposit` is reached, voting period starts. From there,
+bonded Atom holders are able to send `MsgVote` transactions to cast their
+vote on the proposal.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/gov/v1/tx.proto#L92-L108
+```
+
+**State modifications:**
+
+* Record `Vote` of sender
+
+
+Gas cost for this message has to take into account the future tallying of the vote in EndBlocker.
+
+
+## Events
+
+The governance module emits the following events:
+
+### EndBlocker
+
+| Type | Attribute Key | Attribute Value |
+| ------------------ | ---------------- | ---------------- |
+| inactive\_proposal | proposal\_id | `{proposalID}` |
+| inactive\_proposal | proposal\_result | `{proposalResult}` |
+| active\_proposal | proposal\_id | `{proposalID}` |
+| active\_proposal | proposal\_result | `{proposalResult}` |
+
+### Handlers
+
+#### MsgSubmitProposal
+
+| Type | Attribute Key | Attribute Value |
+| --------------------- | --------------------- | ---------------- |
+| submit\_proposal | proposal\_id | `{proposalID}` |
+| submit\_proposal \[0] | voting\_period\_start | `{proposalID}` |
+| proposal\_deposit | amount | `{depositAmount}` |
+| proposal\_deposit | proposal\_id | `{proposalID}` |
+| message | module | governance |
+| message | action | submit\_proposal |
+| message | sender | `{senderAddress}` |
+
+* \[0] Event only emitted if the voting period starts during the submission.
+
+#### MsgVote
+
+| Type | Attribute Key | Attribute Value |
+| -------------- | ------------- | --------------- |
+| proposal\_vote | option | `{voteOption}` |
+| proposal\_vote | proposal\_id | `{proposalID}` |
+| message | module | governance |
+| message | action | vote |
+| message | sender | `{senderAddress}` |
+
+#### MsgVoteWeighted
+
+| Type | Attribute Key | Attribute Value |
+| -------------- | ------------- | --------------------- |
+| proposal\_vote | option | `{weightedVoteOptions}` |
+| proposal\_vote | proposal\_id | `{proposalID}` |
+| message | module | governance |
+| message | action | vote |
+| message | sender | `{senderAddress}` |
+
+#### MsgDeposit
+
+| Type | Attribute Key | Attribute Value |
+| ---------------------- | --------------------- | --------------- |
+| proposal\_deposit | amount | `{depositAmount}` |
+| proposal\_deposit | proposal\_id | `{proposalID}` |
+| proposal\_deposit \[0] | voting\_period\_start | `{proposalID}` |
+| message | module | governance |
+| message | action | deposit |
+| message | sender | `{senderAddress}` |
+
+* \[0] Event only emitted if the voting period starts during the submission.
+
+## Parameters
+
+The governance module contains the following parameters:
+
+| Key | Type | Example |
+| -------------------------------- | ---------------- | ---------------------------------------- |
+| min\_deposit | array (coins) | \[`{"denom":"uatom","amount":"10000000"}`] |
+| max\_deposit\_period | string (time ns) | "172800000000000" (172800s) |
+| voting\_period | string (time ns) | "172800000000000" (172800s) |
+| quorum | string (dec) | "0.334000000000000000" |
+| threshold | string (dec) | "0.500000000000000000" |
+| veto | string (dec) | "0.334000000000000000" |
+| expedited\_threshold | string (dec) | "0.667000000000000000" |
+| expedited\_voting\_period | string (time ns) | "86400000000000" (86400s) |
+| expedited\_min\_deposit | array (coins) | \[`{"denom":"uatom","amount":"50000000"}`] |
+| burn\_proposal\_deposit\_prevote | bool | false |
+| burn\_vote\_quorum | bool | false |
+| burn\_vote\_veto | bool | true |
+| min\_initial\_deposit\_ratio | string | "0.1" |
+
+**NOTE**: The governance module contains parameters that are objects unlike other
+modules. If only a subset of parameters are desired to be changed, only they need
+to be included and not the entire parameter object structure.
+
+## Client
+
+### CLI
+
+A user can query and interact with the `gov` module using the CLI.
+
+#### Query
+
+The `query` commands allow users to query `gov` state.
+
+```bash
+simd query gov --help
+```
+
+##### deposit
+
+The `deposit` command allows users to query a deposit for a given proposal from a given depositor.
+
+```bash
+simd query gov deposit [proposal-id] [depositer-addr] [flags]
+```
+
+Example:
+
+```bash
+simd query gov deposit 1 cosmos1..
+```
+
+Example Output:
+
+```bash
+amount:
+- amount: "100"
+ denom: stake
+depositor: cosmos1..
+proposal_id: "1"
+```
+
+##### deposits
+
+The `deposits` command allows users to query all deposits for a given proposal.
+
+```bash
+simd query gov deposits [proposal-id] [flags]
+```
+
+Example:
+
+```bash
+simd query gov deposits 1
+```
+
+Example Output:
+
+```bash
+deposits:
+- amount:
+ - amount: "100"
+ denom: stake
+ depositor: cosmos1..
+ proposal_id: "1"
+pagination:
+ next_key: null
+ total: "0"
+```
+
+##### param
+
+The `param` command allows users to query a given parameter for the `gov` module.
+
+```bash
+simd query gov param [param-type] [flags]
+```
+
+Example:
+
+```bash
+simd query gov param voting
+```
+
+Example Output:
+
+```bash
+voting_period: "172800000000000"
+```
+
+##### params
+
+The `params` command allows users to query all parameters for the `gov` module.
+
+```bash
+simd query gov params [flags]
+```
+
+Example:
+
+```bash
+simd query gov params
+```
+
+Example Output:
+
+```bash expandable
+deposit_params:
+ max_deposit_period: 172800s
+ min_deposit:
+ - amount: "10000000"
+ denom: stake
+params:
+ expedited_min_deposit:
+ - amount: "50000000"
+ denom: stake
+ expedited_threshold: "0.670000000000000000"
+ expedited_voting_period: 86400s
+ max_deposit_period: 172800s
+ min_deposit:
+ - amount: "10000000"
+ denom: stake
+ min_initial_deposit_ratio: "0.000000000000000000"
+ proposal_cancel_burn_rate: "0.500000000000000000"
+ quorum: "0.334000000000000000"
+ threshold: "0.500000000000000000"
+ veto_threshold: "0.334000000000000000"
+ voting_period: 172800s
+tally_params:
+ quorum: "0.334000000000000000"
+ threshold: "0.500000000000000000"
+ veto_threshold: "0.334000000000000000"
+voting_params:
+ voting_period: 172800s
+```
+
+##### proposal
+
+The `proposal` command allows users to query a given proposal.
+
+```bash
+simd query gov proposal [proposal-id] [flags]
+```
+
+Example:
+
+```bash
+simd query gov proposal 1
+```
+
+Example Output:
+
+```bash expandable
+deposit_end_time: "2022-03-30T11:50:20.819676256Z"
+final_tally_result:
+ abstain_count: "0"
+ no_count: "0"
+ no_with_veto_count: "0"
+ yes_count: "0"
+id: "1"
+messages:
+- '@type': /cosmos.bank.v1beta1.MsgSend
+ amount:
+ - amount: "10"
+ denom: stake
+ from_address: cosmos1..
+ to_address: cosmos1..
+metadata: AQ==
+status: PROPOSAL_STATUS_DEPOSIT_PERIOD
+submit_time: "2022-03-28T11:50:20.819676256Z"
+total_deposit:
+- amount: "10"
+ denom: stake
+voting_end_time: null
+voting_start_time: null
+```
+
+##### proposals
+
+The `proposals` command allows users to query all proposals with optional filters.
+
+```bash
+simd query gov proposals [flags]
+```
+
+Example:
+
+```bash
+simd query gov proposals
+```
+
+Example Output:
+
+```bash expandable
+pagination:
+ next_key: null
+ total: "0"
+proposals:
+- deposit_end_time: "2022-03-30T11:50:20.819676256Z"
+ final_tally_result:
+ abstain_count: "0"
+ no_count: "0"
+ no_with_veto_count: "0"
+ yes_count: "0"
+ id: "1"
+ messages:
+ - '@type': /cosmos.bank.v1beta1.MsgSend
+ amount:
+ - amount: "10"
+ denom: stake
+ from_address: cosmos1..
+ to_address: cosmos1..
+ metadata: AQ==
+ status: PROPOSAL_STATUS_DEPOSIT_PERIOD
+ submit_time: "2022-03-28T11:50:20.819676256Z"
+ total_deposit:
+ - amount: "10"
+ denom: stake
+ voting_end_time: null
+ voting_start_time: null
+- deposit_end_time: "2022-03-30T14:02:41.165025015Z"
+ final_tally_result:
+ abstain_count: "0"
+ no_count: "0"
+ no_with_veto_count: "0"
+ yes_count: "0"
+ id: "2"
+ messages:
+ - '@type': /cosmos.bank.v1beta1.MsgSend
+ amount:
+ - amount: "10"
+ denom: stake
+ from_address: cosmos1..
+ to_address: cosmos1..
+ metadata: AQ==
+ status: PROPOSAL_STATUS_DEPOSIT_PERIOD
+ submit_time: "2022-03-28T14:02:41.165025015Z"
+ total_deposit:
+ - amount: "10"
+ denom: stake
+ voting_end_time: null
+ voting_start_time: null
+```
+
+##### proposer
+
+The `proposer` command allows users to query the proposer for a given proposal.
+
+```bash
+simd query gov proposer [proposal-id] [flags]
+```
+
+Example:
+
+```bash
+simd query gov proposer 1
+```
+
+Example Output:
+
+```bash
+proposal_id: "1"
+proposer: cosmos1..
+```
+
+##### tally
+
+The `tally` command allows users to query the tally of a given proposal vote.
+
+```bash
+simd query gov tally [proposal-id] [flags]
+```
+
+Example:
+
+```bash
+simd query gov tally 1
+```
+
+Example Output:
+
+```bash
+abstain: "0"
+"no": "0"
+no_with_veto: "0"
+"yes": "1"
+```
+
+##### vote
+
+The `vote` command allows users to query a vote for a given proposal.
+
+```bash
+simd query gov vote [proposal-id] [voter-addr] [flags]
+```
+
+Example:
+
+```bash
+simd query gov vote 1 cosmos1..
+```
+
+Example Output:
+
+```bash
+option: VOTE_OPTION_YES
+options:
+- option: VOTE_OPTION_YES
+ weight: "1.000000000000000000"
+proposal_id: "1"
+voter: cosmos1..
+```
+
+##### votes
+
+The `votes` command allows users to query all votes for a given proposal.
+
+```bash
+simd query gov votes [proposal-id] [flags]
+```
+
+Example:
+
+```bash
+simd query gov votes 1
+```
+
+Example Output:
+
+```bash
+pagination:
+ next_key: null
+ total: "0"
+votes:
+- option: VOTE_OPTION_YES
+ options:
+ - option: VOTE_OPTION_YES
+ weight: "1.000000000000000000"
+ proposal_id: "1"
+ voter: cosmos1..
+```
+
+#### Transactions
+
+The `tx` commands allow users to interact with the `gov` module.
+
+```bash
+simd tx gov --help
+```
+
+##### deposit
+
+The `deposit` command allows users to deposit tokens for a given proposal.
+
+```bash
+simd tx gov deposit [proposal-id] [deposit] [flags]
+```
+
+Example:
+
+```bash
+simd tx gov deposit 1 10000000stake --from cosmos1..
+```
+
+##### draft-proposal
+
+The `draft-proposal` command allows users to draft any type of proposal.
+The command returns a `draft_proposal.json`, to be used by `submit-proposal` after being completed.
+The `draft_metadata.json` is meant to be uploaded to [IPFS](#metadata).
+
+```bash
+simd tx gov draft-proposal
+```
+
+##### submit-proposal
+
+The `submit-proposal` command allows users to submit a governance proposal along with some messages and metadata.
+Messages, metadata and deposit are defined in a JSON file.
+
+```bash
+simd tx gov submit-proposal [path-to-proposal-json] [flags]
+```
+
+Example:
+
+```bash
+simd tx gov submit-proposal /path/to/proposal.json --from cosmos1..
+```
+
+where `proposal.json` contains:
+
+```json expandable
+{
+ "messages": [
+ {
+ "@type": "/cosmos.bank.v1beta1.MsgSend",
+ "from_address": "cosmos1...", // The gov module module address
+ "to_address": "cosmos1...",
+ "amount":[{
+ "denom": "stake",
+ "amount": "10"}]
+ }
+ ],
+ "metadata": "AQ==",
+ "deposit": "10stake",
+ "title": "Proposal Title",
+ "summary": "Proposal Summary"
+}
+```
+
+
+By default the metadata, summary and title are each limited to 255 characters. This can be overridden by the application developer.
+
+
+
+When metadata is not specified, the title is limited to 255 characters and the summary 40x the title length.
+
+
+##### submit-legacy-proposal
+
+The `submit-legacy-proposal` command allows users to submit a governance legacy proposal along with an initial deposit.
+
+```bash
+simd tx gov submit-legacy-proposal [command] [flags]
+```
+
+Example:
+
+```bash
+simd tx gov submit-legacy-proposal --title="Test Proposal" --description="testing" --type="Text" --deposit="100000000stake" --from cosmos1..
+```
+
+Example (`param-change`):
+
+```bash
+simd tx gov submit-legacy-proposal param-change proposal.json --from cosmos1..
+```
+
+```json expandable
+{
+ "title": "Test Proposal",
+ "description": "testing, testing, 1, 2, 3",
+ "changes": [
+ {
+ "subspace": "staking",
+ "key": "MaxValidators",
+ "value": 100
+ }
+ ],
+ "deposit": "10000000stake"
+}
+```
+
+##### cancel-proposal
+
+Once a proposal is canceled, `deposits * proposal_cancel_ratio` of the proposal deposits will be burned or sent to the `ProposalCancelDest` address. If `ProposalCancelDest` is empty, the deposits will be burned. The remaining deposits will be sent to the depositors.
+
+```bash
+simd tx gov cancel-proposal [proposal-id] [flags]
+```
+
+Example:
+
+```bash
+simd tx gov cancel-proposal 1 --from cosmos1...
+```
+
+##### vote
+
+The `vote` command allows users to submit a vote for a given governance proposal.
+
+```bash
+simd tx gov vote [command] [flags]
+```
+
+Example:
+
+```bash
+simd tx gov vote 1 yes --from cosmos1..
+```
+
+##### weighted-vote
+
+The `weighted-vote` command allows users to submit a weighted vote for a given governance proposal.
+
+```bash
+simd tx gov weighted-vote [proposal-id] [weighted-options] [flags]
+```
+
+Example:
+
+```bash
+simd tx gov weighted-vote 1 yes=0.5,no=0.5 --from cosmos1..
+```
+
+### gRPC
+
+A user can query the `gov` module using gRPC endpoints.
+
+#### Proposal
+
+The `Proposal` endpoint allows users to query a given proposal.
+
+Using legacy v1beta1:
+
+```bash
+cosmos.gov.v1beta1.Query/Proposal
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"proposal_id":"1"}' \
+ localhost:9090 \
+ cosmos.gov.v1beta1.Query/Proposal
+```
+
+Example Output:
+
+```bash expandable
+{
+ "proposal": {
+ "proposalId": "1",
+ "content": {"@type":"/cosmos.gov.v1beta1.TextProposal","description":"testing, testing, 1, 2, 3","title":"Test Proposal"},
+ "status": "PROPOSAL_STATUS_VOTING_PERIOD",
+ "finalTallyResult": {
+ "yes": "0",
+ "abstain": "0",
+ "no": "0",
+ "noWithVeto": "0"
+ },
+ "submitTime": "2021-09-16T19:40:08.712440474Z",
+ "depositEndTime": "2021-09-18T19:40:08.712440474Z",
+ "totalDeposit": [
+ {
+ "denom": "stake",
+ "amount": "10000000"
+ }
+ ],
+ "votingStartTime": "2021-09-16T19:40:08.712440474Z",
+ "votingEndTime": "2021-09-18T19:40:08.712440474Z",
+ "title": "Test Proposal",
+ "summary": "testing, testing, 1, 2, 3"
+ }
+}
+```
+
+Using v1:
+
+```bash
+cosmos.gov.v1.Query/Proposal
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"proposal_id":"1"}' \
+ localhost:9090 \
+ cosmos.gov.v1.Query/Proposal
+```
+
+Example Output:
+
+```bash expandable
+{
+ "proposal": {
+ "id": "1",
+ "messages": [
+ {"@type":"/cosmos.bank.v1beta1.MsgSend","amount":[{"denom":"stake","amount":"10"}],"fromAddress":"cosmos1..","toAddress":"cosmos1.."}
+ ],
+ "status": "PROPOSAL_STATUS_VOTING_PERIOD",
+ "finalTallyResult": {
+ "yesCount": "0",
+ "abstainCount": "0",
+ "noCount": "0",
+ "noWithVetoCount": "0"
+ },
+ "submitTime": "2022-03-28T11:50:20.819676256Z",
+ "depositEndTime": "2022-03-30T11:50:20.819676256Z",
+ "totalDeposit": [
+ {
+ "denom": "stake",
+ "amount": "10000000"
+ }
+ ],
+ "votingStartTime": "2022-03-28T14:25:26.644857113Z",
+ "votingEndTime": "2022-03-30T14:25:26.644857113Z",
+ "metadata": "AQ==",
+ "title": "Test Proposal",
+ "summary": "testing, testing, 1, 2, 3"
+ }
+}
+```
+
+#### Proposals
+
+The `Proposals` endpoint allows users to query all proposals with optional filters.
+
+Using legacy v1beta1:
+
+```bash
+cosmos.gov.v1beta1.Query/Proposals
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ localhost:9090 \
+ cosmos.gov.v1beta1.Query/Proposals
+```
+
+Example Output:
+
+```bash expandable
+{
+ "proposals": [
+ {
+ "proposalId": "1",
+ "status": "PROPOSAL_STATUS_VOTING_PERIOD",
+ "finalTallyResult": {
+ "yes": "0",
+ "abstain": "0",
+ "no": "0",
+ "noWithVeto": "0"
+ },
+ "submitTime": "2022-03-28T11:50:20.819676256Z",
+ "depositEndTime": "2022-03-30T11:50:20.819676256Z",
+ "totalDeposit": [
+ {
+ "denom": "stake",
+ "amount": "10000000010"
+ }
+ ],
+ "votingStartTime": "2022-03-28T14:25:26.644857113Z",
+ "votingEndTime": "2022-03-30T14:25:26.644857113Z"
+ },
+ {
+ "proposalId": "2",
+ "status": "PROPOSAL_STATUS_DEPOSIT_PERIOD",
+ "finalTallyResult": {
+ "yes": "0",
+ "abstain": "0",
+ "no": "0",
+ "noWithVeto": "0"
+ },
+ "submitTime": "2022-03-28T14:02:41.165025015Z",
+ "depositEndTime": "2022-03-30T14:02:41.165025015Z",
+ "totalDeposit": [
+ {
+ "denom": "stake",
+ "amount": "10"
+ }
+ ],
+ "votingStartTime": "0001-01-01T00:00:00Z",
+ "votingEndTime": "0001-01-01T00:00:00Z"
+ }
+ ],
+ "pagination": {
+ "total": "2"
+ }
+}
+```
+
+Using v1:
+
+```bash
+cosmos.gov.v1.Query/Proposals
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ localhost:9090 \
+ cosmos.gov.v1.Query/Proposals
+```
+
+Example Output:
+
+```bash expandable
+{
+ "proposals": [
+ {
+ "id": "1",
+ "messages": [
+ {"@type":"/cosmos.bank.v1beta1.MsgSend","amount":[{"denom":"stake","amount":"10"}],"fromAddress":"cosmos1..","toAddress":"cosmos1.."}
+ ],
+ "status": "PROPOSAL_STATUS_VOTING_PERIOD",
+ "finalTallyResult": {
+ "yesCount": "0",
+ "abstainCount": "0",
+ "noCount": "0",
+ "noWithVetoCount": "0"
+ },
+ "submitTime": "2022-03-28T11:50:20.819676256Z",
+ "depositEndTime": "2022-03-30T11:50:20.819676256Z",
+ "totalDeposit": [
+ {
+ "denom": "stake",
+ "amount": "10000000010"
+ }
+ ],
+ "votingStartTime": "2022-03-28T14:25:26.644857113Z",
+ "votingEndTime": "2022-03-30T14:25:26.644857113Z",
+ "metadata": "AQ==",
+ "title": "Proposal Title",
+ "summary": "Proposal Summary"
+ },
+ {
+ "id": "2",
+ "messages": [
+ {"@type":"/cosmos.bank.v1beta1.MsgSend","amount":[{"denom":"stake","amount":"10"}],"fromAddress":"cosmos1..","toAddress":"cosmos1.."}
+ ],
+ "status": "PROPOSAL_STATUS_DEPOSIT_PERIOD",
+ "finalTallyResult": {
+ "yesCount": "0",
+ "abstainCount": "0",
+ "noCount": "0",
+ "noWithVetoCount": "0"
+ },
+ "submitTime": "2022-03-28T14:02:41.165025015Z",
+ "depositEndTime": "2022-03-30T14:02:41.165025015Z",
+ "totalDeposit": [
+ {
+ "denom": "stake",
+ "amount": "10"
+ }
+ ],
+ "metadata": "AQ==",
+ "title": "Proposal Title",
+ "summary": "Proposal Summary"
+ }
+ ],
+ "pagination": {
+ "total": "2"
+ }
+}
+```
+
+#### Vote
+
+The `Vote` endpoint allows users to query a vote for a given proposal.
+
+Using legacy v1beta1:
+
+```bash
+cosmos.gov.v1beta1.Query/Vote
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"proposal_id":"1","voter":"cosmos1.."}' \
+ localhost:9090 \
+ cosmos.gov.v1beta1.Query/Vote
+```
+
+Example Output:
+
+```bash expandable
+{
+ "vote": {
+ "proposalId": "1",
+ "voter": "cosmos1..",
+ "option": "VOTE_OPTION_YES",
+ "options": [
+ {
+ "option": "VOTE_OPTION_YES",
+ "weight": "1000000000000000000"
+ }
+ ]
+ }
+}
+```
+
+Using v1:
+
+```bash
+cosmos.gov.v1.Query/Vote
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"proposal_id":"1","voter":"cosmos1.."}' \
+ localhost:9090 \
+ cosmos.gov.v1.Query/Vote
+```
+
+Example Output:
+
+```bash expandable
+{
+ "vote": {
+ "proposalId": "1",
+ "voter": "cosmos1..",
+ "option": "VOTE_OPTION_YES",
+ "options": [
+ {
+ "option": "VOTE_OPTION_YES",
+ "weight": "1.000000000000000000"
+ }
+ ]
+ }
+}
+```
+
+#### Votes
+
+The `Votes` endpoint allows users to query all votes for a given proposal.
+
+Using legacy v1beta1:
+
+```bash
+cosmos.gov.v1beta1.Query/Votes
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"proposal_id":"1"}' \
+ localhost:9090 \
+ cosmos.gov.v1beta1.Query/Votes
+```
+
+Example Output:
+
+```bash expandable
+{
+ "votes": [
+ {
+ "proposalId": "1",
+ "voter": "cosmos1..",
+ "options": [
+ {
+ "option": "VOTE_OPTION_YES",
+ "weight": "1000000000000000000"
+ }
+ ]
+ }
+ ],
+ "pagination": {
+ "total": "1"
+ }
+}
+```
+
+Using v1:
+
+```bash
+cosmos.gov.v1.Query/Votes
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"proposal_id":"1"}' \
+ localhost:9090 \
+ cosmos.gov.v1.Query/Votes
+```
+
+Example Output:
+
+```bash expandable
+{
+ "votes": [
+ {
+ "proposalId": "1",
+ "voter": "cosmos1..",
+ "options": [
+ {
+ "option": "VOTE_OPTION_YES",
+ "weight": "1.000000000000000000"
+ }
+ ]
+ }
+ ],
+ "pagination": {
+ "total": "1"
+ }
+}
+```
+
+#### Params
+
+The `Params` endpoint allows users to query all parameters for the `gov` module.
+
+{/* TODO: #10197 Querying governance params outputs nil values */}
+
+Using legacy v1beta1:
+
+```bash
+cosmos.gov.v1beta1.Query/Params
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"params_type":"voting"}' \
+ localhost:9090 \
+ cosmos.gov.v1beta1.Query/Params
+```
+
+Example Output:
+
+```bash expandable
+{
+ "votingParams": {
+ "votingPeriod": "172800s"
+ },
+ "depositParams": {
+ "maxDepositPeriod": "0s"
+ },
+ "tallyParams": {
+ "quorum": "MA==",
+ "threshold": "MA==",
+ "vetoThreshold": "MA=="
+ }
+}
+```
+
+Using v1:
+
+```bash
+cosmos.gov.v1.Query/Params
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"params_type":"voting"}' \
+ localhost:9090 \
+ cosmos.gov.v1.Query/Params
+```
+
+Example Output:
+
+```bash
+{
+ "votingParams": {
+ "votingPeriod": "172800s"
+ }
+}
+```
+
+#### Deposit
+
+The `Deposit` endpoint allows users to query a deposit for a given proposal from a given depositor.
+
+Using legacy v1beta1:
+
+```bash
+cosmos.gov.v1beta1.Query/Deposit
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+  -d '{"proposal_id":"1","depositor":"cosmos1.."}' \
+ localhost:9090 \
+ cosmos.gov.v1beta1.Query/Deposit
+```
+
+Example Output:
+
+```bash expandable
+{
+ "deposit": {
+ "proposalId": "1",
+ "depositor": "cosmos1..",
+ "amount": [
+ {
+ "denom": "stake",
+ "amount": "10000000"
+ }
+ ]
+ }
+}
+```
+
+Using v1:
+
+```bash
+cosmos.gov.v1.Query/Deposit
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+  -d '{"proposal_id":"1","depositor":"cosmos1.."}' \
+ localhost:9090 \
+ cosmos.gov.v1.Query/Deposit
+```
+
+Example Output:
+
+```bash expandable
+{
+ "deposit": {
+ "proposalId": "1",
+ "depositor": "cosmos1..",
+ "amount": [
+ {
+ "denom": "stake",
+ "amount": "10000000"
+ }
+ ]
+ }
+}
+```
+
+#### Deposits
+
+The `Deposits` endpoint allows users to query all deposits for a given proposal.
+
+Using legacy v1beta1:
+
+```bash
+cosmos.gov.v1beta1.Query/Deposits
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"proposal_id":"1"}' \
+ localhost:9090 \
+ cosmos.gov.v1beta1.Query/Deposits
+```
+
+Example Output:
+
+```bash expandable
+{
+ "deposits": [
+ {
+ "proposalId": "1",
+ "depositor": "cosmos1..",
+ "amount": [
+ {
+ "denom": "stake",
+ "amount": "10000000"
+ }
+ ]
+ }
+ ],
+ "pagination": {
+ "total": "1"
+ }
+}
+```
+
+Using v1:
+
+```bash
+cosmos.gov.v1.Query/Deposits
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"proposal_id":"1"}' \
+ localhost:9090 \
+ cosmos.gov.v1.Query/Deposits
+```
+
+Example Output:
+
+```bash expandable
+{
+ "deposits": [
+ {
+ "proposalId": "1",
+ "depositor": "cosmos1..",
+ "amount": [
+ {
+ "denom": "stake",
+ "amount": "10000000"
+ }
+ ]
+ }
+ ],
+ "pagination": {
+ "total": "1"
+ }
+}
+```
+
+#### TallyResult
+
+The `TallyResult` endpoint allows users to query the tally of a given proposal.
+
+Using legacy v1beta1:
+
+```bash
+cosmos.gov.v1beta1.Query/TallyResult
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"proposal_id":"1"}' \
+ localhost:9090 \
+ cosmos.gov.v1beta1.Query/TallyResult
+```
+
+Example Output:
+
+```bash
+{
+ "tally": {
+ "yes": "1000000",
+ "abstain": "0",
+ "no": "0",
+ "noWithVeto": "0"
+ }
+}
+```
+
+Using v1:
+
+```bash
+cosmos.gov.v1.Query/TallyResult
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"proposal_id":"1"}' \
+ localhost:9090 \
+ cosmos.gov.v1.Query/TallyResult
+```
+
+Example Output:
+
+```bash
+{
+ "tally": {
+ "yes": "1000000",
+ "abstain": "0",
+ "no": "0",
+ "noWithVeto": "0"
+ }
+}
+```
+
+### REST
+
+A user can query the `gov` module using REST endpoints.
+
+#### proposal
+
+The `proposals` endpoint allows users to query a given proposal.
+
+Using legacy v1beta1:
+
+```bash
+/cosmos/gov/v1beta1/proposals/{proposal_id}
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/gov/v1beta1/proposals/1
+```
+
+Example Output:
+
+```bash expandable
+{
+ "proposal": {
+ "proposal_id": "1",
+ "content": null,
+ "status": "PROPOSAL_STATUS_VOTING_PERIOD",
+ "final_tally_result": {
+ "yes": "0",
+ "abstain": "0",
+ "no": "0",
+ "no_with_veto": "0"
+ },
+ "submit_time": "2022-03-28T11:50:20.819676256Z",
+ "deposit_end_time": "2022-03-30T11:50:20.819676256Z",
+ "total_deposit": [
+ {
+ "denom": "stake",
+ "amount": "10000000010"
+ }
+ ],
+ "voting_start_time": "2022-03-28T14:25:26.644857113Z",
+ "voting_end_time": "2022-03-30T14:25:26.644857113Z"
+ }
+}
+```
+
+Using v1:
+
+```bash
+/cosmos/gov/v1/proposals/{proposal_id}
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/gov/v1/proposals/1
+```
+
+Example Output:
+
+```bash expandable
+{
+ "proposal": {
+ "id": "1",
+ "messages": [
+ {
+ "@type": "/cosmos.bank.v1beta1.MsgSend",
+ "from_address": "cosmos1..",
+ "to_address": "cosmos1..",
+ "amount": [
+ {
+ "denom": "stake",
+ "amount": "10"
+ }
+ ]
+ }
+ ],
+ "status": "PROPOSAL_STATUS_VOTING_PERIOD",
+ "final_tally_result": {
+ "yes_count": "0",
+ "abstain_count": "0",
+ "no_count": "0",
+ "no_with_veto_count": "0"
+ },
+ "submit_time": "2022-03-28T11:50:20.819676256Z",
+ "deposit_end_time": "2022-03-30T11:50:20.819676256Z",
+ "total_deposit": [
+ {
+ "denom": "stake",
+ "amount": "10000000"
+ }
+ ],
+ "voting_start_time": "2022-03-28T14:25:26.644857113Z",
+ "voting_end_time": "2022-03-30T14:25:26.644857113Z",
+ "metadata": "AQ==",
+ "title": "Proposal Title",
+ "summary": "Proposal Summary"
+ }
+}
+```
+
+#### proposals
+
+The `proposals` endpoint also allows users to query all proposals with optional filters.
+
+Using legacy v1beta1:
+
+```bash
+/cosmos/gov/v1beta1/proposals
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/gov/v1beta1/proposals
+```
+
+Example Output:
+
+```bash expandable
+{
+ "proposals": [
+ {
+ "proposal_id": "1",
+ "content": null,
+ "status": "PROPOSAL_STATUS_VOTING_PERIOD",
+ "final_tally_result": {
+ "yes": "0",
+ "abstain": "0",
+ "no": "0",
+ "no_with_veto": "0"
+ },
+ "submit_time": "2022-03-28T11:50:20.819676256Z",
+ "deposit_end_time": "2022-03-30T11:50:20.819676256Z",
+ "total_deposit": [
+ {
+ "denom": "stake",
+ "amount": "10000000"
+ }
+ ],
+ "voting_start_time": "2022-03-28T14:25:26.644857113Z",
+ "voting_end_time": "2022-03-30T14:25:26.644857113Z"
+ },
+ {
+ "proposal_id": "2",
+ "content": null,
+ "status": "PROPOSAL_STATUS_DEPOSIT_PERIOD",
+ "final_tally_result": {
+ "yes": "0",
+ "abstain": "0",
+ "no": "0",
+ "no_with_veto": "0"
+ },
+ "submit_time": "2022-03-28T14:02:41.165025015Z",
+ "deposit_end_time": "2022-03-30T14:02:41.165025015Z",
+ "total_deposit": [
+ {
+ "denom": "stake",
+ "amount": "10"
+ }
+ ],
+ "voting_start_time": "0001-01-01T00:00:00Z",
+ "voting_end_time": "0001-01-01T00:00:00Z"
+ }
+ ],
+ "pagination": {
+ "next_key": null,
+ "total": "2"
+ }
+}
+```
+
+Using v1:
+
+```bash
+/cosmos/gov/v1/proposals
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/gov/v1/proposals
+```
+
+Example Output:
+
+```bash expandable
+{
+ "proposals": [
+ {
+ "id": "1",
+ "messages": [
+ {
+ "@type": "/cosmos.bank.v1beta1.MsgSend",
+ "from_address": "cosmos1..",
+ "to_address": "cosmos1..",
+ "amount": [
+ {
+ "denom": "stake",
+ "amount": "10"
+ }
+ ]
+ }
+ ],
+ "status": "PROPOSAL_STATUS_VOTING_PERIOD",
+ "final_tally_result": {
+ "yes_count": "0",
+ "abstain_count": "0",
+ "no_count": "0",
+ "no_with_veto_count": "0"
+ },
+ "submit_time": "2022-03-28T11:50:20.819676256Z",
+ "deposit_end_time": "2022-03-30T11:50:20.819676256Z",
+ "total_deposit": [
+ {
+ "denom": "stake",
+ "amount": "10000000010"
+ }
+ ],
+ "voting_start_time": "2022-03-28T14:25:26.644857113Z",
+ "voting_end_time": "2022-03-30T14:25:26.644857113Z",
+ "metadata": "AQ==",
+ "title": "Proposal Title",
+ "summary": "Proposal Summary"
+ },
+ {
+ "id": "2",
+ "messages": [
+ {
+ "@type": "/cosmos.bank.v1beta1.MsgSend",
+ "from_address": "cosmos1..",
+ "to_address": "cosmos1..",
+ "amount": [
+ {
+ "denom": "stake",
+ "amount": "10"
+ }
+ ]
+ }
+ ],
+ "status": "PROPOSAL_STATUS_DEPOSIT_PERIOD",
+ "final_tally_result": {
+ "yes_count": "0",
+ "abstain_count": "0",
+ "no_count": "0",
+ "no_with_veto_count": "0"
+ },
+ "submit_time": "2022-03-28T14:02:41.165025015Z",
+ "deposit_end_time": "2022-03-30T14:02:41.165025015Z",
+ "total_deposit": [
+ {
+ "denom": "stake",
+ "amount": "10"
+ }
+ ],
+ "voting_start_time": null,
+ "voting_end_time": null,
+ "metadata": "AQ==",
+ "title": "Proposal Title",
+ "summary": "Proposal Summary"
+ }
+ ],
+ "pagination": {
+ "next_key": null,
+ "total": "2"
+ }
+}
+```
+
+#### voter vote
+
+The `votes` endpoint allows users to query a vote for a given proposal.
+
+Using legacy v1beta1:
+
+```bash
+/cosmos/gov/v1beta1/proposals/{proposal_id}/votes/{voter}
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/gov/v1beta1/proposals/1/votes/cosmos1..
+```
+
+Example Output:
+
+```bash expandable
+{
+ "vote": {
+ "proposal_id": "1",
+ "voter": "cosmos1..",
+ "option": "VOTE_OPTION_YES",
+ "options": [
+ {
+ "option": "VOTE_OPTION_YES",
+ "weight": "1.000000000000000000"
+ }
+ ]
+ }
+}
+```
+
+Using v1:
+
+```bash
+/cosmos/gov/v1/proposals/{proposal_id}/votes/{voter}
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/gov/v1/proposals/1/votes/cosmos1..
+```
+
+Example Output:
+
+```bash expandable
+{
+ "vote": {
+ "proposal_id": "1",
+ "voter": "cosmos1..",
+ "options": [
+ {
+ "option": "VOTE_OPTION_YES",
+ "weight": "1.000000000000000000"
+ }
+ ],
+ "metadata": ""
+ }
+}
+```
+
+#### votes
+
+The `votes` endpoint allows users to query all votes for a given proposal.
+
+Using legacy v1beta1:
+
+```bash
+/cosmos/gov/v1beta1/proposals/{proposal_id}/votes
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/gov/v1beta1/proposals/1/votes
+```
+
+Example Output:
+
+```bash expandable
+{
+ "votes": [
+ {
+ "proposal_id": "1",
+ "voter": "cosmos1..",
+ "option": "VOTE_OPTION_YES",
+ "options": [
+ {
+ "option": "VOTE_OPTION_YES",
+ "weight": "1.000000000000000000"
+ }
+ ]
+ }
+ ],
+ "pagination": {
+ "next_key": null,
+ "total": "1"
+ }
+}
+```
+
+Using v1:
+
+```bash
+/cosmos/gov/v1/proposals/{proposal_id}/votes
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/gov/v1/proposals/1/votes
+```
+
+Example Output:
+
+```bash expandable
+{
+ "votes": [
+ {
+ "proposal_id": "1",
+ "voter": "cosmos1..",
+ "options": [
+ {
+ "option": "VOTE_OPTION_YES",
+ "weight": "1.000000000000000000"
+ }
+ ],
+ "metadata": ""
+ }
+ ],
+ "pagination": {
+ "next_key": null,
+ "total": "1"
+ }
+}
+```
+
+#### params
+
+The `params` endpoint allows users to query all parameters for the `gov` module.
+
+{/* TODO: #10197 Querying governance params outputs nil values */}
+
+Using legacy v1beta1:
+
+```bash
+/cosmos/gov/v1beta1/params/{params_type}
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/gov/v1beta1/params/voting
+```
+
+Example Output:
+
+```bash expandable
+{
+ "voting_params": {
+ "voting_period": "172800s"
+ },
+ "deposit_params": {
+ "min_deposit": [
+ ],
+ "max_deposit_period": "0s"
+ },
+ "tally_params": {
+ "quorum": "0.000000000000000000",
+ "threshold": "0.000000000000000000",
+ "veto_threshold": "0.000000000000000000"
+ }
+}
+```
+
+Using v1:
+
+```bash
+/cosmos/gov/v1/params/{params_type}
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/gov/v1/params/voting
+```
+
+Example Output:
+
+```bash expandable
+{
+ "voting_params": {
+ "voting_period": "172800s"
+ },
+ "deposit_params": {
+ "min_deposit": [
+ ],
+ "max_deposit_period": "0s"
+ },
+ "tally_params": {
+ "quorum": "0.000000000000000000",
+ "threshold": "0.000000000000000000",
+ "veto_threshold": "0.000000000000000000"
+ }
+}
+```
+
+#### deposits
+
+The `deposits` endpoint allows users to query a deposit for a given proposal from a given depositor.
+
+Using legacy v1beta1:
+
+```bash
+/cosmos/gov/v1beta1/proposals/{proposal_id}/deposits/{depositor}
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/gov/v1beta1/proposals/1/deposits/cosmos1..
+```
+
+Example Output:
+
+```bash expandable
+{
+ "deposit": {
+ "proposal_id": "1",
+ "depositor": "cosmos1..",
+ "amount": [
+ {
+ "denom": "stake",
+ "amount": "10000000"
+ }
+ ]
+ }
+}
+```
+
+Using v1:
+
+```bash
+/cosmos/gov/v1/proposals/{proposal_id}/deposits/{depositor}
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/gov/v1/proposals/1/deposits/cosmos1..
+```
+
+Example Output:
+
+```bash expandable
+{
+ "deposit": {
+ "proposal_id": "1",
+ "depositor": "cosmos1..",
+ "amount": [
+ {
+ "denom": "stake",
+ "amount": "10000000"
+ }
+ ]
+ }
+}
+```
+
+#### proposal deposits
+
+The `deposits` endpoint allows users to query all deposits for a given proposal.
+
+Using legacy v1beta1:
+
+```bash
+/cosmos/gov/v1beta1/proposals/{proposal_id}/deposits
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/gov/v1beta1/proposals/1/deposits
+```
+
+Example Output:
+
+```bash expandable
+{
+ "deposits": [
+ {
+ "proposal_id": "1",
+ "depositor": "cosmos1..",
+ "amount": [
+ {
+ "denom": "stake",
+ "amount": "10000000"
+ }
+ ]
+ }
+ ],
+ "pagination": {
+ "next_key": null,
+ "total": "1"
+ }
+}
+```
+
+Using v1:
+
+```bash
+/cosmos/gov/v1/proposals/{proposal_id}/deposits
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/gov/v1/proposals/1/deposits
+```
+
+Example Output:
+
+```bash expandable
+{
+ "deposits": [
+ {
+ "proposal_id": "1",
+ "depositor": "cosmos1..",
+ "amount": [
+ {
+ "denom": "stake",
+ "amount": "10000000"
+ }
+ ]
+ }
+ ],
+ "pagination": {
+ "next_key": null,
+ "total": "1"
+ }
+}
+```
+
+#### tally
+
+The `tally` endpoint allows users to query the tally of a given proposal.
+
+Using legacy v1beta1:
+
+```bash
+/cosmos/gov/v1beta1/proposals/{proposal_id}/tally
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/gov/v1beta1/proposals/1/tally
+```
+
+Example Output:
+
+```bash
+{
+ "tally": {
+ "yes": "1000000",
+ "abstain": "0",
+ "no": "0",
+ "no_with_veto": "0"
+ }
+}
+```
+
+Using v1:
+
+```bash
+/cosmos/gov/v1/proposals/{proposal_id}/tally
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/gov/v1/proposals/1/tally
+```
+
+Example Output:
+
+```bash
+{
+ "tally": {
+ "yes": "1000000",
+ "abstain": "0",
+ "no": "0",
+ "no_with_veto": "0"
+ }
+}
+```
+
+## Metadata
+
+The gov module has two locations for metadata where users can provide further context about the on-chain actions they are taking. By default all metadata fields have a 255 character length field where metadata can be stored in json format, either on-chain or off-chain depending on the amount of data required. The following is a recommendation for the json structure and where the data should be stored. There are two important factors in making these recommendations. First, that the gov and group modules are consistent with one another, note the number of proposals made by all groups may be quite large. Second, that client applications such as block explorers and governance interfaces have confidence in the consistency of metadata structure across chains.
+
+### Proposal
+
+Location: off-chain as json object stored on IPFS (mirrors [group proposal](/sdk/v0.53/build/modules/group/README#metadata))
+
+```json
+{
+ "title": "",
+ "authors": [""],
+ "summary": "",
+ "details": "",
+ "proposal_forum_url": "",
+ "vote_option_context": "",
+}
+```
+
+
+The `authors` field is an array of strings, this is to allow for multiple authors to be listed in the metadata.
+In v0.46, the `authors` field is a comma-separated string. Frontends are encouraged to support both formats for backwards compatibility.
+
+
+### Vote
+
+Location: on-chain as json within 255 character limit (mirrors [group vote](/sdk/v0.53/build/modules/group/README#metadata))
+
+```json
+{
+ "justification": "",
+}
+```
+
+## Future Improvements
+
+The current documentation only describes the minimum viable product for the
+governance module. Future improvements may include:
+
+* **`BountyProposals`:** If accepted, a `BountyProposal` creates an open
+ bounty. The `BountyProposal` specifies how many Atoms will be given upon
+ completion. These Atoms will be taken from the `reserve pool`. After a
+ `BountyProposal` is accepted by governance, anybody can submit a
+ `SoftwareUpgradeProposal` with the code to claim the bounty. Note that once a
+ `BountyProposal` is accepted, the corresponding funds in the `reserve pool`
+ are locked so that payment can always be honored. In order to link a
+ `SoftwareUpgradeProposal` to an open bounty, the submitter of the
+ `SoftwareUpgradeProposal` will use the `Proposal.LinkedProposal` attribute.
+ If a `SoftwareUpgradeProposal` linked to an open bounty is accepted by
+ governance, the funds that were reserved are automatically transferred to the
+ submitter.
+* **Complex delegation:** Delegators could choose other representatives than
+ their validators. Ultimately, the chain of representatives would always end
+ up to a validator, but delegators could inherit the vote of their chosen
+ representative before they inherit the vote of their validator. In other
+ words, they would only inherit the vote of their validator if their other
+ appointed representative did not vote.
+* **Better process for proposal review:** There would be two parts to
+  `proposal.Deposit`, one for anti-spam (same as in MVP) and another to
+ reward third party auditors.
diff --git a/sdk/next/build/modules/group/README.mdx b/sdk/next/build/modules/group/README.mdx
new file mode 100644
index 000000000..d8e5d673d
--- /dev/null
+++ b/sdk/next/build/modules/group/README.mdx
@@ -0,0 +1,2168 @@
+---
+title: 'x/group'
+description: The following documents specify the group module.
+---
+
+
+This module has been moved to [contrib/x/group](https://github.com/cosmos/cosmos-sdk/tree/main/contrib/x/group) and is no longer actively maintained. For continued maintenance, users should fork the module or refer to the [Cosmos Legacy repository](https://github.com/cosmos/cosmos-legacy).
+
+
+## Abstract
+
+The following documents specify the group module.
+
+This module allows the creation and management of on-chain multisig accounts and enables voting for message execution based on configurable decision policies.
+
+## Contents
+
+* [Concepts](#concepts)
+ * [Group](#group)
+ * [Group Policy](#group-policy)
+ * [Decision Policy](#decision-policy)
+ * [Proposal](#proposal)
+ * [Pruning](#pruning)
+* [State](#state)
+ * [Group Table](#group-table)
+ * [Group Member Table](#group-member-table)
+ * [Group Policy Table](#group-policy-table)
+ * [Proposal Table](#proposal-table)
+ * [Vote Table](#vote-table)
+* [Msg Service](#msg-service)
+ * [Msg/CreateGroup](#msgcreategroup)
+ * [Msg/UpdateGroupMembers](#msgupdategroupmembers)
+ * [Msg/UpdateGroupAdmin](#msgupdategroupadmin)
+ * [Msg/UpdateGroupMetadata](#msgupdategroupmetadata)
+ * [Msg/CreateGroupPolicy](#msgcreategrouppolicy)
+ * [Msg/CreateGroupWithPolicy](#msgcreategroupwithpolicy)
+ * [Msg/UpdateGroupPolicyAdmin](#msgupdategrouppolicyadmin)
+ * [Msg/UpdateGroupPolicyDecisionPolicy](#msgupdategrouppolicydecisionpolicy)
+ * [Msg/UpdateGroupPolicyMetadata](#msgupdategrouppolicymetadata)
+ * [Msg/SubmitProposal](#msgsubmitproposal)
+ * [Msg/WithdrawProposal](#msgwithdrawproposal)
+ * [Msg/Vote](#msgvote)
+ * [Msg/Exec](#msgexec)
+ * [Msg/LeaveGroup](#msgleavegroup)
+* [Events](#events)
+ * [EventCreateGroup](#eventcreategroup)
+ * [EventUpdateGroup](#eventupdategroup)
+ * [EventCreateGroupPolicy](#eventcreategrouppolicy)
+ * [EventUpdateGroupPolicy](#eventupdategrouppolicy)
+ * [EventCreateProposal](#eventcreateproposal)
+ * [EventWithdrawProposal](#eventwithdrawproposal)
+ * [EventVote](#eventvote)
+ * [EventExec](#eventexec)
+ * [EventLeaveGroup](#eventleavegroup)
+ * [EventProposalPruned](#eventproposalpruned)
+* [Client](#client)
+ * [CLI](#cli)
+ * [gRPC](#grpc)
+ * [REST](#rest)
+* [Metadata](#metadata)
+
+## Concepts
+
+### Group
+
+A group is simply an aggregation of accounts with associated weights. It is not
+an account and doesn't have a balance. It doesn't in and of itself have any
+sort of voting or decision weight. It does have an "administrator" which has
+the ability to add, remove and update members in the group. Note that a
+group policy account could be an administrator of a group, and that the
+administrator doesn't necessarily have to be a member of the group.
+
+### Group Policy
+
+A group policy is an account associated with a group and a decision policy.
+Group policies are abstracted from groups because a single group may have
+multiple decision policies for different types of actions. Managing group
+membership separately from decision policies results in the least overhead
+and keeps membership consistent across different policies. The pattern that
+is recommended is to have a single master group policy for a given group,
+and then to create separate group policies with different decision policies
+and delegate the desired permissions from the master account to
+those "sub-accounts" using the `x/authz` module.
+
+### Decision Policy
+
+A decision policy is the mechanism by which members of a group can vote on
+proposals, as well as the rules that dictate whether a proposal should pass
+or not based on its tally outcome.
+
+All decision policies generally would have a minimum execution period and a
+maximum voting window. The minimum execution period is the minimum amount of time
+that must pass after submission in order for a proposal to potentially be executed, and it may
+be set to 0. The maximum voting window is the maximum time after submission that a proposal may
+be voted on before it is tallied.
+
+The chain developer also defines an app-wide maximum execution period, which is
+the maximum amount of time after a proposal's voting period end where users are
+allowed to execute a proposal.
+
+The current group module comes shipped with two decision policies: threshold
+and percentage. Any chain developer can extend upon these two, by creating
+custom decision policies, as long as they adhere to the `DecisionPolicy`
+interface:
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/x/group/types.go#L27-L45
+```
+
+#### Threshold decision policy
+
+A threshold decision policy defines a threshold of yes votes (based on a tally
+of voter weights) that must be achieved in order for a proposal to pass. For
+this decision policy, abstain and veto are simply treated as no's.
+
+This decision policy also has a VotingPeriod window and a MinExecutionPeriod
+window. The former defines the duration after proposal submission where members
+are allowed to vote, after which tallying is performed. The latter specifies
+the minimum duration after proposal submission where the proposal can be
+executed. If set to 0, then the proposal is allowed to be executed immediately
+on submission (using the `TRY_EXEC` option). Obviously, MinExecutionPeriod
+cannot be greater than VotingPeriod+MaxExecutionPeriod (where MaxExecution is
+the app-defined duration that specifies the window after voting ended where a
+proposal can be executed).
+
+#### Percentage decision policy
+
+A percentage decision policy is similar to a threshold decision policy, except
+that the threshold is not defined as a constant weight, but as a percentage.
+It's more suited for groups where the group members' weights can be updated, as
+the percentage threshold stays the same, and doesn't depend on how those member
+weights get updated.
+
+Same as the Threshold decision policy, the percentage decision policy has the
+two VotingPeriod and MinExecutionPeriod parameters.
+
+### Proposal
+
+Any member(s) of a group can submit a proposal for a group policy account to decide upon.
+A proposal consists of a set of messages that will be executed if the proposal
+passes as well as any metadata associated with the proposal.
+
+#### Voting
+
+There are four choices to choose while voting - yes, no, abstain and veto. Not
+all decision policies will take the four choices into account. Votes can contain some optional metadata.
+In the current implementation, the voting window begins as soon as a proposal
+is submitted, and the end is defined by the group policy's decision policy.
+
+#### Withdrawing Proposals
+
+Proposals can be withdrawn any time before the voting period end, either by the
+admin of the group policy or by one of the proposers. Once withdrawn, it is
+marked as `PROPOSAL_STATUS_WITHDRAWN`, and no more voting or execution is
+allowed on it.
+
+#### Aborted Proposals
+
+If the group policy is updated during the voting period of the proposal, then
+the proposal is marked as `PROPOSAL_STATUS_ABORTED`, and no more voting or
+execution is allowed on it. This is because the group policy defines the rules
+of proposal voting and execution, so if those rules change during the lifecycle
+of a proposal, then the proposal should be marked as stale.
+
+#### Tallying
+
+Tallying is the counting of all votes on a proposal. It happens only once in
+the lifecycle of a proposal, but can be triggered by two factors, whichever
+happens first:
+
+* either someone tries to execute the proposal (see next section), which can
+ happen on a `Msg/Exec` transaction, or a `Msg/{SubmitProposal,Vote}`
+ transaction with the `Exec` field set. When a proposal execution is attempted,
+ a tally is done first to make sure the proposal passes.
+* or on `EndBlock` when the proposal's voting period end just passed.
+
+If the tally result passes the decision policy's rules, then the proposal is
+marked as `PROPOSAL_STATUS_ACCEPTED`, or else it is marked as
+`PROPOSAL_STATUS_REJECTED`. In any case, no more voting is allowed anymore, and the tally
+result is persisted to state in the proposal's `FinalTallyResult`.
+
+#### Executing Proposals
+
+Proposals are executed only when the tallying is done, and the group account's
+decision policy allows the proposal to pass based on the tally outcome. They
+are marked by the status `PROPOSAL_STATUS_ACCEPTED`. Execution must happen
+before a duration of `MaxExecutionPeriod` (set by the chain developer) after
+each proposal's voting period end.
+
+Proposals will not be automatically executed by the chain in this current design,
+but rather a user must submit a `Msg/Exec` transaction to attempt to execute the
+proposal based on the current votes and decision policy. Any user (not only the
+group members) can execute proposals that have been accepted, and execution fees are
+paid by the proposal executor.
+It's also possible to try to execute a proposal immediately on creation or on
+new votes using the `Exec` field of `Msg/SubmitProposal` and `Msg/Vote` requests.
+In the former case, proposers signatures are considered as yes votes.
+In these cases, if the proposal can't be executed (i.e. it didn't pass the
+decision policy's rules), it will still be opened for new votes and
+could be tallied and executed later on.
+
+A successful proposal execution will have its `ExecutorResult` marked as
+`PROPOSAL_EXECUTOR_RESULT_SUCCESS`. The proposal will be automatically pruned
+after execution. On the other hand, a failed proposal execution will be marked
+as `PROPOSAL_EXECUTOR_RESULT_FAILURE`. Such a proposal can be re-executed
+multiple times, until it expires after `MaxExecutionPeriod` after voting period
+end.
+
+### Pruning
+
+Proposals and votes are automatically pruned to avoid state bloat.
+
+Votes are pruned:
+
+* either after a successful tally, i.e. a tally whose result passes the decision
+  policy's rules, which can be triggered by a `Msg/Exec` or a
+ `Msg/{SubmitProposal,Vote}` with the `Exec` field set,
+* or on `EndBlock` right after the proposal's voting period end. This applies to proposals with status `aborted` or `withdrawn` too.
+
+whichever happens first.
+
+Proposals are pruned:
+
+* on `EndBlock` whose proposal status is `withdrawn` or `aborted` on proposal's voting period end before tallying,
+* and either after a successful proposal execution,
+* or on `EndBlock` right after the proposal's `voting_period_end` +
+ `max_execution_period` (defined as an app-wide configuration) is passed,
+
+whichever happens first.
+
+## State
+
+The `group` module uses the `orm` package which provides table storage with support for
+primary keys and secondary indexes. `orm` also defines `Sequence` which is a persistent unique key generator based on a counter that can be used along with `Table`s.
+
+Here's the list of tables and associated sequences and indexes stored as part of the `group` module.
+
+### Group Table
+
+The `groupTable` stores `GroupInfo`: `0x0 | BigEndian(GroupId) -> ProtocolBuffer(GroupInfo)`.
+
+#### groupSeq
+
+The value of `groupSeq` is incremented when creating a new group and corresponds to the new `GroupId`: `0x1 | 0x1 -> BigEndian`.
+
+The second `0x1` corresponds to the ORM `sequenceStorageKey`.
+
+#### groupByAdminIndex
+
+`groupByAdminIndex` allows to retrieve groups by admin address:
+`0x2 | len([]byte(group.Admin)) | []byte(group.Admin) | BigEndian(GroupId) -> []byte()`.
+
+### Group Member Table
+
+The `groupMemberTable` stores `GroupMember`s: `0x10 | BigEndian(GroupId) | []byte(member.Address) -> ProtocolBuffer(GroupMember)`.
+
+The `groupMemberTable` is a primary key table and its `PrimaryKey` is given by
+`BigEndian(GroupId) | []byte(member.Address)` which is used by the following indexes.
+
+#### groupMemberByGroupIndex
+
+`groupMemberByGroupIndex` allows to retrieve group members by group id:
+`0x11 | BigEndian(GroupId) | PrimaryKey -> []byte()`.
+
+#### groupMemberByMemberIndex
+
+`groupMemberByMemberIndex` allows to retrieve group members by member address:
+`0x12 | len([]byte(member.Address)) | []byte(member.Address) | PrimaryKey -> []byte()`.
+
+### Group Policy Table
+
+The `groupPolicyTable` stores `GroupPolicyInfo`: `0x20 | len([]byte(Address)) | []byte(Address) -> ProtocolBuffer(GroupPolicyInfo)`.
+
+The `groupPolicyTable` is a primary key table and its `PrimaryKey` is given by
+`len([]byte(Address)) | []byte(Address)` which is used by the following indexes.
+
+#### groupPolicySeq
+
+The value of `groupPolicySeq` is incremented when creating a new group policy and is used to generate the new group policy account `Address`:
+`0x21 | 0x1 -> BigEndian`.
+
+The second `0x1` corresponds to the ORM `sequenceStorageKey`.
+
+#### groupPolicyByGroupIndex
+
+`groupPolicyByGroupIndex` allows to retrieve group policies by group id:
+`0x22 | BigEndian(GroupId) | PrimaryKey -> []byte()`.
+
+#### groupPolicyByAdminIndex
+
+`groupPolicyByAdminIndex` allows to retrieve group policies by admin address:
+`0x23 | len([]byte(Address)) | []byte(Address) | PrimaryKey -> []byte()`.
+
+### Proposal Table
+
+The `proposalTable` stores `Proposal`s: `0x30 | BigEndian(ProposalId) -> ProtocolBuffer(Proposal)`.
+
+#### proposalSeq
+
+The value of `proposalSeq` is incremented when creating a new proposal and corresponds to the new `ProposalId`: `0x31 | 0x1 -> BigEndian`.
+
+The second `0x1` corresponds to the ORM `sequenceStorageKey`.
+
+#### proposalByGroupPolicyIndex
+
+`proposalByGroupPolicyIndex` allows to retrieve proposals by group policy account address:
+`0x32 | len([]byte(account.Address)) | []byte(account.Address) | BigEndian(ProposalId) -> []byte()`.
+
+#### ProposalsByVotingPeriodEndIndex
+
+`proposalsByVotingPeriodEndIndex` allows to retrieve proposals sorted by chronological `voting_period_end`:
+`0x33 | sdk.FormatTimeBytes(proposal.VotingPeriodEnd) | BigEndian(ProposalId) -> []byte()`.
+
+This index is used when tallying the proposal votes at the end of the voting period, and for pruning proposals at `VotingPeriodEnd + MaxExecutionPeriod`.
+
+### Vote Table
+
+The `voteTable` stores `Vote`s: `0x40 | BigEndian(ProposalId) | []byte(voter.Address) -> ProtocolBuffer(Vote)`.
+
+The `voteTable` is a primary key table and its `PrimaryKey` is given by
+`BigEndian(ProposalId) | []byte(voter.Address)` which is used by the following indexes.
+
+#### voteByProposalIndex
+
+`voteByProposalIndex` allows to retrieve votes by proposal id:
+`0x41 | BigEndian(ProposalId) | PrimaryKey -> []byte()`.
+
+#### voteByVoterIndex
+
+`voteByVoterIndex` allows to retrieve votes by voter address:
+`0x42 | len([]byte(voter.Address)) | []byte(voter.Address) | PrimaryKey -> []byte()`.
+
+## Msg Service
+
+### Msg/CreateGroup
+
+A new group can be created with the `MsgCreateGroup`, which has an admin address, a list of members and some optional metadata.
+
+The metadata has a maximum length that is chosen by the app developer, and
+passed into the group keeper as a config.
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/proto/cosmos/group/v1/tx.proto#L67-L80
+```
+
+It's expected to fail if
+
+* metadata length is greater than `MaxMetadataLen` config
+* members are not correctly set (e.g. wrong address format, duplicates, or with 0 weight).
+
+### Msg/UpdateGroupMembers
+
+Group members can be updated with the `UpdateGroupMembers`.
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/proto/cosmos/group/v1/tx.proto#L88-L102
+```
+
+In the list of `MemberUpdates`, an existing member can be removed by setting its weight to 0.
+
+It's expected to fail if:
+
+* the signer is not the admin of the group.
+* for any one of the associated group policies, if its decision policy's `Validate()` method fails against the updated group.
+
+### Msg/UpdateGroupAdmin
+
+The `UpdateGroupAdmin` can be used to update a group admin.
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/proto/cosmos/group/v1/tx.proto#L107-L120
+```
+
+It's expected to fail if the signer is not the admin of the group.
+
+### Msg/UpdateGroupMetadata
+
+The `UpdateGroupMetadata` can be used to update a group metadata.
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/proto/cosmos/group/v1/tx.proto#L125-L138
+```
+
+It's expected to fail if:
+
+* new metadata length is greater than `MaxMetadataLen` config.
+* the signer is not the admin of the group.
+
+### Msg/CreateGroupPolicy
+
+A new group policy can be created with the `MsgCreateGroupPolicy`, which has an admin address, a group id, a decision policy and some optional metadata.
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/proto/cosmos/group/v1/tx.proto#L147-L165
+```
+
+It's expected to fail if:
+
+* the signer is not the admin of the group.
+* metadata length is greater than `MaxMetadataLen` config.
+* the decision policy's `Validate()` method doesn't pass against the group.
+
+### Msg/CreateGroupWithPolicy
+
+A new group with policy can be created with the `MsgCreateGroupWithPolicy`, which has an admin address, a list of members, a decision policy, a `group_policy_as_admin` field to optionally set group and group policy admin with group policy address and some optional metadata for group and group policy.
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/proto/cosmos/group/v1/tx.proto#L191-L215
+```
+
+It's expected to fail for the same reasons as `Msg/CreateGroup` and `Msg/CreateGroupPolicy`.
+
+### Msg/UpdateGroupPolicyAdmin
+
+The `UpdateGroupPolicyAdmin` can be used to update a group policy admin.
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/proto/cosmos/group/v1/tx.proto#L173-L186
+```
+
+It's expected to fail if the signer is not the admin of the group policy.
+
+### Msg/UpdateGroupPolicyDecisionPolicy
+
+The `UpdateGroupPolicyDecisionPolicy` can be used to update a decision policy.
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/proto/cosmos/group/v1/tx.proto#L226-L241
+```
+
+It's expected to fail if:
+
+* the signer is not the admin of the group policy.
+* the new decision policy's `Validate()` method doesn't pass against the group.
+
+### Msg/UpdateGroupPolicyMetadata
+
+The `UpdateGroupPolicyMetadata` can be used to update a group policy metadata.
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/proto/cosmos/group/v1/tx.proto#L246-L259
+```
+
+It's expected to fail if:
+
+* new metadata length is greater than `MaxMetadataLen` config.
+* the signer is not the admin of the group.
+
+### Msg/SubmitProposal
+
+A new proposal can be created with the `MsgSubmitProposal`, which has a group policy account address, a list of proposers addresses, a list of messages to execute if the proposal is accepted and some optional metadata.
+An optional `Exec` value can be provided to try to execute the proposal immediately after proposal creation. Proposers' signatures are considered as yes votes in this case.
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/proto/cosmos/group/v1/tx.proto#L281-L315
+```
+
+It's expected to fail if:
+
+* metadata, title, or summary length is greater than `MaxMetadataLen` config.
+* any of the proposers is not a group member.
+
+### Msg/WithdrawProposal
+
+A proposal can be withdrawn using `MsgWithdrawProposal` which has an `address` (can be either a proposer or the group policy admin) and a `proposal_id` (which has to be withdrawn).
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/proto/cosmos/group/v1/tx.proto#L323-L333
+```
+
+It's expected to fail if:
+
+* the signer is neither the group policy admin nor proposer of the proposal.
+* the proposal is already closed or aborted.
+
+### Msg/Vote
+
+A new vote can be created with the `MsgVote`, given a proposal id, a voter address, a choice (yes, no, veto or abstain) and some optional metadata.
+An optional `Exec` value can be provided to try to execute the proposal immediately after voting.
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/proto/cosmos/group/v1/tx.proto#L338-L358
+```
+
+It's expected to fail if:
+
+* metadata length is greater than `MaxMetadataLen` config.
+* the proposal is not in voting period anymore.
+
+### Msg/Exec
+
+A proposal can be executed with the `MsgExec`.
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/proto/cosmos/group/v1/tx.proto#L363-L373
+```
+
+The messages that are part of this proposal won't be executed if:
+
+* the proposal has not been accepted by the group policy.
+* the proposal has already been successfully executed.
+
+### Msg/LeaveGroup
+
+The `MsgLeaveGroup` allows a group member to leave a group.
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/proto/cosmos/group/v1/tx.proto#L381-L391
+```
+
+It's expected to fail if:
+
+* the group member is not part of the group.
+* for any one of the associated group policies, if its decision policy's `Validate()` method fails against the updated group.
+
+## Events
+
+The group module emits the following events:
+
+### EventCreateGroup
+
+| Type | Attribute Key | Attribute Value |
+| -------------------------------- | ------------- | -------------------------------- |
+| message | action | /cosmos.group.v1.Msg/CreateGroup |
+| cosmos.group.v1.EventCreateGroup | group\_id | `{groupId}` |
+
+### EventUpdateGroup
+
+| Type | Attribute Key | Attribute Value |
+| -------------------------------- | ------------- | ---------------------------------------------------------- |
+| message | action | `/cosmos.group.v1.Msg/UpdateGroup{Admin\|Metadata\|Members}` |
+| cosmos.group.v1.EventUpdateGroup | group\_id | `{groupId}` |
+
+### EventCreateGroupPolicy
+
+| Type | Attribute Key | Attribute Value |
+| -------------------------------------- | ------------- | -------------------------------------- |
+| message | action | /cosmos.group.v1.Msg/CreateGroupPolicy |
+| cosmos.group.v1.EventCreateGroupPolicy | address | `{groupPolicyAddress}` |
+
+### EventUpdateGroupPolicy
+
+| Type | Attribute Key | Attribute Value |
+| -------------------------------------- | ------------- | ----------------------------------------------------------------------- |
+| message | action | `/cosmos.group.v1.Msg/UpdateGroupPolicy{Admin\|Metadata\|DecisionPolicy}` |
+| cosmos.group.v1.EventUpdateGroupPolicy | address | `{groupPolicyAddress}` |
+
+### EventCreateProposal
+
+| Type | Attribute Key | Attribute Value |
+| ----------------------------------- | ------------- | ----------------------------------- |
+| message | action | /cosmos.group.v1.Msg/CreateProposal |
+| cosmos.group.v1.EventCreateProposal | proposal\_id | `{proposalId}` |
+
+### EventWithdrawProposal
+
+| Type | Attribute Key | Attribute Value |
+| ------------------------------------- | ------------- | ------------------------------------- |
+| message | action | /cosmos.group.v1.Msg/WithdrawProposal |
+| cosmos.group.v1.EventWithdrawProposal | proposal\_id | `{proposalId}` |
+
+### EventVote
+
+| Type | Attribute Key | Attribute Value |
+| ------------------------- | ------------- | ------------------------- |
+| message | action | /cosmos.group.v1.Msg/Vote |
+| cosmos.group.v1.EventVote | proposal\_id | `{proposalId}` |
+
+### EventExec
+
+| Type | Attribute Key | Attribute Value |
+| ------------------------- | ------------- | ------------------------- |
+| message | action | /cosmos.group.v1.Msg/Exec |
+| cosmos.group.v1.EventExec | proposal\_id | `{proposalId}` |
+| cosmos.group.v1.EventExec | logs | `{logs\_string}` |
+
+### EventLeaveGroup
+
+| Type | Attribute Key | Attribute Value |
+| ------------------------------- | ------------- | ------------------------------- |
+| message | action | /cosmos.group.v1.Msg/LeaveGroup |
+| cosmos.group.v1.EventLeaveGroup | proposal\_id | `{proposalId}` |
+| cosmos.group.v1.EventLeaveGroup | address | `{address}` |
+
+### EventProposalPruned
+
+| Type | Attribute Key | Attribute Value |
+| ----------------------------------- | ------------- | ------------------------------- |
+| message | action | /cosmos.group.v1.Msg/LeaveGroup |
+| cosmos.group.v1.EventProposalPruned | proposal\_id | `{proposalId}` |
+| cosmos.group.v1.EventProposalPruned | status | `{ProposalStatus}` |
+| cosmos.group.v1.EventProposalPruned | tally\_result | `{TallyResult}` |
+
+## Client
+
+### CLI
+
+A user can query and interact with the `group` module using the CLI.
+
+#### Query
+
+The `query` commands allow users to query `group` state.
+
+```bash
+simd query group --help
+```
+
+##### group-info
+
+The `group-info` command allows users to query for group info by given group id.
+
+```bash
+simd query group group-info [id] [flags]
+```
+
+Example:
+
+```bash
+simd query group group-info 1
+```
+
+Example Output:
+
+```bash
+admin: cosmos1..
+group_id: "1"
+metadata: AQ==
+total_weight: "3"
+version: "1"
+```
+
+##### group-policy-info
+
+The `group-policy-info` command allows users to query for group policy info by account address of group policy.
+
+```bash
+simd query group group-policy-info [group-policy-account] [flags]
+```
+
+Example:
+
+```bash
+simd query group group-policy-info cosmos1..
+```
+
+Example Output:
+
+```bash expandable
+address: cosmos1..
+admin: cosmos1..
+decision_policy:
+ '@type': /cosmos.group.v1.ThresholdDecisionPolicy
+ threshold: "1"
+ windows:
+ min_execution_period: 0s
+ voting_period: 432000s
+group_id: "1"
+metadata: AQ==
+version: "1"
+```
+
+##### group-members
+
+The `group-members` command allows users to query for group members by group id with pagination flags.
+
+```bash
+simd query group group-members [id] [flags]
+```
+
+Example:
+
+```bash
+simd query group group-members 1
+```
+
+Example Output:
+
+```bash expandable
+members:
+- group_id: "1"
+ member:
+ address: cosmos1..
+ metadata: AQ==
+ weight: "2"
+- group_id: "1"
+ member:
+ address: cosmos1..
+ metadata: AQ==
+ weight: "1"
+pagination:
+ next_key: null
+ total: "2"
+```
+
+##### groups-by-admin
+
+The `groups-by-admin` command allows users to query for groups by admin account address with pagination flags.
+
+```bash
+simd query group groups-by-admin [admin] [flags]
+```
+
+Example:
+
+```bash
+simd query group groups-by-admin cosmos1..
+```
+
+Example Output:
+
+```bash expandable
+groups:
+- admin: cosmos1..
+ group_id: "1"
+ metadata: AQ==
+ total_weight: "3"
+ version: "1"
+- admin: cosmos1..
+ group_id: "2"
+ metadata: AQ==
+ total_weight: "3"
+ version: "1"
+pagination:
+ next_key: null
+ total: "2"
+```
+
+##### group-policies-by-group
+
+The `group-policies-by-group` command allows users to query for group policies by group id with pagination flags.
+
+```bash
+simd query group group-policies-by-group [group-id] [flags]
+```
+
+Example:
+
+```bash
+simd query group group-policies-by-group 1
+```
+
+Example Output:
+
+```bash expandable
+group_policies:
+- address: cosmos1..
+ admin: cosmos1..
+ decision_policy:
+ '@type': /cosmos.group.v1.ThresholdDecisionPolicy
+ threshold: "1"
+ windows:
+ min_execution_period: 0s
+ voting_period: 432000s
+ group_id: "1"
+ metadata: AQ==
+ version: "1"
+- address: cosmos1..
+ admin: cosmos1..
+ decision_policy:
+ '@type': /cosmos.group.v1.ThresholdDecisionPolicy
+ threshold: "1"
+ windows:
+ min_execution_period: 0s
+ voting_period: 432000s
+ group_id: "1"
+ metadata: AQ==
+ version: "1"
+pagination:
+ next_key: null
+ total: "2"
+```
+
+##### group-policies-by-admin
+
+The `group-policies-by-admin` command allows users to query for group policies by admin account address with pagination flags.
+
+```bash
+simd query group group-policies-by-admin [admin] [flags]
+```
+
+Example:
+
+```bash
+simd query group group-policies-by-admin cosmos1..
+```
+
+Example Output:
+
+```bash expandable
+group_policies:
+- address: cosmos1..
+ admin: cosmos1..
+ decision_policy:
+ '@type': /cosmos.group.v1.ThresholdDecisionPolicy
+ threshold: "1"
+ windows:
+ min_execution_period: 0s
+ voting_period: 432000s
+ group_id: "1"
+ metadata: AQ==
+ version: "1"
+- address: cosmos1..
+ admin: cosmos1..
+ decision_policy:
+ '@type': /cosmos.group.v1.ThresholdDecisionPolicy
+ threshold: "1"
+ windows:
+ min_execution_period: 0s
+ voting_period: 432000s
+ group_id: "1"
+ metadata: AQ==
+ version: "1"
+pagination:
+ next_key: null
+ total: "2"
+```
+
+##### proposal
+
+The `proposal` command allows users to query for proposal by id.
+
+```bash
+simd query group proposal [id] [flags]
+```
+
+Example:
+
+```bash
+simd query group proposal 1
+```
+
+Example Output:
+
+```bash expandable
+proposal:
+ address: cosmos1..
+ executor_result: EXECUTOR_RESULT_NOT_RUN
+ group_policy_version: "1"
+ group_version: "1"
+ metadata: AQ==
+ msgs:
+ - '@type': /cosmos.bank.v1beta1.MsgSend
+ amount:
+ - amount: "100000000"
+ denom: stake
+ from_address: cosmos1..
+ to_address: cosmos1..
+ proposal_id: "1"
+ proposers:
+ - cosmos1..
+ result: RESULT_UNFINALIZED
+ status: STATUS_SUBMITTED
+ submitted_at: "2021-12-17T07:06:26.310638964Z"
+ windows:
+ min_execution_period: 0s
+ voting_period: 432000s
+ vote_state:
+ abstain_count: "0"
+ no_count: "0"
+ veto_count: "0"
+ yes_count: "0"
+ summary: "Summary"
+ title: "Title"
+```
+
+##### proposals-by-group-policy
+
+The `proposals-by-group-policy` command allows users to query for proposals by account address of group policy with pagination flags.
+
+```bash
+simd query group proposals-by-group-policy [group-policy-account] [flags]
+```
+
+Example:
+
+```bash
+simd query group proposals-by-group-policy cosmos1..
+```
+
+Example Output:
+
+```bash expandable
+pagination:
+ next_key: null
+ total: "1"
+proposals:
+- address: cosmos1..
+ executor_result: EXECUTOR_RESULT_NOT_RUN
+ group_policy_version: "1"
+ group_version: "1"
+ metadata: AQ==
+ msgs:
+ - '@type': /cosmos.bank.v1beta1.MsgSend
+ amount:
+ - amount: "100000000"
+ denom: stake
+ from_address: cosmos1..
+ to_address: cosmos1..
+ proposal_id: "1"
+ proposers:
+ - cosmos1..
+ result: RESULT_UNFINALIZED
+ status: STATUS_SUBMITTED
+ submitted_at: "2021-12-17T07:06:26.310638964Z"
+ windows:
+ min_execution_period: 0s
+ voting_period: 432000s
+ vote_state:
+ abstain_count: "0"
+ no_count: "0"
+ veto_count: "0"
+ yes_count: "0"
+ summary: "Summary"
+ title: "Title"
+```
+
+##### vote
+
+The `vote` command allows users to query for vote by proposal id and voter account address.
+
+```bash
+simd query group vote [proposal-id] [voter] [flags]
+```
+
+Example:
+
+```bash
+simd query group vote 1 cosmos1..
+```
+
+Example Output:
+
+```bash
+vote:
+ choice: CHOICE_YES
+ metadata: AQ==
+ proposal_id: "1"
+ submitted_at: "2021-12-17T08:05:02.490164009Z"
+ voter: cosmos1..
+```
+
+##### votes-by-proposal
+
+The `votes-by-proposal` command allows users to query for votes by proposal id with pagination flags.
+
+```bash
+simd query group votes-by-proposal [proposal-id] [flags]
+```
+
+Example:
+
+```bash
+simd query group votes-by-proposal 1
+```
+
+Example Output:
+
+```bash
+pagination:
+ next_key: null
+ total: "1"
+votes:
+- choice: CHOICE_YES
+ metadata: AQ==
+ proposal_id: "1"
+ submitted_at: "2021-12-17T08:05:02.490164009Z"
+ voter: cosmos1..
+```
+
+##### votes-by-voter
+
+The `votes-by-voter` command allows users to query for votes by voter account address with pagination flags.
+
+```bash
+simd query group votes-by-voter [voter] [flags]
+```
+
+Example:
+
+```bash
+simd query group votes-by-voter cosmos1..
+```
+
+Example Output:
+
+```bash
+pagination:
+ next_key: null
+ total: "1"
+votes:
+- choice: CHOICE_YES
+ metadata: AQ==
+ proposal_id: "1"
+ submitted_at: "2021-12-17T08:05:02.490164009Z"
+ voter: cosmos1..
+```
+
+### Transactions
+
+The `tx` commands allow users to interact with the `group` module.
+
+```bash
+simd tx group --help
+```
+
+#### create-group
+
+The `create-group` command allows users to create a group which is an aggregation of member accounts with associated weights and
+an administrator account.
+
+```bash
+simd tx group create-group [admin] [metadata] [members-json-file]
+```
+
+Example:
+
+```bash
+simd tx group create-group cosmos1.. "AQ==" members.json
+```
+
+#### update-group-admin
+
+The `update-group-admin` command allows users to update a group's admin.
+
+```bash
+simd tx group update-group-admin [admin] [group-id] [new-admin] [flags]
+```
+
+Example:
+
+```bash
+simd tx group update-group-admin cosmos1.. 1 cosmos1..
+```
+
+#### update-group-members
+
+The `update-group-members` command allows users to update a group's members.
+
+```bash
+simd tx group update-group-members [admin] [group-id] [members-json-file] [flags]
+```
+
+Example:
+
+```bash
+simd tx group update-group-members cosmos1.. 1 members.json
+```
+
+#### update-group-metadata
+
+The `update-group-metadata` command allows users to update a group's metadata.
+
+```bash
+simd tx group update-group-metadata [admin] [group-id] [metadata] [flags]
+```
+
+Example:
+
+```bash
+simd tx group update-group-metadata cosmos1.. 1 "AQ=="
+```
+
+#### create-group-policy
+
+The `create-group-policy` command allows users to create a group policy which is an account associated with a group and a decision policy.
+
+```bash
+simd tx group create-group-policy [admin] [group-id] [metadata] [decision-policy] [flags]
+```
+
+Example:
+
+```bash
+simd tx group create-group-policy cosmos1.. 1 "AQ==" '{"@type":"/cosmos.group.v1.ThresholdDecisionPolicy", "threshold":"1", "windows": {"voting_period": "120h", "min_execution_period": "0s"}}'
+```
+
+#### create-group-with-policy
+
+The `create-group-with-policy` command allows users to create a group which is an aggregation of member accounts with associated weights and an administrator account with decision policy. If the `--group-policy-as-admin` flag is set to `true`, the group policy address becomes the group and group policy admin.
+
+```bash
+simd tx group create-group-with-policy [admin] [group-metadata] [group-policy-metadata] [members-json-file] [decision-policy] [flags]
+```
+
+Example:
+
+```bash
+simd tx group create-group-with-policy cosmos1.. "AQ==" "AQ==" members.json '{"@type":"/cosmos.group.v1.ThresholdDecisionPolicy", "threshold":"1", "windows": {"voting_period": "120h", "min_execution_period": "0s"}}'
+```
+
+#### update-group-policy-admin
+
+The `update-group-policy-admin` command allows users to update a group policy admin.
+
+```bash
+simd tx group update-group-policy-admin [admin] [group-policy-account] [new-admin] [flags]
+```
+
+Example:
+
+```bash
+simd tx group update-group-policy-admin cosmos1.. cosmos1.. cosmos1..
+```
+
+#### update-group-policy-metadata
+
+The `update-group-policy-metadata` command allows users to update a group policy metadata.
+
+```bash
+simd tx group update-group-policy-metadata [admin] [group-policy-account] [new-metadata] [flags]
+```
+
+Example:
+
+```bash
+simd tx group update-group-policy-metadata cosmos1.. cosmos1.. "AQ=="
+```
+
+#### update-group-policy-decision-policy
+
+The `update-group-policy-decision-policy` command allows users to update a group policy's decision policy.
+
+```bash
+simd tx group update-group-policy-decision-policy [admin] [group-policy-account] [decision-policy] [flags]
+```
+
+Example:
+
+```bash
+simd tx group update-group-policy-decision-policy cosmos1.. cosmos1.. '{"@type":"/cosmos.group.v1.ThresholdDecisionPolicy", "threshold":"2", "windows": {"voting_period": "120h", "min_execution_period": "0s"}}'
+```
+
+#### submit-proposal
+
+The `submit-proposal` command allows users to submit a new proposal.
+
+```bash
+simd tx group submit-proposal [group-policy-account] [proposer[,proposer]*] [msg_tx_json_file] [metadata] [flags]
+```
+
+Example:
+
+```bash
+simd tx group submit-proposal cosmos1.. cosmos1.. msg_tx.json "AQ=="
+```
+
+#### withdraw-proposal
+
+The `withdraw-proposal` command allows users to withdraw a proposal.
+
+```bash
+simd tx group withdraw-proposal [proposal-id] [group-policy-admin-or-proposer]
+```
+
+Example:
+
+```bash
+simd tx group withdraw-proposal 1 cosmos1..
+```
+
+#### vote
+
+The `vote` command allows users to vote on a proposal.
+
+```bash
+simd tx group vote [proposal-id] [voter] [choice] [metadata] [flags]
+```
+
+Example:
+
+```bash
+simd tx group vote 1 cosmos1.. CHOICE_YES "AQ=="
+```
+
+#### exec
+
+The `exec` command allows users to execute a proposal.
+
+```bash
+simd tx group exec [proposal-id] [flags]
+```
+
+Example:
+
+```bash
+simd tx group exec 1
+```
+
+#### leave-group
+
+The `leave-group` command allows a group member to leave the group.
+
+```bash
+simd tx group leave-group [member-address] [group-id]
+```
+
+Example:
+
+```bash
+simd tx group leave-group cosmos1.. 1
+```
+
+### gRPC
+
+A user can query the `group` module using gRPC endpoints.
+
+#### GroupInfo
+
+The `GroupInfo` endpoint allows users to query for group info by given group id.
+
+```bash
+cosmos.group.v1.Query/GroupInfo
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"group_id":1}' localhost:9090 cosmos.group.v1.Query/GroupInfo
+```
+
+Example Output:
+
+```bash
+{
+ "info": {
+ "groupId": "1",
+ "admin": "cosmos1..",
+ "metadata": "AQ==",
+ "version": "1",
+ "totalWeight": "3"
+ }
+}
+```
+
+#### GroupPolicyInfo
+
+The `GroupPolicyInfo` endpoint allows users to query for group policy info by account address of group policy.
+
+```bash
+cosmos.group.v1.Query/GroupPolicyInfo
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"address":"cosmos1.."}' localhost:9090 cosmos.group.v1.Query/GroupPolicyInfo
+```
+
+Example Output:
+
+```bash
+{
+ "info": {
+ "address": "cosmos1..",
+ "groupId": "1",
+ "admin": "cosmos1..",
+ "version": "1",
+ "decisionPolicy": {"@type":"/cosmos.group.v1.ThresholdDecisionPolicy","threshold":"1","windows": {"voting_period": "120h", "min_execution_period": "0s"}},
+ }
+}
+```
+
+#### GroupMembers
+
+The `GroupMembers` endpoint allows users to query for group members by group id with pagination flags.
+
+```bash
+cosmos.group.v1.Query/GroupMembers
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"group_id":"1"}' localhost:9090 cosmos.group.v1.Query/GroupMembers
+```
+
+Example Output:
+
+```bash expandable
+{
+ "members": [
+ {
+ "groupId": "1",
+ "member": {
+ "address": "cosmos1..",
+ "weight": "1"
+ }
+ },
+ {
+ "groupId": "1",
+ "member": {
+ "address": "cosmos1..",
+ "weight": "2"
+ }
+ }
+ ],
+ "pagination": {
+ "total": "2"
+ }
+}
+```
+
+#### GroupsByAdmin
+
+The `GroupsByAdmin` endpoint allows users to query for groups by admin account address with pagination flags.
+
+```bash
+cosmos.group.v1.Query/GroupsByAdmin
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"admin":"cosmos1.."}' localhost:9090 cosmos.group.v1.Query/GroupsByAdmin
+```
+
+Example Output:
+
+```bash expandable
+{
+ "groups": [
+ {
+ "groupId": "1",
+ "admin": "cosmos1..",
+ "metadata": "AQ==",
+ "version": "1",
+ "totalWeight": "3"
+ },
+ {
+ "groupId": "2",
+ "admin": "cosmos1..",
+ "metadata": "AQ==",
+ "version": "1",
+ "totalWeight": "3"
+ }
+ ],
+ "pagination": {
+ "total": "2"
+ }
+}
+```
+
+#### GroupPoliciesByGroup
+
+The `GroupPoliciesByGroup` endpoint allows users to query for group policies by group id with pagination flags.
+
+```bash
+cosmos.group.v1.Query/GroupPoliciesByGroup
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"group_id":"1"}' localhost:9090 cosmos.group.v1.Query/GroupPoliciesByGroup
+```
+
+Example Output:
+
+```bash expandable
+{
+ "GroupPolicies": [
+ {
+ "address": "cosmos1..",
+ "groupId": "1",
+ "admin": "cosmos1..",
+ "version": "1",
+ "decisionPolicy": {"@type":"/cosmos.group.v1.ThresholdDecisionPolicy","threshold":"1","windows":{"voting_period": "120h", "min_execution_period": "0s"}},
+ },
+ {
+ "address": "cosmos1..",
+ "groupId": "1",
+ "admin": "cosmos1..",
+ "version": "1",
+ "decisionPolicy": {"@type":"/cosmos.group.v1.ThresholdDecisionPolicy","threshold":"1","windows":{"voting_period": "120h", "min_execution_period": "0s"}},
+ }
+ ],
+ "pagination": {
+ "total": "2"
+ }
+}
+```
+
+#### GroupPoliciesByAdmin
+
+The `GroupPoliciesByAdmin` endpoint allows users to query for group policies by admin account address with pagination flags.
+
+```bash
+cosmos.group.v1.Query/GroupPoliciesByAdmin
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"admin":"cosmos1.."}' localhost:9090 cosmos.group.v1.Query/GroupPoliciesByAdmin
+```
+
+Example Output:
+
+```bash expandable
+{
+ "GroupPolicies": [
+ {
+ "address": "cosmos1..",
+ "groupId": "1",
+ "admin": "cosmos1..",
+ "version": "1",
+ "decisionPolicy": {"@type":"/cosmos.group.v1.ThresholdDecisionPolicy","threshold":"1","windows":{"voting_period": "120h", "min_execution_period": "0s"}},
+ },
+ {
+ "address": "cosmos1..",
+ "groupId": "1",
+ "admin": "cosmos1..",
+ "version": "1",
+ "decisionPolicy": {"@type":"/cosmos.group.v1.ThresholdDecisionPolicy","threshold":"1","windows":{"voting_period": "120h", "min_execution_period": "0s"}},
+ }
+ ],
+ "pagination": {
+ "total": "2"
+ }
+}
+```
+
+#### Proposal
+
+The `Proposal` endpoint allows users to query for proposal by id.
+
+```bash
+cosmos.group.v1.Query/Proposal
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"proposal_id":"1"}' localhost:9090 cosmos.group.v1.Query/Proposal
+```
+
+Example Output:
+
+```bash expandable
+{
+ "proposal": {
+ "proposalId": "1",
+ "address": "cosmos1..",
+ "proposers": [
+ "cosmos1.."
+ ],
+ "submittedAt": "2021-12-17T07:06:26.310638964Z",
+ "groupVersion": "1",
+ "GroupPolicyVersion": "1",
+ "status": "STATUS_SUBMITTED",
+ "result": "RESULT_UNFINALIZED",
+ "voteState": {
+ "yesCount": "0",
+ "noCount": "0",
+ "abstainCount": "0",
+ "vetoCount": "0"
+ },
+ "windows": {
+ "min_execution_period": "0s",
+ "voting_period": "432000s"
+ },
+ "executorResult": "EXECUTOR_RESULT_NOT_RUN",
+ "messages": [
+ {"@type":"/cosmos.bank.v1beta1.MsgSend","amount":[{"denom":"stake","amount":"100000000"}],"fromAddress":"cosmos1..","toAddress":"cosmos1.."}
+ ],
+ "title": "Title",
+ "summary": "Summary",
+ }
+}
+```
+
+#### ProposalsByGroupPolicy
+
+The `ProposalsByGroupPolicy` endpoint allows users to query for proposals by account address of group policy with pagination flags.
+
+```bash
+cosmos.group.v1.Query/ProposalsByGroupPolicy
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"address":"cosmos1.."}' localhost:9090 cosmos.group.v1.Query/ProposalsByGroupPolicy
+```
+
+Example Output:
+
+```bash expandable
+{
+ "proposals": [
+ {
+ "proposalId": "1",
+ "address": "cosmos1..",
+ "proposers": [
+ "cosmos1.."
+ ],
+ "submittedAt": "2021-12-17T08:03:27.099649352Z",
+ "groupVersion": "1",
+ "GroupPolicyVersion": "1",
+ "status": "STATUS_CLOSED",
+ "result": "RESULT_ACCEPTED",
+ "voteState": {
+ "yesCount": "1",
+ "noCount": "0",
+ "abstainCount": "0",
+ "vetoCount": "0"
+ },
+ "windows": {
+ "min_execution_period": "0s",
+ "voting_period": "432000s"
+ },
+ "executorResult": "EXECUTOR_RESULT_NOT_RUN",
+ "messages": [
+ {"@type":"/cosmos.bank.v1beta1.MsgSend","amount":[{"denom":"stake","amount":"100000000"}],"fromAddress":"cosmos1..","toAddress":"cosmos1.."}
+ ],
+ "title": "Title",
+ "summary": "Summary",
+ }
+ ],
+ "pagination": {
+ "total": "1"
+ }
+}
+```
+
+#### VoteByProposalVoter
+
+The `VoteByProposalVoter` endpoint allows users to query for vote by proposal id and voter account address.
+
+```bash
+cosmos.group.v1.Query/VoteByProposalVoter
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"proposal_id":"1","voter":"cosmos1.."}' localhost:9090 cosmos.group.v1.Query/VoteByProposalVoter
+```
+
+Example Output:
+
+```bash
+{
+ "vote": {
+ "proposalId": "1",
+ "voter": "cosmos1..",
+ "choice": "CHOICE_YES",
+ "submittedAt": "2021-12-17T08:05:02.490164009Z"
+ }
+}
+```
+
+#### VotesByProposal
+
+The `VotesByProposal` endpoint allows users to query for votes by proposal id with pagination flags.
+
+```bash
+cosmos.group.v1.Query/VotesByProposal
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"proposal_id":"1"}' localhost:9090 cosmos.group.v1.Query/VotesByProposal
+```
+
+Example Output:
+
+```bash expandable
+{
+ "votes": [
+ {
+ "proposalId": "1",
+ "voter": "cosmos1..",
+ "choice": "CHOICE_YES",
+ "submittedAt": "2021-12-17T08:05:02.490164009Z"
+ }
+ ],
+ "pagination": {
+ "total": "1"
+ }
+}
+```
+
+#### VotesByVoter
+
+The `VotesByVoter` endpoint allows users to query for votes by voter account address with pagination flags.
+
+```bash
+cosmos.group.v1.Query/VotesByVoter
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"voter":"cosmos1.."}' localhost:9090 cosmos.group.v1.Query/VotesByVoter
+```
+
+Example Output:
+
+```bash expandable
+{
+ "votes": [
+ {
+ "proposalId": "1",
+ "voter": "cosmos1..",
+ "choice": "CHOICE_YES",
+ "submittedAt": "2021-12-17T08:05:02.490164009Z"
+ }
+ ],
+ "pagination": {
+ "total": "1"
+ }
+}
+```
+
+### REST
+
+A user can query the `group` module using REST endpoints.
+
+#### GroupInfo
+
+The `GroupInfo` endpoint allows users to query for group info by given group id.
+
+```bash
+/cosmos/group/v1/group_info/{group_id}
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/group/v1/group_info/1
+```
+
+Example Output:
+
+```bash
+{
+ "info": {
+ "id": "1",
+ "admin": "cosmos1..",
+ "metadata": "AQ==",
+ "version": "1",
+ "total_weight": "3"
+ }
+}
+```
+
+#### GroupPolicyInfo
+
+The `GroupPolicyInfo` endpoint allows users to query for group policy info by account address of group policy.
+
+```bash
+/cosmos/group/v1/group_policy_info/{address}
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/group/v1/group_policy_info/cosmos1..
+```
+
+Example Output:
+
+```bash expandable
+{
+ "info": {
+ "address": "cosmos1..",
+ "group_id": "1",
+ "admin": "cosmos1..",
+ "metadata": "AQ==",
+ "version": "1",
+ "decision_policy": {
+ "@type": "/cosmos.group.v1.ThresholdDecisionPolicy",
+ "threshold": "1",
+ "windows": {
+ "voting_period": "120h",
+ "min_execution_period": "0s"
+ }
+    }
+ }
+}
+```
+
+#### GroupMembers
+
+The `GroupMembers` endpoint allows users to query for group members by group id with pagination flags.
+
+```bash
+/cosmos/group/v1/group_members/{group_id}
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/group/v1/group_members/1
+```
+
+Example Output:
+
+```bash expandable
+{
+ "members": [
+ {
+ "group_id": "1",
+ "member": {
+ "address": "cosmos1..",
+ "weight": "1",
+ "metadata": "AQ=="
+ }
+ },
+ {
+ "group_id": "1",
+ "member": {
+ "address": "cosmos1..",
+ "weight": "2",
+ "metadata": "AQ=="
+      }
+    }
+  ],
+ "pagination": {
+ "next_key": null,
+ "total": "2"
+ }
+}
+```
+
+#### GroupsByAdmin
+
+The `GroupsByAdmin` endpoint allows users to query for groups by admin account address with pagination flags.
+
+```bash
+/cosmos/group/v1/groups_by_admin/{admin}
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/group/v1/groups_by_admin/cosmos1..
+```
+
+Example Output:
+
+```bash expandable
+{
+ "groups": [
+ {
+ "id": "1",
+ "admin": "cosmos1..",
+ "metadata": "AQ==",
+ "version": "1",
+ "total_weight": "3"
+ },
+ {
+ "id": "2",
+ "admin": "cosmos1..",
+ "metadata": "AQ==",
+ "version": "1",
+ "total_weight": "3"
+ }
+ ],
+ "pagination": {
+ "next_key": null,
+ "total": "2"
+ }
+}
+```
+
+#### GroupPoliciesByGroup
+
+The `GroupPoliciesByGroup` endpoint allows users to query for group policies by group id with pagination flags.
+
+```bash
+/cosmos/group/v1/group_policies_by_group/{group_id}
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/group/v1/group_policies_by_group/1
+```
+
+Example Output:
+
+```bash expandable
+{
+ "group_policies": [
+ {
+ "address": "cosmos1..",
+ "group_id": "1",
+ "admin": "cosmos1..",
+ "metadata": "AQ==",
+ "version": "1",
+ "decision_policy": {
+ "@type": "/cosmos.group.v1.ThresholdDecisionPolicy",
+ "threshold": "1",
+ "windows": {
+ "voting_period": "120h",
+ "min_execution_period": "0s"
+ }
+      }
+ },
+ {
+ "address": "cosmos1..",
+ "group_id": "1",
+ "admin": "cosmos1..",
+ "metadata": "AQ==",
+ "version": "1",
+ "decision_policy": {
+ "@type": "/cosmos.group.v1.ThresholdDecisionPolicy",
+ "threshold": "1",
+ "windows": {
+ "voting_period": "120h",
+ "min_execution_period": "0s"
+ }
+      }
+ }
+ ],
+ "pagination": {
+ "next_key": null,
+ "total": "2"
+ }
+}
+```
+
+#### GroupPoliciesByAdmin
+
+The `GroupPoliciesByAdmin` endpoint allows users to query for group policies by admin account address with pagination flags.
+
+```bash
+/cosmos/group/v1/group_policies_by_admin/{admin}
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/group/v1/group_policies_by_admin/cosmos1..
+```
+
+Example Output:
+
+```bash expandable
+{
+ "group_policies": [
+ {
+ "address": "cosmos1..",
+ "group_id": "1",
+ "admin": "cosmos1..",
+ "metadata": "AQ==",
+ "version": "1",
+ "decision_policy": {
+ "@type": "/cosmos.group.v1.ThresholdDecisionPolicy",
+ "threshold": "1",
+ "windows": {
+ "voting_period": "120h",
+ "min_execution_period": "0s"
+ }
+      }
+ },
+ {
+ "address": "cosmos1..",
+ "group_id": "1",
+ "admin": "cosmos1..",
+ "metadata": "AQ==",
+ "version": "1",
+ "decision_policy": {
+ "@type": "/cosmos.group.v1.ThresholdDecisionPolicy",
+ "threshold": "1",
+ "windows": {
+ "voting_period": "120h",
+ "min_execution_period": "0s"
+ }
+      }
+ }
+ ],
+ "pagination": {
+ "next_key": null,
+ "total": "2"
+  }
+}
+```
+
+#### Proposal
+
+The `Proposal` endpoint allows users to query for proposal by id.
+
+```bash
+/cosmos/group/v1/proposal/{proposal_id}
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/group/v1/proposal/1
+```
+
+Example Output:
+
+```bash expandable
+{
+ "proposal": {
+ "proposal_id": "1",
+ "address": "cosmos1..",
+ "metadata": "AQ==",
+ "proposers": [
+ "cosmos1.."
+ ],
+ "submitted_at": "2021-12-17T07:06:26.310638964Z",
+ "group_version": "1",
+ "group_policy_version": "1",
+ "status": "STATUS_SUBMITTED",
+ "result": "RESULT_UNFINALIZED",
+ "vote_state": {
+ "yes_count": "0",
+ "no_count": "0",
+ "abstain_count": "0",
+ "veto_count": "0"
+ },
+ "windows": {
+ "min_execution_period": "0s",
+ "voting_period": "432000s"
+ },
+ "executor_result": "EXECUTOR_RESULT_NOT_RUN",
+ "messages": [
+ {
+ "@type": "/cosmos.bank.v1beta1.MsgSend",
+ "from_address": "cosmos1..",
+ "to_address": "cosmos1..",
+ "amount": [
+ {
+ "denom": "stake",
+ "amount": "100000000"
+ }
+ ]
+ }
+ ],
+ "title": "Title",
+ "summary": "Summary",
+ }
+}
+```
+
+#### ProposalsByGroupPolicy
+
+The `ProposalsByGroupPolicy` endpoint allows users to query for proposals by account address of group policy with pagination flags.
+
+```bash
+/cosmos/group/v1/proposals_by_group_policy/{address}
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/group/v1/proposals_by_group_policy/cosmos1..
+```
+
+Example Output:
+
+```bash expandable
+{
+ "proposals": [
+ {
+ "id": "1",
+ "group_policy_address": "cosmos1..",
+ "metadata": "AQ==",
+ "proposers": [
+ "cosmos1.."
+ ],
+ "submit_time": "2021-12-17T08:03:27.099649352Z",
+ "group_version": "1",
+ "group_policy_version": "1",
+ "status": "STATUS_CLOSED",
+ "result": "RESULT_ACCEPTED",
+ "vote_state": {
+ "yes_count": "1",
+ "no_count": "0",
+ "abstain_count": "0",
+ "veto_count": "0"
+ },
+ "windows": {
+ "min_execution_period": "0s",
+ "voting_period": "432000s"
+ },
+ "executor_result": "EXECUTOR_RESULT_NOT_RUN",
+ "messages": [
+ {
+ "@type": "/cosmos.bank.v1beta1.MsgSend",
+ "from_address": "cosmos1..",
+ "to_address": "cosmos1..",
+ "amount": [
+ {
+ "denom": "stake",
+ "amount": "100000000"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "pagination": {
+ "next_key": null,
+ "total": "1"
+ }
+}
+```
+
+#### VoteByProposalVoter
+
+The `VoteByProposalVoter` endpoint allows users to query for vote by proposal id and voter account address.
+
+```bash
+/cosmos/group/v1/vote_by_proposal_voter/{proposal_id}/{voter}
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/group/v1/vote_by_proposal_voter/1/cosmos1..
+```
+
+Example Output:
+
+```bash
+{
+ "vote": {
+ "proposal_id": "1",
+ "voter": "cosmos1..",
+ "choice": "CHOICE_YES",
+ "metadata": "AQ==",
+ "submitted_at": "2021-12-17T08:05:02.490164009Z"
+ }
+}
+```
+
+#### VotesByProposal
+
+The `VotesByProposal` endpoint allows users to query for votes by proposal id with pagination flags.
+
+```bash
+/cosmos/group/v1/votes_by_proposal/{proposal_id}
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/group/v1/votes_by_proposal/1
+```
+
+Example Output:
+
+```bash expandable
+{
+ "votes": [
+ {
+ "proposal_id": "1",
+ "voter": "cosmos1..",
+ "option": "CHOICE_YES",
+ "metadata": "AQ==",
+ "submit_time": "2021-12-17T08:05:02.490164009Z"
+ }
+ ],
+ "pagination": {
+ "next_key": null,
+ "total": "1"
+ }
+}
+```
+
+#### VotesByVoter
+
+The `VotesByVoter` endpoint allows users to query for votes by voter account address with pagination flags.
+
+```bash
+/cosmos/group/v1/votes_by_voter/{voter}
+```
+
+Example:
+
+```bash
+curl localhost:1317/cosmos/group/v1/votes_by_voter/cosmos1..
+```
+
+Example Output:
+
+```bash expandable
+{
+ "votes": [
+ {
+ "proposal_id": "1",
+ "voter": "cosmos1..",
+ "choice": "CHOICE_YES",
+ "metadata": "AQ==",
+ "submitted_at": "2021-12-17T08:05:02.490164009Z"
+ }
+ ],
+ "pagination": {
+ "next_key": null,
+ "total": "1"
+ }
+}
+```
+
+## Metadata
+
+The group module has four locations for metadata where users can provide further context about the on-chain actions they are taking. By default all metadata fields have a 255 character length field where metadata can be stored in json format, either on-chain or off-chain depending on the amount of data required. Here we provide a recommendation for the json structure and where the data should be stored. There are two important factors in making these recommendations. First, that the group and gov modules are consistent with one another, note the number of proposals made by all groups may be quite large. Second, that client applications such as block explorers and governance interfaces have confidence in the consistency of metadata structure across chains.
+
+### Proposal
+
+Location: off-chain as json object stored on IPFS (mirrors [gov proposal](/sdk/v0.53/build/modules/gov/README#metadata))
+
+```json
+{
+ "title": "",
+ "authors": [""],
+ "summary": "",
+ "details": "",
+ "proposal_forum_url": "",
+ "vote_option_context": "",
+}
+```
+
+
+The `authors` field is an array of strings, this is to allow for multiple authors to be listed in the metadata.
+In v0.46, the `authors` field is a comma-separated string. Frontends are encouraged to support both formats for backwards compatibility.
+
+
+### Vote
+
+Location: on-chain as json within 255 character limit (mirrors [gov vote](/sdk/v0.53/build/modules/gov/README#metadata))
+
+```json
+{
+ "justification": "",
+}
+```
+
+### Group
+
+Location: off-chain as json object stored on IPFS
+
+```json
+{
+ "name": "",
+ "description": "",
+ "group_website_url": "",
+ "group_forum_url": "",
+}
+```
+
+### Decision policy
+
+Location: on-chain as json within 255 character limit
+
+```json
+{
+ "name": "",
+ "description": "",
+}
+```
diff --git a/sdk/next/build/modules/mint/README.mdx b/sdk/next/build/modules/mint/README.mdx
new file mode 100644
index 000000000..fb15a7cd3
--- /dev/null
+++ b/sdk/next/build/modules/mint/README.mdx
@@ -0,0 +1,471 @@
+---
+title: 'x/mint'
+description: >-
+ The x/mint module handles the regular minting of new tokens in a configurable
+ manner.
+---
+
+The `x/mint` module handles the regular minting of new tokens in a configurable manner.
+
+## Contents
+
+* [State](#state)
+ * [Minter](#minter)
+ * [Params](#params)
+* [Begin-Block](#begin-block)
+ * [NextInflationRate](#nextinflationrate)
+ * [NextAnnualProvisions](#nextannualprovisions)
+ * [BlockProvision](#blockprovision)
+* [Parameters](#parameters)
+* [Events](#events)
+ * [BeginBlocker](#beginblocker)
+* [Client](#client)
+ * [CLI](#cli)
+ * [gRPC](#grpc)
+ * [REST](#rest)
+
+## Concepts
+
+### The Minting Mechanism
+
+The default minting mechanism was designed to:
+
+* allow for a flexible inflation rate determined by market demand targeting a particular bonded-stake ratio
+* effect a balance between market liquidity and staked supply
+
+In order to best determine the appropriate market rate for inflation rewards, a
+moving change rate is used. The moving change rate mechanism ensures that if
+the % bonded is either over or under the goal %-bonded, the inflation rate will
+adjust to further incentivize or disincentivize being bonded, respectively. Setting the goal
+%-bonded at less than 100% encourages the network to maintain some non-staked tokens
+which should help provide some liquidity.
+
+It can be broken down in the following way:
+
+* If the actual percentage of bonded tokens is below the goal %-bonded the inflation rate will
+ increase until a maximum value is reached
+* If the goal % bonded (67% in Cosmos-Hub) is maintained, then the inflation
+ rate will stay constant
+* If the actual percentage of bonded tokens is above the goal %-bonded the inflation rate will
+ decrease until a minimum value is reached
+
+### Custom Minters
+
+As of Cosmos SDK v0.53.0, developers can set a custom `MintFn` for the module for specialized token minting logic.
+
+The function signature that a `MintFn` must implement is as follows:
+
+```go
+// MintFn defines the function that needs to be implemented in order to customize the minting process.
+type MintFn func(ctx sdk.Context, k *Keeper) error
+```
+
+This can be passed to the `Keeper` upon creation with an additional `Option`:
+
+```go
+app.MintKeeper = mintkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[minttypes.StoreKey]),
+ app.StakingKeeper,
+ app.AccountKeeper,
+ app.BankKeeper,
+ authtypes.FeeCollectorName,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ // mintkeeper.WithMintFn(CUSTOM_MINT_FN), // custom mintFn can be added here
+ )
+```
+
+#### Custom Minter DI Example
+
+Below is a simple approach to creating a custom mint function with extra dependencies in DI configurations.
+For this basic example, we will make the minter simply double the supply of `foo` coin.
+
+First, we will define a function that takes our required dependencies, and returns a `MintFn`.
+
+```go expandable
+// MyCustomMintFunction is a custom mint function that doubles the supply of `foo` coin.
+func MyCustomMintFunction(bank bankkeeper.BaseKeeper) mintkeeper.MintFn {
+    return func(ctx sdk.Context, k *mintkeeper.Keeper) error {
+        supply := bank.GetSupply(ctx, "foo")
+        err := k.MintCoins(ctx, sdk.NewCoins(supply.Add(supply)))
+        if err != nil {
+            return err
+        }
+        return nil
+    }
+}
+```
+
+Then, pass the function defined above into the `depinject.Supply` function with the required dependencies.
+
+```go expandable
+// NewSimApp returns a reference to an initialized SimApp.
+func NewSimApp(
+ logger log.Logger,
+ db dbm.DB,
+ traceStore io.Writer,
+ loadLatest bool,
+ appOpts servertypes.AppOptions,
+ baseAppOptions ...func(*baseapp.BaseApp),
+) *SimApp {
+    var (
+        app        = &SimApp{}
+        appBuilder *runtime.AppBuilder
+        appConfig  = depinject.Configs(
+            AppConfig,
+            depinject.Supply(
+                appOpts,
+                logger,
+                // our custom mint function with the necessary dependency passed in.
+                MyCustomMintFunction(app.BankKeeper),
+            ),
+        )
+    )
+ // ...
+}
+```
+
+## State
+
+### Minter
+
+The minter is a space for holding current inflation information.
+
+* Minter: `0x00 -> ProtocolBuffer(minter)`
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/mint/v1beta1/mint.proto#L10-L24
+```
+
+### Params
+
+The mint module stores its params in state with the prefix of `0x01`,
+it can be updated with governance or the address with authority.
+
+* Params: `mint/params -> legacy_amino(params)`
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/mint/v1beta1/mint.proto#L26-L59
+```
+
+## Begin-Block
+
+Minting parameters are recalculated and inflation paid at the beginning of each block.
+
+### Inflation rate calculation
+
+Inflation rate is calculated using an "inflation calculation function" that's
+passed to the `NewAppModule` function. If no function is passed, then the SDK's
+default inflation function will be used (`NextInflationRate`). In case a custom
+inflation calculation logic is needed, this can be achieved by defining and
+passing a function that matches `InflationCalculationFn`'s signature.
+
+```go
+type InflationCalculationFn func(ctx sdk.Context, minter Minter, params Params, bondedRatio math.LegacyDec) math.LegacyDec
+```
+
+#### NextInflationRate
+
+The target annual inflation rate is recalculated each block.
+The inflation is also subject to a rate change (positive or negative)
+depending on the distance from the desired ratio (67%). The maximum rate change
+possible is defined to be 13% per year, however, the annual inflation is capped
+as between 7% and 20%.
+
+```go expandable
+NextInflationRate(params Params, bondedRatio math.LegacyDec) (inflation math.LegacyDec) {
+ inflationRateChangePerYear = (1 - bondedRatio/params.GoalBonded) * params.InflationRateChange
+ inflationRateChange = inflationRateChangePerYear/blocksPerYr
+
+ // increase the new annual inflation for this next block
+ inflation += inflationRateChange
+ if inflation > params.InflationMax {
+ inflation = params.InflationMax
+}
+ if inflation < params.InflationMin {
+ inflation = params.InflationMin
+}
+
+return inflation
+}
+```
+
+### NextAnnualProvisions
+
+Calculate the annual provisions based on current total supply and inflation
+rate. This parameter is calculated once per block.
+
+```go
+NextAnnualProvisions(params Params, totalSupply math.LegacyDec) (provisions math.LegacyDec) {
+    return Inflation * totalSupply
+}
+```
+
+### BlockProvision
+
+Calculate the provisions generated for each block based on current annual provisions. The provisions are then minted by the `mint` module's `ModuleMinterAccount` and then transferred to the `auth`'s `FeeCollector` `ModuleAccount`.
+
+```go
+BlockProvision(params Params) sdk.Coin {
+    provisionAmt = AnnualProvisions / params.BlocksPerYear
+    return sdk.NewCoin(params.MintDenom, provisionAmt.Truncate())
+}
+```
+
+## Parameters
+
+The minting module contains the following parameters:
+
+| Key | Type | Example |
+| ------------------- | --------------- | ---------------------- |
+| MintDenom | string | "uatom" |
+| InflationRateChange | string (dec) | "0.130000000000000000" |
+| InflationMax | string (dec) | "0.200000000000000000" |
+| InflationMin | string (dec) | "0.070000000000000000" |
+| GoalBonded | string (dec) | "0.670000000000000000" |
+| BlocksPerYear | string (uint64) | "6311520" |
+
+## Events
+
+The minting module emits the following events:
+
+### BeginBlocker
+
+| Type | Attribute Key | Attribute Value |
+| ---- | ------------------ | ------------------ |
+| mint | bonded\_ratio | `{bondedRatio}` |
+| mint | inflation | `{inflation}` |
+| mint | annual\_provisions | `{annualProvisions}` |
+| mint | amount | `{amount}` |
+
+## Client
+
+### CLI
+
+A user can query and interact with the `mint` module using the CLI.
+
+#### Query
+
+The `query` commands allow users to query `mint` state.
+
+```shell
+simd query mint --help
+```
+
+##### annual-provisions
+
+The `annual-provisions` command allows users to query the current minting annual provisions value
+
+```shell
+simd query mint annual-provisions [flags]
+```
+
+Example:
+
+```shell
+simd query mint annual-provisions
+```
+
+Example Output:
+
+```shell
+22268504368893.612100895088410693
+```
+
+##### inflation
+
+The `inflation` command allows users to query the current minting inflation value
+
+```shell
+simd query mint inflation [flags]
+```
+
+Example:
+
+```shell
+simd query mint inflation
+```
+
+Example Output:
+
+```shell
+0.199200302563256955
+```
+
+##### params
+
+The `params` command allows users to query the current minting parameters
+
+```shell
+simd query mint params [flags]
+```
+
+Example:
+
+```shell
+simd query mint params
+```
+
+Example Output:
+
+```yml
+blocks_per_year: "4360000"
+goal_bonded: "0.670000000000000000"
+inflation_max: "0.200000000000000000"
+inflation_min: "0.070000000000000000"
+inflation_rate_change: "0.130000000000000000"
+mint_denom: stake
+```
+
+### gRPC
+
+A user can query the `mint` module using gRPC endpoints.
+
+#### AnnualProvisions
+
+The `AnnualProvisions` endpoint allows users to query the current minting annual provisions value
+
+```shell
+/cosmos.mint.v1beta1.Query/AnnualProvisions
+```
+
+Example:
+
+```shell
+grpcurl -plaintext localhost:9090 cosmos.mint.v1beta1.Query/AnnualProvisions
+```
+
+Example Output:
+
+```json
+{
+ "annualProvisions": "1432452520532626265712995618"
+}
+```
+
+#### Inflation
+
+The `Inflation` endpoint allows users to query the current minting inflation value
+
+```shell
+/cosmos.mint.v1beta1.Query/Inflation
+```
+
+Example:
+
+```shell
+grpcurl -plaintext localhost:9090 cosmos.mint.v1beta1.Query/Inflation
+```
+
+Example Output:
+
+```json
+{
+ "inflation": "130197115720711261"
+}
+```
+
+#### Params
+
+The `Params` endpoint allows users to query the current minting parameters
+
+```shell
+/cosmos.mint.v1beta1.Query/Params
+```
+
+Example:
+
+```shell
+grpcurl -plaintext localhost:9090 cosmos.mint.v1beta1.Query/Params
+```
+
+Example Output:
+
+```json
+{
+ "params": {
+ "mintDenom": "stake",
+ "inflationRateChange": "130000000000000000",
+ "inflationMax": "200000000000000000",
+ "inflationMin": "70000000000000000",
+ "goalBonded": "670000000000000000",
+ "blocksPerYear": "6311520"
+ }
+}
+```
+
+### REST
+
+A user can query the `mint` module using REST endpoints.
+
+#### annual-provisions
+
+```shell
+/cosmos/mint/v1beta1/annual_provisions
+```
+
+Example:
+
+```shell
+curl "localhost:1317/cosmos/mint/v1beta1/annual_provisions"
+```
+
+Example Output:
+
+```json
+{
+ "annualProvisions": "1432452520532626265712995618"
+}
+```
+
+#### inflation
+
+```shell
+/cosmos/mint/v1beta1/inflation
+```
+
+Example:
+
+```shell
+curl "localhost:1317/cosmos/mint/v1beta1/inflation"
+```
+
+Example Output:
+
+```json
+{
+ "inflation": "130197115720711261"
+}
+```
+
+#### params
+
+```shell
+/cosmos/mint/v1beta1/params
+```
+
+Example:
+
+```shell
+curl "localhost:1317/cosmos/mint/v1beta1/params"
+```
+
+Example Output:
+
+```json
+{
+ "params": {
+ "mintDenom": "stake",
+ "inflationRateChange": "130000000000000000",
+ "inflationMax": "200000000000000000",
+ "inflationMin": "70000000000000000",
+ "goalBonded": "670000000000000000",
+ "blocksPerYear": "6311520"
+ }
+}
+```
diff --git a/sdk/next/build/modules/modules.mdx b/sdk/next/build/modules/modules.mdx
new file mode 100644
index 000000000..4b79768b3
--- /dev/null
+++ b/sdk/next/build/modules/modules.mdx
@@ -0,0 +1,64 @@
+---
+title: List of Modules
+description: >-
+ Here are some production-grade modules that can be used in Cosmos SDK
+ applications, along with their respective documentation.
+---
+
+Here are some production-grade modules that can be used in Cosmos SDK applications, along with their respective documentation:
+
+## Essential Modules
+
+Essential modules include functionality that *must* be included in your Cosmos SDK blockchain.
+These modules provide the core behaviors that are needed for users and operators such as balance tracking,
+proof-of-stake capabilities and governance.
+
+* [Auth](/sdk/v0.53/build/modules/auth/auth) - Authentication of accounts and transactions for Cosmos SDK applications.
+* [Bank](/sdk/v0.53/build/modules/bank/README) - Token transfer functionalities.
+* [Circuit](/sdk/v0.53/build/modules/circuit/README) - Circuit breaker module for pausing messages.
+* [Consensus](/sdk/v0.53/build/modules/consensus/README) - Consensus module for modifying CometBFT's ABCI consensus params.
+* [Distribution](/sdk/v0.53/build/modules/distribution/README) - Fee distribution, and staking token provision distribution.
+* [Evidence](/sdk/v0.53/build/modules/evidence/README) - Evidence handling for double signing, misbehaviour, etc.
+* [Governance](/sdk/v0.53/build/modules/gov/README) - On-chain proposals and voting.
+* [Genutil](/sdk/v0.53/build/modules/genutil/README) - Genesis utilities for the Cosmos SDK.
+* [Mint](/sdk/v0.53/build/modules/mint/README) - Creation of new units of staking token.
+* [Slashing](/sdk/v0.53/build/modules/slashing/README) - Validator punishment mechanisms.
+* [Staking](/sdk/v0.53/build/modules/staking/README) - Proof-of-Stake layer for public blockchains.
+* [Upgrade](/sdk/v0.53/build/modules/upgrade/README) - Software upgrades handling and coordination.
+
+## Supplementary Modules
+
+Supplementary modules are modules that are maintained in the Cosmos SDK but are not necessary for
+the core functionality of your blockchain. They can be thought of as ways to extend the
+capabilities of your blockchain or further specialize it.
+
+* [Authz](/sdk/v0.53/build/modules/authz/README) - Authorization for accounts to perform actions on behalf of other accounts.
+* [Epochs](/sdk/v0.53/build/modules/epochs/README) - Registration so SDK modules can have logic to be executed at the timed tickers.
+* [Feegrant](/sdk/v0.53/build/modules/feegrant/README) - Grant fee allowances for executing transactions.
+* [Group](/sdk/v0.53/build/modules/group/README) - Allows for the creation and management of on-chain multisig accounts.
+* [NFT](/sdk/v0.53/build/modules/nft/README) - NFT module implemented based on [ADR43](/sdk/v0.53/build/architecture/adr-043-nft-module).
+* [ProtocolPool](/sdk/v0.53/build/modules/protocolpool/README) - Extended management of community pool functionality.
+
+## Deprecated Modules
+
+The following modules are deprecated. They will no longer be maintained and eventually will be removed
+in an upcoming release of the Cosmos SDK per our [release process](https://github.com/cosmos/cosmos-sdk/blob/main/RELEASE_PROCESS.md).
+
+* [Crisis](/sdk/v0.53/build/modules/crisis/README) - *Deprecated* halting the blockchain under certain circumstances (e.g. if an invariant is broken).
+* [Params](/sdk/v0.53/build/modules/params/README) - *Deprecated* Globally available parameter store.
+
+To learn more about the process of building modules, visit the [building modules reference documentation](/sdk/v0.53/build/building-modules/intro).
+
+## IBC
+
+The IBC module for the SDK is maintained by the IBC Go team in its [own repository](https://github.com/cosmos/ibc-go).
+
+Additionally, the [capability module](https://github.com/cosmos/ibc-go/tree/fdd664698d79864f1e00e147f9879e58497b5ef1/modules/capability) is from v0.50+ maintained by the IBC Go team in its [own repository](https://github.com/cosmos/ibc-go/tree/fdd664698d79864f1e00e147f9879e58497b5ef1/modules/capability).
+
+## CosmWasm
+
+The CosmWasm module enables smart contracts, learn more by going to their [documentation site](https://book.cosmwasm.com/), or visit [the repository](https://github.com/CosmWasm/cosmwasm).
+
+## EVM
+
+Read more about writing smart contracts with solidity at the official [`evm` documentation page](https://evm.cosmos.network/).
\ No newline at end of file
diff --git a/sdk/next/build/modules/nft/README.mdx b/sdk/next/build/modules/nft/README.mdx
new file mode 100644
index 000000000..a038521fe
--- /dev/null
+++ b/sdk/next/build/modules/nft/README.mdx
@@ -0,0 +1,92 @@
+---
+title: 'x/nft'
+description: '## Abstract'
+---
+
+
+This module has been moved to [contrib/x/nft](https://github.com/cosmos/cosmos-sdk/tree/main/contrib/x/nft) and is no longer actively maintained. For continued maintenance, users should fork the module or refer to the [Cosmos Legacy repository](https://github.com/cosmos/cosmos-legacy).
+
+
+## Contents
+
+## Abstract
+
+`x/nft` is an implementation of a Cosmos SDK module, per [ADR 43](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-043-nft-module.md), that allows you to create nft classification, create nft, transfer nft, update nft, and support various queries by integrating the module. It is fully compatible with the ERC721 specification.
+
+* [Concepts](#concepts)
+ * [Class](#class)
+ * [NFT](#nft)
+* [State](#state)
+ * [Class](#class-1)
+ * [NFT](#nft-1)
+ * [NFTOfClassByOwner](#nftofclassbyowner)
+ * [Owner](#owner)
+ * [TotalSupply](#totalsupply)
+* [Messages](#messages)
+ * [MsgSend](#msgsend)
+* [Events](#events)
+
+## Concepts
+
+### Class
+
+`x/nft` module defines a struct `Class` to describe the common characteristics of a class of nft, under this class, you can create a variety of nft, which is equivalent to an erc721 contract for Ethereum. The design is defined in the [ADR 043](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-043-nft-module.md).
+
+### NFT
+
+The full name of NFT is Non-Fungible Tokens. Because of the irreplaceable nature of NFT, it means that it can be used to represent unique things. The nft implemented by this module is fully compatible with Ethereum ERC721 standard.
+
+## State
+
+### Class
+
+Class is mainly composed of `id`, `name`, `symbol`, `description`, `uri`, `uri_hash`,`data` where `id` is the unique identifier of the class, similar to the Ethereum ERC721 contract address, the others are optional.
+
+* Class: `0x01 | classID | -> ProtocolBuffer(Class)`
+
+### NFT
+
+NFT is mainly composed of `class_id`, `id`, `uri`, `uri_hash` and `data`. Among them, `class_id` and `id` are two-tuples that identify the uniqueness of nft, `uri` and `uri_hash` is optional, which identifies the off-chain storage location of the nft, and `data` is an Any type. Use Any chain of `x/nft` modules can be customized by extending this field
+
+* NFT: `0x02 | classID | 0x00 | nftID |-> ProtocolBuffer(NFT)`
+
+### NFTOfClassByOwner
+
+NFTOfClassByOwner is mainly to realize the function of querying all nfts using classID and owner, without other redundant functions.
+
+* NFTOfClassByOwner: `0x03 | owner | 0x00 | classID | 0x00 | nftID |-> 0x01`
+
+### Owner
+
+Since there is no extra field in NFT to indicate the owner of nft, an additional key-value pair is used to save the ownership of nft. With the transfer of nft, the key-value pair is updated synchronously.
+
+* OwnerKey: `0x04 | classID | 0x00 | nftID |-> owner`
+
+### TotalSupply
+
+TotalSupply is responsible for tracking the number of all nfts under a certain class. Mint operation is performed under the changed class, supply increases by one, burn operation, and supply decreases by one.
+
+* OwnerKey: `0x05 | classID |-> totalSupply`
+
+## Messages
+
+In this section we describe the processing of messages for the NFT module.
+
+
+The validation of `ClassID` and `NftID` is left to the app developer.\
+The SDK does not provide any validation for these fields.
+
+
+### MsgSend
+
+You can use the `MsgSend` message to transfer the ownership of nft. This is a function provided by the `x/nft` module. Of course, you can use the `Transfer` method to implement your own transfer logic, but you need to pay extra attention to the transfer permissions.
+
+The message handling should fail if:
+
+* provided `ClassID` does not exist.
+* provided `Id` does not exist.
+* provided `Sender` is not the owner of the nft.
+
+## Events
+
+The nft module emits proto events defined in [the Protobuf reference](https://buf.build/cosmos/cosmos-sdk/docs/main:cosmos.nft.v1beta1).
diff --git a/sdk/next/build/modules/params/README.mdx b/sdk/next/build/modules/params/README.mdx
new file mode 100644
index 000000000..83a98c999
--- /dev/null
+++ b/sdk/next/build/modules/params/README.mdx
@@ -0,0 +1,82 @@
+---
+title: 'x/params'
+description: >-
+ NOTE: x/params is deprecated as of Cosmos SDK v0.53 and will be removed in the
+ next release.
+---
+
+NOTE: `x/params` is deprecated as of Cosmos SDK v0.53 and will be removed in the next release.
+
+## Abstract
+
+Package params provides a globally available parameter store.
+
+There are two main types, Keeper and Subspace. Subspace is an isolated namespace for a
+paramstore, where keys are prefixed by preconfigured spacename. Keeper has a
+permission to access all existing spaces.
+
+Subspace can be used by the individual keepers, which need a private parameter store
+that the other keepers cannot modify. The params Keeper can be used to add a route to `x/gov` router in order to modify any parameter in case a proposal passes.
+
+The following content explains how to use the params module for master and user modules.
+
+## Contents
+
+* [Keeper](#keeper)
+* [Subspace](#subspace)
+ * [Key](#key)
+ * [KeyTable](#keytable)
+ * [ParamSet](#paramset)
+
+## Keeper
+
+In the app initialization stage, [subspaces](#subspace) can be allocated for other modules' keeper using `Keeper.Subspace` and are stored in `Keeper.spaces`. Then, those modules can have a reference to their specific parameter store through `Keeper.GetSubspace`.
+
+Example:
+
+```go
+type ExampleKeeper struct {
+ paramSpace paramtypes.Subspace
+}
+
+func (k ExampleKeeper)
+
+SetParams(ctx sdk.Context, params types.Params) {
+ k.paramSpace.SetParamSet(ctx, &params)
+}
+```
+
+## Subspace
+
+`Subspace` is a prefixed subspace of the parameter store. Each module which uses the
+parameter store will take a `Subspace` to isolate permission to access.
+
+### Key
+
+Parameter keys are human readable alphanumeric strings. A parameter for the key
+`"ExampleParameter"` is stored under `[]byte("SubspaceName" + "/" + "ExampleParameter")`,
+where `"SubspaceName"` is the name of the subspace.
+
+Subkeys are secondary parameter keys that are used along with a primary parameter key.
+Subkeys can be used for grouping or dynamic parameter key generation during runtime.
+
+### KeyTable
+
+All of the parameter keys that will be used should be registered at compile
+time. `KeyTable` is essentially a `map[string]attribute`, where the `string` is a parameter key.
+
+Currently, `attribute` consists of a `reflect.Type`, which indicates the parameter
+type to check that provided key and value are compatible and registered, as well as a function `ValueValidatorFn` to validate values.
+
+Only primary keys have to be registered on the `KeyTable`. Subkeys inherit the
+attribute of the primary key.
+
+### ParamSet
+
+Modules often define parameters as a proto message. The generated struct can implement
+`ParamSet` interface to be used with the following methods:
+
+* `KeyTable.RegisterParamSet()`: registers all parameters in the struct
+* `Subspace.{Get, Set}ParamSet()`: Get to & Set from the struct
+
+The implementor should be a pointer in order to use `GetParamSet()`.
diff --git a/sdk/next/build/modules/protocolpool/README.mdx b/sdk/next/build/modules/protocolpool/README.mdx
new file mode 100644
index 000000000..6b637dcc9
--- /dev/null
+++ b/sdk/next/build/modules/protocolpool/README.mdx
@@ -0,0 +1,657 @@
+---
+title: 'x/protocolpool'
+---
+
+## Concepts
+
+`x/protocolpool` is a supplemental Cosmos SDK module that handles functionality for community pool funds. The module provides a separate module account for the community pool making it easier to track the pool assets. Starting with v0.53 of the Cosmos SDK, community funds can be tracked using this module instead of the `x/distribution` module. Funds are migrated from the `x/distribution` module's community pool to `x/protocolpool`'s module account automatically.
+
+This module is `supplemental`; it is not required to run a Cosmos SDK chain. `x/protocolpool` enhances the community pool functionality provided by `x/distribution` and enables custom modules to further extend the community pool.
+
+Note: *as long as an external community pool keeper (here, `x/protocolpool`) is wired in DI configs, `x/distribution` will automatically use it for its external pool.*
+
+## Usage Limitations
+
+The following `x/distribution` handlers will now return an error when the `protocolpool` module is used with `x/distribution`:
+
+**QueryService**
+
+* `CommunityPool`
+
+**MsgService**
+
+* `CommunityPoolSpend`
+* `FundCommunityPool`
+
+If you have services that rely on this functionality from `x/distribution`, please update them to use the `x/protocolpool` equivalents.
+
+## State Transitions
+
+### FundCommunityPool
+
+FundCommunityPool can be called by any valid account to send funds to the `x/protocolpool` module account.
+
+```protobuf
+ // FundCommunityPool defines a method to allow an account to directly
+ // fund the community pool.
+ rpc FundCommunityPool(MsgFundCommunityPool) returns (MsgFundCommunityPoolResponse);
+```
+
+### CommunityPoolSpend
+
+CommunityPoolSpend can be called by the module authority (default governance module account) or any account with authorization to spend funds from the `x/protocolpool` module account to a receiver address.
+
+```protobuf
+ // CommunityPoolSpend defines a governance operation for sending tokens from
+ // the community pool in the x/protocolpool module to another account, which
+ // could be the governance module itself. The authority is defined in the
+ // keeper.
+ rpc CommunityPoolSpend(MsgCommunityPoolSpend) returns (MsgCommunityPoolSpendResponse);
+```
+
+### CreateContinuousFund
+
+CreateContinuousFund is a message used to initiate a continuous fund for a specific recipient. The proposed percentage of funds will be distributed only on withdraw request for the recipient. The fund distribution continues until expiry time is reached or continuous fund request is canceled.
+NOTE: This feature is designed to work with the SDK's default bond denom.
+
+```protobuf
+ // CreateContinuousFund defines a method to distribute a percentage of funds to an address continuously.
+ // This ContinuousFund can be indefinite or run until a given expiry time.
+ // Funds come from validator block rewards from x/distribution, but may also come from
+ // any user who funds the ProtocolPoolEscrow module account directly through x/bank.
+ rpc CreateContinuousFund(MsgCreateContinuousFund) returns (MsgCreateContinuousFundResponse);
+```
+
+### CancelContinuousFund
+
+CancelContinuousFund is a message used to cancel an existing continuous fund proposal for a specific recipient. Cancelling a continuous fund stops further distribution of funds, and the state object is removed from storage.
+
+```protobuf
+ // CancelContinuousFund defines a method for cancelling continuous fund.
+ rpc CancelContinuousFund(MsgCancelContinuousFund) returns (MsgCancelContinuousFundResponse);
+```
+
+## Messages
+
+### MsgFundCommunityPool
+
+This message sends coins directly from the sender to the community pool.
+
+
+If you know the `x/protocolpool` module account address, you can directly use bank `send` transaction instead.
+
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/release/v0.53.x/proto/cosmos/protocolpool/v1/tx.proto#L43-L53
+```
+
+* The msg will fail if the amount cannot be transferred from the sender to the `x/protocolpool` module account.
+
+```go
+func (k Keeper)
+
+FundCommunityPool(ctx context.Context, amount sdk.Coins, sender sdk.AccAddress)
+
+error {
+ return k.bankKeeper.SendCoinsFromAccountToModule(ctx, sender, types.ModuleName, amount)
+}
+```
+
+### MsgCommunityPoolSpend
+
+This message distributes funds from the `x/protocolpool` module account to the recipient using `DistributeFromCommunityPool` keeper method.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/release/v0.53.x/proto/cosmos/protocolpool/v1/tx.proto#L58-L69
+```
+
+The message will fail under the following conditions:
+
+* The amount cannot be transferred to the recipient from the `x/protocolpool` module account.
+* The `recipient` address is restricted
+
+```go
+func (k Keeper)
+
+DistributeFromCommunityPool(ctx context.Context, amount sdk.Coins, receiveAddr sdk.AccAddress)
+
+error {
+ return k.bankKeeper.SendCoinsFromModuleToAccount(ctx, types.ModuleName, receiveAddr, amount)
+}
+```
+
+### MsgCreateContinuousFund
+
+This message is used to create a continuous fund for a specific recipient. The proposed percentage of funds will be distributed only on withdraw request for the recipient. This fund distribution continues until expiry time is reached or continuous fund request is canceled.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/release/v0.53.x/proto/cosmos/protocolpool/v1/tx.proto#L114-L130
+```
+
+The message will fail under the following conditions:
+
+* The recipient address is empty or restricted.
+* The percentage is zero/negative/greater than one.
+* The Expiry time is less than the current block time.
+
+
+If two continuous fund proposals to the same address are created, the previous ContinuousFund will be updated with the new ContinuousFund.
+
+
+```go expandable
+package keeper
+
+import (
+
+ "context"
+ "fmt"
+ "cosmossdk.io/math"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/x/protocolpool/types"
+)
+
+type MsgServer struct {
+ Keeper
+}
+
+var _ types.MsgServer = MsgServer{
+}
+
+// NewMsgServerImpl returns an implementation of the protocolpool MsgServer interface
+// for the provided Keeper.
+func NewMsgServerImpl(keeper Keeper)
+
+types.MsgServer {
+ return &MsgServer{
+ Keeper: keeper
+}
+}
+
+func (k MsgServer)
+
+FundCommunityPool(ctx context.Context, msg *types.MsgFundCommunityPool) (*types.MsgFundCommunityPoolResponse, error) {
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+
+depositor, err := k.authKeeper.AddressCodec().StringToBytes(msg.Depositor)
+ if err != nil {
+ return nil, sdkerrors.ErrInvalidAddress.Wrapf("invalid depositor address: %s", err)
+}
+ if err := validateAmount(msg.Amount); err != nil {
+ return nil, err
+}
+
+ // send funds to community pool module account
+ if err := k.Keeper.FundCommunityPool(sdkCtx, msg.Amount, depositor); err != nil {
+ return nil, err
+}
+
+return &types.MsgFundCommunityPoolResponse{
+}, nil
+}
+
+func (k MsgServer)
+
+CommunityPoolSpend(ctx context.Context, msg *types.MsgCommunityPoolSpend) (*types.MsgCommunityPoolSpendResponse, error) {
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ if err := k.validateAuthority(msg.Authority); err != nil {
+ return nil, err
+}
+ if err := validateAmount(msg.Amount); err != nil {
+ return nil, err
+}
+
+recipient, err := k.authKeeper.AddressCodec().StringToBytes(msg.Recipient)
+ if err != nil {
+ return nil, err
+}
+
+ // distribute funds from community pool module account
+ if err := k.DistributeFromCommunityPool(sdkCtx, msg.Amount, recipient); err != nil {
+ return nil, err
+}
+
+sdkCtx.Logger().Debug("transferred from the community pool", "amount", msg.Amount.String(), "recipient", msg.Recipient)
+
+return &types.MsgCommunityPoolSpendResponse{
+}, nil
+}
+
+func (k MsgServer)
+
+CreateContinuousFund(ctx context.Context, msg *types.MsgCreateContinuousFund) (*types.MsgCreateContinuousFundResponse, error) {
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ if err := k.validateAuthority(msg.Authority); err != nil {
+ return nil, err
+}
+
+recipient, err := k.Keeper.authKeeper.AddressCodec().StringToBytes(msg.Recipient)
+ if err != nil {
+ return nil, err
+}
+
+ // deny creation if we know this address is blocked from receiving funds
+ if k.bankKeeper.BlockedAddr(recipient) {
+ return nil, fmt.Errorf("recipient is blocked in the bank keeper: %s", msg.Recipient)
+}
+
+has, err := k.ContinuousFunds.Has(sdkCtx, recipient)
+ if err != nil {
+ return nil, err
+}
+ if has {
+ return nil, fmt.Errorf("continuous fund already exists for recipient %s", msg.Recipient)
+}
+
+ // Validate the message fields
+ err = validateContinuousFund(sdkCtx, *msg)
+ if err != nil {
+ return nil, err
+}
+
+ // Check if total funds percentage exceeds 100%
+ // If exceeds, we should not setup continuous fund proposal.
+ totalStreamFundsPercentage := math.LegacyZeroDec()
+
+err = k.ContinuousFunds.Walk(sdkCtx, nil, func(key sdk.AccAddress, value types.ContinuousFund) (stop bool, err error) {
+ totalStreamFundsPercentage = totalStreamFundsPercentage.Add(value.Percentage)
+
+return false, nil
+})
+ if err != nil {
+ return nil, err
+}
+
+totalStreamFundsPercentage = totalStreamFundsPercentage.Add(msg.Percentage)
+ if totalStreamFundsPercentage.GT(math.LegacyOneDec()) {
+ return nil, fmt.Errorf("cannot set continuous fund proposal\ntotal funds percentage exceeds 100\ncurrent total percentage: %s", totalStreamFundsPercentage.Sub(msg.Percentage).MulInt64(100).TruncateInt().String())
+}
+
+ // Create continuous fund proposal
+ cf := types.ContinuousFund{
+ Recipient: msg.Recipient,
+ Percentage: msg.Percentage,
+ Expiry: msg.Expiry,
+}
+
+ // Set continuous fund to the state
+ err = k.ContinuousFunds.Set(sdkCtx, recipient, cf)
+ if err != nil {
+ return nil, err
+}
+
+return &types.MsgCreateContinuousFundResponse{
+}, nil
+}
+
+func (k MsgServer)
+
+CancelContinuousFund(ctx context.Context, msg *types.MsgCancelContinuousFund) (*types.MsgCancelContinuousFundResponse, error) {
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ if err := k.validateAuthority(msg.Authority); err != nil {
+ return nil, err
+}
+
+recipient, err := k.Keeper.authKeeper.AddressCodec().StringToBytes(msg.Recipient)
+ if err != nil {
+ return nil, err
+}
+ canceledHeight := sdkCtx.BlockHeight()
+ canceledTime := sdkCtx.BlockTime()
+
+has, err := k.ContinuousFunds.Has(sdkCtx, recipient)
+ if err != nil {
+ return nil, fmt.Errorf("cannot get continuous fund for recipient %w", err)
+}
+ if !has {
+ return nil, fmt.Errorf("cannot cancel continuous fund for recipient %s - does not exist", msg.Recipient)
+}
+ if err := k.ContinuousFunds.Remove(sdkCtx, recipient); err != nil {
+ return nil, fmt.Errorf("failed to remove continuous fund for recipient %s: %w", msg.Recipient, err)
+}
+
+return &types.MsgCancelContinuousFundResponse{
+ CanceledTime: canceledTime,
+ CanceledHeight: uint64(canceledHeight),
+ Recipient: msg.Recipient,
+}, nil
+}
+
+func (k MsgServer)
+
+UpdateParams(ctx context.Context, msg *types.MsgUpdateParams) (*types.MsgUpdateParamsResponse, error) {
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ if err := k.validateAuthority(msg.GetAuthority()); err != nil {
+ return nil, err
+}
+ if err := msg.Params.Validate(); err != nil {
+ return nil, fmt.Errorf("invalid params: %w", err)
+}
+ if err := k.Params.Set(sdkCtx, msg.Params); err != nil {
+ return nil, fmt.Errorf("failed to set params: %w", err)
+}
+
+return &types.MsgUpdateParamsResponse{
+}, nil
+}
+```
+
+### MsgCancelContinuousFund
+
+This message is used to cancel an existing continuous fund proposal for a specific recipient. Once canceled, the continuous fund will no longer distribute funds at each begin block, and the state object will be removed.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/release/v0.53.x/x/protocolpool/proto/cosmos/protocolpool/v1/tx.proto#L136-L161
+```
+
+The message will fail under the following conditions:
+
+* The recipient address is empty or restricted.
+* The ContinuousFund for the recipient does not exist.
+
+```go expandable
+package keeper
+
+import (
+
+ "context"
+ "fmt"
+ "cosmossdk.io/math"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/x/protocolpool/types"
+)
+
+type MsgServer struct {
+ Keeper
+}
+
+var _ types.MsgServer = MsgServer{
+}
+
+// NewMsgServerImpl returns an implementation of the protocolpool MsgServer interface
+// for the provided Keeper.
+func NewMsgServerImpl(keeper Keeper)
+
+types.MsgServer {
+ return &MsgServer{
+ Keeper: keeper
+}
+}
+
+func (k MsgServer)
+
+FundCommunityPool(ctx context.Context, msg *types.MsgFundCommunityPool) (*types.MsgFundCommunityPoolResponse, error) {
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+
+depositor, err := k.authKeeper.AddressCodec().StringToBytes(msg.Depositor)
+ if err != nil {
+ return nil, sdkerrors.ErrInvalidAddress.Wrapf("invalid depositor address: %s", err)
+}
+ if err := validateAmount(msg.Amount); err != nil {
+ return nil, err
+}
+
+ // send funds to community pool module account
+ if err := k.Keeper.FundCommunityPool(sdkCtx, msg.Amount, depositor); err != nil {
+ return nil, err
+}
+
+return &types.MsgFundCommunityPoolResponse{
+}, nil
+}
+
+func (k MsgServer)
+
+CommunityPoolSpend(ctx context.Context, msg *types.MsgCommunityPoolSpend) (*types.MsgCommunityPoolSpendResponse, error) {
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ if err := k.validateAuthority(msg.Authority); err != nil {
+ return nil, err
+}
+ if err := validateAmount(msg.Amount); err != nil {
+ return nil, err
+}
+
+recipient, err := k.authKeeper.AddressCodec().StringToBytes(msg.Recipient)
+ if err != nil {
+ return nil, err
+}
+
+ // distribute funds from community pool module account
+ if err := k.DistributeFromCommunityPool(sdkCtx, msg.Amount, recipient); err != nil {
+ return nil, err
+}
+
+sdkCtx.Logger().Debug("transferred from the community pool", "amount", msg.Amount.String(), "recipient", msg.Recipient)
+
+return &types.MsgCommunityPoolSpendResponse{
+}, nil
+}
+
+func (k MsgServer)
+
+CreateContinuousFund(ctx context.Context, msg *types.MsgCreateContinuousFund) (*types.MsgCreateContinuousFundResponse, error) {
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ if err := k.validateAuthority(msg.Authority); err != nil {
+ return nil, err
+}
+
+recipient, err := k.Keeper.authKeeper.AddressCodec().StringToBytes(msg.Recipient)
+ if err != nil {
+ return nil, err
+}
+
+ // deny creation if we know this address is blocked from receiving funds
+ if k.bankKeeper.BlockedAddr(recipient) {
+ return nil, fmt.Errorf("recipient is blocked in the bank keeper: %s", msg.Recipient)
+}
+
+has, err := k.ContinuousFunds.Has(sdkCtx, recipient)
+ if err != nil {
+ return nil, err
+}
+ if has {
+ return nil, fmt.Errorf("continuous fund already exists for recipient %s", msg.Recipient)
+}
+
+ // Validate the message fields
+ err = validateContinuousFund(sdkCtx, *msg)
+ if err != nil {
+ return nil, err
+}
+
+ // Check if total funds percentage exceeds 100%
+ // If exceeds, we should not setup continuous fund proposal.
+ totalStreamFundsPercentage := math.LegacyZeroDec()
+
+err = k.ContinuousFunds.Walk(sdkCtx, nil, func(key sdk.AccAddress, value types.ContinuousFund) (stop bool, err error) {
+ totalStreamFundsPercentage = totalStreamFundsPercentage.Add(value.Percentage)
+
+return false, nil
+})
+ if err != nil {
+ return nil, err
+}
+
+totalStreamFundsPercentage = totalStreamFundsPercentage.Add(msg.Percentage)
+ if totalStreamFundsPercentage.GT(math.LegacyOneDec()) {
+ return nil, fmt.Errorf("cannot set continuous fund proposal\ntotal funds percentage exceeds 100\ncurrent total percentage: %s", totalStreamFundsPercentage.Sub(msg.Percentage).MulInt64(100).TruncateInt().String())
+}
+
+ // Create continuous fund proposal
+ cf := types.ContinuousFund{
+ Recipient: msg.Recipient,
+ Percentage: msg.Percentage,
+ Expiry: msg.Expiry,
+}
+
+ // Set continuous fund to the state
+ err = k.ContinuousFunds.Set(sdkCtx, recipient, cf)
+ if err != nil {
+ return nil, err
+}
+
+return &types.MsgCreateContinuousFundResponse{
+}, nil
+}
+
+func (k MsgServer)
+
+CancelContinuousFund(ctx context.Context, msg *types.MsgCancelContinuousFund) (*types.MsgCancelContinuousFundResponse, error) {
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ if err := k.validateAuthority(msg.Authority); err != nil {
+ return nil, err
+}
+
+recipient, err := k.Keeper.authKeeper.AddressCodec().StringToBytes(msg.Recipient)
+ if err != nil {
+ return nil, err
+}
+ canceledHeight := sdkCtx.BlockHeight()
+ canceledTime := sdkCtx.BlockTime()
+
+has, err := k.ContinuousFunds.Has(sdkCtx, recipient)
+ if err != nil {
+ return nil, fmt.Errorf("cannot get continuous fund for recipient %w", err)
+}
+ if !has {
+ return nil, fmt.Errorf("cannot cancel continuous fund for recipient %s - does not exist", msg.Recipient)
+}
+ if err := k.ContinuousFunds.Remove(sdkCtx, recipient); err != nil {
+ return nil, fmt.Errorf("failed to remove continuous fund for recipient %s: %w", msg.Recipient, err)
+}
+
+return &types.MsgCancelContinuousFundResponse{
+ CanceledTime: canceledTime,
+ CanceledHeight: uint64(canceledHeight),
+ Recipient: msg.Recipient,
+}, nil
+}
+
+func (k MsgServer)
+
+UpdateParams(ctx context.Context, msg *types.MsgUpdateParams) (*types.MsgUpdateParamsResponse, error) {
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+ if err := k.validateAuthority(msg.GetAuthority()); err != nil {
+ return nil, err
+}
+ if err := msg.Params.Validate(); err != nil {
+ return nil, fmt.Errorf("invalid params: %w", err)
+}
+ if err := k.Params.Set(sdkCtx, msg.Params); err != nil {
+ return nil, fmt.Errorf("failed to set params: %w", err)
+}
+
+return &types.MsgUpdateParamsResponse{
+}, nil
+}
+```
+
+## Client
+
+It takes advantage of `AutoCLI`.
+
+```go expandable
+package protocolpool
+
+import (
+
+ "fmt"
+
+ autocliv1 "cosmossdk.io/api/cosmos/autocli/v1"
+ poolv1 "cosmossdk.io/api/cosmos/protocolpool/v1"
+ "github.com/cosmos/cosmos-sdk/version"
+)
+
+// AutoCLIOptions implements the autocli.HasAutoCLIConfig interface.
+func (am AppModule)
+
+AutoCLIOptions() *autocliv1.ModuleOptions {
+ return &autocliv1.ModuleOptions{
+ Query: &autocliv1.ServiceCommandDescriptor{
+ Service: poolv1.Query_ServiceDesc.ServiceName,
+ RpcCommandOptions: []*autocliv1.RpcCommandOptions{
+ {
+ RpcMethod: "CommunityPool",
+ Use: "community-pool",
+ Short: "Query the amount of coins in the community pool",
+ Example: fmt.Sprintf(`%s query protocolpool community-pool`, version.AppName),
+},
+ {
+ RpcMethod: "ContinuousFunds",
+ Use: "continuous-funds",
+ Short: "Query all continuous funds",
+ Example: fmt.Sprintf(`%s query protocolpool continuous-funds`, version.AppName),
+},
+ {
+ RpcMethod: "ContinuousFund",
+ Use: "continuous-fund ",
+ Short: "Query a continuous fund by its recipient address",
+ Example: fmt.Sprintf(`%s query protocolpool continuous-fund cosmos1...`, version.AppName),
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{{
+ ProtoField: "recipient"
+}},
+},
+},
+},
+ Tx: &autocliv1.ServiceCommandDescriptor{
+ Service: poolv1.Msg_ServiceDesc.ServiceName,
+ RpcCommandOptions: []*autocliv1.RpcCommandOptions{
+ {
+ RpcMethod: "FundCommunityPool",
+ Use: "fund-community-pool ",
+ Short: "Funds the community pool with the specified amount",
+ Example: fmt.Sprintf(`%s tx protocolpool fund-community-pool 100uatom --from mykey`, version.AppName),
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{{
+ ProtoField: "amount"
+}},
+},
+ {
+ RpcMethod: "CreateContinuousFund",
+ Use: "create-continuous-fund ",
+ Short: "Create continuous fund for a recipient with optional expiry",
+ Example: fmt.Sprintf(`%s tx protocolpool create-continuous-fund cosmos1... 0.2 2023-11-31T12:34:56.789Z --from mykey`, version.AppName),
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{
+ {
+ ProtoField: "recipient"
+},
+ {
+ ProtoField: "percentage"
+},
+ {
+ ProtoField: "expiry",
+ Optional: true
+},
+},
+ GovProposal: true,
+},
+ {
+ RpcMethod: "CancelContinuousFund",
+ Use: "cancel-continuous-fund ",
+ Short: "Cancel continuous fund for a specific recipient",
+ Example: fmt.Sprintf(`%s tx protocolpool cancel-continuous-fund cosmos1... --from mykey`, version.AppName),
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{
+ {
+ ProtoField: "recipient"
+},
+},
+ GovProposal: true,
+},
+ {
+ RpcMethod: "UpdateParams",
+ Use: "update-params-proposal ",
+ Short: "Submit a proposal to update protocolpool module params. Note: the entire params must be provided.",
+ Example: fmt.Sprintf(`%s tx protocolpool update-params-proposal '{ "enabled_distribution_denoms": ["stake", "foo"]
+}'`, version.AppName),
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{{
+ ProtoField: "params"
+}},
+ GovProposal: true,
+},
+},
+},
+}
+}
+```
diff --git a/sdk/next/build/modules/slashing/README.mdx b/sdk/next/build/modules/slashing/README.mdx
new file mode 100644
index 000000000..29a5d417d
--- /dev/null
+++ b/sdk/next/build/modules/slashing/README.mdx
@@ -0,0 +1,814 @@
+---
+title: 'x/slashing'
+description: >-
+ This section specifies the slashing module of the Cosmos SDK, which implements
+ functionality first outlined in the Cosmos Whitepaper in June 2016.
+---
+
+## Abstract
+
+This section specifies the slashing module of the Cosmos SDK, which implements functionality
+first outlined in the [Cosmos Whitepaper](https://cosmos.network/about/whitepaper) in June 2016.
+
+The slashing module enables Cosmos SDK-based blockchains to disincentivize any attributable action
+by a protocol-recognized actor with value at stake by penalizing them ("slashing").
+
+Penalties may include, but are not limited to:
+
+* Burning some amount of their stake
+* Removing their ability to vote on future blocks for a period of time.
+
+This module will be used by the Cosmos Hub, the first hub in the Cosmos ecosystem.
+
+## Contents
+
+* [Concepts](#concepts)
+ * [States](#states)
+ * [Tombstone Caps](#tombstone-caps)
+ * [Infraction Timelines](#infraction-timelines)
+* [State](#state)
+ * [Signing Info (Liveness)](#signing-info-liveness)
+ * [Params](#params)
+* [Messages](#messages)
+ * [Unjail](#unjail)
+* [BeginBlock](#beginblock)
+ * [Liveness Tracking](#liveness-tracking)
+* [Hooks](#hooks)
+* [Events](#events)
+* [Staking Tombstone](#staking-tombstone)
+* [Parameters](#parameters)
+* [CLI](#cli)
+ * [Query](#query)
+ * [Transactions](#transactions)
+ * [gRPC](#grpc)
+ * [REST](#rest)
+
+## Concepts
+
+### States
+
+At any given time, there are any number of validators registered in the state
+machine. Each block, the top `MaxValidators` (defined by `x/staking`) validators
+who are not jailed become *bonded*, meaning that they may propose and vote on
+blocks. Validators who are *bonded* are *at stake*, meaning that part or all of
+their stake and their delegators' stake is at risk if they commit a protocol fault.
+
+For each of these validators we keep a `ValidatorSigningInfo` record that contains
+information pertaining to validator's liveness and other infraction related
+attributes.
+
+### Tombstone Caps
+
+In order to mitigate the impact of initially likely categories of non-malicious
+protocol faults, the Cosmos Hub implements for each validator
+a *tombstone* cap, which only allows a validator to be slashed once for a double
+sign fault. For example, if you misconfigure your HSM and double-sign a bunch of
+old blocks, you'll only be punished for the first double-sign (and then immediately tombstoned). This will still be quite expensive and desirable to avoid, but tombstone caps
+somewhat blunt the economic impact of unintentional misconfiguration.
+
+Liveness faults do not have caps, as they can't stack upon each other. Liveness bugs are "detected" as soon as the infraction occurs, and the validators are immediately put in jail, so it is not possible for them to commit multiple liveness faults without unjailing in between.
+
+### Infraction Timelines
+
+To illustrate how the `x/slashing` module handles submitted evidence through
+CometBFT consensus, consider the following examples:
+
+**Definitions**:
+
+*\[* : timeline start\
+*]* : timeline end\
+*Cn* : infraction `n` committed\
+*Dn* : infraction `n` discovered\
+*Vb* : validator bonded\
+*Vu* : validator unbonded
+
+#### Single Double Sign Infraction
+
+\[----------C1----D1,Vu-----]
+
+A single infraction is committed then later discovered, at which point the
+validator is unbonded and slashed at the full amount for the infraction.
+
+#### Multiple Double Sign Infractions
+
+\[----------C1--C2---C3---D1,D2,D3,Vu-----]
+
+Multiple infractions are committed and then later discovered, at which point the
+validator is jailed and slashed for only one infraction. Because the validator
+is also tombstoned, they cannot rejoin the validator set.
+
+## State
+
+### Signing Info (Liveness)
+
+Every block includes a set of precommits by the validators for the previous block,
+known as the `LastCommitInfo` provided by CometBFT. A `LastCommitInfo` is valid so
+long as it contains precommits from +2/3 of total voting power.
+
+Proposers are incentivized to include precommits from all validators in the CometBFT `LastCommitInfo`
+by receiving additional fees proportional to the difference between the voting
+power included in the `LastCommitInfo` and +2/3 (see [fee distribution](/sdk/v0.47/build/modules/distribution/README#begin-block)).
+
+```go
+type LastCommitInfo struct {
+ Round int32
+ Votes []VoteInfo
+}
+```
+
+Validators are penalized for failing to be included in the `LastCommitInfo` for some
+number of blocks by being automatically jailed, potentially slashed, and unbonded.
+
+Information about validator's liveness activity is tracked through `ValidatorSigningInfo`.
+It is indexed in the store as follows:
+
+* ValidatorSigningInfo: `0x01 | ConsAddrLen (1 byte) | ConsAddress -> ProtocolBuffer(ValSigningInfo)`
+* MissedBlocksBitArray: `0x02 | ConsAddrLen (1 byte) | ConsAddress | LittleEndianUint64(signArrayIndex) -> VarInt(didMiss)` (varint is a number encoding format)
+
+The first mapping allows us to easily lookup the recent signing info for a
+validator based on the validator's consensus address.
+
+The second mapping (`MissedBlocksBitArray`) acts
+as a bit-array of size `SignedBlocksWindow` that tells us if the validator missed
+the block for a given index in the bit-array. The index in the bit-array is given
+as little endian uint64.
+The result is a `varint` that takes on `0` or `1`, where `0` indicates the
+validator did not miss (did sign) the corresponding block, and `1` indicates
+they missed the block (did not sign).
+
+Note that the `MissedBlocksBitArray` is not explicitly initialized up-front. Keys
+are added as we progress through the first `SignedBlocksWindow` blocks for a newly
+bonded validator. The `SignedBlocksWindow` parameter defines the size
+(number of blocks) of the sliding window used to track validator liveness.
+
+The information stored for tracking validator liveness is as follows:
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/slashing/v1beta1/slashing.proto#L13-L35
+```
+
+### Params
+
+The slashing module stores its params in state with the prefix of `0x00`.
+Params can be updated with governance or by the address with authority.
+
+* Params: `0x00 | ProtocolBuffer(Params)`
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/slashing/v1beta1/slashing.proto#L37-L59
+```
+
+## Messages
+
+In this section we describe the processing of messages for the `slashing` module.
+
+### Unjail
+
+If a validator was automatically unbonded due to downtime and wishes to come back online &
+possibly rejoin the bonded set, it must send `MsgUnjail`:
+
+```protobuf
+// MsgUnjail is an sdk.Msg used for unjailing a jailed validator, thus returning
+// them into the bonded validator set, so they can begin receiving provisions
+// and rewards again.
+message MsgUnjail {
+ string validator_addr = 1;
+}
+```
+
+Below is a pseudocode of the `MsgSrv/Unjail` RPC:
+
+```go expandable
+unjail(tx MsgUnjail)
+
+validator = getValidator(tx.ValidatorAddr)
+ if validator == nil
+ fail with "No validator found"
+ if getSelfDelegation(validator) == 0
+ fail with "validator must self delegate before unjailing"
+ if !validator.Jailed
+ fail with "Validator not jailed, cannot unjail"
+
+ info = GetValidatorSigningInfo(operator)
+ if info.Tombstoned
+ fail with "Tombstoned validator cannot be unjailed"
+ if block time < info.JailedUntil
+ fail with "Validator still jailed, cannot unjail until period has expired"
+
+ validator.Jailed = false
+ setValidator(validator)
+
+return
+```
+
+If the validator has enough stake to be in the top `n = MaximumBondedValidators`, it will be automatically rebonded,
+and all delegators still delegated to the validator will be rebonded and begin to again collect
+provisions and rewards.
+
+## BeginBlock
+
+### Liveness Tracking
+
+At the beginning of each block, we update the `ValidatorSigningInfo` for each
+validator and check if they've crossed below the liveness threshold over a
+sliding window. This sliding window is defined by `SignedBlocksWindow` and the
+index in this window is determined by `IndexOffset` found in the validator's
+`ValidatorSigningInfo`. For each block processed, the `IndexOffset` is incremented
+regardless if the validator signed or not. Once the index is determined, the
+`MissedBlocksBitArray` and `MissedBlocksCounter` are updated accordingly.
+
+Finally, in order to determine if a validator crosses below the liveness threshold,
+we fetch the maximum number of blocks missed, `maxMissed`, which is
+`SignedBlocksWindow - (MinSignedPerWindow * SignedBlocksWindow)` and the minimum
+height at which we can determine liveness, `minHeight`. If the current block is
+greater than `minHeight` and the validator's `MissedBlocksCounter` is greater than
+`maxMissed`, they will be slashed by `SlashFractionDowntime`, will be jailed
+for `DowntimeJailDuration`, and have the following values reset:
+`MissedBlocksBitArray`, `MissedBlocksCounter`, and `IndexOffset`.
+
+**Note**: Liveness slashes do **NOT** lead to a tombstoning.
+
+```go expandable
+height := block.Height
+ for vote in block.LastCommitInfo.Votes {
+ signInfo := GetValidatorSigningInfo(vote.Validator.Address)
+
+ // This is a relative index, so we count blocks the validator SHOULD have
+ // signed. We use the 0-value default signing info if not present, except for
+ // start height.
+ index := signInfo.IndexOffset % SignedBlocksWindow()
+
+signInfo.IndexOffset++
+
+ // Update MissedBlocksBitArray and MissedBlocksCounter. The MissedBlocksCounter
+ // just tracks the sum of MissedBlocksBitArray. That way we avoid needing to
+ // read/write the whole array each time.
+ missedPrevious := GetValidatorMissedBlockBitArray(vote.Validator.Address, index)
+ missed := !signed
+ switch {
+ case !missedPrevious && missed:
+ // array index has changed from not missed to missed, increment counter
+ SetValidatorMissedBlockBitArray(vote.Validator.Address, index, true)
+
+signInfo.MissedBlocksCounter++
+ case missedPrevious && !missed:
+ // array index has changed from missed to not missed, decrement counter
+ SetValidatorMissedBlockBitArray(vote.Validator.Address, index, false)
+
+signInfo.MissedBlocksCounter--
+
+ default:
+ // array index at this index has not changed; no need to update counter
+}
+ if missed {
+ // emit events...
+}
+ minHeight := signInfo.StartHeight + SignedBlocksWindow()
+ maxMissed := SignedBlocksWindow() - MinSignedPerWindow()
+
+ // If we are past the minimum height and the validator has missed too many
+ // jail and slash them.
+ if height > minHeight && signInfo.MissedBlocksCounter > maxMissed {
+ validator := ValidatorByConsAddr(vote.Validator.Address)
+
+ // emit events...
+
+ // We need to retrieve the stake distribution which signed the block, so we
+ // subtract ValidatorUpdateDelay from the block height, and subtract an
+ // additional 1 since this is the LastCommit.
+ //
+ // Note, that this CAN result in a negative "distributionHeight" up to
+ // -ValidatorUpdateDelay-1, i.e. at the end of the pre-genesis block (none) = at the beginning of the genesis block.
+ // That's fine since this is just used to filter unbonding delegations & redelegations.
+ distributionHeight := height - sdk.ValidatorUpdateDelay - 1
+
+ SlashWithInfractionReason(vote.Validator.Address, distributionHeight, vote.Validator.Power, SlashFractionDowntime(), stakingtypes.Downtime)
+
+Jail(vote.Validator.Address)
+
+signInfo.JailedUntil = block.Time.Add(DowntimeJailDuration())
+
+ // We need to reset the counter & array so that the validator won't be
+ // immediately slashed for downtime upon rebonding.
+ signInfo.MissedBlocksCounter = 0
+ signInfo.IndexOffset = 0
+ ClearValidatorMissedBlockBitArray(vote.Validator.Address)
+}
+
+SetValidatorSigningInfo(vote.Validator.Address, signInfo)
+}
+```
+
+## Hooks
+
+This section contains a description of the module's `hooks`. Hooks are operations that are executed automatically when events are raised.
+
+### Staking hooks
+
+The slashing module implements the `StakingHooks` defined in `x/staking` and are used as record-keeping of validators information. During the app initialization, these hooks should be registered in the staking module struct.
+
+The following hooks impact the slashing state:
+
+* `AfterValidatorBonded` creates a `ValidatorSigningInfo` instance as described in the following section.
+* `AfterValidatorCreated` stores a validator's consensus key.
+* `AfterValidatorRemoved` removes a validator's consensus key.
+
+### Validator Bonded
+
+Upon successful first-time bonding of a new validator, we create a new `ValidatorSigningInfo` structure for the
+now-bonded validator, with its `StartHeight` set to the height of the current block.
+
+If the validator was out of the validator set and gets bonded again, its new bonded height is set.
+
+```go expandable
+onValidatorBonded(address sdk.ValAddress)
+
+signingInfo, found = GetValidatorSigningInfo(address)
+ if !found {
+ signingInfo = ValidatorSigningInfo {
+ StartHeight : CurrentHeight,
+ IndexOffset : 0,
+ JailedUntil : time.Unix(0, 0),
+ Tombstone : false,
+ MissedBlocksCounter : 0
+}
+
+else {
+ signingInfo.StartHeight = CurrentHeight
+}
+
+setValidatorSigningInfo(signingInfo)
+}
+
+return
+```
+
+## Events
+
+The slashing module emits the following events:
+
+### MsgServer
+
+#### MsgUnjail
+
+| Type | Attribute Key | Attribute Value |
+| ------- | ------------- | ------------------ |
+| message | module | slashing |
+| message | sender | `{validatorAddress}` |
+
+### Keeper
+
+#### BeginBlocker: HandleValidatorSignature
+
+| Type | Attribute Key | Attribute Value |
+| ----- | ------------- | --------------------------- |
+| slash | address | `{validatorConsensusAddress}` |
+| slash | power | `{validatorPower}` |
+| slash | reason | `{slashReason}` |
+| slash | jailed \[0] | `{validatorConsensusAddress}` |
+| slash | burned coins | `{math.Int}` |
+
+* \[0] Only included if the validator is jailed.
+
+| Type | Attribute Key | Attribute Value |
+| -------- | -------------- | --------------------------- |
+| liveness | address | `{validatorConsensusAddress}` |
+| liveness | missed\_blocks | `{missedBlocksCounter}` |
+| liveness | height | `{blockHeight}` |
+
+#### Slash
+
+* same as `"slash"` event from `HandleValidatorSignature`, but without the `jailed` attribute.
+
+#### Jail
+
+| Type | Attribute Key | Attribute Value |
+| ----- | ------------- | ------------------ |
+| slash | jailed | `{validatorAddress}` |
+
+## Staking Tombstone
+
+### Abstract
+
+In the current implementation of the `slashing` module, when the consensus engine
+informs the state machine of a validator's consensus fault, the validator is
+partially slashed, and put into a "jail period", a period of time in which they
+are not allowed to rejoin the validator set. However, because of the nature of
+consensus faults and ABCI, there can be a delay between an infraction occurring,
+and evidence of the infraction reaching the state machine (this is one of the
+primary reasons for the existence of the unbonding period).
+
+> Note: The tombstone concept, only applies to faults that have a delay between
+> the infraction occurring and evidence reaching the state machine. For example,
+> evidence of a validator double signing may take a while to reach the state machine
+> due to unpredictable evidence gossip layer delays and the ability of validators to
+> selectively reveal double-signatures (e.g. to infrequently-online light clients).
+> Liveness slashing, on the other hand, is detected immediately as soon as the
+> infraction occurs, and therefore no slashing period is needed. A validator is
+> immediately put into jail period, and they cannot commit another liveness fault
+> until they unjail. In the future, there may be other types of byzantine faults
+> that have delays (for example, submitting evidence of an invalid proposal as a transaction).
+> When implemented, it will have to be decided whether these future types of
+> byzantine faults will result in a tombstoning (and if not, the slash amounts
+> will not be capped by a slashing period).
+
+In the current system design, once a validator is put in the jail for a consensus
+fault, after the `JailPeriod` they are allowed to send a transaction to `unjail`
+themselves, and thus rejoin the validator set.
+
+One of the "design desires" of the `slashing` module is that if multiple
+infractions occur before evidence is executed (and a validator is put in jail),
+they should only be punished for single worst infraction, but not cumulatively.
+For example, if the sequence of events is:
+
+1. Validator A commits Infraction 1 (worth 30% slash)
+2. Validator A commits Infraction 2 (worth 40% slash)
+3. Validator A commits Infraction 3 (worth 35% slash)
+4. Evidence for Infraction 1 reaches state machine (and validator is put in jail)
+5. Evidence for Infraction 2 reaches state machine
+6. Evidence for Infraction 3 reaches state machine
+
+Only Infraction 2 should have its slash take effect, as it is the highest. This
+is done, so that in the case of the compromise of a validator's consensus key,
+they will only be punished once, even if the hacker double-signs many blocks.
+Because, the unjailing has to be done with the validator's operator key, they
+have a chance to re-secure their consensus key, and then signal that they are
+ready using their operator key. We call this period during which we track only
+the max infraction, the "slashing period".
+
+Once a validator rejoins by unjailing themselves, we begin a new slashing period;
+if they commit a new infraction after unjailing, it gets slashed cumulatively on
+top of the worst infraction from the previous slashing period.
+
+However, while infractions are grouped based off of the slashing periods, because
+evidence can be submitted up to an `unbondingPeriod` after the infraction, we
+still have to allow for evidence to be submitted for previous slashing periods.
+For example, if the sequence of events is:
+
+1. Validator A commits Infraction 1 (worth 30% slash)
+2. Validator A commits Infraction 2 (worth 40% slash)
+3. Evidence for Infraction 1 reaches state machine (and Validator A is put in jail)
+4. Validator A unjails
+
+We are now in a new slashing period, however we still have to keep the door open
+for the previous infraction, as the evidence for Infraction 2 may still come in.
+As the number of slashing periods increase, it creates more complexity as we have
+to keep track of the highest infraction amount for every single slashing period.
+
+> Note: Currently, according to the `slashing` module spec, a new slashing period
+> is created every time a validator is unbonded then rebonded. This should probably
+> be changed to jailed/unjailed. See issue [#3205](https://github.com/cosmos/cosmos-sdk/issues/3205)
+> for further details. For the remainder of this, I will assume that we only start
+> a new slashing period when a validator gets unjailed.
+
+The maximum number of slashing periods is the `len(UnbondingPeriod) / len(JailPeriod)`.
+The current defaults in Gaia for the `UnbondingPeriod` and `JailPeriod` are 3 weeks
+and 2 days, respectively. This means there could potentially be up to 11 slashing
+periods concurrently being tracked per validator. If we set the `JailPeriod >= UnbondingPeriod`,
+we only have to track 1 slashing period (i.e not have to track slashing periods).
+
+Currently, in the jail period implementation, once a validator unjails, all of
+their delegators who are delegated to them (haven't unbonded / redelegated away),
+stay with them. Given that consensus safety faults are so egregious
+(way more so than liveness faults), it is probably prudent to have delegators not
+"auto-rebond" to the validator.
+
+#### Proposal: infinite jail
+
+We propose setting the "jail time" for a
+validator who commits a consensus safety fault, to `infinite` (i.e. a tombstone state).
+This essentially kicks the validator out of the validator set and does not allow
+them to re-enter the validator set. All of their delegators (including the operator themselves)
+have to either unbond or redelegate away. The validator operator can create a new
+validator if they would like, with a new operator key and consensus key, but they
+have to "re-earn" their delegations back.
+
+Implementing the tombstone system and getting rid of the slashing period tracking
+will make the `slashing` module way simpler, especially because we can remove all
+of the hooks defined in the `slashing` module consumed by the `staking` module
+(the `slashing` module still consumes hooks defined in `staking`).
+
+#### Single slashing amount
+
+Another optimization that can be made is that if we assume that all ABCI faults
+for CometBFT consensus are slashed at the same level, we don't have to keep
+track of "max slash". Once an ABCI fault happens, we don't have to worry about
+comparing potential future ones to find the max.
+
+Currently the only CometBFT ABCI fault is:
+
+* Unjustified precommits (double signs)
+
+It is currently planned to include the following fault in the near future:
+
+* Signing a precommit when you're in unbonding phase (needed to make light client bisection safe)
+
+Given that these faults are both attributable byzantine faults, we will likely
+want to slash them equally, and thus we can enact the above change.
+
+> Note: This change may make sense for current CometBFT consensus, but maybe
+> not for a different consensus algorithm or future versions of CometBFT that
+> may want to punish at different levels (for example, partial slashing).
+
+## Parameters
+
+The slashing module contains the following parameters:
+
+| Key | Type | Example |
+| ----------------------- | -------------- | ---------------------- |
+| SignedBlocksWindow | string (int64) | "100" |
+| MinSignedPerWindow | string (dec) | "0.500000000000000000" |
+| DowntimeJailDuration | string (ns) | "600000000000" |
+| SlashFractionDoubleSign | string (dec) | "0.050000000000000000" |
+| SlashFractionDowntime | string (dec) | "0.010000000000000000" |
+
+## CLI
+
+A user can query and interact with the `slashing` module using the CLI.
+
+### Query
+
+The `query` commands allow users to query `slashing` state.
+
+```shell
+simd query slashing --help
+```
+
+#### params
+
+The `params` command allows users to query genesis parameters for the slashing module.
+
+```shell
+simd query slashing params [flags]
+```
+
+Example:
+
+```shell
+simd query slashing params
+```
+
+Example Output:
+
+```yml
+downtime_jail_duration: 600s
+min_signed_per_window: "0.500000000000000000"
+signed_blocks_window: "100"
+slash_fraction_double_sign: "0.050000000000000000"
+slash_fraction_downtime: "0.010000000000000000"
+```
+
+#### signing-info
+
+The `signing-info` command allows users to query the signing info of a validator using the consensus public key.
+
+```shell
+simd query slashing signing-info [validator-conskey] [flags]
+```
+
+Example:
+
+```shell
+simd query slashing signing-info '{"@type":"/cosmos.crypto.ed25519.PubKey","key":"Auxs3865HpB/EfssYOzfqNhEJjzys6jD5B6tPgC8="}'
+
+```
+
+Example Output:
+
+```yml
+address: cosmosvalcons1nrqsld3aw6lh6t082frdqc84uwxn0t958c
+index_offset: "2068"
+jailed_until: "1970-01-01T00:00:00Z"
+missed_blocks_counter: "0"
+start_height: "0"
+tombstoned: false
+```
+
+#### signing-infos
+
+The `signing-infos` command allows users to query signing infos of all validators.
+
+```shell
+simd query slashing signing-infos [flags]
+```
+
+Example:
+
+```shell
+simd query slashing signing-infos
+```
+
+Example Output:
+
+```yml
+info:
+- address: cosmosvalcons1nrqsld3aw6lh6t082frdqc84uwxn0t958c
+ index_offset: "2075"
+ jailed_until: "1970-01-01T00:00:00Z"
+ missed_blocks_counter: "0"
+ start_height: "0"
+ tombstoned: false
+pagination:
+ next_key: null
+ total: "0"
+```
+
+### Transactions
+
+The `tx` commands allow users to interact with the `slashing` module.
+
+```bash
+simd tx slashing --help
+```
+
+#### unjail
+
+The `unjail` command allows users to unjail a validator previously jailed for downtime.
+
+```bash
+simd tx slashing unjail --from mykey [flags]
+```
+
+Example:
+
+```bash
+simd tx slashing unjail --from mykey
+```
+
+### gRPC
+
+A user can query the `slashing` module using gRPC endpoints.
+
+#### Params
+
+The `Params` endpoint allows users to query the parameters of the slashing module.
+
+```shell
+cosmos.slashing.v1beta1.Query/Params
+```
+
+Example:
+
+```shell
+grpcurl -plaintext localhost:9090 cosmos.slashing.v1beta1.Query/Params
+```
+
+Example Output:
+
+```json
+{
+ "params": {
+ "signedBlocksWindow": "100",
+ "minSignedPerWindow": "NTAwMDAwMDAwMDAwMDAwMDAw",
+ "downtimeJailDuration": "600s",
+ "slashFractionDoubleSign": "NTAwMDAwMDAwMDAwMDAwMDA=",
+ "slashFractionDowntime": "MTAwMDAwMDAwMDAwMDAwMDA="
+ }
+}
+```
+
+#### SigningInfo
+
+The SigningInfo endpoint queries the signing info of a given cons address.
+
+```shell
+cosmos.slashing.v1beta1.Query/SigningInfo
+```
+
+Example:
+
+```shell
+grpcurl -plaintext -d '{"cons_address":"cosmosvalcons1nrqsld3aw6lh6t082frdqc84uwxn0t958c"}' localhost:9090 cosmos.slashing.v1beta1.Query/SigningInfo
+```
+
+Example Output:
+
+```json
+{
+ "valSigningInfo": {
+ "address": "cosmosvalcons1nrqsld3aw6lh6t082frdqc84uwxn0t958c",
+ "indexOffset": "3493",
+ "jailedUntil": "1970-01-01T00:00:00Z"
+ }
+}
+```
+
+#### SigningInfos
+
+The SigningInfos queries signing info of all validators.
+
+```shell
+cosmos.slashing.v1beta1.Query/SigningInfos
+```
+
+Example:
+
+```shell
+grpcurl -plaintext localhost:9090 cosmos.slashing.v1beta1.Query/SigningInfos
+```
+
+Example Output:
+
+```json expandable
+{
+ "info": [
+ {
+ "address": "cosmosvalcons1nrqslkwd3pz096lh6t082frdqc84uwxn0t958c",
+ "indexOffset": "2467",
+ "jailedUntil": "1970-01-01T00:00:00Z"
+ }
+ ],
+ "pagination": {
+ "total": "1"
+ }
+}
+```
+
+### REST
+
+A user can query the `slashing` module using REST endpoints.
+
+#### Params
+
+```shell
+/cosmos/slashing/v1beta1/params
+```
+
+Example:
+
+```shell
+curl "localhost:1317/cosmos/slashing/v1beta1/params"
+```
+
+Example Output:
+
+```json
+{
+ "params": {
+ "signed_blocks_window": "100",
+ "min_signed_per_window": "0.500000000000000000",
+ "downtime_jail_duration": "600s",
+ "slash_fraction_double_sign": "0.050000000000000000",
+    "slash_fraction_downtime": "0.010000000000000000"
+  }
+}
+```
+
+#### signing\_info
+
+```shell
+/cosmos/slashing/v1beta1/signing_infos/%s
+```
+
+Example:
+
+```shell
+curl "localhost:1317/cosmos/slashing/v1beta1/signing_infos/cosmosvalcons1nrqslkwd3pz096lh6t082frdqc84uwxn0t958c"
+```
+
+Example Output:
+
+```json
+{
+ "val_signing_info": {
+ "address": "cosmosvalcons1nrqslkwd3pz096lh6t082frdqc84uwxn0t958c",
+ "start_height": "0",
+ "index_offset": "4184",
+ "jailed_until": "1970-01-01T00:00:00Z",
+ "tombstoned": false,
+ "missed_blocks_counter": "0"
+ }
+}
+```
+
+#### signing\_infos
+
+```shell
+/cosmos/slashing/v1beta1/signing_infos
+```
+
+Example:
+
+```shell
+curl "localhost:1317/cosmos/slashing/v1beta1/signing_infos"
+```
+
+Example Output:
+
+```json expandable
+{
+ "info": [
+ {
+ "address": "cosmosvalcons1nrqslkwd3pz096lh6t082frdqc84uwxn0t958c",
+ "start_height": "0",
+ "index_offset": "4169",
+ "jailed_until": "1970-01-01T00:00:00Z",
+ "tombstoned": false,
+ "missed_blocks_counter": "0"
+ }
+ ],
+ "pagination": {
+ "next_key": null,
+ "total": "1"
+ }
+}
+```
diff --git a/sdk/next/build/modules/staking/README.mdx b/sdk/next/build/modules/staking/README.mdx
new file mode 100644
index 000000000..e566b1257
--- /dev/null
+++ b/sdk/next/build/modules/staking/README.mdx
@@ -0,0 +1,3467 @@
+---
+title: 'x/staking'
+description: >-
+ This paper specifies the Staking module of the Cosmos SDK that was first
+ described in the Cosmos Whitepaper in June 2016.
+---
+
+## Abstract
+
+This paper specifies the Staking module of the Cosmos SDK that was first
+described in the [Cosmos Whitepaper](https://cosmos.network/about/whitepaper)
+in June 2016.
+
+The module enables Cosmos SDK-based blockchain to support an advanced
+Proof-of-Stake (PoS) system. In this system, holders of the native staking token of
+the chain can become validators and can delegate tokens to validators,
+ultimately determining the effective validator set for the system.
+
+This module is used in the Cosmos Hub, the first Hub in the Cosmos
+network.
+
+## Contents
+
+* [State](#state)
+ * [Pool](#pool)
+ * [LastTotalPower](#lasttotalpower)
+ * [ValidatorUpdates](#validatorupdates)
+ * [UnbondingID](#unbondingid)
+ * [Params](#params)
+ * [Validator](#validator)
+ * [Delegation](#delegation)
+ * [UnbondingDelegation](#unbondingdelegation)
+ * [Redelegation](#redelegation)
+ * [Queues](#queues)
+ * [HistoricalInfo](#historicalinfo)
+* [State Transitions](#state-transitions)
+ * [Validators](#validators)
+ * [Delegations](#delegations)
+ * [Slashing](#slashing)
+ * [How Shares are calculated](#how-shares-are-calculated)
+* [Messages](#messages)
+ * [MsgCreateValidator](#msgcreatevalidator)
+ * [MsgEditValidator](#msgeditvalidator)
+ * [MsgDelegate](#msgdelegate)
+ * [MsgUndelegate](#msgundelegate)
+ * [MsgCancelUnbondingDelegation](#msgcancelunbondingdelegation)
+ * [MsgBeginRedelegate](#msgbeginredelegate)
+ * [MsgUpdateParams](#msgupdateparams)
+* [Begin-Block](#begin-block)
+ * [Historical Info Tracking](#historical-info-tracking)
+* [End-Block](#end-block)
+ * [Validator Set Changes](#validator-set-changes)
+ * [Queues](#queues-1)
+* [Hooks](#hooks)
+* [Events](#events)
+ * [EndBlocker](#endblocker)
+ * [Msg's](#msgs)
+* [Parameters](#parameters)
+* [Client](#client)
+ * [CLI](#cli)
+ * [gRPC](#grpc)
+ * [REST](#rest)
+
+## State
+
+### Pool
+
+Pool is used for tracking bonded and not-bonded token supply of the bond denomination.
+
+### LastTotalPower
+
+LastTotalPower tracks the total amounts of bonded tokens recorded during the previous end block.
+Store entries prefixed with "Last" must remain unchanged until EndBlock.
+
+* LastTotalPower: `0x12 -> ProtocolBuffer(math.Int)`
+
+### ValidatorUpdates
+
+ValidatorUpdates contains the validator updates returned to ABCI at the end of every block.
+The values are overwritten in every block.
+
+* ValidatorUpdates `0x61 -> []abci.ValidatorUpdate`
+
+### UnbondingID
+
+UnbondingID stores the ID of the latest unbonding operation. It enables creating unique IDs for unbonding operations, i.e., UnbondingID is incremented every time a new unbonding operation (validator unbonding, unbonding delegation, redelegation) is initiated.
+
+* UnbondingID: `0x37 -> uint64`
+
+### Params
+
+The staking module stores its params in state with the prefix of `0x51`,
+it can be updated with governance or the address with authority.
+
+* Params: `0x51 | ProtocolBuffer(Params)`
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/staking.proto#L310-L333
+```
+
+### Validator
+
+Validators can have one of three statuses
+
+* `Unbonded`: The validator is not in the active set. They cannot sign blocks and do not earn
+ rewards. They can receive delegations.
+* `Bonded`: Once the validator receives sufficient bonded tokens they automatically join the
+ active set during [`EndBlock`](#validator-set-changes) and their status is updated to `Bonded`.
+ They are signing blocks and receiving rewards. They can receive further delegations.
+ They can be slashed for misbehavior. Delegators to this validator who unbond their delegation
+ must wait the duration of the UnbondingTime, a chain-specific param, during which time
+ they are still slashable for offences of the source validator if those offences were committed
+ during the period of time that the tokens were bonded.
+* `Unbonding`: When a validator leaves the active set, either by choice or due to slashing, jailing or
+ tombstoning, an unbonding of all their delegations begins. All delegations must then wait the UnbondingTime
+ before their tokens are moved to their accounts from the `BondedPool`.
+
+
+Tombstoning is permanent, once tombstoned a validator's consensus key can not be reused within the chain where the tombstoning happened.
+
+
+Validators objects should be primarily stored and accessed by the
+`OperatorAddr`, an SDK validator address for the operator of the validator. Two
+additional indices are maintained per validator object in order to fulfill
+required lookups for slashing and validator-set updates. A third special index
+(`LastValidatorPower`) is also maintained which however remains constant
+throughout each block, unlike the first two indices which mirror the validator
+records within a block.
+
+* Validators: `0x21 | OperatorAddrLen (1 byte) | OperatorAddr -> ProtocolBuffer(validator)`
+* ValidatorsByConsAddr: `0x22 | ConsAddrLen (1 byte) | ConsAddr -> OperatorAddr`
+* ValidatorsByPower: `0x23 | BigEndian(ConsensusPower) | OperatorAddrLen (1 byte) | OperatorAddr -> OperatorAddr`
+* LastValidatorsPower: `0x11 | OperatorAddrLen (1 byte) | OperatorAddr -> ProtocolBuffer(ConsensusPower)`
+* ValidatorsByUnbondingID: `0x38 | UnbondingID -> 0x21 | OperatorAddrLen (1 byte) | OperatorAddr`
+
+`Validators` is the primary index - it ensures that each operator can have only one
+associated validator, where the public key of that validator can change in the
+future. Delegators can refer to the immutable operator of the validator, without
+concern for the changing public key.
+
+`ValidatorsByUnbondingID` is an additional index that enables lookups for
+validators by the unbonding IDs corresponding to their current unbonding.
+
+`ValidatorByConsAddr` is an additional index that enables lookups for slashing.
+When CometBFT reports evidence, it provides the validator address, so this
+map is needed to find the operator. Note that the `ConsAddr` corresponds to the
+address which can be derived from the validator's `ConsPubKey`.
+
+`ValidatorsByPower` is an additional index that provides a sorted list of
+potential validators to quickly determine the current active set. Here
+ConsensusPower is validator.Tokens/10^6 by default. Note that all validators
+where `Jailed` is true are not stored within this index.
+
+`LastValidatorsPower` is a special index that provides a historical list of the
+last-block's bonded validators. This index remains constant during a block but
+is updated during the validator set update process which takes place in [`EndBlock`](#end-block).
+
+Each validator's state is stored in a `Validator` struct:
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/staking.proto#L82-L138
+```
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/staking.proto#L26-L80
+```
+
+### Delegation
+
+Delegations are identified by combining `DelegatorAddr` (the address of the delegator)
+with the `ValidatorAddr`. Delegations are indexed in the store as follows:
+
+* Delegation: `0x31 | DelegatorAddrLen (1 byte) | DelegatorAddr | ValidatorAddrLen (1 byte) | ValidatorAddr -> ProtocolBuffer(delegation)`
+
+Stake holders may delegate coins to validators; under this circumstance their
+funds are held in a `Delegation` data structure. It is owned by one
+delegator, and is associated with the shares for one validator. The sender of
+the transaction is the owner of the bond.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/staking.proto#L198-L216
+```
+
+#### Delegator Shares
+
+When one delegates tokens to a Validator, they are issued a number of delegator shares based on a
+dynamic exchange rate, calculated as follows from the total number of tokens delegated to the
+validator and the number of shares issued so far:
+
+`Shares per Token = validator.TotalShares() / validator.Tokens()`
+
+Only the number of shares received is stored on the DelegationEntry. When a delegator then
+Undelegates, the token amount they receive is calculated from the number of shares they currently
+hold and the inverse exchange rate:
+
+`Tokens per Share = validator.Tokens() / validator.Shares()`
+
+These `Shares` are simply an accounting mechanism. They are not a fungible asset. The reason for
+this mechanism is to simplify the accounting around slashing. Rather than iteratively slashing the
+tokens of every delegation entry, instead the Validator's total bonded tokens can be slashed,
+effectively reducing the value of each issued delegator share.
+
+### UnbondingDelegation
+
+Shares in a `Delegation` can be unbonded, but they must for some time exist as
+an `UnbondingDelegation`, where shares can be reduced if Byzantine behavior is
+detected.
+
+`UnbondingDelegation` are indexed in the store as:
+
+* UnbondingDelegation: `0x32 | DelegatorAddrLen (1 byte) | DelegatorAddr | ValidatorAddrLen (1 byte) | ValidatorAddr -> ProtocolBuffer(unbondingDelegation)`
+* UnbondingDelegationsFromValidator: `0x33 | ValidatorAddrLen (1 byte) | ValidatorAddr | DelegatorAddrLen (1 byte) | DelegatorAddr -> nil`
+* UnbondingDelegationByUnbondingId: `0x38 | UnbondingId -> 0x32 | DelegatorAddrLen (1 byte) | DelegatorAddr | ValidatorAddrLen (1 byte) | ValidatorAddr`
+
+`UnbondingDelegation` is used in queries, to lookup all unbonding delegations for
+a given delegator.
+
+`UnbondingDelegationsFromValidator` is used in slashing, to lookup all
+unbonding delegations associated with a given validator that need to be
+slashed.
+
+`UnbondingDelegationByUnbondingId` is an additional index that enables
+lookups for unbonding delegations by the unbonding IDs of the containing
+unbonding delegation entries.
+
+A UnbondingDelegation object is created every time an unbonding is initiated.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/staking.proto#L218-L261
+```
+
+### Redelegation
+
+The bonded tokens worth of a `Delegation` may be instantly redelegated from a
+source validator to a different validator (destination validator). However when
+this occurs they must be tracked in a `Redelegation` object, whereby their
+shares can be slashed if their tokens have contributed to a Byzantine fault
+committed by the source validator.
+
+`Redelegation` are indexed in the store as:
+
+* Redelegations: `0x34 | DelegatorAddrLen (1 byte) | DelegatorAddr | ValidatorAddrLen (1 byte) | ValidatorSrcAddr | ValidatorDstAddr -> ProtocolBuffer(redelegation)`
+* RedelegationsBySrc: `0x35 | ValidatorSrcAddrLen (1 byte) | ValidatorSrcAddr | ValidatorDstAddrLen (1 byte) | ValidatorDstAddr | DelegatorAddrLen (1 byte) | DelegatorAddr -> nil`
+* RedelegationsByDst: `0x36 | ValidatorDstAddrLen (1 byte) | ValidatorDstAddr | ValidatorSrcAddrLen (1 byte) | ValidatorSrcAddr | DelegatorAddrLen (1 byte) | DelegatorAddr -> nil`
+* RedelegationByUnbondingId: `0x38 | UnbondingId -> 0x34 | DelegatorAddrLen (1 byte) | DelegatorAddr | ValidatorAddrLen (1 byte) | ValidatorSrcAddr | ValidatorDstAddr`
+
+`Redelegations` is used for queries, to lookup all redelegations for a given
+delegator.
+
+`RedelegationsBySrc` is used for slashing based on the `ValidatorSrcAddr`.
+
+`RedelegationsByDst` is used for slashing based on the `ValidatorDstAddr`
+
+The first map here is used for queries, to lookup all redelegations for a given
+delegator. The second map is used for slashing based on the `ValidatorSrcAddr`,
+while the third map is for slashing based on the `ValidatorDstAddr`.
+
+`RedelegationByUnbondingId` is an additional index that enables
+lookups for redelegations by the unbonding IDs of the containing
+redelegation entries.
+
+A redelegation object is created every time a redelegation occurs. To prevent
+"redelegation hopping" redelegations may not occur under the situation that:
+
+* the (re)delegator already has another immature redelegation in progress
+ with a destination to a validator (call it `Validator X`)
+* and, the (re)delegator is attempting to create a *new* redelegation
+ where the source validator for this new redelegation is `Validator X`.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/staking.proto#L263-L308
+```
+
+### Queues
+
+All queue objects are sorted by timestamp. The time used within any queue is
+firstly converted to UTC, rounded to the nearest nanosecond then sorted. The sortable time format
+used is a slight modification of the RFC3339Nano and uses the format string
+`"2006-01-02T15:04:05.000000000"`. Notably this format:
+
+* right pads all zeros
+* drops the time zone info (UTC is already used)
+
+In all cases, the stored timestamp represents the maturation time of the queue
+element.
+
+#### UnbondingDelegationQueue
+
+For the purpose of tracking progress of unbonding delegations the unbonding
+delegations queue is kept.
+
+* UnbondingDelegation: `0x41 | format(time) -> []DVPair`
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/staking.proto#L162-L172
+```
+
+#### RedelegationQueue
+
+For the purpose of tracking progress of redelegations the redelegation queue is
+kept.
+
+* RedelegationQueue: `0x42 | format(time) -> []DVVTriplet`
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/staking.proto#L179-L191
+```
+
+#### ValidatorQueue
+
+For the purpose of tracking progress of unbonding validators the validator
+queue is kept.
+
+* ValidatorQueueTime: `0x43 | format(time) -> []sdk.ValAddress`
+
+The stored object by each key is an array of validator operator addresses from
+which the validator object can be accessed. Typically it is expected that only
+a single validator record will be associated with a given timestamp however it is possible
+that multiple validators exist in the queue at the same location.
+
+### HistoricalInfo
+
+HistoricalInfo objects are stored and pruned at each block such that the staking keeper persists
+the `n` most recent historical info defined by staking module parameter: `HistoricalEntries`.
+
+```protobuf expandable
+syntax = "proto3";
+package cosmos.staking.v1beta1;
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+
+import "cosmos_proto/cosmos.proto";
+import "cosmos/base/v1beta1/coin.proto";
+import "amino/amino.proto";
+import "tendermint/types/types.proto";
+import "tendermint/abci/types.proto";
+
+option go_package = "github.com/cosmos/cosmos-sdk/x/staking/types";
+
+// HistoricalInfo contains header and validator information for a given block.
+// It is stored as part of staking module's state, which persists the `n` most
+// recent HistoricalInfo
+// (`n` is set by the staking module's `historical_entries` parameter).
+message HistoricalInfo {
+ tendermint.types.Header header = 1 [(gogoproto.nullable) = false, (amino.dont_omitempty) = true];
+ repeated Validator valset = 2 [(gogoproto.nullable) = false, (amino.dont_omitempty) = true];
+}
+
+// CommissionRates defines the initial commission rates to be used for creating
+// a validator.
+message CommissionRates {
+ option (gogoproto.equal) = true;
+ option (gogoproto.goproto_stringer) = false;
+
+ // rate is the commission rate charged to delegators, as a fraction.
+ string rate = 1 [
+ (cosmos_proto.scalar) = "cosmos.Dec",
+ (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Dec",
+ (gogoproto.nullable) = false
+ ];
+ // max_rate defines the maximum commission rate which validator can ever charge, as a fraction.
+ string max_rate = 2 [
+ (cosmos_proto.scalar) = "cosmos.Dec",
+ (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Dec",
+ (gogoproto.nullable) = false
+ ];
+ // max_change_rate defines the maximum daily increase of the validator commission, as a fraction.
+ string max_change_rate = 3 [
+ (cosmos_proto.scalar) = "cosmos.Dec",
+ (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Dec",
+ (gogoproto.nullable) = false
+ ];
+}
+
+// Commission defines commission parameters for a given validator.
+message Commission {
+ option (gogoproto.equal) = true;
+ option (gogoproto.goproto_stringer) = false;
+
+ // commission_rates defines the initial commission rates to be used for creating a validator.
+ CommissionRates commission_rates = 1
+ [(gogoproto.embed) = true, (gogoproto.nullable) = false, (amino.dont_omitempty) = true];
+ // update_time is the last time the commission rate was changed.
+ google.protobuf.Timestamp update_time = 2
+ [(gogoproto.nullable) = false, (amino.dont_omitempty) = true, (gogoproto.stdtime) = true];
+}
+
+// Description defines a validator description.
+message Description {
+ option (gogoproto.equal) = true;
+ option (gogoproto.goproto_stringer) = false;
+
+ // moniker defines a human-readable name for the validator.
+ string moniker = 1;
+ // identity defines an optional identity signature (ex. UPort or Keybase).
+ string identity = 2;
+ // website defines an optional website link.
+ string website = 3;
+ // security_contact defines an optional email for security contact.
+ string security_contact = 4;
+ // details define other optional details.
+ string details = 5;
+}
+
+// Validator defines a validator, together with the total amount of the
+// Validator's bond shares and their exchange rate to coins. Slashing results in
+// a decrease in the exchange rate, allowing correct calculation of future
+// undelegations without iterating over delegators. When coins are delegated to
+// this validator, the validator is credited with a delegation whose number of
+// bond shares is based on the amount of coins delegated divided by the current
+// exchange rate. Voting power can be calculated as total bonded shares
+// multiplied by exchange rate.
+message Validator {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_stringer) = false;
+ option (gogoproto.goproto_getters) = false;
+
+ // operator_address defines the address of the validator's operator; bech encoded in JSON.
+ string operator_address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+ // consensus_pubkey is the consensus public key of the validator, as a Protobuf Any.
+ google.protobuf.Any consensus_pubkey = 2 [(cosmos_proto.accepts_interface) = "cosmos.crypto.PubKey"];
+ // jailed defined whether the validator has been jailed from bonded status or not.
+ bool jailed = 3;
+ // status is the validator status (bonded/unbonding/unbonded).
+ BondStatus status = 4;
+ // tokens define the delegated tokens (incl. self-delegation).
+ string tokens = 5 [
+ (cosmos_proto.scalar) = "cosmos.Int",
+ (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int",
+ (gogoproto.nullable) = false
+ ];
+ // delegator_shares defines total shares issued to a validator's delegators.
+ string delegator_shares = 6 [
+ (cosmos_proto.scalar) = "cosmos.Dec",
+ (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Dec",
+ (gogoproto.nullable) = false
+ ];
+ // description defines the description terms for the validator.
+ Description description = 7 [(gogoproto.nullable) = false, (amino.dont_omitempty) = true];
+ // unbonding_height defines, if unbonding, the height at which this validator has begun unbonding.
+ int64 unbonding_height = 8;
+ // unbonding_time defines, if unbonding, the min time for the validator to complete unbonding.
+ google.protobuf.Timestamp unbonding_time = 9
+ [(gogoproto.nullable) = false, (amino.dont_omitempty) = true, (gogoproto.stdtime) = true];
+ // commission defines the commission parameters.
+ Commission commission = 10 [(gogoproto.nullable) = false, (amino.dont_omitempty) = true];
+ // min_self_delegation is the validator's self declared minimum self delegation.
+ //
+ // Since: cosmos-sdk 0.46
+ string min_self_delegation = 11 [
+ (cosmos_proto.scalar) = "cosmos.Int",
+ (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int",
+ (gogoproto.nullable) = false
+ ];
+
+ // strictly positive if this validator's unbonding has been stopped by external modules
+ int64 unbonding_on_hold_ref_count = 12;
+
+ // list of unbonding ids, each uniquely identifing an unbonding of this validator
+ repeated uint64 unbonding_ids = 13;
+}
+
+// BondStatus is the status of a validator.
+enum BondStatus {
+ option (gogoproto.goproto_enum_prefix) = false;
+
+ // UNSPECIFIED defines an invalid validator status.
+ BOND_STATUS_UNSPECIFIED = 0 [(gogoproto.enumvalue_customname) = "Unspecified"];
+ // UNBONDED defines a validator that is not bonded.
+ BOND_STATUS_UNBONDED = 1 [(gogoproto.enumvalue_customname) = "Unbonded"];
+ // UNBONDING defines a validator that is unbonding.
+ BOND_STATUS_UNBONDING = 2 [(gogoproto.enumvalue_customname) = "Unbonding"];
+ // BONDED defines a validator that is bonded.
+ BOND_STATUS_BONDED = 3 [(gogoproto.enumvalue_customname) = "Bonded"];
+}
+
+// ValAddresses defines a repeated set of validator addresses.
+message ValAddresses {
+ option (gogoproto.goproto_stringer) = false;
+ option (gogoproto.stringer) = true;
+
+ repeated string addresses = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+}
+
+// DVPair is struct that just has a delegator-validator pair with no other data.
+// It is intended to be used as a marshalable pointer. For example, a DVPair can
+// be used to construct the key to getting an UnbondingDelegation from state.
+message DVPair {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+ option (gogoproto.goproto_stringer) = false;
+
+ string delegator_address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+ string validator_address = 2 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+}
+
+// DVPairs defines an array of DVPair objects.
+message DVPairs {
+ repeated DVPair pairs = 1 [(gogoproto.nullable) = false, (amino.dont_omitempty) = true];
+}
+
+// DVVTriplet is struct that just has a delegator-validator-validator triplet
+// with no other data. It is intended to be used as a marshalable pointer. For
+// example, a DVVTriplet can be used to construct the key to getting a
+// Redelegation from state.
+message DVVTriplet {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+ option (gogoproto.goproto_stringer) = false;
+
+ string delegator_address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+ string validator_src_address = 2 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+ string validator_dst_address = 3 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+}
+
+// DVVTriplets defines an array of DVVTriplet objects.
+message DVVTriplets {
+ repeated DVVTriplet triplets = 1 [(gogoproto.nullable) = false, (amino.dont_omitempty) = true];
+}
+
+// Delegation represents the bond with tokens held by an account. It is
+// owned by one delegator, and is associated with the voting power of one
+// validator.
+message Delegation {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+ option (gogoproto.goproto_stringer) = false;
+
+ // delegator_address is the bech32-encoded address of the delegator.
+ string delegator_address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+ // validator_address is the bech32-encoded address of the validator.
+ string validator_address = 2 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+ // shares define the delegation shares received.
+ string shares = 3 [
+ (cosmos_proto.scalar) = "cosmos.Dec",
+ (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Dec",
+ (gogoproto.nullable) = false
+ ];
+}
+
+// UnbondingDelegation stores all of a single delegator's unbonding bonds
+// for a single validator in an time-ordered list.
+message UnbondingDelegation {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+ option (gogoproto.goproto_stringer) = false;
+
+ // delegator_address is the bech32-encoded address of the delegator.
+ string delegator_address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+ // validator_address is the bech32-encoded address of the validator.
+ string validator_address = 2 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+ // entries are the unbonding delegation entries.
+ repeated UnbondingDelegationEntry entries = 3
+ [(gogoproto.nullable) = false, (amino.dont_omitempty) = true]; // unbonding delegation entries
+}
+
+// UnbondingDelegationEntry defines an unbonding object with relevant metadata.
+message UnbondingDelegationEntry {
+ option (gogoproto.equal) = true;
+ option (gogoproto.goproto_stringer) = false;
+
+ // creation_height is the height which the unbonding took place.
+ int64 creation_height = 1;
+ // completion_time is the unix time for unbonding completion.
+ google.protobuf.Timestamp completion_time = 2
+ [(gogoproto.nullable) = false, (amino.dont_omitempty) = true, (gogoproto.stdtime) = true];
+ // initial_balance defines the tokens initially scheduled to receive at completion.
+ string initial_balance = 3 [
+ (cosmos_proto.scalar) = "cosmos.Int",
+ (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int",
+ (gogoproto.nullable) = false
+ ];
+ // balance defines the tokens to receive at completion.
+ string balance = 4 [
+ (cosmos_proto.scalar) = "cosmos.Int",
+ (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int",
+ (gogoproto.nullable) = false
+ ];
+ // Incrementing id that uniquely identifies this entry
+ uint64 unbonding_id = 5;
+
+ // Strictly positive if this entry's unbonding has been stopped by external modules
+ int64 unbonding_on_hold_ref_count = 6;
+}
+
+// RedelegationEntry defines a redelegation object with relevant metadata.
+message RedelegationEntry {
+ option (gogoproto.equal) = true;
+ option (gogoproto.goproto_stringer) = false;
+
+ // creation_height defines the height which the redelegation took place.
+ int64 creation_height = 1;
+ // completion_time defines the unix time for redelegation completion.
+ google.protobuf.Timestamp completion_time = 2
+ [(gogoproto.nullable) = false, (amino.dont_omitempty) = true, (gogoproto.stdtime) = true];
+ // initial_balance defines the initial balance when redelegation started.
+ string initial_balance = 3 [
+ (cosmos_proto.scalar) = "cosmos.Int",
+ (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int",
+ (gogoproto.nullable) = false
+ ];
+ // shares_dst is the amount of destination-validator shares created by redelegation.
+ string shares_dst = 4 [
+ (cosmos_proto.scalar) = "cosmos.Dec",
+ (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Dec",
+ (gogoproto.nullable) = false
+ ];
+ // Incrementing id that uniquely identifies this entry
+ uint64 unbonding_id = 5;
+
+ // Strictly positive if this entry's unbonding has been stopped by external modules
+ int64 unbonding_on_hold_ref_count = 6;
+}
+
+// Redelegation contains the list of a particular delegator's redelegating bonds
+// from a particular source validator to a particular destination validator.
+message Redelegation {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+ option (gogoproto.goproto_stringer) = false;
+
+ // delegator_address is the bech32-encoded address of the delegator.
+ string delegator_address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+ // validator_src_address is the validator redelegation source operator address.
+ string validator_src_address = 2 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+ // validator_dst_address is the validator redelegation destination operator address.
+ string validator_dst_address = 3 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+ // entries are the redelegation entries.
+ repeated RedelegationEntry entries = 4
+ [(gogoproto.nullable) = false, (amino.dont_omitempty) = true]; // redelegation entries
+}
+
+// Params defines the parameters for the x/staking module.
+message Params {
+ option (amino.name) = "cosmos-sdk/x/staking/Params";
+ option (gogoproto.equal) = true;
+ option (gogoproto.goproto_stringer) = false;
+
+ // unbonding_time is the time duration of unbonding.
+ google.protobuf.Duration unbonding_time = 1
+ [(gogoproto.nullable) = false, (amino.dont_omitempty) = true, (gogoproto.stdduration) = true];
+ // max_validators is the maximum number of validators.
+ uint32 max_validators = 2;
+ // max_entries is the max entries for either unbonding delegation or redelegation (per pair/trio).
+ uint32 max_entries = 3;
+ // historical_entries is the number of historical entries to persist.
+ uint32 historical_entries = 4;
+ // bond_denom defines the bondable coin denomination.
+ string bond_denom = 5;
+ // min_commission_rate is the chain-wide minimum commission rate that a validator can charge their delegators
+ string min_commission_rate = 6 [
+ (gogoproto.moretags) = "yaml:\"min_commission_rate\"",
+ (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Dec",
+ (gogoproto.nullable) = false
+ ];
+}
+
+// DelegationResponse is equivalent to Delegation except that it contains a
+// balance in addition to shares which is more suitable for client responses.
+message DelegationResponse {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_stringer) = false;
+
+ Delegation delegation = 1 [(gogoproto.nullable) = false, (amino.dont_omitempty) = true];
+
+ cosmos.base.v1beta1.Coin balance = 2 [(gogoproto.nullable) = false, (amino.dont_omitempty) = true];
+}
+
+// RedelegationEntryResponse is equivalent to a RedelegationEntry except that it
+// contains a balance in addition to shares which is more suitable for client
+// responses.
+message RedelegationEntryResponse {
+ option (gogoproto.equal) = true;
+
+ RedelegationEntry redelegation_entry = 1 [(gogoproto.nullable) = false, (amino.dont_omitempty) = true];
+ string balance = 4 [
+ (cosmos_proto.scalar) = "cosmos.Int",
+ (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int",
+ (gogoproto.nullable) = false
+ ];
+}
+
+// RedelegationResponse is equivalent to a Redelegation except that its entries
+// contain a balance in addition to shares which is more suitable for client
+// responses.
+message RedelegationResponse {
+ option (gogoproto.equal) = false;
+
+ Redelegation redelegation = 1 [(gogoproto.nullable) = false, (amino.dont_omitempty) = true];
+ repeated RedelegationEntryResponse entries = 2 [(gogoproto.nullable) = false, (amino.dont_omitempty) = true];
+}
+
+// Pool is used for tracking bonded and not-bonded token supply of the bond
+// denomination.
+message Pool {
+ option (gogoproto.description) = true;
+ option (gogoproto.equal) = true;
+ string not_bonded_tokens = 1 [
+ (cosmos_proto.scalar) = "cosmos.Int",
+ (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int",
+ (gogoproto.nullable) = false,
+ (gogoproto.jsontag) = "not_bonded_tokens",
+ (amino.dont_omitempty) = true
+ ];
+ string bonded_tokens = 2 [
+ (cosmos_proto.scalar) = "cosmos.Int",
+ (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int",
+ (gogoproto.nullable) = false,
+ (gogoproto.jsontag) = "bonded_tokens",
+ (amino.dont_omitempty) = true
+ ];
+}
+
+// Infraction indicates the infraction a validator committed.
+enum Infraction {
+ // UNSPECIFIED defines an empty infraction.
+ INFRACTION_UNSPECIFIED = 0;
+ // DOUBLE_SIGN defines a validator that double-signs a block.
+ INFRACTION_DOUBLE_SIGN = 1;
+ // DOWNTIME defines a validator that missed signing too many blocks.
+ INFRACTION_DOWNTIME = 2;
+}
+
+// ValidatorUpdates defines an array of abci.ValidatorUpdate objects.
+// TODO: explore moving this to proto/cosmos/base to separate modules from tendermint dependence
+message ValidatorUpdates {
+ repeated tendermint.abci.ValidatorUpdate updates = 1 [(gogoproto.nullable) = false, (amino.dont_omitempty) = true];
+}
+```
+
+At each BeginBlock, the staking keeper will persist the current Header and the Validators that committed
+the current block in a `HistoricalInfo` object. The Validators are sorted on their address to ensure that
+they are in a deterministic order.
+The oldest HistoricalEntries will be pruned to ensure that there only exist the parameter-defined number of
+historical entries.
+
+## State Transitions
+
+### Validators
+
+State transitions in validators are performed on every [`EndBlock`](#validator-set-changes)
+in order to check for changes in the active `ValidatorSet`.
+
+A validator can be `Unbonded`, `Unbonding` or `Bonded`. `Unbonded`
+and `Unbonding` are collectively called `Not Bonded`. A validator can move
+directly between all the states, except from `Bonded` to `Unbonded`.
+
+#### Not bonded to Bonded
+
+The following transition occurs when a validator's ranking in the `ValidatorPowerIndex` surpasses
+that of the `LastValidator`.
+
+* set `validator.Status` to `Bonded`
+* send the `validator.Tokens` from the `NotBondedTokens` to the `BondedPool` `ModuleAccount`
+* delete the existing record from `ValidatorByPowerIndex`
+* add a new updated record to the `ValidatorByPowerIndex`
+* update the `Validator` object for this validator
+* if it exists, delete any `ValidatorQueue` record for this validator
+
+#### Bonded to Unbonding
+
+When a validator begins the unbonding process the following operations occur:
+
+* send the `validator.Tokens` from the `BondedPool` to the `NotBondedTokens` `ModuleAccount`
+* set `validator.Status` to `Unbonding`
+* delete the existing record from `ValidatorByPowerIndex`
+* add a new updated record to the `ValidatorByPowerIndex`
+* update the `Validator` object for this validator
+* insert a new record into the `ValidatorQueue` for this validator
+
+#### Unbonding to Unbonded
+
+A validator moves from unbonding to unbonded when the `ValidatorQueue` object
+moves from bonded to unbonded
+
+* update the `Validator` object for this validator
+* set `validator.Status` to `Unbonded`
+
+#### Jail/Unjail
+
+When a validator is jailed it is effectively removed from the CometBFT set.
+This process may also be reversed. The following operations occur:
+
+* set `Validator.Jailed` and update object
+* if jailed delete record from `ValidatorByPowerIndex`
+* if unjailed add record to `ValidatorByPowerIndex`
+
+Jailed validators are not present in any of the following stores:
+
+* the power store (from consensus power to address)
+
+### Delegations
+
+#### Delegate
+
+When a delegation occurs both the validator and the delegation objects are affected
+
+* determine the delegators shares based on tokens delegated and the validator's exchange rate
+* remove tokens from the sending account
+* add shares to the delegation object or add them to a created validator object
+* add new delegator shares and update the `Validator` object
+* transfer the `delegation.Amount` from the delegator's account to the `BondedPool` or the `NotBondedPool` `ModuleAccount` depending if the `validator.Status` is `Bonded` or not
+* delete the existing record from `ValidatorByPowerIndex`
+* add a new updated record to the `ValidatorByPowerIndex`
+
+#### Begin Unbonding
+
+As a part of the Undelegate and Complete Unbonding state transitions Unbond
+Delegation may be called.
+
+* subtract the unbonded shares from delegator
+* add the unbonded tokens to an `UnbondingDelegationEntry`
+* update the delegation or remove the delegation if there are no more shares
+* if the delegation is the operator of the validator and no more shares exist then trigger a jail validator
+* update the validator, removing the delegator shares and associated coins
+* if the validator state is `Bonded`, transfer the `Coins` worth of the unbonded
+ shares from the `BondedPool` to the `NotBondedPool` `ModuleAccount`
+* remove the validator if it is unbonded and there are no more delegation shares
+* get a unique `unbondingId` and map it to the `UnbondingDelegationEntry` in `UnbondingDelegationByUnbondingId`
+* call the `AfterUnbondingInitiated(unbondingId)` hook
+* add the unbonding delegation to `UnbondingDelegationQueue` with the completion time set to `UnbondingTime`
+
+#### Cancel an `UnbondingDelegation` Entry
+
+When a `cancel unbond delegation` occurs both the `validator`, the `delegation` and an `UnbondingDelegationQueue` state will be updated.
+
+* if the cancel unbonding delegation amount equals the `UnbondingDelegation` entry `balance`, then the `UnbondingDelegation` entry is deleted from the `UnbondingDelegationQueue`.
+* if the cancel unbonding delegation amount is less than the `UnbondingDelegation` entry balance, then the `UnbondingDelegation` entry will be updated with the new balance in the `UnbondingDelegationQueue`.
+* cancel `amount` is [Delegated](#delegations) back to the original `validator`.
+
+#### Complete Unbonding
+
+For undelegations which do not complete immediately, the following operations
+occur when the unbonding delegation queue element matures:
+
+* remove the entry from the `UnbondingDelegation` object
+* transfer the tokens from the `NotBondedPool` `ModuleAccount` to the delegator `Account`
+
+#### Begin Redelegation
+
+Redelegations affect the delegation, source and destination validators.
+
+* perform an `unbond` delegation from the source validator to retrieve the tokens worth of the unbonded shares
+* using the unbonded tokens, `Delegate` them to the destination validator
+* if the `sourceValidator.Status` is `Bonded`, and the `destinationValidator` is not,
+ transfer the newly delegated tokens from the `BondedPool` to the `NotBondedPool` `ModuleAccount`
+* otherwise, if the `sourceValidator.Status` is not `Bonded`, and the `destinationValidator`
+ is `Bonded`, transfer the newly delegated tokens from the `NotBondedPool` to the `BondedPool` `ModuleAccount`
+* record the token amount in an new entry in the relevant `Redelegation`
+
+From when a redelegation begins until it completes, the delegator is in a state of "pseudo-unbonding", and can still be
+slashed for infractions that occurred before the redelegation began.
+
+#### Complete Redelegation
+
+When a redelegation completes the following occurs:
+
+* remove the entry from the `Redelegation` object
+
+### Slashing
+
+#### Slash Validator
+
+When a Validator is slashed, the following occurs:
+
+* The total `slashAmount` is calculated as the `slashFactor` (a chain parameter) \* `TokensFromConsensusPower`,
+ the total number of tokens bonded to the validator at the time of the infraction.
+* Every unbonding delegation and pseudo-unbonding redelegation such that the infraction occurred before the unbonding or
+ redelegation began from the validator are slashed by the `slashFactor` percentage of the initialBalance.
+* Each amount slashed from redelegations and unbonding delegations is subtracted from the
+ total slash amount.
+* The `remainingSlashAmount` is then slashed from the validator's tokens in the `BondedPool` or
+ `NonBondedPool` depending on the validator's status. This reduces the total supply of tokens.
+
+In the case of a slash due to any infraction that requires evidence to be submitted (for example double-sign), the slash
+occurs at the block where the evidence is included, not at the block where the infraction occurred.
+Put otherwise, validators are not slashed retroactively, only when they are caught.
+
+#### Slash Unbonding Delegation
+
+When a validator is slashed, so are those unbonding delegations from the validator that began unbonding
+after the time of the infraction. Every entry in every unbonding delegation from the validator
+is slashed by `slashFactor`. The amount slashed is calculated from the `InitialBalance` of the
+delegation and is capped to prevent a resulting negative balance. Completed (or mature) unbondings are not slashed.
+
+#### Slash Redelegation
+
+When a validator is slashed, so are all redelegations from the validator that began after the
+infraction. Redelegations are slashed by `slashFactor`.
+Redelegations that began before the infraction are not slashed.
+The amount slashed is calculated from the `InitialBalance` of the delegation and is capped to
+prevent a resulting negative balance.
+Mature redelegations (that have completed pseudo-unbonding) are not slashed.
+
+### How Shares are calculated
+
+At any given point in time, each validator has a number of tokens, `T`, and has a number of shares issued, `S`.
+Each delegator, `i`, holds a number of shares, `S_i`.
+The number of tokens is the sum of all tokens delegated to the validator, plus the rewards, minus the slashes.
+
+The delegator is entitled to a portion of the underlying tokens proportional to their proportion of shares.
+So delegator `i` is entitled to `T * S_i / S` of the validator's tokens.
+
+When a delegator delegates new tokens to the validator, they receive a number of shares proportional to their contribution.
+So when delegator `j` delegates `T_j` tokens, they receive `S_j = S * T_j / T` shares.
+The total number of tokens is now `T + T_j`, and the total number of shares is `S + S_j`.
+`j`s proportion of the shares is the same as their proportion of the total tokens contributed: `(S + S_j) / S = (T + T_j) / T`.
+
+A special case is the initial delegation, when `T = 0` and `S = 0`, so `T_j / T` is undefined.
+For the initial delegation, delegator `j` who delegates `T_j` tokens receives `S_j = T_j` shares.
+So a validator that hasn't received any rewards and has not been slashed will have `T = S`.
+
+## Messages
+
+In this section we describe the processing of the staking messages and the corresponding updates to the state. All created/modified state objects specified by each message are defined within the [state](#state) section.
+
+### MsgCreateValidator
+
+A validator is created using the `MsgCreateValidator` message.
+The validator must be created with an initial delegation from the operator.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/tx.proto#L20-L21
+```
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/tx.proto#L50-L73
+```
+
+This message is expected to fail if:
+
+* another validator with this operator address is already registered
+* another validator with this pubkey is already registered
+* the initial self-delegation tokens are of a denom not specified as the bonding denom
+* the commission parameters are faulty, namely:
+ * `MaxRate` is either > 1 or < 0
+ * the initial `Rate` is either negative or > `MaxRate`
+ * the initial `MaxChangeRate` is either negative or > `MaxRate`
+* the description fields are too large
+
+This message creates and stores the `Validator` object at appropriate indexes.
+Additionally a self-delegation is made with the initial delegation
+tokens `Delegation`. The validator always starts as unbonded but may be bonded
+in the first end-block.
+
+### MsgEditValidator
+
+The `Description`, `CommissionRate` of a validator can be updated using the
+`MsgEditValidator` message.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/tx.proto#L23-L24
+```
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/tx.proto#L78-L97
+```
+
+This message is expected to fail if:
+
+* the initial `CommissionRate` is either negative or > `MaxRate`
+* the `CommissionRate` has already been updated within the previous 24 hours
+* the `CommissionRate` is > `MaxChangeRate`
+* the description fields are too large
+
+This message stores the updated `Validator` object.
+
+### MsgDelegate
+
+Within this message the delegator provides coins, and in return receives
+some amount of their validator's (newly created) delegator-shares that are
+assigned to `Delegation.Shares`.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/tx.proto#L26-L28
+```
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/tx.proto#L102-L114
+```
+
+This message is expected to fail if:
+
+* the validator does not exist
+* the `Amount` `Coin` has a denomination different than one defined by `params.BondDenom`
+* the exchange rate is invalid, meaning the validator has no tokens (due to slashing) but there are outstanding shares
+* the amount delegated is less than the minimum allowed delegation
+
+If an existing `Delegation` object for provided addresses does not already
+exist then it is created as part of this message otherwise the existing
+`Delegation` is updated to include the newly received shares.
+
+The delegator receives newly minted shares at the current exchange rate.
+The exchange rate is the number of existing shares in the validator divided by
+the number of currently delegated tokens.
+
+The validator is updated in the `ValidatorByPower` index, and the delegation is
+tracked in validator object in the `Validators` index.
+
+It is possible to delegate to a jailed validator, the only difference being it
+will not be added to the power index until it is unjailed.
+
+
+
+### MsgUndelegate
+
+The `MsgUndelegate` message allows delegators to undelegate their tokens from
+a validator.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/tx.proto#L34-L36
+```
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/tx.proto#L140-L152
+```
+
+This message returns a response containing the completion time of the undelegation:
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/tx.proto#L154-L158
+```
+
+This message is expected to fail if:
+
+* the delegation doesn't exist
+* the validator doesn't exist
+* the delegation has less shares than the ones worth of `Amount`
+* existing `UnbondingDelegation` has maximum entries as defined by `params.MaxEntries`
+* the `Amount` has a denomination different than one defined by `params.BondDenom`
+
+When this message is processed the following actions occur:
+
+* validator's `DelegatorShares` and the delegation's `Shares` are both reduced by the message `SharesAmount`
+* calculate the token worth of the shares and remove that amount of tokens held within the validator
+* with those removed tokens, if the validator is:
+ * `Bonded` - add them to an entry in `UnbondingDelegation` (create `UnbondingDelegation` if it doesn't exist) with a completion time a full unbonding period from the current time. Update pool shares to reduce BondedTokens and increase NotBondedTokens by token worth of the shares.
+ * `Unbonding` - add them to an entry in `UnbondingDelegation` (create `UnbondingDelegation` if it doesn't exist) with the same completion time as the validator (`UnbondingMinTime`).
+ * `Unbonded` - then send the coins to the message `DelegatorAddr`
+* if there are no more `Shares` in the delegation, then the delegation object is removed from the store
+ * under this situation if the delegation is the validator's self-delegation then also jail the validator.
+
+
+
+### MsgCancelUnbondingDelegation
+
+The `MsgCancelUnbondingDelegation` message allows delegators to cancel the `unbondingDelegation` entry and delegate back to a previous validator.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/tx.proto#L38-L42
+```
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/tx.proto#L160-L175
+```
+
+This message is expected to fail if:
+
+* the `unbondingDelegation` entry is already processed.
+* the `cancel unbonding delegation` amount is greater than the `unbondingDelegation` entry balance.
+* the `cancel unbonding delegation` height doesn't exist in the `unbondingDelegationQueue` of the delegator.
+
+When this message is processed the following actions occur:
+
+* if the `unbondingDelegation` Entry balance is zero
+ * in this condition `unbondingDelegation` entry will be removed from `unbondingDelegationQueue`.
+ * otherwise `unbondingDelegationQueue` will be updated with new `unbondingDelegation` entry balance and initial balance
+* the validator's `DelegatorShares` and the delegation's `Shares` are both increased by the message `Amount`.
+
+### MsgBeginRedelegate
+
+The redelegation command allows delegators to instantly switch validators. Once
+the unbonding period has passed, the redelegation is automatically completed in
+the EndBlocker.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/tx.proto#L30-L32
+```
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/tx.proto#L119-L132
+```
+
+This message returns a response containing the completion time of the redelegation:
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/tx.proto#L133-L138
+```
+
+This message is expected to fail if:
+
+* the delegation doesn't exist
+* the source or destination validators don't exist
+* the delegation has less shares than the ones worth of `Amount`
+* the source validator has a receiving redelegation which is not matured (aka. the redelegation may be transitive)
+* existing `Redelegation` has maximum entries as defined by `params.MaxEntries`
+* the `Amount` `Coin` has a denomination different than one defined by `params.BondDenom`
+
+When this message is processed the following actions occur:
+
+* the source validator's `DelegatorShares` and the delegations `Shares` are both reduced by the message `SharesAmount`
+* calculate the token worth of the shares and remove that amount of tokens held within the source validator.
+* if the source validator is:
+ * `Bonded` - add an entry to the `Redelegation` (create `Redelegation` if it doesn't exist) with a completion time a full unbonding period from the current time. Update pool shares to reduce BondedTokens and increase NotBondedTokens by token worth of the shares (this may be effectively reversed in the next step however).
+ * `Unbonding` - add an entry to the `Redelegation` (create `Redelegation` if it doesn't exist) with the same completion time as the validator (`UnbondingMinTime`).
+ * `Unbonded` - no action required in this step
+* Delegate the token worth to the destination validator, possibly moving tokens back to the bonded state.
+* if there are no more `Shares` in the source delegation, then the source delegation object is removed from the store
+ * under this situation if the delegation is the validator's self-delegation then also jail the validator.
+
+
+
+### MsgUpdateParams
+
+The `MsgUpdateParams` message updates the staking module parameters.
+The params are updated through a governance proposal where the signer is the gov module account address.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/staking/v1beta1/tx.proto#L182-L195
+```
+
+The message handling can fail if:
+
+* signer is not the authority defined in the staking keeper (usually the gov module account).
+* the bond denom does not exist on-chain or has zero supply.
+
+
+As of v0.54, `MsgUpdateParams` validates that the `bond_denom` parameter references an existing denomination with non-zero supply. This prevents governance proposals from setting an invalid bond denomination that would place the chain in an unsafe state.
+
+
+## Begin-Block
+
+Each abci begin block call, the historical info will get stored and pruned
+according to the `HistoricalEntries` parameter.
+
+### Historical Info Tracking
+
+If the `HistoricalEntries` parameter is 0, then the `BeginBlock` performs a no-op.
+
+Otherwise, the latest historical info is stored under the key `historicalInfoKey|height`, while any entries older than `height - HistoricalEntries` are deleted.
+In most cases, this results in a single entry being pruned per block.
+However, if the parameter `HistoricalEntries` has changed to a lower value there will be multiple entries in the store that must be pruned.
+
+## End-Block
+
+Each abci end block call, the operations to update queues and validator set
+changes are specified to execute.
+
+### Validator Set Changes
+
+The staking validator set is updated during this process by state transitions
+that run at the end of every block. As a part of this process any updated
+validators are also returned back to CometBFT for inclusion in the CometBFT
+validator set which is responsible for validating CometBFT messages at the
+consensus layer. Operations are as following:
+
+* the new validator set is taken as the top `params.MaxValidators` number of
+ validators retrieved from the `ValidatorsByPower` index
+* the previous validator set is compared with the new validator set:
+ * missing validators begin unbonding and their `Tokens` are transferred from the
+ `BondedPool` to the `NotBondedPool` `ModuleAccount`
+ * new validators are instantly bonded and their `Tokens` are transferred from the
+ `NotBondedPool` to the `BondedPool` `ModuleAccount`
+
+In all cases, any validators leaving or entering the bonded validator set or
+changing balances and staying within the bonded validator set incur an update
+message reporting their new consensus power which is passed back to CometBFT.
+
+The `LastTotalPower` and `LastValidatorsPower` hold the state of the total power
+and validator power from the end of the last block, and are used to check for
+changes that have occurred in `ValidatorsByPower` and the total new power, which
+is calculated during `EndBlock`.
+
+### Queues
+
+Within staking, certain state-transitions are not instantaneous but take place
+over a duration of time (typically the unbonding period). When these
+transitions are mature certain operations must take place in order to complete
+the state operation. This is achieved through the use of queues which are
+checked/processed at the end of each block.
+
+#### Unbonding Validators
+
+When a validator is kicked out of the bonded validator set (either through
+being jailed, or not having sufficient bonded tokens) it begins the unbonding
+process and all its delegations begin unbonding (while still being
+delegated to this validator). At this point the validator is said to be an
+"unbonding validator", whereby it will mature to become an "unbonded validator"
+after the unbonding period has passed.
+
+Each block the validator queue is to be checked for mature unbonding validators
+(namely with a completion time `<=` current time and completion height `<=` current
+block height). At this point any mature validators which do not have any
+delegations remaining are deleted from state. For all other mature unbonding
+validators that still have remaining delegations, the `validator.Status` is
+switched from `types.Unbonding` to
+`types.Unbonded`.
+
+Unbonding operations can be put on hold by external modules via the `PutUnbondingOnHold(unbondingId)` method.
+As a result, an unbonding operation (e.g., an unbonding delegation) that is on hold, cannot complete
+even if it reaches maturity. For an unbonding operation with `unbondingId` to eventually complete
+(after it reaches maturity), every call to `PutUnbondingOnHold(unbondingId)` must be matched
+by a call to `UnbondingCanComplete(unbondingId)`.
+
+#### Unbonding Delegations
+
+Complete the unbonding of all mature `UnbondingDelegations.Entries` within the
+`UnbondingDelegations` queue with the following procedure:
+
+* transfer the balance coins to the delegator's wallet address
+* remove the mature entry from `UnbondingDelegation.Entries`
+* remove the `UnbondingDelegation` object from the store if there are no
+ remaining entries.
+
+#### Redelegations
+
+Complete the unbonding of all mature `Redelegation.Entries` within the
+`Redelegations` queue with the following procedure:
+
+* remove the mature entry from `Redelegation.Entries`
+* remove the `Redelegation` object from the store if there are no
+ remaining entries.
+
+## Hooks
+
+Other modules may register operations to execute when a certain event has
+occurred within staking. These events can be registered to execute either
+right `Before` or `After` the staking event (as per the hook name). The
+following hooks can be registered with staking:
+
+* `AfterValidatorCreated(Context, ValAddress) error`
+ * called when a validator is created
+* `BeforeValidatorModified(Context, ValAddress) error`
+ * called when a validator's state is changed
+* `AfterValidatorRemoved(Context, ConsAddress, ValAddress) error`
+ * called when a validator is deleted
+* `AfterValidatorBonded(Context, ConsAddress, ValAddress) error`
+ * called when a validator is bonded
+* `AfterValidatorBeginUnbonding(Context, ConsAddress, ValAddress) error`
+ * called when a validator begins unbonding
+* `BeforeDelegationCreated(Context, AccAddress, ValAddress) error`
+ * called when a delegation is created
+* `BeforeDelegationSharesModified(Context, AccAddress, ValAddress) error`
+ * called when a delegation's shares are modified
+* `AfterDelegationModified(Context, AccAddress, ValAddress) error`
+ * called when a delegation is created or modified
+* `BeforeDelegationRemoved(Context, AccAddress, ValAddress) error`
+ * called when a delegation is removed
+* `AfterUnbondingInitiated(Context, UnbondingID)`
+ * called when an unbonding operation (validator unbonding, unbonding delegation, redelegation) was initiated
+
+## Events
+
+The staking module emits the following events:
+
+### EndBlocker
+
+| Type | Attribute Key | Attribute Value |
+| ---------------------- | ---------------------- | ------------------------- |
+| complete\_unbonding | amount | `{totalUnbondingAmount}` |
+| complete\_unbonding | validator | `{validatorAddress}` |
+| complete\_unbonding | delegator | `{delegatorAddress}` |
+| complete\_redelegation | amount | `{totalRedelegationAmount}` |
+| complete\_redelegation | source\_validator | `{srcValidatorAddress}` |
+| complete\_redelegation | destination\_validator | `{dstValidatorAddress}` |
+| complete\_redelegation | delegator | `{delegatorAddress}` |
+
+## Msg's
+
+### MsgCreateValidator
+
+| Type | Attribute Key | Attribute Value |
+| ----------------- | ------------- | ------------------ |
+| create\_validator | validator | `{validatorAddress}` |
+| create\_validator | amount | `{delegationAmount}` |
+| message | module | staking |
+| message | action | create\_validator |
+| message | sender | `{senderAddress}` |
+
+### MsgEditValidator
+
+| Type | Attribute Key | Attribute Value |
+| --------------- | --------------------- | ------------------- |
+| edit\_validator | commission\_rate | `{commissionRate}` |
+| edit\_validator | min\_self\_delegation | `{minSelfDelegation}` |
+| message | module | staking |
+| message | action | edit\_validator |
+| message | sender | `{senderAddress}` |
+
+### MsgDelegate
+
+| Type | Attribute Key | Attribute Value |
+| -------- | ------------- | ------------------ |
+| delegate | validator | `{validatorAddress}` |
+| delegate | amount | `{delegationAmount}` |
+| message | module | staking |
+| message | action | delegate |
+| message | sender | `{senderAddress}` |
+
+### MsgUndelegate
+
+| Type | Attribute Key | Attribute Value |
+| ------- | --------------------- | ------------------ |
+| unbond | validator | `{validatorAddress}` |
+| unbond | amount | `{unbondAmount}` |
+| unbond | completion\_time \[0] | `{completionTime}` |
+| message | module | staking |
+| message | action | begin\_unbonding |
+| message | sender | `{senderAddress}` |
+
+* \[0] Time is formatted in the RFC3339 standard
+
+### MsgCancelUnbondingDelegation
+
+| Type | Attribute Key | Attribute Value |
+| ----------------------------- | ---------------- | --------------------------------- |
+| cancel\_unbonding\_delegation | validator | `{validatorAddress}` |
+| cancel\_unbonding\_delegation | delegator | `{delegatorAddress}` |
+| cancel\_unbonding\_delegation | amount | `{cancelUnbondingDelegationAmount}` |
+| cancel\_unbonding\_delegation | creation\_height | `{unbondingCreationHeight}` |
+| message | module | staking |
+| message | action | cancel\_unbond |
+| message | sender | `{senderAddress}` |
+
+### MsgBeginRedelegate
+
+| Type | Attribute Key | Attribute Value |
+| ---------- | ---------------------- | --------------------- |
+| redelegate | source\_validator | `{srcValidatorAddress}` |
+| redelegate | destination\_validator | `{dstValidatorAddress}` |
+| redelegate | amount | `{unbondAmount}` |
+| redelegate | completion\_time \[0] | `{completionTime}` |
+| message | module | staking |
+| message | action | begin\_redelegate |
+| message | sender | `{senderAddress}` |
+
+* \[0] Time is formatted in the RFC3339 standard
+
+## Parameters
+
+The staking module contains the following parameters:
+
+| Key | Type | Example |
+| ----------------- | ---------------- | ---------------------- |
+| UnbondingTime | string (time ns) | "259200000000000" |
+| MaxValidators | uint16 | 100 |
+| KeyMaxEntries | uint16 | 7 |
+| HistoricalEntries | uint16 | 3 |
+| BondDenom | string | "stake" |
+| MinCommissionRate | string | "0.000000000000000000" |
+
+## Client
+
+### CLI
+
+A user can query and interact with the `staking` module using the CLI.
+
+#### Query
+
+The `query` commands allows users to query `staking` state.
+
+```bash
+simd query staking --help
+```
+
+##### delegation
+
+The `delegation` command allows users to query delegations for an individual delegator on an individual validator.
+
+Usage:
+
+```bash
+simd query staking delegation [delegator-addr] [validator-addr] [flags]
+```
+
+Example:
+
+```bash
+simd query staking delegation cosmos1gghjut3ccd8ay0zduzj64hwre2fxs9ld75ru9p cosmosvaloper1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj
+```
+
+Example Output:
+
+```bash
+balance:
+ amount: "10000000000"
+ denom: stake
+delegation:
+ delegator_address: cosmos1gghjut3ccd8ay0zduzj64hwre2fxs9ld75ru9p
+ shares: "10000000000.000000000000000000"
+ validator_address: cosmosvaloper1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj
+```
+
+##### delegations
+
+The `delegations` command allows users to query delegations for an individual delegator on all validators.
+
+Usage:
+
+```bash
+simd query staking delegations [delegator-addr] [flags]
+```
+
+Example:
+
+```bash
+simd query staking delegations cosmos1gghjut3ccd8ay0zduzj64hwre2fxs9ld75ru9p
+```
+
+Example Output:
+
+```bash expandable
+delegation_responses:
+- balance:
+ amount: "10000000000"
+ denom: stake
+ delegation:
+ delegator_address: cosmos1gghjut3ccd8ay0zduzj64hwre2fxs9ld75ru9p
+ shares: "10000000000.000000000000000000"
+ validator_address: cosmosvaloper1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj
+- balance:
+ amount: "10000000000"
+ denom: stake
+ delegation:
+ delegator_address: cosmos1gghjut3ccd8ay0zduzj64hwre2fxs9ld75ru9p
+ shares: "10000000000.000000000000000000"
+ validator_address: cosmosvaloper1x20lytyf6zkcrv5edpkfkn8sz578qg5sqfyqnp
+pagination:
+ next_key: null
+ total: "0"
+```
+
+##### delegations-to
+
+The `delegations-to` command allows users to query delegations on an individual validator.
+
+Usage:
+
+```bash
+simd query staking delegations-to [validator-addr] [flags]
+```
+
+Example:
+
+```bash
+simd query staking delegations-to cosmosvaloper1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj
+```
+
+Example Output:
+
+```bash expandable
+- balance:
+ amount: "504000000"
+ denom: stake
+ delegation:
+ delegator_address: cosmos1q2qwwynhv8kh3lu5fkeex4awau9x8fwt45f5cp
+ shares: "504000000.000000000000000000"
+ validator_address: cosmosvaloper1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj
+- balance:
+ amount: "78125000000"
+ denom: uixo
+ delegation:
+ delegator_address: cosmos1qvppl3479hw4clahe0kwdlfvf8uvjtcd99m2ca
+ shares: "78125000000.000000000000000000"
+ validator_address: cosmosvaloper1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj
+pagination:
+ next_key: null
+ total: "0"
+```
+
+##### historical-info
+
+The `historical-info` command allows users to query historical information at given height.
+
+Usage:
+
+```bash
+simd query staking historical-info [height] [flags]
+```
+
+Example:
+
+```bash
+simd query staking historical-info 10
+```
+
+Example Output:
+
+```bash expandable
+header:
+ app_hash: Lbx8cXpI868wz8sgp4qPYVrlaKjevR5WP/IjUxwp3oo=
+ chain_id: testnet
+ consensus_hash: BICRvH3cKD93v7+R1zxE2ljD34qcvIZ0Bdi389qtoi8=
+ data_hash: 47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=
+ evidence_hash: 47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=
+ height: "10"
+ last_block_id:
+ hash: RFbkpu6pWfSThXxKKl6EZVDnBSm16+U0l0xVjTX08Fk=
+ part_set_header:
+ hash: vpIvXD4rxD5GM4MXGz0Sad9I7//iVYLzZsEU4BVgWIU=
+ total: 1
+ last_commit_hash: Ne4uXyx4QtNp4Zx89kf9UK7oG9QVbdB6e7ZwZkhy8K0=
+ last_results_hash: 47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=
+ next_validators_hash: nGBgKeWBjoxeKFti00CxHsnULORgKY4LiuQwBuUrhCs=
+ proposer_address: mMEP2c2IRPLr99LedSRtBg9eONM=
+ time: "2021-10-01T06:00:49.785790894Z"
+ validators_hash: nGBgKeWBjoxeKFti00CxHsnULORgKY4LiuQwBuUrhCs=
+ version:
+ app: "0"
+ block: "11"
+valset:
+- commission:
+ commission_rates:
+ max_change_rate: "0.010000000000000000"
+ max_rate: "0.200000000000000000"
+ rate: "0.100000000000000000"
+ update_time: "2021-10-01T05:52:50.380144238Z"
+ consensus_pubkey:
+ '@type': /cosmos.crypto.ed25519.PubKey
+ key: Auxs3865HpB/EfssYOzfqNhEJjzys2Fo6jD5B8tPgC8=
+ delegator_shares: "10000000.000000000000000000"
+ description:
+ details: ""
+ identity: ""
+ moniker: myvalidator
+ security_contact: ""
+ website: ""
+ jailed: false
+ min_self_delegation: "1"
+ operator_address: cosmosvaloper1rne8lgs98p0jqe82sgt0qr4rdn4hgvmgp9ggcc
+ status: BOND_STATUS_BONDED
+ tokens: "10000000"
+ unbonding_height: "0"
+ unbonding_time: "1970-01-01T00:00:00Z"
+```
+
+##### params
+
+The `params` command allows users to query values set as staking parameters.
+
+Usage:
+
+```bash
+simd query staking params [flags]
+```
+
+Example:
+
+```bash
+simd query staking params
+```
+
+Example Output:
+
+```bash
+bond_denom: stake
+historical_entries: 10000
+max_entries: 7
+max_validators: 50
+unbonding_time: 1814400s
+```
+
+##### pool
+
+The `pool` command allows users to query values for amounts stored in the staking pool.
+
+Usage:
+
+```bash
+simd q staking pool [flags]
+```
+
+Example:
+
+```bash
+simd q staking pool
+```
+
+Example Output:
+
+```bash
+bonded_tokens: "10000000"
+not_bonded_tokens: "0"
+```
+
+##### redelegation
+
+The `redelegation` command allows users to query a redelegation record based on delegator and a source and destination validator address.
+
+Usage:
+
+```bash
+simd query staking redelegation [delegator-addr] [src-validator-addr] [dst-validator-addr] [flags]
+```
+
+Example:
+
+```bash
+simd query staking redelegation cosmos1gghjut3ccd8ay0zduzj64hwre2fxs9ld75ru9p cosmosvaloper1l2rsakp388kuv9k8qzq6lrm9taddae7fpx59wm cosmosvaloper1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj
+```
+
+Example Output:
+
+```bash expandable
+pagination: null
+redelegation_responses:
+- entries:
+ - balance: "50000000"
+ redelegation_entry:
+ completion_time: "2021-10-24T20:33:21.960084845Z"
+ creation_height: 2.382847e+06
+ initial_balance: "50000000"
+ shares_dst: "50000000.000000000000000000"
+ - balance: "5000000000"
+ redelegation_entry:
+ completion_time: "2021-10-25T21:33:54.446846862Z"
+ creation_height: 2.397271e+06
+ initial_balance: "5000000000"
+ shares_dst: "5000000000.000000000000000000"
+ redelegation:
+ delegator_address: cosmos1gghjut3ccd8ay0zduzj64hwre2fxs9ld75ru9p
+ entries: null
+    validator_dst_address: cosmosvaloper1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj
+    validator_src_address: cosmosvaloper1l2rsakp388kuv9k8qzq6lrm9taddae7fpx59wm
+```
+
+##### redelegations
+
+The `redelegations` command allows users to query all redelegation records for an individual delegator.
+
+Usage:
+
+```bash
+simd query staking redelegations [delegator-addr] [flags]
+```
+
+Example:
+
+```bash
+simd query staking redelegations cosmos1gghjut3ccd8ay0zduzj64hwre2fxs9ld75ru9p
+```
+
+Example Output:
+
+```bash expandable
+pagination:
+ next_key: null
+ total: "0"
+redelegation_responses:
+- entries:
+ - balance: "50000000"
+ redelegation_entry:
+ completion_time: "2021-10-24T20:33:21.960084845Z"
+ creation_height: 2.382847e+06
+ initial_balance: "50000000"
+ shares_dst: "50000000.000000000000000000"
+ - balance: "5000000000"
+ redelegation_entry:
+ completion_time: "2021-10-25T21:33:54.446846862Z"
+ creation_height: 2.397271e+06
+ initial_balance: "5000000000"
+ shares_dst: "5000000000.000000000000000000"
+ redelegation:
+ delegator_address: cosmos1gghjut3ccd8ay0zduzj64hwre2fxs9ld75ru9p
+ entries: null
+ validator_dst_address: cosmosvaloper1uccl5ugxrm7vqlzwqr04pjd320d2fz0z3hc6vm
+ validator_src_address: cosmosvaloper1zppjyal5emta5cquje8ndkpz0rs046m7zqxrpp
+- entries:
+ - balance: "562770000000"
+ redelegation_entry:
+ completion_time: "2021-10-25T21:42:07.336911677Z"
+ creation_height: 2.39735e+06
+ initial_balance: "562770000000"
+ shares_dst: "562770000000.000000000000000000"
+ redelegation:
+ delegator_address: cosmos1gghjut3ccd8ay0zduzj64hwre2fxs9ld75ru9p
+ entries: null
+ validator_dst_address: cosmosvaloper1uccl5ugxrm7vqlzwqr04pjd320d2fz0z3hc6vm
+ validator_src_address: cosmosvaloper1zppjyal5emta5cquje8ndkpz0rs046m7zqxrpp
+```
+
+##### redelegations-from
+
+The `redelegations-from` command allows users to query delegations that are redelegating *from* a validator.
+
+Usage:
+
+```bash
+simd query staking redelegations-from [validator-addr] [flags]
+```
+
+Example:
+
+```bash
+simd query staking redelegations-from cosmosvaloper1y4rzzrgl66eyhzt6gse2k7ej3zgwmngeleucjy
+```
+
+Example Output:
+
+```bash expandable
+pagination:
+ next_key: null
+ total: "0"
+redelegation_responses:
+- entries:
+ - balance: "50000000"
+ redelegation_entry:
+ completion_time: "2021-10-24T20:33:21.960084845Z"
+ creation_height: 2.382847e+06
+ initial_balance: "50000000"
+ shares_dst: "50000000.000000000000000000"
+ - balance: "5000000000"
+ redelegation_entry:
+ completion_time: "2021-10-25T21:33:54.446846862Z"
+ creation_height: 2.397271e+06
+ initial_balance: "5000000000"
+ shares_dst: "5000000000.000000000000000000"
+ redelegation:
+ delegator_address: cosmos1pm6e78p4pgn0da365plzl4t56pxy8hwtqp2mph
+ entries: null
+ validator_dst_address: cosmosvaloper1uccl5ugxrm7vqlzwqr04pjd320d2fz0z3hc6vm
+ validator_src_address: cosmosvaloper1y4rzzrgl66eyhzt6gse2k7ej3zgwmngeleucjy
+- entries:
+ - balance: "221000000"
+ redelegation_entry:
+ completion_time: "2021-10-05T21:05:45.669420544Z"
+ creation_height: 2.120693e+06
+ initial_balance: "221000000"
+ shares_dst: "221000000.000000000000000000"
+ redelegation:
+ delegator_address: cosmos1zqv8qxy2zgn4c58fz8jt8jmhs3d0attcussrf6
+ entries: null
+ validator_dst_address: cosmosvaloper10mseqwnwtjaqfrwwp2nyrruwmjp6u5jhah4c3y
+ validator_src_address: cosmosvaloper1y4rzzrgl66eyhzt6gse2k7ej3zgwmngeleucjy
+```
+
+##### unbonding-delegation
+
+The `unbonding-delegation` command allows users to query unbonding delegations for an individual delegator on an individual validator.
+
+Usage:
+
+```bash
+simd query staking unbonding-delegation [delegator-addr] [validator-addr] [flags]
+```
+
+Example:
+
+```bash
+simd query staking unbonding-delegation cosmos1gghjut3ccd8ay0zduzj64hwre2fxs9ld75ru9p cosmosvaloper1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj
+```
+
+Example Output:
+
+```bash
+delegator_address: cosmos1gghjut3ccd8ay0zduzj64hwre2fxs9ld75ru9p
+entries:
+- balance: "52000000"
+ completion_time: "2021-11-02T11:35:55.391594709Z"
+ creation_height: "55078"
+ initial_balance: "52000000"
+validator_address: cosmosvaloper1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj
+```
+
+##### unbonding-delegations
+
+The `unbonding-delegations` command allows users to query all unbonding-delegations records for one delegator.
+
+Usage:
+
+```bash
+simd query staking unbonding-delegations [delegator-addr] [flags]
+```
+
+Example:
+
+```bash
+simd query staking unbonding-delegations cosmos1gghjut3ccd8ay0zduzj64hwre2fxs9ld75ru9p
+```
+
+Example Output:
+
+```bash expandable
+pagination:
+ next_key: null
+ total: "0"
+unbonding_responses:
+- delegator_address: cosmos1gghjut3ccd8ay0zduzj64hwre2fxs9ld75ru9p
+ entries:
+ - balance: "52000000"
+ completion_time: "2021-11-02T11:35:55.391594709Z"
+ creation_height: "55078"
+ initial_balance: "52000000"
+ validator_address: cosmosvaloper1t8ehvswxjfn3ejzkjtntcyrqwvmvuknzmvtaaa
+
+```
+
+##### unbonding-delegations-from
+
+The `unbonding-delegations-from` command allows users to query delegations that are unbonding *from* a validator.
+
+Usage:
+
+```bash
+simd query staking unbonding-delegations-from [validator-addr] [flags]
+```
+
+Example:
+
+```bash
+simd query staking unbonding-delegations-from cosmosvaloper1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj
+```
+
+Example Output:
+
+```bash expandable
+pagination:
+ next_key: null
+ total: "0"
+unbonding_responses:
+- delegator_address: cosmos1qqq9txnw4c77sdvzx0tkedsafl5s3vk7hn53fn
+ entries:
+ - balance: "150000000"
+ completion_time: "2021-11-01T21:41:13.098141574Z"
+ creation_height: "46823"
+ initial_balance: "150000000"
+ validator_address: cosmosvaloper1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj
+- delegator_address: cosmos1peteje73eklqau66mr7h7rmewmt2vt99y24f5z
+ entries:
+ - balance: "24000000"
+ completion_time: "2021-10-31T02:57:18.192280361Z"
+ creation_height: "21516"
+ initial_balance: "24000000"
+ validator_address: cosmosvaloper1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj
+```
+
+##### validator
+
+The `validator` command allows users to query details about an individual validator.
+
+Usage:
+
+```bash
+simd query staking validator [validator-addr] [flags]
+```
+
+Example:
+
+```bash
+simd query staking validator cosmosvaloper1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj
+```
+
+Example Output:
+
+```bash expandable
+commission:
+ commission_rates:
+ max_change_rate: "0.020000000000000000"
+ max_rate: "0.200000000000000000"
+ rate: "0.050000000000000000"
+ update_time: "2021-10-01T19:24:52.663191049Z"
+consensus_pubkey:
+ '@type': /cosmos.crypto.ed25519.PubKey
+ key: sIiexdJdYWn27+7iUHQJDnkp63gq/rzUq1Y+fxoGjXc=
+delegator_shares: "32948270000.000000000000000000"
+description:
+ details: Witval is the validator arm from Vitwit. Vitwit is into software consulting
+ and services business since 2015. We are working closely with Cosmos ecosystem
+ since 2018. We are also building tools for the ecosystem, Aneka is our explorer
+ for the cosmos ecosystem.
+ identity: 51468B615127273A
+ moniker: Witval
+ security_contact: ""
+ website: ""
+jailed: false
+min_self_delegation: "1"
+operator_address: cosmosvaloper1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj
+status: BOND_STATUS_BONDED
+tokens: "32948270000"
+unbonding_height: "0"
+unbonding_time: "1970-01-01T00:00:00Z"
+```
+
+##### validators
+
+The `validators` command allows users to query details about all validators on a network.
+
+Usage:
+
+```bash
+simd query staking validators [flags]
+```
+
+Example:
+
+```bash
+simd query staking validators
+```
+
+Example Output:
+
+```bash expandable
+pagination:
+ next_key: FPTi7TKAjN63QqZh+BaXn6gBmD5/
+ total: "0"
+validators:
+commission:
+ commission_rates:
+ max_change_rate: "0.020000000000000000"
+ max_rate: "0.200000000000000000"
+ rate: "0.050000000000000000"
+ update_time: "2021-10-01T19:24:52.663191049Z"
+consensus_pubkey:
+ '@type': /cosmos.crypto.ed25519.PubKey
+ key: sIiexdJdYWn27+7iUHQJDnkp63gq/rzUq1Y+fxoGjXc=
+delegator_shares: "32948270000.000000000000000000"
+description:
+ details: Witval is the validator arm from Vitwit. Vitwit is into software consulting
+ and services business since 2015. We are working closely with Cosmos ecosystem
+ since 2018. We are also building tools for the ecosystem, Aneka is our explorer
+ for the cosmos ecosystem.
+ identity: 51468B615127273A
+ moniker: Witval
+ security_contact: ""
+ website: ""
+ jailed: false
+ min_self_delegation: "1"
+ operator_address: cosmosvaloper1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj
+ status: BOND_STATUS_BONDED
+ tokens: "32948270000"
+ unbonding_height: "0"
+ unbonding_time: "1970-01-01T00:00:00Z"
+- commission:
+ commission_rates:
+ max_change_rate: "0.100000000000000000"
+ max_rate: "0.200000000000000000"
+ rate: "0.050000000000000000"
+ update_time: "2021-10-04T18:02:21.446645619Z"
+ consensus_pubkey:
+ '@type': /cosmos.crypto.ed25519.PubKey
+ key: GDNpuKDmCg9GnhnsiU4fCWktuGUemjNfvpCZiqoRIYA=
+ delegator_shares: "559343421.000000000000000000"
+ description:
+ details: Noderunners is a professional validator in POS networks. We have a huge
+ node running experience, reliable soft and hardware. Our commissions are always
+ low, our support to delegators is always full. Stake with us and start receiving
+ your Cosmos rewards now!
+ identity: 812E82D12FEA3493
+ moniker: Noderunners
+ security_contact: info@noderunners.biz
+ website: http://noderunners.biz
+ jailed: false
+ min_self_delegation: "1"
+ operator_address: cosmosvaloper1q5ku90atkhktze83j9xjaks2p7uruag5zp6wt7
+ status: BOND_STATUS_BONDED
+ tokens: "559343421"
+ unbonding_height: "0"
+ unbonding_time: "1970-01-01T00:00:00Z"
+```
+
+#### Transactions
+
+The `tx` commands allow users to interact with the `staking` module.
+
+```bash
+simd tx staking --help
+```
+
+##### create-validator
+
+The command `create-validator` allows users to create a new validator initialized with a self-delegation to it.
+
+Usage:
+
+```bash
+simd tx staking create-validator [path/to/validator.json] [flags]
+```
+
+Example:
+
+```bash
+simd tx staking create-validator /path/to/validator.json \
+ --chain-id="name_of_chain_id" \
+ --gas="auto" \
+ --gas-adjustment="1.2" \
+ --gas-prices="0.025stake" \
+ --from=mykey
+```
+
+where `validator.json` contains:
+
+```json expandable
+{
+ "pubkey": {
+ "@type": "/cosmos.crypto.ed25519.PubKey",
+ "key": "BnbwFpeONLqvWqJb3qaUbL5aoIcW3fSuAp9nT3z5f20="
+ },
+ "amount": "1000000stake",
+ "moniker": "my-moniker",
+ "website": "https://myweb.site",
+ "security": "security-contact@gmail.com",
+ "details": "description of your validator",
+ "commission-rate": "0.10",
+ "commission-max-rate": "0.20",
+ "commission-max-change-rate": "0.01",
+ "min-self-delegation": "1"
+}
+```
+
+and pubkey can be obtained by using `simd tendermint show-validator` command.
+
+##### delegate
+
+The command `delegate` allows users to delegate liquid tokens to a validator.
+
+Usage:
+
+```bash
+simd tx staking delegate [validator-addr] [amount] [flags]
+```
+
+Example:
+
+```bash
+simd tx staking delegate cosmosvaloper1l2rsakp388kuv9k8qzq6lrm9taddae7fpx59wm 1000stake --from mykey
+```
+
+##### edit-validator
+
+The command `edit-validator` allows users to edit an existing validator account.
+
+Usage:
+
+```bash
+simd tx staking edit-validator [flags]
+```
+
+Example:
+
+```bash
+simd tx staking edit-validator --moniker "new_moniker_name" --website "new_website_url" --from mykey
+```
+
+##### redelegate
+
+The command `redelegate` allows users to redelegate illiquid tokens from one validator to another.
+
+Usage:
+
+```bash
+simd tx staking redelegate [src-validator-addr] [dst-validator-addr] [amount] [flags]
+```
+
+Example:
+
+```bash
+simd tx staking redelegate cosmosvaloper1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj cosmosvaloper1l2rsakp388kuv9k8qzq6lrm9taddae7fpx59wm 100stake --from mykey
+```
+
+##### unbond
+
+The command `unbond` allows users to unbond shares from a validator.
+
+Usage:
+
+```bash
+simd tx staking unbond [validator-addr] [amount] [flags]
+```
+
+Example:
+
+```bash
+simd tx staking unbond cosmosvaloper1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj 100stake --from mykey
+```
+
+##### cancel-unbond
+
+The command `cancel-unbond` allows users to cancel the unbonding delegation entry and delegate back to the original validator.
+
+Usage:
+
+```bash
+simd tx staking cancel-unbond [validator-addr] [amount] [creation-height]
+```
+
+Example:
+
+```bash
+simd tx staking cancel-unbond cosmosvaloper1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj 100stake 123123 --from mykey
+```
+
+### gRPC
+
+A user can query the `staking` module using gRPC endpoints.
+
+#### Validators
+
+The `Validators` endpoint queries all validators that match the given status.
+
+```bash
+cosmos.staking.v1beta1.Query/Validators
+```
+
+Example:
+
+```bash
+grpcurl -plaintext localhost:9090 cosmos.staking.v1beta1.Query/Validators
+```
+
+Example Output:
+
+```bash expandable
+{
+ "validators": [
+ {
+ "operatorAddress": "cosmosvaloper1rne8lgs98p0jqe82sgt0qr4rdn4hgvmgp9ggcc",
+ "consensusPubkey": {"@type":"/cosmos.crypto.ed25519.PubKey","key":"Auxs3865HpB/EfssYOzfqNhEJjzys2Fo6jD5B8tPgC8="},
+ "status": "BOND_STATUS_BONDED",
+ "tokens": "10000000",
+ "delegatorShares": "10000000000000000000000000",
+ "description": {
+ "moniker": "myvalidator"
+ },
+ "unbondingTime": "1970-01-01T00:00:00Z",
+ "commission": {
+ "commissionRates": {
+ "rate": "100000000000000000",
+ "maxRate": "200000000000000000",
+ "maxChangeRate": "10000000000000000"
+ },
+ "updateTime": "2021-10-01T05:52:50.380144238Z"
+ },
+ "minSelfDelegation": "1"
+ }
+ ],
+ "pagination": {
+ "total": "1"
+ }
+}
+```
+
+#### Validator
+
+The `Validator` endpoint queries validator information for given validator address.
+
+```bash
+cosmos.staking.v1beta1.Query/Validator
+```
+
+Example:
+
+```bash
+grpcurl -plaintext -d '{"validator_addr":"cosmosvaloper1rne8lgs98p0jqe82sgt0qr4rdn4hgvmgp9ggcc"}' \
+localhost:9090 cosmos.staking.v1beta1.Query/Validator
+```
+
+Example Output:
+
+```bash expandable
+{
+ "validator": {
+ "operatorAddress": "cosmosvaloper1rne8lgs98p0jqe82sgt0qr4rdn4hgvmgp9ggcc",
+ "consensusPubkey": {"@type":"/cosmos.crypto.ed25519.PubKey","key":"Auxs3865HpB/EfssYOzfqNhEJjzys2Fo6jD5B8tPgC8="},
+ "status": "BOND_STATUS_BONDED",
+ "tokens": "10000000",
+ "delegatorShares": "10000000000000000000000000",
+ "description": {
+ "moniker": "myvalidator"
+ },
+ "unbondingTime": "1970-01-01T00:00:00Z",
+ "commission": {
+ "commissionRates": {
+ "rate": "100000000000000000",
+ "maxRate": "200000000000000000",
+ "maxChangeRate": "10000000000000000"
+ },
+ "updateTime": "2021-10-01T05:52:50.380144238Z"
+ },
+ "minSelfDelegation": "1"
+ }
+}
+```
+
+#### ValidatorDelegations
+
+The `ValidatorDelegations` endpoint queries delegate information for given validator.
+
+```bash
+cosmos.staking.v1beta1.Query/ValidatorDelegations
+```
+
+Example:
+
+```bash
+grpcurl -plaintext -d '{"validator_addr":"cosmosvaloper1rne8lgs98p0jqe82sgt0qr4rdn4hgvmgp9ggcc"}' \
+localhost:9090 cosmos.staking.v1beta1.Query/ValidatorDelegations
+```
+
+Example Output:
+
+```bash expandable
+{
+ "delegationResponses": [
+ {
+ "delegation": {
+ "delegatorAddress": "cosmos1rne8lgs98p0jqe82sgt0qr4rdn4hgvmgy3ua5t",
+ "validatorAddress": "cosmosvaloper1rne8lgs98p0jqe82sgt0qr4rdn4hgvmgp9ggcc",
+ "shares": "10000000000000000000000000"
+ },
+ "balance": {
+ "denom": "stake",
+ "amount": "10000000"
+ }
+ }
+ ],
+ "pagination": {
+ "total": "1"
+ }
+}
+```
+
+#### ValidatorUnbondingDelegations
+
+The `ValidatorUnbondingDelegations` endpoint queries unbonding delegations of a given validator.
+
+```bash
+cosmos.staking.v1beta1.Query/ValidatorUnbondingDelegations
+```
+
+Example:
+
+```bash
+grpcurl -plaintext -d '{"validator_addr":"cosmosvaloper1rne8lgs98p0jqe82sgt0qr4rdn4hgvmgp9ggcc"}' \
+localhost:9090 cosmos.staking.v1beta1.Query/ValidatorUnbondingDelegations
+```
+
+Example Output:
+
+```bash expandable
+{
+ "unbonding_responses": [
+ {
+ "delegator_address": "cosmos1z3pzzw84d6xn00pw9dy3yapqypfde7vg6965fy",
+ "validator_address": "cosmosvaloper1rne8lgs98p0jqe82sgt0qr4rdn4hgvmgp9ggcc",
+ "entries": [
+ {
+ "creation_height": "25325",
+ "completion_time": "2021-10-31T09:24:36.797320636Z",
+ "initial_balance": "20000000",
+ "balance": "20000000"
+ }
+ ]
+ },
+ {
+ "delegator_address": "cosmos1y8nyfvmqh50p6ldpzljk3yrglppdv3t8phju77",
+ "validator_address": "cosmosvaloper1rne8lgs98p0jqe82sgt0qr4rdn4hgvmgp9ggcc",
+ "entries": [
+ {
+ "creation_height": "13100",
+ "completion_time": "2021-10-30T12:53:02.272266791Z",
+ "initial_balance": "1000000",
+ "balance": "1000000"
+ }
+ ]
+    }
+ ],
+ "pagination": {
+ "next_key": null,
+ "total": "8"
+ }
+}
+```
+
+#### Delegation
+
+The `Delegation` endpoint queries delegate information for given validator delegator pair.
+
+```bash
+cosmos.staking.v1beta1.Query/Delegation
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+-d '{"delegator_addr": "cosmos1y8nyfvmqh50p6ldpzljk3yrglppdv3t8phju77", "validator_addr":"cosmosvaloper1rne8lgs98p0jqe82sgt0qr4rdn4hgvmgp9ggcc"}' \
+localhost:9090 cosmos.staking.v1beta1.Query/Delegation
+```
+
+Example Output:
+
+```bash expandable
+{
+ "delegation_response":
+ {
+ "delegation":
+ {
+ "delegator_address":"cosmos1y8nyfvmqh50p6ldpzljk3yrglppdv3t8phju77",
+ "validator_address":"cosmosvaloper1rne8lgs98p0jqe82sgt0qr4rdn4hgvmgp9ggcc",
+ "shares":"25083119936.000000000000000000"
+ },
+ "balance":
+ {
+ "denom":"stake",
+ "amount":"25083119936"
+ }
+ }
+}
+```
+
+#### UnbondingDelegation
+
+The `UnbondingDelegation` endpoint queries unbonding information for given validator delegator pair.
+
+```bash
+cosmos.staking.v1beta1.Query/UnbondingDelegation
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+-d '{"delegator_addr": "cosmos1y8nyfvmqh50p6ldpzljk3yrglppdv3t8phju77", "validator_addr":"cosmosvaloper1rne8lgs98p0jqe82sgt0qr4rdn4hgvmgp9ggcc"}' \
+localhost:9090 cosmos.staking.v1beta1.Query/UnbondingDelegation
+```
+
+Example Output:
+
+```bash expandable
+{
+ "unbond": {
+ "delegator_address": "cosmos1y8nyfvmqh50p6ldpzljk3yrglppdv3t8phju77",
+ "validator_address": "cosmosvaloper1rne8lgs98p0jqe82sgt0qr4rdn4hgvmgp9ggcc",
+ "entries": [
+ {
+ "creation_height": "136984",
+ "completion_time": "2021-11-08T05:38:47.505593891Z",
+ "initial_balance": "400000000",
+ "balance": "400000000"
+ },
+ {
+ "creation_height": "137005",
+ "completion_time": "2021-11-08T05:40:53.526196312Z",
+ "initial_balance": "385000000",
+ "balance": "385000000"
+ }
+ ]
+ }
+}
+```
+
+#### DelegatorDelegations
+
+The `DelegatorDelegations` endpoint queries all delegations of a given delegator address.
+
+```bash
+cosmos.staking.v1beta1.Query/DelegatorDelegations
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+-d '{"delegator_addr": "cosmos1y8nyfvmqh50p6ldpzljk3yrglppdv3t8phju77"}' \
+localhost:9090 cosmos.staking.v1beta1.Query/DelegatorDelegations
+```
+
+Example Output:
+
+```bash
+{
+ "delegation_responses": [
+ {"delegation":{"delegator_address":"cosmos1y8nyfvmqh50p6ldpzljk3yrglppdv3t8phju77","validator_address":"cosmosvaloper1eh5mwu044gd5ntkkc2xgfg8247mgc56fww3vc8","shares":"25083339023.000000000000000000"},"balance":{"denom":"stake","amount":"25083339023"}}
+ ],
+ "pagination": {
+ "next_key": null,
+ "total": "1"
+ }
+}
+```
+
+#### DelegatorUnbondingDelegations
+
+The `DelegatorUnbondingDelegations` endpoint queries all unbonding delegations of a given delegator address.
+
+```bash
+cosmos.staking.v1beta1.Query/DelegatorUnbondingDelegations
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+-d '{"delegator_addr": "cosmos1y8nyfvmqh50p6ldpzljk3yrglppdv3t8phju77"}' \
+localhost:9090 cosmos.staking.v1beta1.Query/DelegatorUnbondingDelegations
+```
+
+Example Output:
+
+```bash expandable
+{
+ "unbonding_responses": [
+ {
+ "delegator_address": "cosmos1y8nyfvmqh50p6ldpzljk3yrglppdv3t8phju77",
+ "validator_address": "cosmosvaloper1sjllsnramtg3ewxqwwrwjxfgc4n4ef9uxyejze",
+ "entries": [
+ {
+ "creation_height": "136984",
+ "completion_time": "2021-11-08T05:38:47.505593891Z",
+ "initial_balance": "400000000",
+ "balance": "400000000"
+ },
+ {
+ "creation_height": "137005",
+ "completion_time": "2021-11-08T05:40:53.526196312Z",
+ "initial_balance": "385000000",
+ "balance": "385000000"
+ }
+ ]
+ }
+ ],
+ "pagination": {
+ "next_key": null,
+ "total": "1"
+ }
+}
+```
+
+#### Redelegations
+
+The `Redelegations` endpoint queries redelegations of given address.
+
+```bash
+cosmos.staking.v1beta1.Query/Redelegations
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+-d '{"delegator_addr": "cosmos1ld5p7hn43yuh8ht28gm9pfjgj2fctujp2tgwvf", "src_validator_addr" : "cosmosvaloper1j7euyj85fv2jugejrktj540emh9353ltgppc3g", "dst_validator_addr" : "cosmosvaloper1yy3tnegzmkdcm7czzcy3flw5z0zyr9vkkxrfse"}' \
+localhost:9090 cosmos.staking.v1beta1.Query/Redelegations
+```
+
+Example Output:
+
+```bash expandable
+{
+ "redelegation_responses": [
+ {
+ "redelegation": {
+ "delegator_address": "cosmos1ld5p7hn43yuh8ht28gm9pfjgj2fctujp2tgwvf",
+ "validator_src_address": "cosmosvaloper1j7euyj85fv2jugejrktj540emh9353ltgppc3g",
+ "validator_dst_address": "cosmosvaloper1yy3tnegzmkdcm7czzcy3flw5z0zyr9vkkxrfse",
+ "entries": null
+ },
+ "entries": [
+ {
+ "redelegation_entry": {
+ "creation_height": 135932,
+ "completion_time": "2021-11-08T03:52:55.299147901Z",
+ "initial_balance": "2900000",
+ "shares_dst": "2900000.000000000000000000"
+ },
+ "balance": "2900000"
+ }
+ ]
+ }
+ ],
+ "pagination": null
+}
+```
+
+#### DelegatorValidators
+
+The `DelegatorValidators` endpoint queries all validators information for given delegator.
+
+```bash
+cosmos.staking.v1beta1.Query/DelegatorValidators
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+-d '{"delegator_addr": "cosmos1ld5p7hn43yuh8ht28gm9pfjgj2fctujp2tgwvf"}' \
+localhost:9090 cosmos.staking.v1beta1.Query/DelegatorValidators
+```
+
+Example Output:
+
+```bash expandable
+{
+ "validators": [
+ {
+ "operator_address": "cosmosvaloper1eh5mwu044gd5ntkkc2xgfg8247mgc56fww3vc8",
+ "consensus_pubkey": {
+ "@type": "/cosmos.crypto.ed25519.PubKey",
+ "key": "UPwHWxH1zHJWGOa/m6JB3f5YjHMvPQPkVbDqqi+U7Uw="
+ },
+ "jailed": false,
+ "status": "BOND_STATUS_BONDED",
+ "tokens": "347260647559",
+ "delegator_shares": "347260647559.000000000000000000",
+ "description": {
+ "moniker": "BouBouNode",
+ "identity": "",
+ "website": "https://boubounode.com",
+ "security_contact": "",
+ "details": "AI-based Validator. #1 AI Validator on Game of Stakes. Fairly priced. Don't trust (humans), verify. Made with BouBou love."
+ },
+ "unbonding_height": "0",
+ "unbonding_time": "1970-01-01T00:00:00Z",
+ "commission": {
+ "commission_rates": {
+ "rate": "0.061000000000000000",
+ "max_rate": "0.300000000000000000",
+ "max_change_rate": "0.150000000000000000"
+ },
+ "update_time": "2021-10-01T15:00:00Z"
+ },
+ "min_self_delegation": "1"
+ }
+ ],
+ "pagination": {
+ "next_key": null,
+ "total": "1"
+ }
+}
+```
+
+#### DelegatorValidator
+
+The `DelegatorValidator` endpoint queries validator information for given delegator validator pair.
+
+```bash
+cosmos.staking.v1beta1.Query/DelegatorValidator
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+-d '{"delegator_addr": "cosmos1eh5mwu044gd5ntkkc2xgfg8247mgc56f3n8rr7", "validator_addr": "cosmosvaloper1eh5mwu044gd5ntkkc2xgfg8247mgc56fww3vc8"}' \
+localhost:9090 cosmos.staking.v1beta1.Query/DelegatorValidator
+```
+
+Example Output:
+
+```bash expandable
+{
+ "validator": {
+ "operator_address": "cosmosvaloper1eh5mwu044gd5ntkkc2xgfg8247mgc56fww3vc8",
+ "consensus_pubkey": {
+ "@type": "/cosmos.crypto.ed25519.PubKey",
+ "key": "UPwHWxH1zHJWGOa/m6JB3f5YjHMvPQPkVbDqqi+U7Uw="
+ },
+ "jailed": false,
+ "status": "BOND_STATUS_BONDED",
+ "tokens": "347262754841",
+ "delegator_shares": "347262754841.000000000000000000",
+ "description": {
+ "moniker": "BouBouNode",
+ "identity": "",
+ "website": "https://boubounode.com",
+ "security_contact": "",
+ "details": "AI-based Validator. #1 AI Validator on Game of Stakes. Fairly priced. Don't trust (humans), verify. Made with BouBou love."
+ },
+ "unbonding_height": "0",
+ "unbonding_time": "1970-01-01T00:00:00Z",
+ "commission": {
+ "commission_rates": {
+ "rate": "0.061000000000000000",
+ "max_rate": "0.300000000000000000",
+ "max_change_rate": "0.150000000000000000"
+ },
+ "update_time": "2021-10-01T15:00:00Z"
+ },
+ "min_self_delegation": "1"
+ }
+}
+```
+
+#### HistoricalInfo
+
+```bash
+cosmos.staking.v1beta1.Query/HistoricalInfo
+```
+
+Example:
+
+```bash
+grpcurl -plaintext -d '{"height" : 1}' localhost:9090 cosmos.staking.v1beta1.Query/HistoricalInfo
+```
+
+Example Output:
+
+```bash expandable
+{
+ "hist": {
+ "header": {
+ "version": {
+ "block": "11",
+ "app": "0"
+ },
+ "chain_id": "simd-1",
+ "height": "140142",
+ "time": "2021-10-11T10:56:29.720079569Z",
+ "last_block_id": {
+ "hash": "9gri/4LLJUBFqioQ3NzZIP9/7YHR9QqaM6B2aJNQA7o=",
+ "part_set_header": {
+ "total": 1,
+ "hash": "Hk1+C864uQkl9+I6Zn7IurBZBKUevqlVtU7VqaZl1tc="
+ }
+ },
+ "last_commit_hash": "VxrcS27GtvGruS3I9+AlpT7udxIT1F0OrRklrVFSSKc=",
+ "data_hash": "80BjOrqNYUOkTnmgWyz9AQ8n7SoEmPVi4QmAe8RbQBY=",
+ "validators_hash": "95W49n2hw8RWpr1GPTAO5MSPi6w6Wjr3JjjS7AjpBho=",
+ "next_validators_hash": "95W49n2hw8RWpr1GPTAO5MSPi6w6Wjr3JjjS7AjpBho=",
+ "consensus_hash": "BICRvH3cKD93v7+R1zxE2ljD34qcvIZ0Bdi389qtoi8=",
+ "app_hash": "ZZaxnSY3E6Ex5Bvkm+RigYCK82g8SSUL53NymPITeOE=",
+ "last_results_hash": "47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=",
+ "evidence_hash": "47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=",
+ "proposer_address": "aH6dO428B+ItuoqPq70efFHrSMY="
+ },
+ "valset": [
+ {
+ "operator_address": "cosmosvaloper196ax4vc0lwpxndu9dyhvca7jhxp70rmcqcnylw",
+ "consensus_pubkey": {
+ "@type": "/cosmos.crypto.ed25519.PubKey",
+ "key": "/O7BtNW0pafwfvomgR4ZnfldwPXiFfJs9mHg3gwfv5Q="
+ },
+ "jailed": false,
+ "status": "BOND_STATUS_BONDED",
+ "tokens": "1426045203613",
+ "delegator_shares": "1426045203613.000000000000000000",
+ "description": {
+ "moniker": "SG-1",
+ "identity": "48608633F99D1B60",
+ "website": "https://sg-1.online",
+ "security_contact": "",
+ "details": "SG-1 - your favorite validator on Witval. We offer 100% Soft Slash protection."
+ },
+ "unbonding_height": "0",
+ "unbonding_time": "1970-01-01T00:00:00Z",
+ "commission": {
+ "commission_rates": {
+ "rate": "0.037500000000000000",
+ "max_rate": "0.200000000000000000",
+ "max_change_rate": "0.030000000000000000"
+ },
+ "update_time": "2021-10-01T15:00:00Z"
+ },
+ "min_self_delegation": "1"
+ }
+ ]
+ }
+}
+
+```
+
+#### Pool
+
+The `Pool` endpoint queries the pool information.
+
+```bash
+cosmos.staking.v1beta1.Query/Pool
+```
+
+Example:
+
+```bash
+grpcurl -plaintext localhost:9090 cosmos.staking.v1beta1.Query/Pool
+```
+
+Example Output:
+
+```bash
+{
+ "pool": {
+ "not_bonded_tokens": "369054400189",
+ "bonded_tokens": "15657192425623"
+ }
+}
+```
+
+#### Params
+
+The `Params` endpoint queries the staking parameters.
+
+```bash
+cosmos.staking.v1beta1.Query/Params
+```
+
+Example:
+
+```bash
+grpcurl -plaintext localhost:9090 cosmos.staking.v1beta1.Query/Params
+```
+
+Example Output:
+
+```bash
+{
+ "params": {
+ "unbondingTime": "1814400s",
+ "maxValidators": 100,
+ "maxEntries": 7,
+ "historicalEntries": 10000,
+ "bondDenom": "stake"
+ }
+}
+```
+
+### REST
+
+A user can query the `staking` module using REST endpoints.
+
+#### DelegatorDelegations
+
+The `DelegtaorDelegations` REST endpoint queries all delegations of a given delegator address.
+
+```bash
+/cosmos/staking/v1beta1/delegations/{delegatorAddr}
+```
+
+Example:
+
+```bash
+curl -X GET "http://localhost:1317/cosmos/staking/v1beta1/delegations/cosmos1vcs68xf2tnqes5tg0khr0vyevm40ff6zdxatp5" -H "accept: application/json"
+```
+
+Example Output:
+
+```bash expandable
+{
+ "delegation_responses": [
+ {
+ "delegation": {
+ "delegator_address": "cosmos1vcs68xf2tnqes5tg0khr0vyevm40ff6zdxatp5",
+ "validator_address": "cosmosvaloper1quqxfrxkycr0uzt4yk0d57tcq3zk7srm7sm6r8",
+ "shares": "256250000.000000000000000000"
+ },
+ "balance": {
+ "denom": "stake",
+ "amount": "256250000"
+ }
+ },
+ {
+ "delegation": {
+ "delegator_address": "cosmos1vcs68xf2tnqes5tg0khr0vyevm40ff6zdxatp5",
+ "validator_address": "cosmosvaloper194v8uwee2fvs2s8fa5k7j03ktwc87h5ym39jfv",
+ "shares": "255150000.000000000000000000"
+ },
+ "balance": {
+ "denom": "stake",
+ "amount": "255150000"
+ }
+ }
+ ],
+ "pagination": {
+ "next_key": null,
+ "total": "2"
+ }
+}
+```
+
+#### Redelegations
+
+The `Redelegations` REST endpoint queries redelegations of a given address.
+
+```bash
+/cosmos/staking/v1beta1/delegators/{delegatorAddr}/redelegations
+```
+
+Example:
+
+```bash
+curl -X GET \
+"http://localhost:1317/cosmos/staking/v1beta1/delegators/cosmos1thfntksw0d35n2tkr0k8v54fr8wxtxwxl2c56e/redelegations?srcValidatorAddr=cosmosvaloper1lzhlnpahvznwfv4jmay2tgaha5kmz5qx4cuznf&dstValidatorAddr=cosmosvaloper1vq8tw77kp8lvxq9u3c8eeln9zymn68rng8pgt4" \
+-H "accept: application/json"
+```
+
+Example Output:
+
+```bash expandable
+{
+ "redelegation_responses": [
+ {
+ "redelegation": {
+ "delegator_address": "cosmos1thfntksw0d35n2tkr0k8v54fr8wxtxwxl2c56e",
+ "validator_src_address": "cosmosvaloper1lzhlnpahvznwfv4jmay2tgaha5kmz5qx4cuznf",
+ "validator_dst_address": "cosmosvaloper1vq8tw77kp8lvxq9u3c8eeln9zymn68rng8pgt4",
+ "entries": null
+ },
+ "entries": [
+ {
+ "redelegation_entry": {
+ "creation_height": 151523,
+ "completion_time": "2021-11-09T06:03:25.640682116Z",
+ "initial_balance": "200000000",
+ "shares_dst": "200000000.000000000000000000"
+ },
+ "balance": "200000000"
+ }
+ ]
+ }
+ ],
+ "pagination": null
+}
+```
+
+#### DelegatorUnbondingDelegations
+
+The `DelegatorUnbondingDelegations` REST endpoint queries all unbonding delegations of a given delegator address.
+
+```bash
+/cosmos/staking/v1beta1/delegators/{delegatorAddr}/unbonding_delegations
+```
+
+Example:
+
+```bash
+curl -X GET \
+"http://localhost:1317/cosmos/staking/v1beta1/delegators/cosmos1nxv42u3lv642q0fuzu2qmrku27zgut3n3z7lll/unbonding_delegations" \
+-H "accept: application/json"
+```
+
+Example Output:
+
+```bash expandable
+{
+ "unbonding_responses": [
+ {
+ "delegator_address": "cosmos1nxv42u3lv642q0fuzu2qmrku27zgut3n3z7lll",
+ "validator_address": "cosmosvaloper1e7mvqlz50ch6gw4yjfemsc069wfre4qwmw53kq",
+ "entries": [
+ {
+ "creation_height": "2442278",
+ "completion_time": "2021-10-12T10:59:03.797335857Z",
+ "initial_balance": "50000000000",
+ "balance": "50000000000"
+ }
+ ]
+ }
+ ],
+ "pagination": {
+ "next_key": null,
+ "total": "1"
+ }
+}
+```
+
+#### DelegatorValidators
+
+The `DelegatorValidators` REST endpoint queries information for all validators of a given delegator address.
+
+```bash
+/cosmos/staking/v1beta1/delegators/{delegatorAddr}/validators
+```
+
+Example:
+
+```bash
+curl -X GET \
+"http://localhost:1317/cosmos/staking/v1beta1/delegators/cosmos1xwazl8ftks4gn00y5x3c47auquc62ssune9ppv/validators" \
+-H "accept: application/json"
+```
+
+Example Output:
+
+```bash expandable
+{
+ "validators": [
+ {
+ "operator_address": "cosmosvaloper1xwazl8ftks4gn00y5x3c47auquc62ssuvynw64",
+ "consensus_pubkey": {
+ "@type": "/cosmos.crypto.ed25519.PubKey",
+ "key": "5v4n3px3PkfNnKflSgepDnsMQR1hiNXnqOC11Y72/PQ="
+ },
+ "jailed": false,
+ "status": "BOND_STATUS_BONDED",
+ "tokens": "21592843799",
+ "delegator_shares": "21592843799.000000000000000000",
+ "description": {
+ "moniker": "jabbey",
+ "identity": "",
+ "website": "https://twitter.com/JoeAbbey",
+ "security_contact": "",
+ "details": "just another dad in the cosmos"
+ },
+ "unbonding_height": "0",
+ "unbonding_time": "1970-01-01T00:00:00Z",
+ "commission": {
+ "commission_rates": {
+ "rate": "0.100000000000000000",
+ "max_rate": "0.200000000000000000",
+ "max_change_rate": "0.100000000000000000"
+ },
+ "update_time": "2021-10-09T19:03:54.984821705Z"
+ },
+ "min_self_delegation": "1"
+ }
+ ],
+ "pagination": {
+ "next_key": null,
+ "total": "1"
+ }
+}
+```
+
+#### DelegatorValidator
+
+The `DelegatorValidator` REST endpoint queries validator information for given delegator validator pair.
+
+```bash
+/cosmos/staking/v1beta1/delegators/{delegatorAddr}/validators/{validatorAddr}
+```
+
+Example:
+
+```bash
+curl -X GET \
+"http://localhost:1317/cosmos/staking/v1beta1/delegators/cosmos1xwazl8ftks4gn00y5x3c47auquc62ssune9ppv/validators/cosmosvaloper1xwazl8ftks4gn00y5x3c47auquc62ssuvynw64" \
+-H "accept: application/json"
+```
+
+Example Output:
+
+```bash expandable
+{
+ "validator": {
+ "operator_address": "cosmosvaloper1xwazl8ftks4gn00y5x3c47auquc62ssuvynw64",
+ "consensus_pubkey": {
+ "@type": "/cosmos.crypto.ed25519.PubKey",
+ "key": "5v4n3px3PkfNnKflSgepDnsMQR1hiNXnqOC11Y72/PQ="
+ },
+ "jailed": false,
+ "status": "BOND_STATUS_BONDED",
+ "tokens": "21592843799",
+ "delegator_shares": "21592843799.000000000000000000",
+ "description": {
+ "moniker": "jabbey",
+ "identity": "",
+ "website": "https://twitter.com/JoeAbbey",
+ "security_contact": "",
+ "details": "just another dad in the cosmos"
+ },
+ "unbonding_height": "0",
+ "unbonding_time": "1970-01-01T00:00:00Z",
+ "commission": {
+ "commission_rates": {
+ "rate": "0.100000000000000000",
+ "max_rate": "0.200000000000000000",
+ "max_change_rate": "0.100000000000000000"
+ },
+ "update_time": "2021-10-09T19:03:54.984821705Z"
+ },
+ "min_self_delegation": "1"
+ }
+}
+```
+
+#### HistoricalInfo
+
+The `HistoricalInfo` REST endpoint queries the historical information for given height.
+
+```bash
+/cosmos/staking/v1beta1/historical_info/{height}
+```
+
+Example:
+
+```bash
+curl -X GET "http://localhost:1317/cosmos/staking/v1beta1/historical_info/153332" -H "accept: application/json"
+```
+
+Example Output:
+
+```bash expandable
+{
+ "hist": {
+ "header": {
+ "version": {
+ "block": "11",
+ "app": "0"
+ },
+ "chain_id": "cosmos-1",
+ "height": "153332",
+ "time": "2021-10-12T09:05:35.062230221Z",
+ "last_block_id": {
+ "hash": "NX8HevR5khb7H6NGKva+jVz7cyf0skF1CrcY9A0s+d8=",
+ "part_set_header": {
+ "total": 1,
+ "hash": "zLQ2FiKM5tooL3BInt+VVfgzjlBXfq0Hc8Iux/xrhdg="
+ }
+ },
+ "last_commit_hash": "P6IJrK8vSqU3dGEyRHnAFocoDGja0bn9euLuy09s350=",
+ "data_hash": "eUd+6acHWrNXYju8Js449RJ99lOYOs16KpqQl4SMrEM=",
+ "validators_hash": "mB4pravvMsJKgi+g8aYdSeNlt0kPjnRFyvtAQtaxcfw=",
+ "next_validators_hash": "mB4pravvMsJKgi+g8aYdSeNlt0kPjnRFyvtAQtaxcfw=",
+ "consensus_hash": "BICRvH3cKD93v7+R1zxE2ljD34qcvIZ0Bdi389qtoi8=",
+ "app_hash": "fuELArKRK+CptnZ8tu54h6xEleSWenHNmqC84W866fU=",
+ "last_results_hash": "p/BPexV4LxAzlVcPRvW+lomgXb6Yze8YLIQUo/4Kdgc=",
+ "evidence_hash": "47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=",
+ "proposer_address": "G0MeY8xQx7ooOsni8KE/3R/Ib3Q="
+ },
+ "valset": [
+ {
+ "operator_address": "cosmosvaloper196ax4vc0lwpxndu9dyhvca7jhxp70rmcqcnylw",
+ "consensus_pubkey": {
+ "@type": "/cosmos.crypto.ed25519.PubKey",
+ "key": "/O7BtNW0pafwfvomgR4ZnfldwPXiFfJs9mHg3gwfv5Q="
+ },
+ "jailed": false,
+ "status": "BOND_STATUS_BONDED",
+ "tokens": "1416521659632",
+ "delegator_shares": "1416521659632.000000000000000000",
+ "description": {
+ "moniker": "SG-1",
+ "identity": "48608633F99D1B60",
+ "website": "https://sg-1.online",
+ "security_contact": "",
+ "details": "SG-1 - your favorite validator on cosmos. We offer 100% Soft Slash protection."
+ },
+ "unbonding_height": "0",
+ "unbonding_time": "1970-01-01T00:00:00Z",
+ "commission": {
+ "commission_rates": {
+ "rate": "0.037500000000000000",
+ "max_rate": "0.200000000000000000",
+ "max_change_rate": "0.030000000000000000"
+ },
+ "update_time": "2021-10-01T15:00:00Z"
+ },
+ "min_self_delegation": "1"
+ },
+ {
+ "operator_address": "cosmosvaloper1t8ehvswxjfn3ejzkjtntcyrqwvmvuknzmvtaaa",
+ "consensus_pubkey": {
+ "@type": "/cosmos.crypto.ed25519.PubKey",
+ "key": "uExZyjNLtr2+FFIhNDAMcQ8+yTrqE7ygYTsI7khkA5Y="
+ },
+ "jailed": false,
+ "status": "BOND_STATUS_BONDED",
+ "tokens": "1348298958808",
+ "delegator_shares": "1348298958808.000000000000000000",
+ "description": {
+ "moniker": "Cosmostation",
+ "identity": "AE4C403A6E7AA1AC",
+ "website": "https://www.cosmostation.io",
+ "security_contact": "admin@stamper.network",
+ "details": "Cosmostation validator node. Delegate your tokens and Start Earning Staking Rewards"
+ },
+ "unbonding_height": "0",
+ "unbonding_time": "1970-01-01T00:00:00Z",
+ "commission": {
+ "commission_rates": {
+ "rate": "0.050000000000000000",
+ "max_rate": "1.000000000000000000",
+ "max_change_rate": "0.200000000000000000"
+ },
+ "update_time": "2021-10-01T15:06:38.821314287Z"
+ },
+ "min_self_delegation": "1"
+ }
+ ]
+ }
+}
+```
+
+#### Parameters
+
+The `Parameters` REST endpoint queries the staking parameters.
+
+```bash
+/cosmos/staking/v1beta1/params
+```
+
+Example:
+
+```bash
+curl -X GET "http://localhost:1317/cosmos/staking/v1beta1/params" -H "accept: application/json"
+```
+
+Example Output:
+
+```bash
+{
+ "params": {
+ "unbonding_time": "2419200s",
+ "max_validators": 100,
+ "max_entries": 7,
+ "historical_entries": 10000,
+ "bond_denom": "stake"
+ }
+}
+```
+
+#### Pool
+
+The `Pool` REST endpoint queries the pool information.
+
+```bash
+/cosmos/staking/v1beta1/pool
+```
+
+Example:
+
+```bash
+curl -X GET "http://localhost:1317/cosmos/staking/v1beta1/pool" -H "accept: application/json"
+```
+
+Example Output:
+
+```bash
+{
+ "pool": {
+ "not_bonded_tokens": "432805737458",
+ "bonded_tokens": "15783637712645"
+ }
+}
+```
+
+#### Validators
+
+The `Validators` REST endpoint queries all validators that match the given status.
+
+```bash
+/cosmos/staking/v1beta1/validators
+```
+
+Example:
+
+```bash
+curl -X GET "http://localhost:1317/cosmos/staking/v1beta1/validators" -H "accept: application/json"
+```
+
+Example Output:
+
+```bash expandable
+{
+ "validators": [
+ {
+ "operator_address": "cosmosvaloper1q3jsx9dpfhtyqqgetwpe5tmk8f0ms5qywje8tw",
+ "consensus_pubkey": {
+ "@type": "/cosmos.crypto.ed25519.PubKey",
+ "key": "N7BPyek2aKuNZ0N/8YsrqSDhGZmgVaYUBuddY8pwKaE="
+ },
+ "jailed": false,
+ "status": "BOND_STATUS_BONDED",
+ "tokens": "383301887799",
+ "delegator_shares": "383301887799.000000000000000000",
+ "description": {
+ "moniker": "SmartNodes",
+ "identity": "D372724899D1EDC8",
+ "website": "https://smartnodes.co",
+ "security_contact": "",
+ "details": "Earn Rewards with Crypto Staking & Node Deployment"
+ },
+ "unbonding_height": "0",
+ "unbonding_time": "1970-01-01T00:00:00Z",
+ "commission": {
+ "commission_rates": {
+ "rate": "0.050000000000000000",
+ "max_rate": "0.200000000000000000",
+ "max_change_rate": "0.100000000000000000"
+ },
+ "update_time": "2021-10-01T15:51:31.596618510Z"
+ },
+ "min_self_delegation": "1"
+ },
+ {
+ "operator_address": "cosmosvaloper1q5ku90atkhktze83j9xjaks2p7uruag5zp6wt7",
+ "consensus_pubkey": {
+ "@type": "/cosmos.crypto.ed25519.PubKey",
+ "key": "GDNpuKDmCg9GnhnsiU4fCWktuGUemjNfvpCZiqoRIYA="
+ },
+ "jailed": false,
+ "status": "BOND_STATUS_UNBONDING",
+ "tokens": "1017819654",
+ "delegator_shares": "1017819654.000000000000000000",
+ "description": {
+ "moniker": "Noderunners",
+ "identity": "812E82D12FEA3493",
+ "website": "http://noderunners.biz",
+ "security_contact": "info@noderunners.biz",
+ "details": "Noderunners is a professional validator in POS networks. We have a huge node running experience, reliable soft and hardware. Our commissions are always low, our support to delegators is always full. Stake with us and start receiving your cosmos rewards now!"
+ },
+ "unbonding_height": "147302",
+ "unbonding_time": "2021-11-08T22:58:53.718662452Z",
+ "commission": {
+ "commission_rates": {
+ "rate": "0.050000000000000000",
+ "max_rate": "0.200000000000000000",
+ "max_change_rate": "0.100000000000000000"
+ },
+ "update_time": "2021-10-04T18:02:21.446645619Z"
+ },
+ "min_self_delegation": "1"
+ }
+ ],
+ "pagination": {
+ "next_key": "FONDBFkE4tEEf7yxWWKOD49jC2NK",
+ "total": "2"
+ }
+}
+```
+
+#### Validator
+
+The `Validator` REST endpoint queries validator information for given validator address.
+
+```bash
+/cosmos/staking/v1beta1/validators/{validatorAddr}
+```
+
+Example:
+
+```bash
+curl -X GET \
+"http://localhost:1317/cosmos/staking/v1beta1/validators/cosmosvaloper16msryt3fqlxtvsy8u5ay7wv2p8mglfg9g70e3q" \
+-H "accept: application/json"
+```
+
+Example Output:
+
+```bash expandable
+{
+ "validator": {
+ "operator_address": "cosmosvaloper16msryt3fqlxtvsy8u5ay7wv2p8mglfg9g70e3q",
+ "consensus_pubkey": {
+ "@type": "/cosmos.crypto.ed25519.PubKey",
+ "key": "sIiexdJdYWn27+7iUHQJDnkp63gq/rzUq1Y+fxoGjXc="
+ },
+ "jailed": false,
+ "status": "BOND_STATUS_BONDED",
+ "tokens": "33027900000",
+ "delegator_shares": "33027900000.000000000000000000",
+ "description": {
+ "moniker": "Witval",
+ "identity": "51468B615127273A",
+ "website": "",
+ "security_contact": "",
+ "details": "Witval is the validator arm from Vitwit. Vitwit is into software consulting and services business since 2015. We are working closely with Cosmos ecosystem since 2018. We are also building tools for the ecosystem, Aneka is our explorer for the cosmos ecosystem."
+ },
+ "unbonding_height": "0",
+ "unbonding_time": "1970-01-01T00:00:00Z",
+ "commission": {
+ "commission_rates": {
+ "rate": "0.050000000000000000",
+ "max_rate": "0.200000000000000000",
+ "max_change_rate": "0.020000000000000000"
+ },
+ "update_time": "2021-10-01T19:24:52.663191049Z"
+ },
+ "min_self_delegation": "1"
+ }
+}
+```
+
+#### ValidatorDelegations
+
+The `ValidatorDelegations` REST endpoint queries delegate information for given validator.
+
+```bash
+/cosmos/staking/v1beta1/validators/{validatorAddr}/delegations
+```
+
+Example:
+
+```bash
+curl -X GET "http://localhost:1317/cosmos/staking/v1beta1/validators/cosmosvaloper16msryt3fqlxtvsy8u5ay7wv2p8mglfg9g70e3q/delegations" -H "accept: application/json"
+```
+
+Example Output:
+
+```bash expandable
+{
+ "delegation_responses": [
+ {
+ "delegation": {
+ "delegator_address": "cosmos190g5j8aszqhvtg7cprmev8xcxs6csra7xnk3n3",
+ "validator_address": "cosmosvaloper16msryt3fqlxtvsy8u5ay7wv2p8mglfg9g70e3q",
+ "shares": "31000000000.000000000000000000"
+ },
+ "balance": {
+ "denom": "stake",
+ "amount": "31000000000"
+ }
+ },
+ {
+ "delegation": {
+ "delegator_address": "cosmos1ddle9tczl87gsvmeva3c48nenyng4n56qwq4ee",
+ "validator_address": "cosmosvaloper16msryt3fqlxtvsy8u5ay7wv2p8mglfg9g70e3q",
+ "shares": "628470000.000000000000000000"
+ },
+ "balance": {
+ "denom": "stake",
+ "amount": "628470000"
+ }
+ },
+ {
+ "delegation": {
+ "delegator_address": "cosmos10fdvkczl76m040smd33lh9xn9j0cf26kk4s2nw",
+ "validator_address": "cosmosvaloper16msryt3fqlxtvsy8u5ay7wv2p8mglfg9g70e3q",
+ "shares": "838120000.000000000000000000"
+ },
+ "balance": {
+ "denom": "stake",
+ "amount": "838120000"
+ }
+ },
+ {
+ "delegation": {
+ "delegator_address": "cosmos1n8f5fknsv2yt7a8u6nrx30zqy7lu9jfm0t5lq8",
+ "validator_address": "cosmosvaloper16msryt3fqlxtvsy8u5ay7wv2p8mglfg9g70e3q",
+ "shares": "500000000.000000000000000000"
+ },
+ "balance": {
+ "denom": "stake",
+ "amount": "500000000"
+ }
+ },
+ {
+ "delegation": {
+ "delegator_address": "cosmos16msryt3fqlxtvsy8u5ay7wv2p8mglfg9hrek2e",
+ "validator_address": "cosmosvaloper16msryt3fqlxtvsy8u5ay7wv2p8mglfg9g70e3q",
+ "shares": "61310000.000000000000000000"
+ },
+ "balance": {
+ "denom": "stake",
+ "amount": "61310000"
+ }
+ }
+ ],
+ "pagination": {
+ "next_key": null,
+ "total": "5"
+ }
+}
+```
+
+#### Delegation
+
+The `Delegation` REST endpoint queries delegate information for given validator delegator pair.
+
+```bash
+/cosmos/staking/v1beta1/validators/{validatorAddr}/delegations/{delegatorAddr}
+```
+
+Example:
+
+```bash
+curl -X GET \
+"http://localhost:1317/cosmos/staking/v1beta1/validators/cosmosvaloper16msryt3fqlxtvsy8u5ay7wv2p8mglfg9g70e3q/delegations/cosmos1n8f5fknsv2yt7a8u6nrx30zqy7lu9jfm0t5lq8" \
+-H "accept: application/json"
+```
+
+Example Output:
+
+```bash expandable
+{
+ "delegation_response": {
+ "delegation": {
+ "delegator_address": "cosmos1n8f5fknsv2yt7a8u6nrx30zqy7lu9jfm0t5lq8",
+ "validator_address": "cosmosvaloper16msryt3fqlxtvsy8u5ay7wv2p8mglfg9g70e3q",
+ "shares": "500000000.000000000000000000"
+ },
+ "balance": {
+ "denom": "stake",
+ "amount": "500000000"
+ }
+ }
+}
+```
+
+#### UnbondingDelegation
+
+The `UnbondingDelegation` REST endpoint queries unbonding information for given validator delegator pair.
+
+```bash
+/cosmos/staking/v1beta1/validators/{validatorAddr}/delegations/{delegatorAddr}/unbonding_delegation
+```
+
+Example:
+
+```bash
+curl -X GET \
+"http://localhost:1317/cosmos/staking/v1beta1/validators/cosmosvaloper13v4spsah85ps4vtrw07vzea37gq5la5gktlkeu/delegations/cosmos1ze2ye5u5k3qdlexvt2e0nn0508p04094ya0qpm/unbonding_delegation" \
+-H "accept: application/json"
+```
+
+Example Output:
+
+```bash expandable
+{
+ "unbond": {
+ "delegator_address": "cosmos1ze2ye5u5k3qdlexvt2e0nn0508p04094ya0qpm",
+ "validator_address": "cosmosvaloper13v4spsah85ps4vtrw07vzea37gq5la5gktlkeu",
+ "entries": [
+ {
+ "creation_height": "153687",
+ "completion_time": "2021-11-09T09:41:18.352401903Z",
+ "initial_balance": "525111",
+ "balance": "525111"
+ }
+ ]
+ }
+}
+```
+
+#### ValidatorUnbondingDelegations
+
+The `ValidatorUnbondingDelegations` REST endpoint queries unbonding delegations of a validator.
+
+```bash
+/cosmos/staking/v1beta1/validators/{validatorAddr}/unbonding_delegations
+```
+
+Example:
+
+```bash
+curl -X GET \
+"http://localhost:1317/cosmos/staking/v1beta1/validators/cosmosvaloper13v4spsah85ps4vtrw07vzea37gq5la5gktlkeu/unbonding_delegations" \
+-H "accept: application/json"
+```
+
+Example Output:
+
+```bash expandable
+{
+ "unbonding_responses": [
+ {
+ "delegator_address": "cosmos1q9snn84jfrd9ge8t46kdcggpe58dua82vnj7uy",
+ "validator_address": "cosmosvaloper13v4spsah85ps4vtrw07vzea37gq5la5gktlkeu",
+ "entries": [
+ {
+ "creation_height": "90998",
+ "completion_time": "2021-11-05T00:14:37.005841058Z",
+ "initial_balance": "24000000",
+ "balance": "24000000"
+ }
+ ]
+ },
+ {
+ "delegator_address": "cosmos1qf36e6wmq9h4twhdvs6pyq9qcaeu7ye0s3dqq2",
+ "validator_address": "cosmosvaloper13v4spsah85ps4vtrw07vzea37gq5la5gktlkeu",
+ "entries": [
+ {
+ "creation_height": "47478",
+ "completion_time": "2021-11-01T22:47:26.714116854Z",
+ "initial_balance": "8000000",
+ "balance": "8000000"
+ }
+ ]
+ }
+ ],
+ "pagination": {
+ "next_key": null,
+ "total": "2"
+ }
+}
+```
diff --git a/sdk/next/build/modules/upgrade/README.mdx b/sdk/next/build/modules/upgrade/README.mdx
new file mode 100644
index 000000000..33c1aa718
--- /dev/null
+++ b/sdk/next/build/modules/upgrade/README.mdx
@@ -0,0 +1,609 @@
+---
+title: 'x/upgrade'
+---
+
+## Abstract
+
+`x/upgrade` is an implementation of a Cosmos SDK module that facilitates smoothly
+upgrading a live Cosmos chain to a new (breaking) software version. It accomplishes this by
+providing a `PreBlocker` hook that prevents the blockchain state machine from
+proceeding once a pre-defined upgrade block height has been reached.
+
+The module does not prescribe anything regarding how governance decides to do an
+upgrade, but just the mechanism for coordinating the upgrade safely. Without software
+support for upgrades, upgrading a live chain is risky because all of the validators
+need to pause their state machines at exactly the same point in the process. If
+this is not done correctly, there can be state inconsistencies which are hard to
+recover from.
+
+* [Concepts](#concepts)
+* [State](#state)
+* [Events](#events)
+* [Client](#client)
+ * [CLI](#cli)
+ * [REST](#rest)
+ * [gRPC](#grpc)
+* [Resources](#resources)
+
+## Concepts
+
+### Plan
+
+The `x/upgrade` module defines a `Plan` type in which a live upgrade is scheduled
+to occur. A `Plan` can be scheduled at a specific block height.
+A `Plan` is created once a (frozen) release candidate along with an appropriate upgrade
+`Handler` (see below) is agreed upon, where the `Name` of a `Plan` corresponds to a
+specific `Handler`. Typically, a `Plan` is created through a governance proposal
+process, where if voted upon and passed, will be scheduled. The `Info` of a `Plan`
+may contain various metadata about the upgrade, typically application specific
+upgrade info to be included on-chain such as a git commit that validators could
+automatically upgrade to.
+
+```go
+type Plan struct {
+ Name string
+ Height int64
+ Info string
+}
+```
+
+#### Sidecar Process
+
+If an operator running the application binary also runs a sidecar process to assist
+in the automatic download and upgrade of a binary, the `Info` allows this process to
+be seamless. This tool is [Cosmovisor](https://github.com/cosmos/cosmos-sdk/tree/main/tools/cosmovisor#readme).
+
+### Handler
+
+The `x/upgrade` module facilitates upgrading from major version X to major version Y. To
+accomplish this, node operators must first upgrade their current binary to a new
+binary that has a corresponding `Handler` for the new version Y. It is assumed that
+this version has fully been tested and approved by the community at large. This
+`Handler` defines what state migrations need to occur before the new binary Y
+can successfully run the chain. Naturally, this `Handler` is application specific
+and not defined on a per-module basis. Registering a `Handler` is done via
+`Keeper#SetUpgradeHandler` in the application.
+
+```go
+type UpgradeHandler func(Context, Plan, VersionMap) (VersionMap, error)
+```
+
+During each `PreBlocker` execution, the `x/upgrade` module checks if there exists a
+`Plan` that should execute (is scheduled at that height). If so, the corresponding
+`Handler` is executed. If the `Plan` is expected to execute but no `Handler` is registered
+or if the binary was upgraded too early, the node will gracefully panic and exit.
+
+### StoreLoader
+
+The `x/upgrade` module also facilitates store migrations as part of the upgrade. The
+`StoreLoader` sets the migrations that need to occur before the new binary can
+successfully run the chain. This `StoreLoader` is also application specific and
+not defined on a per-module basis. Registering this `StoreLoader` is done via
+`app#SetStoreLoader` in the application.
+
+```go
+func UpgradeStoreLoader (upgradeHeight int64, storeUpgrades *store.StoreUpgrades)
+
+baseapp.StoreLoader
+```
+
+If there's a planned upgrade and the upgrade height is reached, the old binary writes `Plan` to the disk before panicking.
+
+This information is critical to ensure the `StoreUpgrades` happens smoothly at the correct height and
+expected upgrade. It eliminates the chances for the new binary to execute `StoreUpgrades` multiple
+times on every restart. Also, if there are multiple upgrades planned on the same height, the `Name`
+will ensure these `StoreUpgrades` take place only in the planned upgrade handler.
+
+### Proposal
+
+Typically, a `Plan` is proposed and submitted through governance via a proposal
+containing a `MsgSoftwareUpgrade` message.
+This proposal prescribes to the standard governance process. If the proposal passes,
+the `Plan`, which targets a specific `Handler`, is persisted and scheduled. The
+upgrade can be delayed or hastened by updating the `Plan.Height` in a new proposal.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/upgrade/v1beta1/tx.proto#L29-L41
+```
+
+#### Cancelling Upgrade Proposals
+
+Upgrade proposals can be cancelled. There exists a gov-enabled `MsgCancelUpgrade`
+message type, which can be embedded in a proposal, voted on and, if passed, will
+remove the scheduled upgrade `Plan`.
+Of course this requires that the upgrade was known to be a bad idea well before the
+upgrade itself, to allow time for a vote.
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.47.0-rc1/proto/cosmos/upgrade/v1beta1/tx.proto#L48-L57
+```
+
+If such a possibility is desired, the upgrade height is to be
+`2 * (VotingPeriod + DepositPeriod) + (SafetyDelta)` from the beginning of the
+upgrade proposal. The `SafetyDelta` is the time available from the success of an
+upgrade proposal and the realization it was a bad idea (due to external social consensus).
+
+A `MsgCancelUpgrade` proposal can also be made while the original
+`MsgSoftwareUpgrade` proposal is still being voted upon, as long as the `VotingPeriod`
+ends after the `MsgSoftwareUpgrade` proposal.
+
+## State
+
+The internal state of the `x/upgrade` module is relatively minimal and simple. The
+state contains the currently active upgrade `Plan` (if one exists) by key
+`0x0` and if a `Plan` is marked as "done" by key `0x1`. The state
+contains the consensus versions of all app modules in the application. The versions
+are stored as big endian `uint64`, and can be accessed with prefix `0x2` appended
+by the corresponding module name of type `string`. The state maintains a
+`Protocol Version` which can be accessed by key `0x3`.
+
+* Plan: `0x0 -> Plan`
+* Done: `0x1 | byte(plan name) -> BigEndian(Block Height)`
+* ConsensusVersion: `0x2 | byte(module name) -> BigEndian(Module Consensus Version)`
+* ProtocolVersion: `0x3 -> BigEndian(Protocol Version)`
+
+The `x/upgrade` module contains no genesis state.
+
+## Events
+
+The `x/upgrade` does not emit any events by itself. Any and all proposal related
+events are emitted through the `x/gov` module.
+
+## Client
+
+### CLI
+
+A user can query and interact with the `upgrade` module using the CLI.
+
+#### Query
+
+The `query` commands allow users to query `upgrade` state.
+
+```bash
+simd query upgrade --help
+```
+
+##### applied
+
+The `applied` command allows users to query the block header for the height at which a completed upgrade was applied.
+
+```bash
+simd query upgrade applied [upgrade-name] [flags]
+```
+
+If upgrade-name was previously executed on the chain, this returns the header for the block at which it was applied.
+This helps a client determine which binary was valid over a given range of blocks, as well as more context to understand past migrations.
+
+Example:
+
+```bash
+simd query upgrade applied "test-upgrade"
+```
+
+Example Output:
+
+```bash expandable
+"block_id": {
+ "hash": "A769136351786B9034A5F196DC53F7E50FCEB53B48FA0786E1BFC45A0BB646B5",
+ "parts": {
+ "total": 1,
+ "hash": "B13CBD23011C7480E6F11BE4594EE316548648E6A666B3575409F8F16EC6939E"
+ }
+ },
+ "block_size": "7213",
+ "header": {
+ "version": {
+ "block": "11"
+ },
+ "chain_id": "testnet-2",
+ "height": "455200",
+ "time": "2021-04-10T04:37:57.085493838Z",
+ "last_block_id": {
+ "hash": "0E8AD9309C2DC411DF98217AF59E044A0E1CCEAE7C0338417A70338DF50F4783",
+ "parts": {
+ "total": 1,
+ "hash": "8FE572A48CD10BC2CBB02653CA04CA247A0F6830FF19DC972F64D339A355E77D"
+ }
+ },
+ "last_commit_hash": "DE890239416A19E6164C2076B837CC1D7F7822FC214F305616725F11D2533140",
+ "data_hash": "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855",
+ "validators_hash": "A31047ADE54AE9072EE2A12FF260A8990BA4C39F903EAF5636B50D58DBA72582",
+ "next_validators_hash": "A31047ADE54AE9072EE2A12FF260A8990BA4C39F903EAF5636B50D58DBA72582",
+ "consensus_hash": "048091BC7DDC283F77BFBF91D73C44DA58C3DF8A9CBC867405D8B7F3DAADA22F",
+ "app_hash": "28ECC486AFC332BA6CC976706DBDE87E7D32441375E3F10FD084CD4BAF0DA021",
+ "last_results_hash": "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855",
+ "evidence_hash": "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855",
+ "proposer_address": "2ABC4854B1A1C5AA8403C4EA853A81ACA901CC76"
+ },
+ "num_txs": "0"
+}
+```
+
+##### module versions
+
+The `module_versions` command gets a list of module names and their respective consensus versions.
+
+Following the command with a specific module name will return only
+that module's information.
+
+```bash
+simd query upgrade module_versions [optional module_name] [flags]
+```
+
+Example:
+
+```bash
+simd query upgrade module_versions
+```
+
+Example Output:
+
+```bash expandable
+module_versions:
+- name: auth
+ version: "2"
+- name: authz
+ version: "1"
+- name: bank
+ version: "2"
+- name: distribution
+ version: "2"
+- name: evidence
+ version: "1"
+- name: feegrant
+ version: "1"
+- name: genutil
+ version: "1"
+- name: gov
+ version: "2"
+- name: ibc
+ version: "2"
+- name: mint
+ version: "1"
+- name: params
+ version: "1"
+- name: slashing
+ version: "2"
+- name: staking
+ version: "2"
+- name: transfer
+ version: "1"
+- name: upgrade
+ version: "1"
+- name: vesting
+ version: "1"
+```
+
+Example:
+
+```bash
+simd query upgrade module_versions ibc
+```
+
+Example Output:
+
+```bash
+module_versions:
+- name: ibc
+ version: "2"
+```
+
+##### plan
+
+The `plan` command gets the currently scheduled upgrade plan, if one exists.
+
+```bash
+simd query upgrade plan [flags]
+```
+
+Example:
+
+```bash
+simd query upgrade plan
+```
+
+Example Output:
+
+```bash
+height: "130"
+info: ""
+name: test-upgrade
+time: "0001-01-01T00:00:00Z"
+upgraded_client_state: null
+```
+
+#### Transactions
+
+The upgrade module supports the following transactions:
+
+* `software-upgrade` - submits an upgrade proposal:
+
+```bash
+simd tx upgrade software-upgrade v2 --title="Test Proposal" --summary="testing" --deposit="100000000stake" --upgrade-height 1000000 \
+--upgrade-info '{ "binaries": { "linux/amd64":"https://example.com/simd.zip?checksum=sha256:aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f" } }' --from cosmos1..
+```
+
+* `cancel-software-upgrade` - cancels a previously submitted upgrade proposal:
+
+```bash
+simd tx upgrade cancel-software-upgrade --title="Test Proposal" --summary="testing" --deposit="100000000stake" --from cosmos1..
+```
+
+### REST
+
+A user can query the `upgrade` module using REST endpoints.
+
+#### Applied Plan
+
+`AppliedPlan` queries a previously applied upgrade plan by its name.
+
+```bash
+/cosmos/upgrade/v1beta1/applied_plan/{name}
+```
+
+Example:
+
+```bash
+curl -X GET "http://localhost:1317/cosmos/upgrade/v1beta1/applied_plan/v2.0-upgrade" -H "accept: application/json"
+```
+
+Example Output:
+
+```bash
+{
+ "height": "30"
+}
+```
+
+#### Current Plan
+
+`CurrentPlan` queries the current upgrade plan.
+
+```bash
+/cosmos/upgrade/v1beta1/current_plan
+```
+
+Example:
+
+```bash
+curl -X GET "http://localhost:1317/cosmos/upgrade/v1beta1/current_plan" -H "accept: application/json"
+```
+
+Example Output:
+
+```bash
+{
+ "plan": "v2.1-upgrade"
+}
+```
+
+#### Module versions
+
+`ModuleVersions` queries the list of module versions from state.
+
+```bash
+/cosmos/upgrade/v1beta1/module_versions
+```
+
+Example:
+
+```bash
+curl -X GET "http://localhost:1317/cosmos/upgrade/v1beta1/module_versions" -H "accept: application/json"
+```
+
+Example Output:
+
+```bash expandable
+{
+ "module_versions": [
+ {
+ "name": "auth",
+ "version": "2"
+ },
+ {
+ "name": "authz",
+ "version": "1"
+ },
+ {
+ "name": "bank",
+ "version": "2"
+ },
+ {
+ "name": "distribution",
+ "version": "2"
+ },
+ {
+ "name": "evidence",
+ "version": "1"
+ },
+ {
+ "name": "feegrant",
+ "version": "1"
+ },
+ {
+ "name": "genutil",
+ "version": "1"
+ },
+ {
+ "name": "gov",
+ "version": "2"
+ },
+ {
+ "name": "ibc",
+ "version": "2"
+ },
+ {
+ "name": "mint",
+ "version": "1"
+ },
+ {
+ "name": "params",
+ "version": "1"
+ },
+ {
+ "name": "slashing",
+ "version": "2"
+ },
+ {
+ "name": "staking",
+ "version": "2"
+ },
+ {
+ "name": "transfer",
+ "version": "1"
+ },
+ {
+ "name": "upgrade",
+ "version": "1"
+ },
+ {
+ "name": "vesting",
+ "version": "1"
+ }
+ ]
+}
+```
+
+### gRPC
+
+A user can query the `upgrade` module using gRPC endpoints.
+
+#### Applied Plan
+
+`AppliedPlan` queries a previously applied upgrade plan by its name.
+
+```bash
+cosmos.upgrade.v1beta1.Query/AppliedPlan
+```
+
+Example:
+
+```bash
+grpcurl -plaintext \
+ -d '{"name":"v2.0-upgrade"}' \
+ localhost:9090 \
+ cosmos.upgrade.v1beta1.Query/AppliedPlan
+```
+
+Example Output:
+
+```bash
+{
+ "height": "30"
+}
+```
+
+#### Current Plan
+
+`CurrentPlan` queries the current upgrade plan.
+
+```bash
+cosmos.upgrade.v1beta1.Query/CurrentPlan
+```
+
+Example:
+
+```bash
+grpcurl -plaintext localhost:9090 cosmos.upgrade.v1beta1.Query/CurrentPlan
+```
+
+Example Output:
+
+```bash
+{
+ "plan": "v2.1-upgrade"
+}
+```
+
+#### Module versions
+
+`ModuleVersions` queries the list of module versions from state.
+
+```bash
+cosmos.upgrade.v1beta1.Query/ModuleVersions
+```
+
+Example:
+
+```bash
+grpcurl -plaintext localhost:9090 cosmos.upgrade.v1beta1.Query/ModuleVersions
+```
+
+Example Output:
+
+```bash expandable
+{
+ "module_versions": [
+ {
+ "name": "auth",
+ "version": "2"
+ },
+ {
+ "name": "authz",
+ "version": "1"
+ },
+ {
+ "name": "bank",
+ "version": "2"
+ },
+ {
+ "name": "distribution",
+ "version": "2"
+ },
+ {
+ "name": "evidence",
+ "version": "1"
+ },
+ {
+ "name": "feegrant",
+ "version": "1"
+ },
+ {
+ "name": "genutil",
+ "version": "1"
+ },
+ {
+ "name": "gov",
+ "version": "2"
+ },
+ {
+ "name": "ibc",
+ "version": "2"
+ },
+ {
+ "name": "mint",
+ "version": "1"
+ },
+ {
+ "name": "params",
+ "version": "1"
+ },
+ {
+ "name": "slashing",
+ "version": "2"
+ },
+ {
+ "name": "staking",
+ "version": "2"
+ },
+ {
+ "name": "transfer",
+ "version": "1"
+ },
+ {
+ "name": "upgrade",
+ "version": "1"
+ },
+ {
+ "name": "vesting",
+ "version": "1"
+ }
+ ]
+}
+```
+
+## Resources
+
+A list of (external) resources to learn more about the `x/upgrade` module.
+
+* [Cosmos Dev Series: Cosmos Blockchain Upgrade](https://medium.com/web3-surfers/cosmos-dev-series-cosmos-sdk-based-blockchain-upgrade-b5e99181554c) - The blog post that explains how software upgrades work in detail.
diff --git a/sdk/next/build/packages.mdx b/sdk/next/build/packages.mdx
new file mode 100644
index 000000000..11e1de823
--- /dev/null
+++ b/sdk/next/build/packages.mdx
@@ -0,0 +1,35 @@
+---
+title: "Packages"
+description: "Version: v0.53"
+---
+
+The Cosmos SDK is a collection of Go modules. This section provides documentation on various packages that can be used when developing a Cosmos SDK chain. It lists all standalone Go modules that are part of the Cosmos SDK.
+
+
+ For more information on SDK modules, see the [SDK Modules](/sdk/v0.53/build/modules/modules) section. For more information on SDK tooling, see the [Tooling](/sdk/v0.53/build/tooling) section.
+
+
+## Core[](#core "Direct link to Core")
+
+* [Core](https://pkg.go.dev/cosmossdk.io/core) - Core library defining SDK interfaces ([ADR-063](/sdk/v0.53/build/architecture/adr-063-core-module-api))
+* [API](https://pkg.go.dev/cosmossdk.io/api) - API library containing generated SDK Pulsar API
+* [Store](https://pkg.go.dev/cosmossdk.io/store) - Implementation of the Cosmos SDK store
+
+## State Management[](#state-management "Direct link to State Management")
+
+* [Collections](/sdk/v0.53/build/packages/collections) - State management library
+
+## Automation[](#automation "Direct link to Automation")
+
+* [Depinject](/sdk/v0.53/build/packages/depinject) - Dependency injection framework
+* [Client/v2](https://pkg.go.dev/cosmossdk.io/client/v2) - Library powering [AutoCLI](/sdk/v0.53/learn/advanced/autocli)
+
+## Utilities[](#utilities "Direct link to Utilities")
+
+* [Log](https://pkg.go.dev/cosmossdk.io/log) - Logging library
+* [Errors](https://pkg.go.dev/cosmossdk.io/errors) - Error handling library
+* [Math](https://pkg.go.dev/cosmossdk.io/math) - Math library for SDK arithmetic operations
+
+## Example[](#example "Direct link to Example")
+
+* [SimApp](https://pkg.go.dev/cosmossdk.io/simapp) - SimApp is the sample Cosmos SDK chain. This package should not be imported in your application.
diff --git a/sdk/next/build/packages/README.mdx b/sdk/next/build/packages/README.mdx
new file mode 100644
index 000000000..b1be086c3
--- /dev/null
+++ b/sdk/next/build/packages/README.mdx
@@ -0,0 +1,40 @@
+---
+title: Packages
+description: >-
+ The Cosmos SDK is a collection of Go modules. This section provides
+ documentation on various packages that can be used when developing a Cosmos SDK
+ chain. It lists all standalone Go modules that are part of the Cosmos SDK.
+---
+
+The Cosmos SDK is a collection of Go modules. This section provides documentation on various packages that can be used when developing a Cosmos SDK chain.
+It lists all standalone Go modules that are part of the Cosmos SDK.
+
+
+For more information on SDK modules, see the [SDK Modules](/sdk/v0.53/build/modules) section.
+For more information on SDK tooling, see the [Tooling](/sdk/v0.53/build/tooling) section.
+
+
+## Core
+
+* [Core](https://pkg.go.dev/cosmossdk.io/core) - Core library defining SDK interfaces ([ADR-063](/sdk/v0.53/build/architecture/adr-063-core-module-api))
+* [API](https://pkg.go.dev/cosmossdk.io/api) - API library containing generated SDK Pulsar API
+* [Store](https://pkg.go.dev/cosmossdk.io/store) - Implementation of the Cosmos SDK store
+
+## State Management
+
+* [Collections](/sdk/v0.53/build/packages/collections) - State management library
+
+## Automation
+
+* [Depinject](/sdk/v0.53/build/packages/depinject) - Dependency injection framework
+* [Client/v2](https://pkg.go.dev/cosmossdk.io/client/v2) - Library powering [AutoCLI](/sdk/v0.53/learn/advanced/autocli)
+
+## Utilities
+
+* [Log](https://pkg.go.dev/cosmossdk.io/log) - Logging library
+* [Errors](https://pkg.go.dev/cosmossdk.io/errors) - Error handling library
+* [Math](https://pkg.go.dev/cosmossdk.io/math) - Math library for SDK arithmetic operations
+
+## Example
+
+* [SimApp](https://pkg.go.dev/cosmossdk.io/simapp) - SimApp is the sample Cosmos SDK chain. This package should not be imported in your application.
diff --git a/sdk/next/build/packages/collections.mdx b/sdk/next/build/packages/collections.mdx
new file mode 100644
index 000000000..d5d2cd46a
--- /dev/null
+++ b/sdk/next/build/packages/collections.mdx
@@ -0,0 +1,1374 @@
+---
+title: Collections
+description: >-
+ Collections is a library meant to simplify the experience with respect to
+ module state handling.
+---
+
+Collections is a library meant to simplify the experience with respect to module state handling.
+
+Cosmos SDK modules handle their state using the `KVStore` interface. The problem with working with
+`KVStore` is that it forces you to think of state as a bytes KV pairings when in reality the majority of
+state comes from complex concrete golang objects (strings, ints, structs, etc.).
+
+Collections allows you to work with state as if they were normal golang objects and removes the need
+for you to think of your state as raw bytes in your code.
+
+It also allows you to migrate your existing state without causing any state breakage that forces you into
+tedious and complex chain state migrations.
+
+## Installation
+
+To install collections in your cosmos-sdk chain project, run the following command:
+
+```shell
+go get cosmossdk.io/collections
+```
+
+## Core types
+
+Collections offers 5 different APIs to work with state, which will be explored in the next sections, these APIs are:
+
+* `Map`: to work with typed arbitrary KV pairings.
+* `KeySet`: to work with just typed keys
+* `Item`: to work with just one typed value
+* `Sequence`: which is a monotonically increasing number.
+* `IndexedMap`: which combines `Map` and `KeySet` to provide a `Map` with indexing capabilities.
+
+## Preliminary components
+
+Before exploring the different collections types and their capability it is necessary to introduce
+the three components that every collection shares. In fact when instantiating a collection type by doing, for example,
+`collections.NewMap/collections.NewItem/...` you will find yourself having to pass them some common arguments.
+
+For example, in code:
+
+```go expandable
+package collections
+
+import (
+
+ "cosmossdk.io/collections"
+ storetypes "cosmossdk.io/store/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+var AllowListPrefix = collections.NewPrefix(0)
+
+type Keeper struct {
+ Schema collections.Schema
+ AllowList collections.KeySet[string]
+}
+
+func NewKeeper(storeKey *storetypes.KVStoreKey)
+
+Keeper {
+ sb := collections.NewSchemaBuilder(sdk.OpenKVStore(storeKey))
+
+return Keeper{
+ AllowList: collections.NewKeySet(sb, AllowListPrefix, "allow_list", collections.StringKey),
+}
+}
+```
+
+Let's analyze the shared arguments, what they do, and why we need them.
+
+### SchemaBuilder
+
+The first argument passed is the `SchemaBuilder`
+
+`SchemaBuilder` is a structure that keeps track of all the state of a module, it is not required by the collections
+to deal with state but it offers a dynamic and reflective way for clients to explore a module's state.
+
+We instantiate a `SchemaBuilder` by passing it a function that given the modules store key returns the module's specific store.
+
+We then need to pass the schema builder to every collection type we instantiate in our keeper, in our case the `AllowList`.
+
+### Prefix
+
+The second argument passed to our `KeySet` is a `collections.Prefix`, a prefix represents a partition of the module's `KVStore`
+where all the state of a specific collection will be saved.
+
+Since a module can have multiple collections, the following is expected:
+
+* module params will become a `collections.Item`
+* the `AllowList` is a `collections.KeySet`
+
+We don't want a collection to write over the state of the other collection so we pass it a prefix, which defines a storage
+partition owned by the collection.
+
+If you already built modules, the prefix translates to the items you were creating in your `types/keys.go` file, example: [Link](https://github.com/cosmos/cosmos-sdk/blob/v0.52.0-rc.1/x/feegrant/key.go#L16~L22)
+
+your old:
+
+```go
+var (
+ // FeeAllowanceKeyPrefix is the set of the kvstore for fee allowance data
+ // - 0x00: allowance
+ FeeAllowanceKeyPrefix = []byte{0x00
+}
+
+ // FeeAllowanceQueueKeyPrefix is the set of the kvstore for fee allowance keys data
+ // - 0x01:
+ FeeAllowanceQueueKeyPrefix = []byte{0x01
+}
+)
+```
+
+becomes:
+
+```go
+var (
+ // FeeAllowanceKeyPrefix is the set of the kvstore for fee allowance data
+ // - 0x00: allowance
+ FeeAllowanceKeyPrefix = collections.NewPrefix(0)
+
+ // FeeAllowanceQueueKeyPrefix is the set of the kvstore for fee allowance keys data
+ // - 0x01:
+ FeeAllowanceQueueKeyPrefix = collections.NewPrefix(1)
+)
+```
+
+#### Rules
+
+`collections.NewPrefix` accepts either `uint8`, `string` or `[]bytes`; it's good practice to use an always increasing `uint8` for disk space efficiency.
+
+A collection **MUST NOT** share the same prefix as another collection in the same module, and a collection prefix **MUST NEVER** start with the same prefix as another, examples:
+
+```go
+prefix1 := collections.NewPrefix("prefix")
+
+prefix2 := collections.NewPrefix("prefix") // THIS IS BAD!
+```
+
+```go
+prefix1 := collections.NewPrefix("a")
+
+prefix2 := collections.NewPrefix("aa") // prefix2 starts with the same as prefix1: BAD!!!
+```
+
+### Human-Readable Name
+
+The third parameter we pass to a collection is a string, which is a human-readable name.
+It is needed to make the role of a collection understandable by clients who have no clue about
+what a module is storing in state.
+
+#### Rules
+
+Each collection in a module **MUST** have a unique humanized name.
+
+## Key and Value Codecs
+
+A collection is generic over the type you can use as keys or values.
+This makes collections dumb, but also means that hypothetically we can store everything
+that can be a go type into a collection. We are not bounded to any type of encoding (be it proto, json or whatever)
+
+So a collection needs to be given a way to understand how to convert your keys and values to bytes.
+This is achieved through `KeyCodec` and `ValueCodec`, which are arguments that you pass to your
+collections when you're instantiating them using the `collections.NewMap/collections.NewItem/...`
+instantiation functions.
+
+NOTE: Generally speaking you will never be required to implement your own `Key/ValueCodec` as
+the SDK and collections libraries already come with default, safe and fast implementation of those.
+You might need to implement them only if you're migrating to collections and there are state layout incompatibilities.
+
+Let's explore an example:
+
+```go expandable
+package collections
+
+import (
+
+ "cosmossdk.io/collections"
+ storetypes "cosmossdk.io/store/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+var IDsPrefix = collections.NewPrefix(0)
+
+type Keeper struct {
+ Schema collections.Schema
+ IDs collections.Map[string, uint64]
+}
+
+func NewKeeper(storeKey *storetypes.KVStoreKey)
+
+Keeper {
+ sb := collections.NewSchemaBuilder(sdk.OpenKVStore(storeKey))
+
+return Keeper{
+ IDs: collections.NewMap(sb, IDsPrefix, "ids", collections.StringKey, collections.Uint64Value),
+}
+}
+```
+
+We're now instantiating a map where the key is string and the value is `uint64`.
+We already know the first three arguments of the `NewMap` function.
+
+The fourth parameter is our `KeyCodec`, we know that the `Map` has `string` as key so we pass it a `KeyCodec` that handles strings as keys.
+
+The fifth parameter is our `ValueCodec`, we know that the `Map` has a `uint64` as value so we pass it a `ValueCodec` that handles uint64.
+
+Collections already comes with all the required implementations for golang primitive types.
+
+Let's make another example, this falls closer to what we build using cosmos SDK, let's say we want
+to create a `collections.Map` that maps account addresses to their base account. So we want to map an `sdk.AccAddress` to an `auth.BaseAccount` (which is a proto):
+
+```go expandable
+package collections
+
+import (
+
+ "cosmossdk.io/collections"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+)
+
+var AccountsPrefix = collections.NewPrefix(0)
+
+type Keeper struct {
+ Schema collections.Schema
+ Accounts collections.Map[sdk.AccAddress, authtypes.BaseAccount]
+}
+
+func NewKeeper(storeKey *storetypes.KVStoreKey, cdc codec.BinaryCodec)
+
+Keeper {
+ sb := collections.NewSchemaBuilder(sdk.OpenKVStore(storeKey))
+
+return Keeper{
+ Accounts: collections.NewMap(sb, AccountsPrefix, "accounts",
+ sdk.AccAddressKey, codec.CollValue[authtypes.BaseAccount](cdc)),
+}
+}
+```
+
+As we can see here since our `collections.Map` maps `sdk.AccAddress` to `authtypes.BaseAccount`,
+we use the `sdk.AccAddressKey` which is the `KeyCodec` implementation for `AccAddress` and we use `codec.CollValue` to
+encode our proto type `BaseAccount`.
+
+Generally speaking you will always find the respective key and value codecs for types in the `go.mod` path you're using
+to import that type. If you want to encode proto values refer to the codec `codec.CollValue` function, which allows you
+to encode any type implementing the `proto.Message` interface.
+
+## Map
+
+We analyze the first and most important collection type, the `collections.Map`.
+This is the type that everything else builds on top of.
+
+### Use case
+
+A `collections.Map` is used to map arbitrary keys with arbitrary values.
+
+### Example
+
+It's easier to explain a `collections.Map` capabilities through an example:
+
+```go expandable
+package collections
+
+import (
+
+ "cosmossdk.io/collections"
+ storetypes "cosmossdk.io/store/types"
+ "fmt"
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+)
+
+var AccountsPrefix = collections.NewPrefix(0)
+
+type Keeper struct {
+ Schema collections.Schema
+ Accounts collections.Map[sdk.AccAddress, authtypes.BaseAccount]
+}
+
+func NewKeeper(storeKey *storetypes.KVStoreKey, cdc codec.BinaryCodec)
+
+Keeper {
+ sb := collections.NewSchemaBuilder(sdk.OpenKVStore(storeKey))
+
+return Keeper{
+ Accounts: collections.NewMap(sb, AccountsPrefix, "accounts",
+ sdk.AccAddressKey, codec.CollValue[authtypes.BaseAccount](cdc)),
+}
+}
+
+func (k Keeper)
+
+CreateAccount(ctx sdk.Context, addr sdk.AccAddress, account authtypes.BaseAccount)
+
+error {
+ has, err := k.Accounts.Has(ctx, addr)
+ if err != nil {
+ return err
+}
+ if has {
+ return fmt.Errorf("account already exists: %s", addr)
+}
+
+err = k.Accounts.Set(ctx, addr, account)
+ if err != nil {
+ return err
+}
+
+return nil
+}
+
+func (k Keeper)
+
+GetAccount(ctx sdk.Context, addr sdk.AccAddress) (authtypes.BaseAccount, error) {
+ acc, err := k.Accounts.Get(ctx, addr)
+ if err != nil {
+ return authtypes.BaseAccount{
+}, err
+}
+
+return acc, nil
+}
+
+func (k Keeper)
+
+RemoveAccount(ctx sdk.Context, addr sdk.AccAddress)
+
+error {
+ err := k.Accounts.Remove(ctx, addr)
+ if err != nil {
+ return err
+}
+
+return nil
+}
+```
+
+#### Set method
+
+Set maps with the provided `AccAddress` (the key) to the `auth.BaseAccount` (the value).
+
+Under the hood the `collections.Map` will convert the key and value to bytes using the [key and value codec](#key-and-value-codecs).
+It will prepend to our bytes key the [prefix](#prefix) and store it in the KVStore of the module.
+
+#### Has method
+
+The has method reports if the provided key exists in the store.
+
+#### Get method
+
+The get method accepts the `AccAddress` and returns the associated `auth.BaseAccount` if it exists, otherwise it errors.
+
+#### Remove method
+
+The remove method accepts the `AccAddress` and removes it from the store. It won't report errors
+if it does not exist, to check for existence before removal use the `Has` method.
+
+#### Iteration
+
+Iteration has a separate section.
+
+## KeySet
+
+The second type of collection is `collections.KeySet`, as the word suggests it maintains
+only a set of keys without values.
+
+#### Implementation curiosity
+
+A `collections.KeySet` is just a `collections.Map` with a `key` but no value.
+The value internally is always the same and is represented as an empty byte slice `[]byte{}`.
+
+### Example
+
+As always we explore the collection type through an example:
+
+```go expandable
+package collections
+
+import (
+
+ "cosmossdk.io/collections"
+ storetypes "cosmossdk.io/store/types"
+ "fmt"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+var ValidatorsSetPrefix = collections.NewPrefix(0)
+
+type Keeper struct {
+ Schema collections.Schema
+ ValidatorsSet collections.KeySet[sdk.ValAddress]
+}
+
+func NewKeeper(storeKey *storetypes.KVStoreKey)
+
+Keeper {
+ sb := collections.NewSchemaBuilder(sdk.OpenKVStore(storeKey))
+
+return Keeper{
+ ValidatorsSet: collections.NewKeySet(sb, ValidatorsSetPrefix, "validators_set", sdk.ValAddressKey),
+}
+}
+
+func (k Keeper)
+
+AddValidator(ctx sdk.Context, validator sdk.ValAddress)
+
+error {
+ has, err := k.ValidatorsSet.Has(ctx, validator)
+ if err != nil {
+ return err
+}
+ if has {
+ return fmt.Errorf("validator already in set: %s", validator)
+}
+
+err = k.ValidatorsSet.Set(ctx, validator)
+ if err != nil {
+ return err
+}
+
+return nil
+}
+
+func (k Keeper)
+
+RemoveValidator(ctx sdk.Context, validator sdk.ValAddress)
+
+error {
+ err := k.ValidatorsSet.Remove(ctx, validator)
+ if err != nil {
+ return err
+}
+
+return nil
+}
+```
+
+The first difference we notice is that `KeySet` needs us to specify only one type parameter: the key (`sdk.ValAddress` in this case).
+The second difference we notice is that `KeySet` in its `NewKeySet` function does not require
+us to specify a `ValueCodec` but only a `KeyCodec`. This is because a `KeySet` only saves keys and not values.
+
+Let's explore the methods.
+
+#### Has method
+
+Has allows us to understand if a key is present in the `collections.KeySet` or not, functions in the same way as `collections.Map.Has`.
+
+#### Set method
+
+Set inserts the provided key in the `KeySet`.
+
+#### Remove method
+
+Remove removes the provided key from the `KeySet`, it does not error if the key does not exist,
+if existence check before removal is required it needs to be coupled with the `Has` method.
+
+## Item
+
+The third type of collection is the `collections.Item`.
+It stores only one single item, it's useful for example for parameters, there's only one instance
+of parameters in state always.
+
+#### Implementation curiosity
+
+A `collections.Item` is just a `collections.Map` with no key but just a value.
+The key is the prefix of the collection!
+
+### Example
+
+```go expandable
+package collections
+
+import (
+
+ "cosmossdk.io/collections"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ stakingtypes "cosmossdk.io/x/staking/types"
+)
+
+var ParamsPrefix = collections.NewPrefix(0)
+
+type Keeper struct {
+ Schema collections.Schema
+ Params collections.Item[stakingtypes.Params]
+}
+
+func NewKeeper(storeKey *storetypes.KVStoreKey, cdc codec.BinaryCodec)
+
+Keeper {
+ sb := collections.NewSchemaBuilder(sdk.OpenKVStore(storeKey))
+
+return Keeper{
+ Params: collections.NewItem(sb, ParamsPrefix, "params", codec.CollValue[stakingtypes.Params](cdc)),
+}
+}
+
+func (k Keeper)
+
+UpdateParams(ctx sdk.Context, params stakingtypes.Params)
+
+error {
+ err := k.Params.Set(ctx, params)
+ if err != nil {
+ return err
+}
+
+return nil
+}
+
+func (k Keeper)
+
+GetParams(ctx sdk.Context) (stakingtypes.Params, error) {
+ return k.Params.Get(ctx)
+}
+```
+
+The first key difference we notice is that we specify only one type parameter, which is the value we're storing.
+The second key difference is that we don't specify the `KeyCodec`, since we store only one item we already know the key
+and the fact that it is constant.
+
+## Iteration
+
+One of the key features of the `KVStore` is iterating over keys.
+
+Collections which deal with keys (so `Map`, `KeySet` and `IndexedMap`) allow you to iterate
+over keys in a safe and typed way. They all share the same API, the only difference being
+that `KeySet` returns a different type of `Iterator` because `KeySet` only deals with keys.
+
+
+
+Every collection shares the same `Iterator` semantics.
+
+
+
+Let's have a look at the `Map.Iterate` method:
+
+```go
+func (m Map[K, V])
+
+Iterate(ctx context.Context, ranger Ranger[K]) (Iterator[K, V], error)
+```
+
+It accepts a `collections.Ranger[K]`, which is an API that instructs map on how to iterate over keys.
+As always we don't need to implement anything here as `collections` already provides some generic `Ranger` implementers
+that expose all you need to work with ranges.
+
+### Example
+
+We have a `collections.Map` that maps accounts using `uint64` IDs.
+
+```go expandable
+package collections
+
+import (
+
+ "cosmossdk.io/collections"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+)
+
+var AccountsPrefix = collections.NewPrefix(0)
+
+type Keeper struct {
+ Schema collections.Schema
+ Accounts collections.Map[uint64, authtypes.BaseAccount]
+}
+
+func NewKeeper(storeKey *storetypes.KVStoreKey, cdc codec.BinaryCodec)
+
+Keeper {
+ sb := collections.NewSchemaBuilder(sdk.OpenKVStore(storeKey))
+
+return Keeper{
+ Accounts: collections.NewMap(sb, AccountsPrefix, "accounts", collections.Uint64Key, codec.CollValue[authtypes.BaseAccount](cdc)),
+}
+}
+
+func (k Keeper)
+
+GetAllAccounts(ctx sdk.Context) ([]authtypes.BaseAccount, error) {
+ // passing a nil Ranger equals to: iterate over every possible key
+ iter, err := k.Accounts.Iterate(ctx, nil)
+ if err != nil {
+ return nil, err
+}
+
+accounts, err := iter.Values()
+ if err != nil {
+ return nil, err
+}
+
+return accounts, err
+}
+
+func (k Keeper)
+
+IterateAccountsBetween(ctx sdk.Context, start, end uint64) ([]authtypes.BaseAccount, error) {
+ // The collections.Range API offers a lot of capabilities
+ // like defining where the iteration starts or ends.
+ rng := new(collections.Range[uint64]).
+ StartInclusive(start).
+ EndExclusive(end).
+ Descending()
+
+iter, err := k.Accounts.Iterate(ctx, rng)
+ if err != nil {
+ return nil, err
+}
+
+accounts, err := iter.Values()
+ if err != nil {
+ return nil, err
+}
+
+return accounts, nil
+}
+
+func (k Keeper)
+
+IterateAccounts(ctx sdk.Context, do func(id uint64, acc authtypes.BaseAccount) (stop bool))
+
+error {
+ iter, err := k.Accounts.Iterate(ctx, nil)
+ if err != nil {
+ return err
+}
+
+defer iter.Close()
+ for ; iter.Valid(); iter.Next() {
+ kv, err := iter.KeyValue()
+ if err != nil {
+ return err
+}
+ if do(kv.Key, kv.Value) {
+ break
+}
+
+}
+
+return nil
+}
+```
+
+Let's analyze each method in the example and how it makes use of the `Iterate` and the returned `Iterator` API.
+
+#### GetAllAccounts
+
+In `GetAllAccounts` we pass to our `Iterate` a nil `Ranger`. This means that the returned `Iterator` will include
+all the existing keys within the collection.
+
+Then we use the `Values` method from the returned `Iterator` API to collect all the values into a slice.
+
+`Iterator` offers other methods such as `Keys()` to collect only the keys and not the values and `KeyValues` to collect
+all the keys and values.
+
+#### IterateAccountsBetween
+
+Here we make use of the `collections.Range` helper to specialize our range.
+We make it start in a point through `StartInclusive` and end in the other with `EndExclusive`, then
+we instruct it to report us results in reverse order through `Descending`.
+
+Then we pass the range instruction to `Iterate` and get an `Iterator`, which will contain only the results
+we specified in the range.
+
+Then we use again the `Values` method of the `Iterator` to collect all the results.
+
+`collections.Range` also offers a `Prefix` API which is not applicable to all keys types,
+for example uint64 cannot be prefix because it is of constant size, but a `string` key
+can be prefixed.
+
+#### IterateAccounts
+
+Here we showcase how to lazily collect values from an Iterator.
+
+
+
+`Keys/Values/KeyValues` fully consume and close the `Iterator`, here we need to explicitly do a `defer iterator.Close()` call.
+
+
+
+`Iterator` also exposes a `Value` and `Key` method to collect only the current value or key, if collecting both is not needed.
+
+
+
+For this `callback` pattern, collections expose a `Walk` API.
+
+
+
+## Composite keys
+
+So far we've worked only with simple keys, like `uint64`, the account address, etc.
+There are some more complex cases in which we need to deal with composite keys.
+
+A key is composite when it is composed of multiple keys, for example bank balances are stored as the composite key
+`(AccAddress, string)` where the first part is the address holding the coins and the second part is the denom.
+
+Example, let's say address `BOB` holds `10atom,15osmo`, this is how it is stored in state:
+
+```javascript
+(bob, atom) => 10
+(bob, osmo) => 15
+```
+
+Now this allows to efficiently get a specific denom balance of an address, by simply `getting` `(address, denom)`, or getting all the balances
+of an address by prefixing over `(address)`.
+
+Let's see now how we can work with composite keys using collections.
+
+### Example
+
+In our example we will showcase how we can use collections when we are dealing with balances, similar to bank,
+a balance is a mapping between `(address, denom) => math.Int` the composite key in our case is `(address, denom)`.
+
+## Instantiation of a composite key collection
+
+```go expandable
+package collections
+
+import (
+
+ "cosmossdk.io/collections"
+ "cosmossdk.io/math"
+ storetypes "cosmossdk.io/store/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+var BalancesPrefix = collections.NewPrefix(1)
+
+type Keeper struct {
+ Schema collections.Schema
+ Balances collections.Map[collections.Pair[sdk.AccAddress, string], math.Int]
+}
+
+func NewKeeper(storeKey *storetypes.KVStoreKey)
+
+Keeper {
+ sb := collections.NewSchemaBuilder(sdk.OpenKVStore(storeKey))
+
+return Keeper{
+ Balances: collections.NewMap(
+ sb, BalancesPrefix, "balances",
+ collections.PairKeyCodec(sdk.AccAddressKey, collections.StringKey),
+ sdk.IntValue,
+ ),
+}
+}
+```
+
+#### The Map Key definition
+
+First of all we can see that in order to define a composite key of two elements we use the `collections.Pair` type:
+
+```go
+collections.Map[collections.Pair[sdk.AccAddress, string], math.Int]
+```
+
+`collections.Pair` defines a key composed of two other keys, in our case the first part is `sdk.AccAddress`, the second
+part is `string`.
+
+#### The Key Codec instantiation
+
+The arguments to instantiate are always the same, the only thing that changes is how we instantiate
+the `KeyCodec`, since this key is composed of two keys we use `collections.PairKeyCodec`, which generates
+a `KeyCodec` composed of two key codecs. The first one will encode the first part of the key, the second one will
+encode the second part of the key.
+
+### Working with composite key collections
+
+Let's expand on the example we used before:
+
+```go expandable
+var BalancesPrefix = collections.NewPrefix(1)
+
+type Keeper struct {
+ Schema collections.Schema
+ Balances collections.Map[collections.Pair[sdk.AccAddress, string], math.Int]
+}
+
+func NewKeeper(storeKey *storetypes.KVStoreKey)
+
+Keeper {
+ sb := collections.NewSchemaBuilder(sdk.OpenKVStore(storeKey))
+
+return Keeper{
+ Balances: collections.NewMap(
+ sb, BalancesPrefix, "balances",
+ collections.PairKeyCodec(sdk.AccAddressKey, collections.StringKey),
+ sdk.IntValue,
+ ),
+}
+}
+
+func (k Keeper)
+
+SetBalance(ctx sdk.Context, address sdk.AccAddress, denom string, amount math.Int)
+
+error {
+ key := collections.Join(address, denom)
+
+return k.Balances.Set(ctx, key, amount)
+}
+
+func (k Keeper)
+
+GetBalance(ctx sdk.Context, address sdk.AccAddress, denom string) (math.Int, error) {
+ return k.Balances.Get(ctx, collections.Join(address, denom))
+}
+
+func (k Keeper)
+
+GetAllAddressBalances(ctx sdk.Context, address sdk.AccAddress) (sdk.Coins, error) {
+ balances := sdk.NewCoins()
+ rng := collections.NewPrefixedPairRange[sdk.AccAddress, string](address)
+
+iter, err := k.Balances.Iterate(ctx, rng)
+ if err != nil {
+ return nil, err
+}
+
+kvs, err := iter.KeyValues()
+ if err != nil {
+ return nil, err
+}
+ for _, kv := range kvs {
+ balances = balances.Add(sdk.NewCoin(kv.Key.K2(), kv.Value))
+}
+
+return balances, nil
+}
+
+func (k Keeper)
+
+GetAllAddressBalancesBetween(ctx sdk.Context, address sdk.AccAddress, startDenom, endDenom string) (sdk.Coins, error) {
+ rng := collections.NewPrefixedPairRange[sdk.AccAddress, string](address).
+ StartInclusive(startDenom).
+ EndInclusive(endDenom)
+
+iter, err := k.Balances.Iterate(ctx, rng)
+ if err != nil {
+ return nil, err
+}
+ ...
+}
+```
+
+#### SetBalance
+
+As we can see here we're setting the balance of an address for a specific denom.
+We use the `collections.Join` function to generate the composite key.
+`collections.Join` returns a `collections.Pair` (which is the key of our `collections.Map`)
+
+`collections.Pair` contains the two keys we have joined, it also exposes two methods: `K1` to fetch the 1st part of the
+key and `K2` to fetch the second part.
+
+As always, we use the `collections.Map.Set` method to map the composite key to our value (`math.Int` in this case)
+
+#### GetBalance
+
+To get a value in composite key collection, we simply use `collections.Join` to compose the key.
+
+#### GetAllAddressBalances
+
+We use `collections.PrefixedPairRange` to iterate over all the keys starting with the provided address.
+Concretely the iteration will report all the balances belonging to the provided address.
+
+The first part is that we instantiate a `PrefixedPairRange`, which is a `Ranger` implementer aimed to help
+in `Pair` keys iterations.
+
+```go
+rng := collections.NewPrefixedPairRange[sdk.AccAddress, string](address)
+```
+
+As we can see here we're passing the type parameters of the `collections.Pair` because golang type inference
+with respect to generics is not as permissive as other languages, so we need to explicitly say what are the types of the pair key.
+
+#### GetAllAddressBalancesBetween
+
+This showcases how we can further specialize our range to limit the results further, by specifying
+the range between the second part of the key (in our case the denoms, which are strings).
+
+## IndexedMap
+
+`collections.IndexedMap` is a collection that uses under the hood a `collections.Map`, and has a struct, which contains the indexes that we need to define.
+
+### Example
+
+Let's say we have an `auth.BaseAccount` struct which looks like the following:
+
+```go
+type BaseAccount struct {
+ AccountNumber uint64 `protobuf:"varint,3,opt,name=account_number,json=accountNumber,proto3" json:"account_number,omitempty"`
+ Sequence uint64 `protobuf:"varint,4,opt,name=sequence,proto3" json:"sequence,omitempty"`
+}
+```
+
+First of all, when we save our accounts in state we map them using a primary key `sdk.AccAddress`.
+If it were to be a `collections.Map` it would be `collections.Map[sdk.AccAddress, authtypes.BaseAccount]`.
+
+Then we also want to be able to get an account not only by its `sdk.AccAddress`, but also by its `AccountNumber`.
+
+So we can say we want to create an `Index` that maps our `BaseAccount` to its `AccountNumber`.
+
+We also know that this `Index` is unique. Unique means that there can only be one `BaseAccount` that maps to a specific
+`AccountNumber`.
+
+First of all, we start by defining the object that contains our index:
+
+```go expandable
+var AccountsNumberIndexPrefix = collections.NewPrefix(1)
+
+type AccountsIndexes struct {
+ Number *indexes.Unique[uint64, sdk.AccAddress, authtypes.BaseAccount]
+}
+
+func NewAccountIndexes(sb *collections.SchemaBuilder)
+
+AccountsIndexes {
+ return AccountsIndexes{
+ Number: indexes.NewUnique(
+ sb, AccountsNumberIndexPrefix, "accounts_by_number",
+ collections.Uint64Key, sdk.AccAddressKey,
+ func(_ sdk.AccAddress, v authtypes.BaseAccount) (uint64, error) {
+ return v.AccountNumber, nil
+},
+ ),
+}
+}
+```
+
+We create an `AccountIndexes` struct which contains a field: `Number`. This field represents our `AccountNumber` index.
+`AccountNumber` is a field of `authtypes.BaseAccount` and it's a `uint64`.
+
+Then we can see in our `AccountIndexes` struct the `Number` field is defined as:
+
+```go
+*indexes.Unique[uint64, sdk.AccAddress, authtypes.BaseAccount]
+```
+
+Where the first type parameter is `uint64`, which is the field type of our index.
+The second type parameter is the primary key `sdk.AccAddress`.
+And the third type parameter is the actual object we're storing `authtypes.BaseAccount`.
+
+Then we create a `NewAccountIndexes` function that instantiates and returns the `AccountsIndexes` struct.
+
+The function takes a `SchemaBuilder`. Then we instantiate our `indexes.Unique`, let's analyze the arguments we pass to
+`indexes.NewUnique`.
+
+#### NOTE: indexes list
+
+The `AccountsIndexes` struct contains the indexes. The `NewIndexedMap` function will infer the indexes from that struct
+using reflection; this happens only at init and is not computationally expensive. In case you want to explicitly declare
+indexes: implement the `Indexes` interface in the `AccountsIndexes` struct:
+
+```go
+func (a AccountsIndexes)
+
+IndexesList() []collections.Index[sdk.AccAddress, authtypes.BaseAccount] {
+ return []collections.Index[sdk.AccAddress, authtypes.BaseAccount]{
+ a.Number
+}
+}
+```
+
+#### Instantiating a `indexes.Unique`
+
+The first three arguments we already know: the `SchemaBuilder`, the `Prefix` which is our index prefix (the partition
+where the index keys relationship for the `Number` index will be maintained), and the human name for the `Number` index.
+
+The fourth argument is `collections.Uint64Key`, which is a key codec to deal with `uint64` keys; we pass that because
+the key we're trying to index is a `uint64` key (the account number). Then we pass as fifth argument the primary key codec,
+which in our case is `sdk.AccAddress` (remember: we're mapping `sdk.AccAddress` => `BaseAccount`).
+
+Then as last parameter we pass a function that: given the `BaseAccount` returns its `AccountNumber`.
+
+After this we can proceed instantiating our `IndexedMap`.
+
+```go expandable
+var AccountsPrefix = collections.NewPrefix(0)
+
+type Keeper struct {
+ Schema collections.Schema
+ Accounts *collections.IndexedMap[sdk.AccAddress, authtypes.BaseAccount, AccountsIndexes]
+}
+
+func NewKeeper(storeKey *storetypes.KVStoreKey, cdc codec.BinaryCodec)
+
+Keeper {
+ sb := collections.NewSchemaBuilder(sdk.OpenKVStore(storeKey))
+
+return Keeper{
+ Accounts: collections.NewIndexedMap(
+ sb, AccountsPrefix, "accounts",
+ sdk.AccAddressKey, codec.CollValue[authtypes.BaseAccount](cdc),
+ NewAccountIndexes(sb),
+ ),
+}
+}
+```
+
+As we can see here what we do, for now, is the same thing as we did for `collections.Map`.
+We pass it the `SchemaBuilder`, the `Prefix` where we plan to store the mapping between `sdk.AccAddress` and `authtypes.BaseAccount`,
+the human name and the respective `sdk.AccAddress` key codec and `authtypes.BaseAccount` value codec.
+
+Then we pass the instantiation of our `AccountIndexes` through `NewAccountIndexes`.
+
+Full example:
+
+```go expandable
+package docs
+
+import (
+
+ "cosmossdk.io/collections"
+ "cosmossdk.io/collections/indexes"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+)
+
+var AccountsNumberIndexPrefix = collections.NewPrefix(1)
+
+type AccountsIndexes struct {
+ Number *indexes.Unique[uint64, sdk.AccAddress, authtypes.BaseAccount]
+}
+
+func (a AccountsIndexes)
+
+IndexesList() []collections.Index[sdk.AccAddress, authtypes.BaseAccount] {
+ return []collections.Index[sdk.AccAddress, authtypes.BaseAccount]{
+ a.Number
+}
+}
+
+func NewAccountIndexes(sb *collections.SchemaBuilder)
+
+AccountsIndexes {
+ return AccountsIndexes{
+ Number: indexes.NewUnique(
+ sb, AccountsNumberIndexPrefix, "accounts_by_number",
+ collections.Uint64Key, sdk.AccAddressKey,
+ func(_ sdk.AccAddress, v authtypes.BaseAccount) (uint64, error) {
+ return v.AccountNumber, nil
+},
+ ),
+}
+}
+
+var AccountsPrefix = collections.NewPrefix(0)
+
+type Keeper struct {
+ Schema collections.Schema
+ Accounts *collections.IndexedMap[sdk.AccAddress, authtypes.BaseAccount, AccountsIndexes]
+}
+
+func NewKeeper(storeKey *storetypes.KVStoreKey, cdc codec.BinaryCodec)
+
+Keeper {
+ sb := collections.NewSchemaBuilder(sdk.OpenKVStore(storeKey))
+
+return Keeper{
+ Accounts: collections.NewIndexedMap(
+ sb, AccountsPrefix, "accounts",
+ sdk.AccAddressKey, codec.CollValue[authtypes.BaseAccount](cdc),
+ NewAccountIndexes(sb),
+ ),
+}
+}
+```
+
+### Working with IndexedMaps
+
+While instantiating `collections.IndexedMap` is tedious, working with them is extremely smooth.
+
+Let's take the full example, and expand it with some use-cases.
+
+```go expandable
+package docs
+
+import (
+
+ "cosmossdk.io/collections"
+ "cosmossdk.io/collections/indexes"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+)
+
+var AccountsNumberIndexPrefix = collections.NewPrefix(1)
+
+type AccountsIndexes struct {
+ Number *indexes.Unique[uint64, sdk.AccAddress, authtypes.BaseAccount]
+}
+
+func (a AccountsIndexes)
+
+IndexesList() []collections.Index[sdk.AccAddress, authtypes.BaseAccount] {
+ return []collections.Index[sdk.AccAddress, authtypes.BaseAccount]{
+ a.Number
+}
+}
+
+func NewAccountIndexes(sb *collections.SchemaBuilder)
+
+AccountsIndexes {
+ return AccountsIndexes{
+ Number: indexes.NewUnique(
+ sb, AccountsNumberIndexPrefix, "accounts_by_number",
+ collections.Uint64Key, sdk.AccAddressKey,
+ func(_ sdk.AccAddress, v authtypes.BaseAccount) (uint64, error) {
+ return v.AccountNumber, nil
+},
+ ),
+}
+}
+
+var AccountsPrefix = collections.NewPrefix(0)
+
+type Keeper struct {
+ Schema collections.Schema
+ Accounts *collections.IndexedMap[sdk.AccAddress, authtypes.BaseAccount, AccountsIndexes]
+}
+
+func NewKeeper(storeKey *storetypes.KVStoreKey, cdc codec.BinaryCodec)
+
+Keeper {
+ sb := collections.NewSchemaBuilder(sdk.OpenKVStore(storeKey))
+
+return Keeper{
+ Accounts: collections.NewIndexedMap(
+ sb, AccountsPrefix, "accounts",
+ sdk.AccAddressKey, codec.CollValue[authtypes.BaseAccount](cdc),
+ NewAccountIndexes(sb),
+ ),
+}
+}
+
+func (k Keeper)
+
+CreateAccount(ctx sdk.Context, addr sdk.AccAddress)
+
+error {
+ nextAccountNumber := k.getNextAccountNumber()
+ newAcc := authtypes.BaseAccount{
+ AccountNumber: nextAccountNumber,
+ Sequence: 0,
+}
+
+return k.Accounts.Set(ctx, addr, newAcc)
+}
+
+func (k Keeper)
+
+RemoveAccount(ctx sdk.Context, addr sdk.AccAddress)
+
+error {
+ return k.Accounts.Remove(ctx, addr)
+}
+
+func (k Keeper)
+
+GetAccountByNumber(ctx sdk.Context, accNumber uint64) (sdk.AccAddress, authtypes.BaseAccount, error) {
+ accAddress, err := k.Accounts.Indexes.Number.MatchExact(ctx, accNumber)
+ if err != nil {
+ return nil, authtypes.BaseAccount{
+}, err
+}
+
+acc, err := k.Accounts.Get(ctx, accAddress)
+
+return accAddress, acc, nil
+}
+
+func (k Keeper)
+
+GetAccountsByNumber(ctx sdk.Context, startAccNum, endAccNum uint64) ([]authtypes.BaseAccount, error) {
+ rng := new(collections.Range[uint64]).
+ StartInclusive(startAccNum).
+ EndInclusive(endAccNum)
+
+iter, err := k.Accounts.Indexes.Number.Iterate(ctx, rng)
+ if err != nil {
+ return nil, err
+}
+
+return indexes.CollectValues(ctx, k.Accounts, iter)
+}
+
+func (k Keeper)
+
+getNextAccountNumber()
+
+uint64 {
+ return 0
+}
+```
+
+## Collections with interfaces as values
+
+Although cosmos-sdk is shifting away from the usage of interface registry, there are still some places where it is used.
+In order to support old code, we have to support collections with interface values.
+
+The generic `codec.CollValue` is not able to handle interface values, so we need to use a special type `codec.CollInterfaceValue`.
+`codec.CollInterfaceValue` takes a `codec.BinaryCodec` as an argument, and uses it to marshal and unmarshal values as interfaces.
+The `codec.CollInterfaceValue` lives in the `codec` package, whose import path is `github.com/cosmos/cosmos-sdk/codec`.
+
+### Instantiating Collections with interface values
+
+In order to instantiate a collection with interface values, we need to use `codec.CollInterfaceValue` instead of `codec.CollValue`.
+
+```go expandable
+package example
+
+import (
+
+ "cosmossdk.io/collections"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+)
+
+var AccountsPrefix = collections.NewPrefix(0)
+
+type Keeper struct {
+ Schema collections.Schema
+ Accounts *collections.Map[sdk.AccAddress, sdk.AccountI]
+}
+
+func NewKeeper(cdc codec.BinaryCodec, storeKey *storetypes.KVStoreKey)
+
+Keeper {
+ sb := collections.NewSchemaBuilder(sdk.OpenKVStore(storeKey))
+
+return Keeper{
+ Accounts: collections.NewMap(
+ sb, AccountsPrefix, "accounts",
+ sdk.AccAddressKey, codec.CollInterfaceValue[sdk.AccountI](cdc),
+ ),
+}
+}
+
+func (k Keeper)
+
+SaveBaseAccount(ctx sdk.Context, account authtypes.BaseAccount)
+
+error {
+ return k.Accounts.Set(ctx, account.GetAddress(), account)
+}
+
+func (k Keeper)
+
+SaveModuleAccount(ctx sdk.Context, account authtypes.ModuleAccount)
+
+error {
+ return k.Accounts.Set(ctx, account.GetAddress(), account)
+}
+
+func (k Keeper)
+
+GetAccount(ctx sdk.Context, addr sdk.AccAddress) (sdk.AccountI, error) {
+ return k.Accounts.Get(ctx, addr)
+}
+```
+
+## Triple key
+
+The `collections.Triple` is a special type of key composed of three keys; it works in the same way as `collections.Pair`.
+
+Let's see an example.
+
+```go expandable
+package example
+
+import (
+
+ "context"
+ "cosmossdk.io/collections"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/codec"
+)
+
+type AccAddress = string
+type ValAddress = string
+
+type Keeper struct {
+ // let's simulate we have redelegations which are stored as a triple key composed of
+ // the delegator, the source validator and the destination validator.
+ Redelegations collections.KeySet[collections.Triple[AccAddress, ValAddress, ValAddress]]
+}
+
+func NewKeeper(storeKey *storetypes.KVStoreKey)
+
+Keeper {
+ sb := collections.NewSchemaBuilder(sdk.OpenKVStore(storeKey))
+
+return Keeper{
+ Redelegations: collections.NewKeySet(sb, collections.NewPrefix(0), "redelegations", collections.TripleKeyCodec(collections.StringKey, collections.StringKey, collections.StringKey)
+}
+}
+
+// RedelegationsByDelegator iterates over all the redelegations of a given delegator and calls onResult providing
+// each redelegation from source validator towards the destination validator.
+func (k Keeper)
+
+RedelegationsByDelegator(ctx context.Context, delegator AccAddress, onResult func(src, dst ValAddress) (stop bool, err error))
+
+error {
+ rng := collections.NewPrefixedTripleRange[AccAddress, ValAddress, ValAddress](delegator)
+
+return k.Redelegations.Walk(ctx, rng, func(key collections.Triple[AccAddress, ValAddress, ValAddress]) (stop bool, err error) {
+ return onResult(key.K2(), key.K3())
+})
+}
+
+// RedelegationsByDelegatorAndValidator iterates over all the redelegations of a given delegator and its source validator and calls onResult for each
+// destination validator.
+func (k Keeper)
+
+RedelegationsByDelegatorAndValidator(ctx context.Context, delegator AccAddress, validator ValAddress, onResult func(dst ValAddress) (stop bool, err error))
+
+error {
+ rng := collections.NewSuperPrefixedTripleRange[AccAddress, ValAddress, ValAddress](delegator, validator)
+
+return k.Redelegations.Walk(ctx, rng, func(key collections.Triple[AccAddress, ValAddress, ValAddress]) (stop bool, err error) {
+ return onResult(key.K3())
+})
+}
+```
+
+## Advanced Usages
+
+### Alternative Value Codec
+
+The `codec.AltValueCodec` allows a collection to decode values using a different codec than the one used to encode them.
+Basically it enables to decode two different byte representations of the same concrete value.
+It can be used to lazily migrate values from one bytes representation to another, as long as the new representation is
+not able to decode the old one.
+
+A concrete example can be found in `x/bank` where the balance was initially stored as `Coin` and then migrated to `Int`.
+
+```go
+var BankBalanceValueCodec = codec.NewAltValueCodec(sdk.IntValue, func(b []byte) (sdk.Int, error) {
+ coin := sdk.Coin{
+}
+ err := coin.Unmarshal(b)
+ if err != nil {
+ return sdk.Int{
+}, err
+}
+
+return coin.Amount, nil
+})
+```
+
+The above example shows how to create an `AltValueCodec` that can decode both `sdk.Int` and `sdk.Coin` values. The provided
+decoder function will be used as a fallback in case the default decoder fails. When the value is encoded back into state
+it will use the default encoder. This allows values to be lazily migrated to a new bytes representation.
diff --git a/sdk/next/build/packages/depinject.mdx b/sdk/next/build/packages/depinject.mdx
new file mode 100644
index 000000000..cb8ab061b
--- /dev/null
+++ b/sdk/next/build/packages/depinject.mdx
@@ -0,0 +1,678 @@
+---
+title: Depinject
+---
+
+> **DISCLAIMER**: This is a **beta** package. The SDK team is actively working on this feature and we are looking for feedback from the community. Please try it out and let us know what you think.
+
+## Overview
+
+`depinject` is a dependency injection (DI) framework for the Cosmos SDK, designed to streamline the process of building and configuring blockchain applications. It works in conjunction with the `core/appconfig` module to replace the majority of boilerplate code in `app.go` with a configuration file in Go, YAML, or JSON format.
+
+`depinject` is particularly useful for developing blockchain applications:
+
+* With multiple interdependent components, modules, or services, helping manage their dependencies effectively.
+* That require decoupling of these components, making it easier to test, modify, or replace individual parts without affecting the entire system.
+* That need to simplify the setup and initialization of modules and their dependencies by reducing boilerplate code and automating dependency management.
+
+By using `depinject`, developers can achieve:
+
+* Cleaner and more organized code.
+
+* Improved modularity and maintainability.
+
+* A more maintainable and modular structure for their blockchain applications, ultimately enhancing development velocity and code quality.
+
+* [Go Doc](https://pkg.go.dev/cosmossdk.io/depinject)
+
+## Usage
+
+The `depinject` framework, based on dependency injection concepts, streamlines the management of dependencies within your blockchain application using its Configuration API. This API offers a set of functions and methods to create easy to use configurations, making it simple to define, modify, and access dependencies and their relationships.
+
+A core component of the [Configuration API](https://pkg.go.dev/github.com/cosmos/cosmos-sdk/depinject#Config) is the `Provide` function, which allows you to register provider functions that supply dependencies. Inspired by constructor injection, these provider functions form the basis of the dependency tree, enabling the management and resolution of dependencies in a structured and maintainable manner. Additionally, `depinject` supports interface types as inputs to provider functions, offering flexibility and decoupling between components, similar to interface injection concepts.
+
+By leveraging `depinject` and its Configuration API, you can efficiently handle dependencies in your blockchain application, ensuring a clean, modular, and well-organized codebase.
+
+Example:
+
+```go expandable
+package main
+
+import (
+
+ "fmt"
+ "cosmossdk.io/depinject"
+)
+
+type AnotherInt int
+
+func GetInt()
+
+int {
+ return 1
+}
+
+func GetAnotherInt()
+
+AnotherInt {
+ return 2
+}
+
+func main() {
+ var (
+ x int
+ y AnotherInt
+ )
+
+fmt.Printf("Before (%v, %v)\n", x, y)
+
+depinject.Inject(
+ depinject.Provide(
+ GetInt,
+ GetAnotherInt,
+ ),
+ &x,
+ &y,
+ )
+
+fmt.Printf("After (%v, %v)\n", x, y)
+}
+```
+
+In this example, `depinject.Provide` registers two provider functions that return `int` and `AnotherInt` values. The `depinject.Inject` function is then used to inject these values into the variables `x` and `y`.
+
+Provider functions serve as the basis for the dependency tree. They are analysed to identify their inputs as dependencies and their outputs as dependents. These dependents can either be used by another provider function or be stored outside the DI container (e.g., `&x` and `&y` in the example above). Provider functions must be exported.
+
+### Interface type resolution
+
+`depinject` supports the use of interface types as inputs to provider functions, which helps decouple dependencies between modules. This approach is particularly useful for managing complex systems with multiple modules, such as the Cosmos SDK, where dependencies need to be flexible and maintainable.
+
+For example, `x/bank` expects an [AccountKeeper](https://pkg.go.dev/github.com/cosmos/cosmos-sdk/x/bank/types#AccountKeeper) interface as [input to ProvideModule](https://github.com/cosmos/cosmos-sdk/tree/release/v0.50.x/x/bank/module.go#L208-L260). `SimApp` uses the implementation in `x/auth`, but the modular design allows for easy changes to the implementation if needed.
+
+Consider the following example:
+
+```go expandable
+package duck
+
+type Duck interface {
+ quack()
+}
+
+type AlsoDuck interface {
+ quack()
+}
+
+type Mallard struct{
+}
+
+type Canvasback struct{
+}
+
+func (duck Mallard)
+
+quack() {
+}
+
+func (duck Canvasback)
+
+quack() {
+}
+
+type Pond struct {
+ Duck AlsoDuck
+}
+```
+
+And the following provider functions:
+
+```go expandable
+func GetMallard()
+
+duck.Mallard {
+ return Mallard{
+}
+}
+
+func GetPond(duck Duck)
+
+Pond {
+ return Pond{
+ Duck: duck
+}
+}
+
+func GetCanvasback()
+
+Canvasback {
+ return Canvasback{
+}
+}
+```
+
+In this example, there's a `Pond` struct that has a `Duck` field of type `AlsoDuck`. The `depinject` framework can automatically resolve the appropriate implementation when there's only one available, as shown below:
+
+```go
+var pond Pond
+
+depinject.Inject(
+ depinject.Provide(
+ GetMallard,
+ GetPond,
+ ),
+ &pond)
+```
+
+This code snippet results in the `Duck` field of `Pond` being implicitly bound to the `Mallard` implementation because it's the only implementation of the `Duck` interface in the container.
+
+However, if there are multiple implementations of the `Duck` interface, as in the following example, you'll encounter an error:
+
+```go
+var pond Pond
+
+depinject.Inject(
+ depinject.Provide(
+ GetMallard,
+ GetCanvasback,
+ GetPond,
+ ),
+ &pond)
+```
+
+A specific binding preference for `Duck` is required.
+
+#### `BindInterface` API
+
+In the above situation registering a binding for a given interface may look like:
+
+```go expandable
+depinject.Inject(
+ depinject.Configs(
+ depinject.BindInterface(
+ "duck/duck.Duck",
+ "duck/duck.Mallard",
+ ),
+ depinject.Provide(
+ GetMallard,
+ GetCanvasback,
+ GetPond,
+ ),
+ ),
+ &pond)
+```
+
+Now `depinject` has enough information to provide `Mallard` as an input to `Pond`.
+
+### Full example in real app
+
+
+When using `depinject.Inject`, the injected types must be pointers.
+
+
+```go expandable
+//go:build !app_v1
+
+package simapp
+
+import (
+
+ "io"
+
+ dbm "github.com/cosmos/cosmos-db"
+
+ clienthelpers "cosmossdk.io/client/v2/helpers"
+ "cosmossdk.io/depinject"
+ "cosmossdk.io/log"
+ storetypes "cosmossdk.io/store/types"
+ circuitkeeper "cosmossdk.io/x/circuit/keeper"
+ evidencekeeper "cosmossdk.io/x/evidence/keeper"
+ feegrantkeeper "cosmossdk.io/x/feegrant/keeper"
+ nftkeeper "cosmossdk.io/x/nft/keeper"
+ upgradekeeper "cosmossdk.io/x/upgrade/keeper"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ "github.com/cosmos/cosmos-sdk/server"
+ "github.com/cosmos/cosmos-sdk/server/api"
+ "github.com/cosmos/cosmos-sdk/server/config"
+ servertypes "github.com/cosmos/cosmos-sdk/server/types"
+ testdata_pulsar "github.com/cosmos/cosmos-sdk/testutil/testdata/testpb"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ "github.com/cosmos/cosmos-sdk/x/auth"
+ "github.com/cosmos/cosmos-sdk/x/auth/ante"
+ authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
+ authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ authzkeeper "github.com/cosmos/cosmos-sdk/x/authz/keeper"
+ bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
+ consensuskeeper "github.com/cosmos/cosmos-sdk/x/consensus/keeper"
+ distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper"
+ epochskeeper "github.com/cosmos/cosmos-sdk/x/epochs/keeper"
+ govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper"
+ groupkeeper "github.com/cosmos/cosmos-sdk/x/group/keeper"
+ mintkeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper"
+ protocolpoolkeeper "github.com/cosmos/cosmos-sdk/x/protocolpool/keeper"
+ slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper"
+ stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper"
+)
+
+// DefaultNodeHome default home directories for the application daemon
+var DefaultNodeHome string
+
+var (
+ _ runtime.AppI = (*SimApp)(nil)
+ _ servertypes.Application = (*SimApp)(nil)
+)
+
+// SimApp extends an ABCI application, but with most of its parameters exported.
+// They are exported for convenience in creating helper functions, as object
+// capabilities aren't needed for testing.
+type SimApp struct {
+ *runtime.App
+ legacyAmino *codec.LegacyAmino
+ appCodec codec.Codec
+ txConfig client.TxConfig
+ interfaceRegistry codectypes.InterfaceRegistry
+
+ // essential keepers
+ AccountKeeper authkeeper.AccountKeeper
+ BankKeeper bankkeeper.BaseKeeper
+ StakingKeeper *stakingkeeper.Keeper
+ SlashingKeeper slashingkeeper.Keeper
+ MintKeeper mintkeeper.Keeper
+ DistrKeeper distrkeeper.Keeper
+ GovKeeper *govkeeper.Keeper
+ UpgradeKeeper *upgradekeeper.Keeper
+ EvidenceKeeper evidencekeeper.Keeper
+ ConsensusParamsKeeper consensuskeeper.Keeper
+ CircuitKeeper circuitkeeper.Keeper
+
+ // supplementary keepers
+ FeeGrantKeeper feegrantkeeper.Keeper
+ GroupKeeper groupkeeper.Keeper
+ AuthzKeeper authzkeeper.Keeper
+ NFTKeeper nftkeeper.Keeper
+ EpochsKeeper epochskeeper.Keeper
+ ProtocolPoolKeeper protocolpoolkeeper.Keeper
+
+ // simulation manager
+ sm *module.SimulationManager
+}
+
+func init() {
+ var err error
+ DefaultNodeHome, err = clienthelpers.GetNodeHomeDirectory(".simapp")
+ if err != nil {
+ panic(err)
+}
+}
+
+// NewSimApp returns a reference to an initialized SimApp.
+func NewSimApp(
+ logger log.Logger,
+ db dbm.DB,
+ traceStore io.Writer,
+ loadLatest bool,
+ appOpts servertypes.AppOptions,
+ baseAppOptions ...func(*baseapp.BaseApp),
+) *SimApp {
+ var (
+ app = &SimApp{
+}
+
+appBuilder *runtime.AppBuilder
+
+ // merge the AppConfig and other configuration in one config
+ appConfig = depinject.Configs(
+ AppConfig,
+ depinject.Supply(
+ // supply the application options
+ appOpts,
+ // supply the logger
+ logger,
+
+ // ADVANCED CONFIGURATION
+
+ //
+ // AUTH
+ //
+ // For providing a custom function required in auth to generate custom account types
+ // add it below. By default the auth module uses simulation.RandomGenesisAccounts.
+ //
+ // authtypes.RandomGenesisAccountsFn(simulation.RandomGenesisAccounts),
+ //
+ // For providing a custom a base account type add it below.
+ // By default the auth module uses authtypes.ProtoBaseAccount().
+ //
+ // func()
+
+sdk.AccountI {
+ return authtypes.ProtoBaseAccount()
+},
+ //
+ // For providing a different address codec, add it below.
+ // By default the auth module uses a Bech32 address codec,
+ // with the prefix defined in the auth module configuration.
+ //
+ // func()
+
+address.Codec {
+ return <- custom address codec type ->
+}
+ //
+ // STAKING
+ //
+ // For providing a different validator and consensus address codec, add it below.
+ // By default the staking module uses the bech32 prefix provided in the auth config,
+ // and appends "valoper" and "valcons" for validator and consensus addresses respectively.
+ // When providing a custom address codec in auth, custom address codecs must be provided here as well.
+ //
+ // func()
+
+runtime.ValidatorAddressCodec {
+ return <- custom validator address codec type ->
+}
+ // func()
+
+runtime.ConsensusAddressCodec {
+ return <- custom consensus address codec type ->
+}
+
+ //
+ // MINT
+ //
+
+ // For providing a custom inflation function for x/mint add here your
+ // custom minting function that implements the mintkeeper.MintFn
+ // interface.
+ ),
+ )
+ )
+ if err := depinject.Inject(appConfig,
+ &appBuilder,
+ &app.appCodec,
+ &app.legacyAmino,
+ &app.txConfig,
+ &app.interfaceRegistry,
+ &app.AccountKeeper,
+ &app.BankKeeper,
+ &app.StakingKeeper,
+ &app.SlashingKeeper,
+ &app.MintKeeper,
+ &app.DistrKeeper,
+ &app.GovKeeper,
+ &app.UpgradeKeeper,
+ &app.AuthzKeeper,
+ &app.EvidenceKeeper,
+ &app.FeeGrantKeeper,
+ &app.GroupKeeper,
+ &app.NFTKeeper,
+ &app.ConsensusParamsKeeper,
+ &app.CircuitKeeper,
+ &app.EpochsKeeper,
+ &app.ProtocolPoolKeeper,
+ ); err != nil {
+ panic(err)
+}
+
+ // Below we could construct and set an application specific mempool and
+ // ABCI 1.0 PrepareProposal and ProcessProposal handlers. These defaults are
+ // already set in the SDK's BaseApp, this shows an example of how to override
+ // them.
+ //
+ // Example:
+ //
+ // app.App = appBuilder.Build(...)
+ // nonceMempool := mempool.NewSenderNonceMempool()
+ // abciPropHandler := NewDefaultProposalHandler(nonceMempool, app.App.BaseApp)
+ //
+ // app.App.BaseApp.SetMempool(nonceMempool)
+ // app.App.BaseApp.SetPrepareProposal(abciPropHandler.PrepareProposalHandler())
+ // app.App.BaseApp.SetProcessProposal(abciPropHandler.ProcessProposalHandler())
+ //
+ // Alternatively, you can construct BaseApp options, append those to
+ // baseAppOptions and pass them to the appBuilder.
+ //
+ // Example:
+ //
+ // prepareOpt = func(app *baseapp.BaseApp) {
+ // abciPropHandler := baseapp.NewDefaultProposalHandler(nonceMempool, app)
+ // app.SetPrepareProposal(abciPropHandler.PrepareProposalHandler())
+ //
+}
+ // baseAppOptions = append(baseAppOptions, prepareOpt)
+
+ // create and set dummy vote extension handler
+ voteExtOp := func(bApp *baseapp.BaseApp) {
+ voteExtHandler := NewVoteExtensionHandler()
+
+voteExtHandler.SetHandlers(bApp)
+}
+
+baseAppOptions = append(baseAppOptions, voteExtOp, baseapp.SetOptimisticExecution())
+
+app.App = appBuilder.Build(db, traceStore, baseAppOptions...)
+
+ // register streaming services
+ if err := app.RegisterStreamingServices(appOpts, app.kvStoreKeys()); err != nil {
+ panic(err)
+}
+
+ /**** Module Options ****/
+
+ // RegisterUpgradeHandlers is used for registering any on-chain upgrades.
+ app.RegisterUpgradeHandlers()
+
+ // add test gRPC service for testing gRPC queries in isolation
+ testdata_pulsar.RegisterQueryServer(app.GRPCQueryRouter(), testdata_pulsar.QueryImpl{
+})
+
+ // create the simulation manager and define the order of the modules for deterministic simulations
+ //
+ // NOTE: this is not required apps that don't use the simulator for fuzz testing
+ // transactions
+ overrideModules := map[string]module.AppModuleSimulation{
+ authtypes.ModuleName: auth.NewAppModule(app.appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, nil),
+}
+
+app.sm = module.NewSimulationManagerFromAppModules(app.ModuleManager.Modules, overrideModules)
+
+app.sm.RegisterStoreDecoders()
+
+ // A custom InitChainer can be set if extra pre-init-genesis logic is required.
+ // By default, when using app wiring enabled module, this is not required.
+ // For instance, the upgrade module will set automatically the module version map in its init genesis thanks to app wiring.
+ // However, when registering a module manually (i.e. that does not support app wiring), the module version map
+ // must be set manually as follow. The upgrade module will de-duplicate the module version map.
+ //
+ // app.SetInitChainer(func(ctx sdk.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) {
+ // app.UpgradeKeeper.SetModuleVersionMap(ctx, app.ModuleManager.GetVersionMap())
+ // return app.App.InitChainer(ctx, req)
+ //
+})
+
+ // set custom ante handler
+ app.setAnteHandler(app.txConfig)
+ if err := app.Load(loadLatest); err != nil {
+ panic(err)
+}
+
+return app
+}
+
+// setAnteHandler sets custom ante handlers.
+// "x/auth/tx" pre-defined ante handler have been disabled in app_config.
+func (app *SimApp)
+
+setAnteHandler(txConfig client.TxConfig) {
+ anteHandler, err := NewAnteHandler(
+ HandlerOptions{
+ ante.HandlerOptions{
+ AccountKeeper: app.AccountKeeper,
+ BankKeeper: app.BankKeeper,
+ SignModeHandler: txConfig.SignModeHandler(),
+ FeegrantKeeper: app.FeeGrantKeeper,
+ SigGasConsumer: ante.DefaultSigVerificationGasConsumer,
+},
+ &app.CircuitKeeper,
+},
+ )
+ if err != nil {
+ panic(err)
+}
+
+ // Set the AnteHandler for the app
+ app.SetAnteHandler(anteHandler)
+}
+
+// LegacyAmino returns SimApp's amino codec.
+//
+// NOTE: This is solely to be used for testing purposes as it may be desirable
+// for modules to register their own custom testing types.
+func (app *SimApp)
+
+LegacyAmino() *codec.LegacyAmino {
+ return app.legacyAmino
+}
+
+// AppCodec returns SimApp's app codec.
+//
+// NOTE: This is solely to be used for testing purposes as it may be desirable
+// for modules to register their own custom testing types.
+func (app *SimApp)
+
+AppCodec()
+
+codec.Codec {
+ return app.appCodec
+}
+
+// InterfaceRegistry returns SimApp's InterfaceRegistry.
+func (app *SimApp)
+
+InterfaceRegistry()
+
+codectypes.InterfaceRegistry {
+ return app.interfaceRegistry
+}
+
+// TxConfig returns SimApp's TxConfig
+func (app *SimApp)
+
+TxConfig()
+
+client.TxConfig {
+ return app.txConfig
+}
+
+// GetKey returns the KVStoreKey for the provided store key.
+//
+// NOTE: This is solely to be used for testing purposes.
+func (app *SimApp)
+
+GetKey(storeKey string) *storetypes.KVStoreKey {
+ sk := app.UnsafeFindStoreKey(storeKey)
+
+kvStoreKey, ok := sk.(*storetypes.KVStoreKey)
+ if !ok {
+ return nil
+}
+
+return kvStoreKey
+}
+
+func (app *SimApp)
+
+kvStoreKeys()
+
+map[string]*storetypes.KVStoreKey {
+ keys := make(map[string]*storetypes.KVStoreKey)
+ for _, k := range app.GetStoreKeys() {
+ if kv, ok := k.(*storetypes.KVStoreKey); ok {
+ keys[kv.Name()] = kv
+}
+
+}
+
+return keys
+}
+
+// SimulationManager implements the SimulationApp interface
+func (app *SimApp)
+
+SimulationManager() *module.SimulationManager {
+ return app.sm
+}
+
+// RegisterAPIRoutes registers all application module routes with the provided
+// API server.
+func (app *SimApp)
+
+RegisterAPIRoutes(apiSvr *api.Server, apiConfig config.APIConfig) {
+ app.App.RegisterAPIRoutes(apiSvr, apiConfig)
+ // register swagger API in app.go so that other applications can override easily
+ if err := server.RegisterSwaggerAPI(apiSvr.ClientCtx, apiSvr.Router, apiConfig.Swagger); err != nil {
+ panic(err)
+}
+}
+
+// GetMaccPerms returns a copy of the module account permissions
+//
+// NOTE: This is solely to be used for testing purposes.
+func GetMaccPerms()
+
+map[string][]string {
+ dup := make(map[string][]string)
+ for _, perms := range moduleAccPerms {
+ dup[perms.Account] = perms.Permissions
+}
+
+return dup
+}
+
+// BlockedAddresses returns all the app's blocked account addresses.
+func BlockedAddresses()
+
+map[string]bool {
+ result := make(map[string]bool)
+ if len(blockAccAddrs) > 0 {
+ for _, addr := range blockAccAddrs {
+ result[addr] = true
+}
+
+}
+
+else {
+ for addr := range GetMaccPerms() {
+ result[addr] = true
+}
+
+}
+
+return result
+}
+```
+
+## Debugging
+
+Issues with resolving dependencies in the container can be debugged with logs and [Graphviz](https://graphviz.org) renderings of the container tree.
+By default, whenever there is an error, logs will be printed to stderr and a rendering of the dependency graph in Graphviz DOT format will be saved to `debug_container.dot`.
+
+Here is an example Graphviz rendering of a successful build of a dependency graph:
+
+
+Rectangles represent functions, ovals represent types, rounded rectangles represent modules and the single hexagon
+represents the function which called `Build`. Black-colored shapes mark functions and types that were called/resolved
+without an error. Gray-colored nodes mark functions and types that could have been called/resolved in the container but
+were left unused.
+
+Here is an example Graphviz rendering of a dependency graph build which failed:
+
+
+Graphviz DOT files can be converted into SVGs for viewing in a web browser using the `dot` command-line tool, ex:
+
+```txt
+dot -Tsvg debug_container.dot > debug_container.svg
+```
+
+Many other tools including some IDEs support working with DOT files.
diff --git a/sdk/next/build/rfc.mdx b/sdk/next/build/rfc.mdx
new file mode 100644
index 000000000..e7cec59ea
--- /dev/null
+++ b/sdk/next/build/rfc.mdx
@@ -0,0 +1,25 @@
+---
+title: "Requests for Comments"
+description: "Version: v0.53"
+---
+
+A Request for Comments (RFC) is a record of discussion on an open-ended topic related to the design and implementation of the Cosmos SDK, for which no immediate decision is required.
+
+The purpose of an RFC is to serve as a historical record of a high-level discussion that might otherwise only be recorded in an ad-hoc way (for example, via gists or Google docs) that are difficult to discover for someone after the fact. An RFC *may* give rise to more specific architectural *decisions* for the Cosmos SDK, but those decisions must be recorded separately in [Architecture Decision Records (ADR)](/sdk/v0.53/build/architecture/README).
+
+As a rule of thumb, if you can articulate a specific question that needs to be answered, write an ADR. If you need to explore the topic and get input from others to know what questions need to be answered, an RFC may be appropriate.
+
+## RFC Content[](#rfc-content "Direct link to RFC Content")
+
+An RFC should provide:
+
+* A **changelog**, documenting when and how the RFC has changed.
+* An **abstract**, briefly summarizing the topic so the reader can quickly tell whether it is relevant to their interest.
+* Any **background** a reader will need to understand and participate in the substance of the discussion (links to other documents are fine here).
+* The **discussion**, the primary content of the document.
+
+The [rfc-template.md](/sdk/v0.53/build/rfc/rfc-template) file includes placeholders for these sections.
+
+## Table of Contents[](#table-of-contents "Direct link to Table of Contents")
+
+* [RFC-001: Tx Validation](/sdk/v0.53/build/rfc/rfc-001-tx-validation)
diff --git a/sdk/next/build/rfc/PROCESS.mdx b/sdk/next/build/rfc/PROCESS.mdx
new file mode 100644
index 000000000..460b83928
--- /dev/null
+++ b/sdk/next/build/rfc/PROCESS.mdx
@@ -0,0 +1,64 @@
+---
+title: RFC Creation Process
+---
+
+1. Copy the `rfc-template.md` file. Use the following filename pattern: `rfc-next_number-title.md`
+2. Create a draft Pull Request if you want to get early feedback.
+3. Make sure the context and the solution are clear and well documented.
+4. Add an entry to a list in the [README](/sdk/v0.50/build/rfc/README) file.
+5. Create a Pull Request to propose a new RFC.
+
+## What is an RFC?
+
+An RFC is a sort of async whiteboarding session. It is meant to replace the need for a distributed team to come together to make a decision. Currently, the Cosmos SDK team and contributors are distributed around the world. The team conducts working groups to have a synchronous discussion and an RFC can be used to capture the discussion for a wider audience to better understand the changes that are coming to the software.
+
+The main difference between an RFC and an ADR is that an RFC is used to come to consensus and circulate information about a potential change or feature, while an ADR is used when there is already consensus on a feature or change. An ADR articulates the agreed changes and requires a lower amount of communication.
+
+## RFC life cycle
+
+RFC creation is an **iterative** process. An RFC is meant as a distributed collaboration session; it may have many comments and is usually produced when no working group or synchronous communication is available.
+
+1. Proposals could start with a new GitHub Issue, be a result of existing Issues or a discussion.
+
+2. An RFC doesn't have to arrive at `main` with an *accepted* status in a single PR. If the motivation is clear and the solution is sound, we SHOULD be able to merge it and keep a *proposed* status. It's preferable to have an iterative approach rather than long, unmerged Pull Requests.
+
+3. If a *proposed* RFC is merged, then it should clearly document outstanding issues either in the RFC document notes or in a GitHub Issue.
+
+4. The PR SHOULD always be merged. In the case of a faulty RFC, we still prefer to merge it with a *rejected* status. The only time the RFC SHOULD NOT be merged is if the author abandons it.
+
+5. Merged RFCs SHOULD NOT be pruned.
+
+6. If there is consensus and enough feedback then the RFC can be accepted.
+
+> Note: An RFC is written when there is no working group or team session on the problem. RFC's are meant as a distributed whiteboarding session. If there is a working group on the proposal there is no need to have an RFC as there is synchronous whiteboarding going on.
+
+### RFC status
+
+Status has two components:
+
+```text
+{CONSENSUS STATUS}
+```
+
+#### Consensus Status
+
+```text
+DRAFT -> PROPOSED -> LAST CALL yyyy-mm-dd -> ACCEPTED | REJECTED -> SUPERSEDED by ADR-xxx
+ \ |
+ \ |
+ v v
+ ABANDONED
+```
+
+* `DRAFT`: \[optional] an ADR which is a work in progress, not yet ready for a general review. This is to present early work and get early feedback in a Draft Pull Request form.
+* `PROPOSED`: an ADR covering a full solution architecture and still in the review - project stakeholders haven't reached agreement yet.
+* `LAST CALL yyyy-mm-dd`: \[optional] a clear notification that we are close to accepting updates. Changing a status to `LAST CALL` means that social consensus (of Cosmos SDK maintainers) has been reached and we still want to give the community time to react or analyze.
+* `ACCEPTED`: ADR which will represent a currently implemented or to be implemented architecture design.
+* `REJECTED`: an ADR can go from PROPOSED or ACCEPTED to REJECTED if the project stakeholders decide so.
+* `SUPERSEDED by ADR-xxx`: ADR which has been superseded by a new ADR.
+* `ABANDONED`: the ADR is no longer pursued by the original authors.
+
+## Language used in RFC
+
+* The background/goal should be written in the present tense.
+* Avoid using the first person.
diff --git a/sdk/next/build/rfc/README.mdx b/sdk/next/build/rfc/README.mdx
new file mode 100644
index 000000000..50b1ca018
--- /dev/null
+++ b/sdk/next/build/rfc/README.mdx
@@ -0,0 +1,40 @@
+---
+title: Requests for Comments
+description: >-
+ A Request for Comments (RFC) is a record of discussion on an open-ended topic
+ related to the design and implementation of the Cosmos SDK, for which no
+ immediate decision is required.
+---
+
+A Request for Comments (RFC) is a record of discussion on an open-ended topic
+related to the design and implementation of the Cosmos SDK, for which no
+immediate decision is required.
+
+The purpose of an RFC is to serve as a historical record of a high-level
+discussion that might otherwise only be recorded in an ad-hoc way (for example,
+via gists or Google docs) that are difficult to discover for someone after the
+fact. An RFC *may* give rise to more specific architectural *decisions* for
+the Cosmos SDK, but those decisions must be recorded separately in
+[Architecture Decision Records (ADR)](/sdk/v0.53/build/architecture/README).
+
+As a rule of thumb, if you can articulate a specific question that needs to be
+answered, write an ADR. If you need to explore the topic and get input from
+others to know what questions need to be answered, an RFC may be appropriate.
+
+## RFC Content
+
+An RFC should provide:
+
+* A **changelog**, documenting when and how the RFC has changed.
+* An **abstract**, briefly summarizing the topic so the reader can quickly tell
+ whether it is relevant to their interest.
+* Any **background** a reader will need to understand and participate in the
+ substance of the discussion (links to other documents are fine here).
+* The **discussion**, the primary content of the document.
+
+The [rfc-template.md](/sdk/v0.50/build/rfc/rfc-template) file includes placeholders for these
+sections.
+
+## Table of Contents
+
+* [RFC-001: Tx Validation](/sdk/v0.50/build/rfc/rfc-001-tx-validation)
diff --git a/sdk/next/build/rfc/rfc-001-tx-validation.mdx b/sdk/next/build/rfc/rfc-001-tx-validation.mdx
new file mode 100644
index 000000000..2105ff191
--- /dev/null
+++ b/sdk/next/build/rfc/rfc-001-tx-validation.mdx
@@ -0,0 +1,28 @@
+---
+title: 'RFC 001: Transaction Validation'
+description: '2023-03-12: Proposed'
+---
+
+## Changelog
+
+* 2023-03-12: Proposed
+
+## Background
+
+Transaction Validation is crucial to a functioning state machine. Within the Cosmos SDK there are two validation flows, one is outside the message server and the other within. The flow outside of the message server is the `ValidateBasic` function. It is called in the antehandler on both `CheckTx` and `DeliverTx`. There is an overhead and sometimes duplication of validation within these two flows. This extra validation provides an additional check before entering the mempool.
+
+With the deprecation of [`GetSigners`](https://github.com/cosmos/cosmos-sdk/issues/11275) we have the optionality to remove [sdk.Msg](https://github.com/cosmos/cosmos-sdk/blob/16a5404f8e00ddcf8857c8a55dca2f7c109c29bc/types/tx_msg.go#L16) and the `ValidateBasic` function.
+
+With the separation of CometBFT and Cosmos-SDK, there is a lack of control of what transactions get broadcasted and included in a block. This extra validation in the antehandler is meant to help in this case. In most cases the transaction is or should be simulated against a node for validation. With this flow transactions will be treated the same.
+
+## Proposal
+
+The acceptance of this RFC would move validation within `ValidateBasic` to the message server in modules, update tutorials and docs to remove mention of using `ValidateBasic` in favour of handling all validation for a message where it is executed.
+
+We can and will still support the `ValidateBasic` function for users and provide an extension interface of the function once `sdk.Msg` is deprecated.
+
+> Note: This is how messages are handled in VMs like Ethereum and CosmWasm.
+
+### Consequences
+
+The consequence of updating the transaction flow is that transactions that may have failed before with the `ValidateBasic` flow will now be included in a block and fees charged.
diff --git a/sdk/next/build/rfc/rfc-template.mdx b/sdk/next/build/rfc/rfc-template.mdx
new file mode 100644
index 000000000..94f13c6a0
--- /dev/null
+++ b/sdk/next/build/rfc/rfc-template.mdx
@@ -0,0 +1,77 @@
+## Changelog
+
+* `{date}`: `{changelog}`
+
+## Background
+
+> The next section is the "Background" section. This section should be at least two paragraphs and can take up to a whole
+> page in some cases. The guiding goal of the background section is: as a newcomer to this project (new employee, team
+> transfer), can I read the background section and follow any links to get the full context of why this change is\
+> necessary?
+>
+> If you can't show a random engineer the background section and have them acquire nearly full context on the necessity
+> for the RFC, then the background section is not full enough. To help achieve this, link to prior RFCs, discussions, and
+> more here as necessary to provide context so you don't have to simply repeat yourself.
+
+## Proposal
+
+> The next required section is "Proposal" or "Goal". Given the background above, this section proposes a solution.
+> This should be an overview of the "how" for the solution, but for details further sections will be used.
+
+## Abandoned Ideas (Optional)
+
+> As RFCs evolve, it is common that there are ideas that are abandoned. Rather than simply deleting them from the
+> document, you should try to organize them into sections that make it clear they're abandoned while explaining why they
+> were abandoned.
+>
+> When sharing your RFC with others or having someone look back on your RFC in the future, it is common to walk the same
+> path and fall into the same pitfalls that we've since matured from. Abandoned ideas are a way to recognize that path
+> and explain the pitfalls and why they were abandoned.
+
+## Decision
+
+> This section describes alternative designs to the chosen design. This section
+> is important and if an ADR does not have any alternatives then it should be
+> considered that the ADR was not thought through.
+
+## Consequences (optional)
+
+> This section describes the resulting context, after applying the decision. All
+> consequences should be listed here, not just the "positive" ones. A particular
+> decision may have positive, negative, and neutral consequences, but all of them
+> affect the team and project in the future.
+
+### Backwards Compatibility
+
+> All ADRs that introduce backwards incompatibilities must include a section
+> describing these incompatibilities and their severity. The ADR must explain
+> how the author proposes to deal with these incompatibilities. ADR submissions
+> without a sufficient backwards compatibility treatise may be rejected outright.
+
+### Positive
+
+> `{positive consequences}`
+
+### Negative
+
+> `{negative consequences}`
+
+### Neutral
+
+> `{neutral consequences}`
+
+### References
+
+> Links to external materials needed to follow the discussion may be added here.
+>
+> In addition, if the discussion in a request for comments leads to any design
+> decisions, it may be helpful to add links to the ADR documents here after the
+> discussion has settled.
+
+## Discussion
+
+> This section contains the core of the discussion.
+>
+> There is no fixed format for this section, but ideally changes to this
+> section should be updated before merging to reflect any discussion that took
+> place on the PR that made those changes.
diff --git a/sdk/next/build/spec.mdx b/sdk/next/build/spec.mdx
new file mode 100644
index 000000000..eb0223572
--- /dev/null
+++ b/sdk/next/build/spec.mdx
@@ -0,0 +1,21 @@
+---
+title: "Specifications"
+description: "Version: v0.53"
+---
+
+This directory contains specifications for the modules of the Cosmos SDK as well as Interchain Standards (ICS) and other specifications.
+
+Cosmos SDK applications hold this state in a Merkle store. Updates to the store may be made during transactions and at the beginning and end of every block.
+
+## Cosmos SDK specifications[](#cosmos-sdk-specifications "Direct link to Cosmos SDK specifications")
+
+* [Store](/sdk/v0.53/build/spec/store/store) - The core Merkle store that holds the state.
+* [Bech32](/sdk/v0.53/build/spec/addresses/bech32) - Address format for Cosmos SDK applications.
+
+## Modules specifications[](#modules-specifications "Direct link to Modules specifications")
+
+Go to the [module directory](/sdk/v0.53/build/modules/modules).
+
+## CometBFT[](#cometbft "Direct link to CometBFT")
+
+For details on the underlying blockchain and p2p protocols, see the [CometBFT specification](https://github.com/cometbft/cometbft/tree/main/spec).
diff --git a/sdk/next/build/spec/README.mdx b/sdk/next/build/spec/README.mdx
new file mode 100644
index 000000000..8f196c2c2
--- /dev/null
+++ b/sdk/next/build/spec/README.mdx
@@ -0,0 +1,26 @@
+---
+title: Specifications
+description: >-
+ This directory contains specifications for the modules of the Cosmos SDK as
+ well as Interchain Standards (ICS) and other specifications.
+---
+
+This directory contains specifications for the modules of the Cosmos SDK as well as Interchain Standards (ICS) and other specifications.
+
+Cosmos SDK applications hold this state in a Merkle store. Updates to
+the store may be made during transactions and at the beginning and end of every
+block.
+
+## Cosmos SDK specifications
+
+* [Store](/sdk/v0.50/learn/advanced/store) - The core Merkle store that holds the state.
+* [Bech32](/sdk/v0.50/build/spec/addresses/bech32) - Address format for Cosmos SDK applications.
+
+## Modules specifications
+
+Go to the [module directory](/sdk/v0.53/build/modules).
+
+## CometBFT
+
+For details on the underlying blockchain and p2p protocols, see
+the [CometBFT specification](https://github.com/cometbft/cometbft/tree/main/spec).
diff --git a/sdk/next/build/spec/SPEC_MODULE.mdx b/sdk/next/build/spec/SPEC_MODULE.mdx
new file mode 100644
index 000000000..417cb1514
--- /dev/null
+++ b/sdk/next/build/spec/SPEC_MODULE.mdx
@@ -0,0 +1,65 @@
+---
+title: Specification of Modules
+description: >-
+ This file intends to outline the common structure for specifications within
+ this directory.
+---
+
+This file intends to outline the common structure for specifications within
+this directory.
+
+## Tense
+
+For consistency, specs should be written in passive present tense.
+
+## Pseudo-Code
+
+Generally, pseudo-code should be minimized throughout the spec. Often, simple
+bulleted-lists which describe a function's operations are sufficient and should
+be considered preferable. In certain instances, due to the complex nature of
+the functionality being described pseudo-code may be the most suitable form of
+specification. In these cases use of pseudo-code is permissible, but should be
+presented in a concise manner, ideally restricted to only the complex
+element as a part of a larger description.
+
+## Common Layout
+
+The following generalized `README` structure should be used to break down
+specifications for modules. The following list is nonbinding and all sections are optional.
+
+* `# {Module Name}` - overview of the module
+* `## Concepts` - describe specialized concepts and definitions used throughout the spec
+* `## State` - specify and describe structures expected to be marshaled into the store, and their keys
+* `## State Transitions` - standard state transition operations triggered by hooks, messages, etc.
+* `## Messages` - specify message structure(s) and expected state machine behavior(s)
+* `## Begin Block` - specify any begin-block operations
+* `## End Block` - specify any end-block operations
+* `## Hooks` - describe available hooks to be called by/from this module
+* `## Events` - list and describe event tags used
+* `## Client` - list and describe CLI commands and gRPC and REST endpoints
+* `## Params` - list all module parameters, their types (in JSON) and examples
+* `## Future Improvements` - describe future improvements of this module
+* `## Tests` - acceptance tests
+* `## Appendix` - supplementary details referenced elsewhere within the spec
+
+### Notation for key-value mapping
+
+Within `## State` the following notation `->` should be used to describe key to
+value mapping:
+
+```text
+key -> value
+```
+
+To represent byte concatenation the `|` may be used. In addition, encoding
+type may be specified, for example:
+
+```text
+0x00 | addressBytes | address2Bytes -> amino(value_object)
+```
+
+Additionally, index mappings may be specified by mapping to the `nil` value, for example:
+
+```text
+0x01 | address2Bytes | addressBytes -> nil
+```
diff --git a/sdk/next/build/spec/SPEC_STANDARD.mdx b/sdk/next/build/spec/SPEC_STANDARD.mdx
new file mode 100644
index 000000000..6d302e03d
--- /dev/null
+++ b/sdk/next/build/spec/SPEC_STANDARD.mdx
@@ -0,0 +1,128 @@
+---
+title: What is an SDK standard?
+---
+
+An SDK standard is a design document describing a particular protocol, standard, or feature expected to be used by the Cosmos SDK. An SDK standard should list the desired properties of the standard, explain the design rationale, and provide a concise but comprehensive technical specification. The primary author is responsible for pushing the proposal through the standardization process, soliciting input and support from the community, and communicating with relevant stakeholders to ensure (social) consensus.
+
+## Sections
+
+An SDK standard consists of:
+
+* a synopsis,
+* overview and basic concepts,
+* technical specification,
+* history log, and
+* copyright notice.
+
+All top-level sections are required. References should be included inline as links, or tabulated at the bottom of the section if necessary. Included subsections should be listed in the order specified below.
+
+### Table Of Contents
+
+Provide a table of contents at the top of the file to help readers.
+
+### Synopsis
+
+The document should include a brief (\~200 word) synopsis providing a high-level description of and rationale for the specification.
+
+### Overview and basic concepts
+
+This section should include a motivation subsection and a definition subsection if required:
+
+* *Motivation* - A rationale for the existence of the proposed feature, or the proposed changes to an existing feature.
+* *Definitions* - A list of new terms or concepts used in the document or required to understand it.
+
+### System model and properties
+
+This section should include an assumption subsection if any, the mandatory properties subsection, and a dependency subsection. Note that the first two subsections are tightly coupled: how to enforce a property will depend directly on the assumptions made. This subsection is important to capture the interactions of the specified feature with the "rest-of-the-world," i.e., with other features of the ecosystem.
+
+* *Assumptions* - A list of any assumptions made by the feature designer. It should capture which features are used by the feature under specification, and what do we expect from them.
+* *Properties* - A list of the desired properties or characteristics of the feature specified, and expected effects or failures when the properties are violated. In case it is relevant, it can also include a list of properties that the feature does not guarantee.
+* *Dependencies* - A list of the features that use the feature under specification and how.
+
+### Technical specification
+
+This is the main section of the document, and should contain protocol documentation, design rationale, required references, and technical details where appropriate.
+The section may have any or all of the following subsections, as appropriate to the particular specification. The API subsection is especially encouraged when appropriate.
+
+* *API* - A detailed description of the feature's API.
+* *Technical Details* - All technical details including syntax, diagrams, semantics, protocols, data structures, algorithms, and pseudocode as appropriate. The technical specification should be detailed enough such that separate correct implementations of the specification without knowledge of each other are compatible.
+* *Backwards Compatibility* - A discussion of compatibility (or lack thereof) with previous feature or protocol versions.
+* *Known Issues* - A list of known issues. This subsection is especially important for specifications of already in-use features.
+* *Example Implementation* - A concrete example implementation or description of an expected implementation to serve as the primary reference for implementers.
+
+### History
+
+A specification should include a history section, listing any inspiring documents and a plaintext log of significant changes.
+
+See an example history section [below](#history-1).
+
+### Copyright
+
+A specification should include a copyright section waiving rights via [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0).
+
+## Formatting
+
+### General
+
+Specifications must be written in GitHub-flavored Markdown.
+
+For a GitHub-flavored Markdown cheat sheet, see [here](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet). For a local Markdown renderer, see [here](https://github.com/joeyespo/grip).
+
+### Language
+
+Specifications should be written in Simple English, avoiding obscure terminology and unnecessary jargon. For excellent examples of Simple English, please see the [Simple English Wikipedia](https://simple.wikipedia.org/wiki/Main_Page).
+
+The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in specifications are to be interpreted as described in [RFC 2119](https://tools.ietf.org/html/rfc2119).
+
+### Pseudocode
+
+Pseudocode in specifications should be language-agnostic and formatted in a simple imperative standard, with line numbers, variables, simple conditional blocks, for loops, and
+English fragments where necessary to explain further functionality such as scheduling timeouts. LaTeX images should be avoided because they are challenging to review in diff form.
+
+Pseudocode for structs can be written in a simple language like TypeScript or golang, as interfaces.
+
+Example Golang pseudocode struct:
+
+```go
+type CacheKVStore interface {
+ cache: map[Key]Value
+ parent: KVStore
+ deleted: Key
+}
+```
+
+Pseudocode for algorithms should be written in simple Golang, as functions.
+
+Example pseudocode algorithm:
+
+```go expandable
+func get(store CacheKVStore, key Key) Value {
+    value = store.cache.get(key)
+    if (value !== null) {
+        return value
+    } else {
+        value = store.parent.get(key)
+        store.cache.set(key, value)
+        return value
+    }
+}
+```
+
+## History
+
+This specification was significantly inspired by and derived from IBC's [ICS](https://github.com/cosmos/ibc/blob/main/spec/ics-001-ics-standard/README.md), which
+was in turn derived from Ethereum's [EIP 1](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1.md).
+
+Nov 24, 2022 - Initial draft finished and submitted as a PR
+
+## Copyright
+
+All content herein is licensed under [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0).
diff --git a/sdk/next/build/spec/_ics/README.mdx b/sdk/next/build/spec/_ics/README.mdx
new file mode 100644
index 000000000..9aabfc7f3
--- /dev/null
+++ b/sdk/next/build/spec/_ics/README.mdx
@@ -0,0 +1,6 @@
+---
+title: Cosmos ICS
+description: ICS030 - Signed Messages
+---
+
+* [ICS030 - Signed Messages](/sdk/v0.50/build/spec/_ics/ics-030-signed-messages)
diff --git a/sdk/next/build/spec/_ics/ics-030-signed-messages.mdx b/sdk/next/build/spec/_ics/ics-030-signed-messages.mdx
new file mode 100644
index 000000000..1b3d2481a
--- /dev/null
+++ b/sdk/next/build/spec/_ics/ics-030-signed-messages.mdx
@@ -0,0 +1,194 @@
+---
+title: 'ICS 030: Cosmos Signed Messages'
+---
+
+> TODO: Replace with valid ICS number and possibly move to new location.
+
+* [Changelog](#changelog)
+* [Abstract](#abstract)
+* [Preliminary](#preliminary)
+* [Specification](#specification)
+* [Future Adaptations](#future-adaptations)
+* [API](#api)
+* [References](#references)
+
+## Status
+
+Proposed.
+
+## Changelog
+
+## Abstract
+
+Having the ability to sign messages off-chain has proven to be a fundamental aspect
+of nearly any blockchain. The notion of signing messages off-chain has many
+added benefits such as saving on computational costs and reducing transaction
+throughput and overhead. Within the context of the Cosmos, some of the major
+applications of signing such data include, but are not limited to, providing a
+cryptographic secure and verifiable means of proving validator identity and
+possibly associating it with some other framework or organization. In addition,
+having the ability to sign Cosmos messages with a Ledger or similar HSM device.
+
+A standardized protocol for hashing, signing, and verifying messages that can be
+implemented by the Cosmos SDK and other third-party organizations is needed. Such a
+standardized protocol subscribes to the following:
+
+* Contains a specification of human-readable and machine-verifiable typed structured data
+* Contains a framework for deterministic and injective encoding of structured data
+* Utilizes cryptographic secure hashing and signing algorithms
+* A framework for supporting extensions and domain separation
+* Is invulnerable to chosen ciphertext attacks
+* Has protection against potentially signing transactions a user did not intend to
+
+This specification is only concerned with the rationale and the standardized
+implementation of Cosmos signed messages. It does **not** concern itself with the
+concept of replay attacks as that will be left up to the higher-level application
+implementation. If you view signed messages in the means of authorizing some
+action or data, then such an application would have to either treat this as
+idempotent or have mechanisms in place to reject known signed messages.
+
+## Preliminary
+
+The Cosmos message signing protocol will be parameterized with a cryptographic
+secure hashing algorithm `SHA-256` and a signing algorithm `S` that contains
+the operations `sign` and `verify` which provide a digital signature over a set
+of bytes and verification of a signature respectively.
+
+Note, our goal here is not to provide context and reasoning about why necessarily
+these algorithms were chosen apart from the fact they are the de facto algorithms
+used in CometBFT and the Cosmos SDK and that they satisfy our needs for such
+cryptographic algorithms such as having resistance to collision and second
+pre-image attacks, as well as being [deterministic](https://en.wikipedia.org/wiki/Hash_function#Determinism) and [uniform](https://en.wikipedia.org/wiki/Hash_function#Uniformity).
+
+## Specification
+
+CometBFT has a well established protocol for signing messages using a canonical
+JSON representation as defined [here](https://github.com/cometbft/cometbft/blob/master/types/canonical.go).
+
+An example of such a canonical JSON structure is CometBFT's vote structure:
+
+```go
+type CanonicalJSONVote struct {
+ ChainID string `json:"@chain_id"`
+ Type string `json:"@type"`
+ BlockID CanonicalJSONBlockID `json:"block_id"`
+ Height int64 `json:"height"`
+ Round int `json:"round"`
+ Timestamp string `json:"timestamp"`
+ VoteType byte `json:"type"`
+}
+```
+
+With such canonical JSON structures, the specification requires that they include
+meta fields: `@chain_id` and `@type`. These meta fields are reserved and must be
+included. They are both of type `string`. In addition, fields must be ordered
+in lexicographically ascending order.
+
+For the purposes of signing Cosmos messages, the `@chain_id` field must correspond
+to the Cosmos chain identifier. The user-agent should **refuse** signing if the
+`@chain_id` field does not match the currently active chain! The `@type` field
+must equal the constant `"message"`. The `@type` field corresponds to the type of
+structure the user will be signing in an application. For now, a user is only
+allowed to sign bytes of valid ASCII text ([see here](https://github.com/cometbft/cometbft/blob/v0.37.0/libs/strings/string.go#L35-L64)).
+However, this will change and evolve to support additional application-specific
+structures that are human-readable and machine-verifiable ([see Future Adaptations](#future-adaptations)).
+
+Thus, we can have a canonical JSON structure for signing Cosmos messages using
+the [JSON schema](http://json-schema.org/) specification as such:
+
+```json expandable
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "$id": "cosmos/signing/typeData/schema",
+ "title": "The Cosmos signed message typed data schema.",
+ "type": "object",
+ "properties": {
+ "@chain_id": {
+ "type": "string",
+ "description": "The corresponding Cosmos chain identifier.",
+ "minLength": 1
+ },
+ "@type": {
+ "type": "string",
+ "description": "The message type. It must be 'message'.",
+ "enum": [
+ "message"
+ ]
+ },
+ "text": {
+ "type": "string",
+ "description": "The valid ASCII text to sign.",
+ "pattern": "^[\\x20-\\x7E]+$",
+ "minLength": 1
+ }
+ },
+ "required": [
+ "@chain_id",
+ "@type",
+ "text"
+ ]
+}
+```
+
+e.g.
+
+```json
+{
+ "@chain_id": "1",
+ "@type": "message",
+ "text": "Hello, you can identify me as XYZ on keybase."
+}
+```
+
+## Future Adaptations
+
+As applications can vary greatly in domain, it will be vital to support both
+domain separation and human-readable and machine-verifiable structures.
+
+Domain separation will allow for application developers to prevent collisions of
+otherwise identical structures. It should be designed to be unique per application
+use and should directly be used in the signature encoding itself.
+
+Human-readable and machine-verifiable structures will allow end users to sign
+more complex structures, apart from just string messages, and still be able to
+know exactly what they are signing (opposed to signing a bunch of arbitrary bytes).
+
+Thus, in the future, the Cosmos signing message specification will be expected
+to expand upon its canonical JSON structure to include such functionality.
+
+## API
+
+Application developers and designers should formalize a standard set of APIs that
+adhere to the following specification:
+
+***
+
+### **cosmosSignBytes**
+
+Params:
+
+* `data`: the Cosmos signed message canonical JSON structure
+* `address`: the Bech32 Cosmos account address to sign data with
+
+Returns:
+
+* `signature`: the Cosmos signature derived using signing algorithm `S`
+
+***
+
+### Examples
+
+Using the `secp256k1` as the DSA, `S`:
+
+```javascript
+data = {
+ "@chain_id": "1",
+ "@type": "message",
+ "text": "I hereby claim I am ABC on Keybase!"
+}
+
+cosmosSignBytes(data, "cosmos1pvsch6cddahhrn5e8ekw0us50dpnugwnlfngt3")
+> "0x7fc4a495473045022100dec81a9820df0102381cdbf7e8b0f1e2cb64c58e0ecda1324543742e0388e41a02200df37905a6505c1b56a404e23b7473d2c0bc5bcda96771d2dda59df6ed2b98f8"
+```
+
+## References
diff --git a/sdk/next/build/spec/addresses/bech32.mdx b/sdk/next/build/spec/addresses/bech32.mdx
new file mode 100644
index 000000000..c9dad426c
--- /dev/null
+++ b/sdk/next/build/spec/addresses/bech32.mdx
@@ -0,0 +1,203 @@
+---
+title: Address Encoding
+---
+
+The Cosmos SDK uses the Bech32 address format for all user-facing addresses. Bech32 encoding provides robust integrity checks through checksums and includes a human-readable prefix (HRP) that provides contextual information about the address type.
+
+## Address Types
+
+The SDK defines three distinct address types, each with its own Bech32 prefix:
+
+| Address Type | Bech32 Prefix | Example | Purpose |
+|--------------|---------------|---------|---------|
+| Account Address | `cosmos` | `cosmos1r5v5sr...` | User accounts, balances, transactions |
+| Validator Operator Address | `cosmosvaloper` | `cosmosvaloper1r5v5sr...` | Validator operator identity, staking operations |
+| Consensus Address | `cosmosvalcons` | `cosmosvalcons1r5v5sr...` | Validator consensus participation, block signing |
+
+Each address type also has a corresponding public key prefix:
+- Account public keys: `cosmospub`
+- Validator public keys: `cosmosvaloperpub`
+- Consensus public keys: `cosmosvalconspub`
+
+## Address Derivation
+
+Addresses are derived from public keys through cryptographic hashing. The process differs based on the key algorithm:
+
+### Secp256k1 Keys (Account Addresses)
+
+Account addresses use Bitcoin-style address derivation:
+
+```
+1. Public Key: 33 bytes (compressed secp256k1 public key)
+2. SHA-256 hash of public key: 32 bytes
+3. RIPEMD-160 hash of result: 20 bytes (final address)
+```
+
+**Implementation:** `crypto/keys/secp256k1/secp256k1.go`
+
+```go
+func (pubKey *PubKey) Address() crypto.Address {
+ sha := sha256.Sum256(pubKey.Key) // Step 1: SHA-256
+ hasherRIPEMD160 := ripemd160.New()
+ hasherRIPEMD160.Write(sha[:])
+ return hasherRIPEMD160.Sum(nil) // Step 2: RIPEMD-160 = 20 bytes
+}
+```
+
+### Ed25519 Keys (Consensus Addresses)
+
+Consensus addresses use truncated SHA-256:
+
+```
+1. Public Key: 32 bytes (Ed25519 public key)
+2. SHA-256 hash, truncated to first 20 bytes
+```
+
+**Implementation:** `crypto/keys/ed25519/ed25519.go`
+
+```go
+func (pubKey *PubKey) Address() crypto.Address {
+ return crypto.Address(tmhash.SumTruncated(pubKey.Key)) // SHA-256-20
+}
+```
+
+## Bech32 Encoding Process
+
+Once address bytes are derived, they're converted to Bech32 format:
+
+**Step 1: Convert from 8-bit to 5-bit encoding**
+
+```go
+// Address bytes (20 bytes = 160 bits)
+addressBytes := []byte{0x12, 0x34, ..., 0xab} // 20 bytes
+
+// Convert to 5-bit groups for Bech32
+converted, _ := bech32.ConvertBits(addressBytes, 8, 5, true)
+```
+
+**Step 2: Encode with Human-Readable Prefix**
+
+```go
+// Combine HRP with converted bytes
+bech32Address, _ := bech32.Encode("cosmos", converted)
+// Result: "cosmos1r5v5srda7xfth3uckstjst6k05kmeyzptewwdk"
+```
+
+**Implementation:** `types/bech32/bech32.go`
+
+## Address Validation
+
+The SDK validates addresses through:
+
+1. **Format validation**: Ensures valid Bech32 encoding
+2. **Prefix validation**: Confirms correct HRP for address type
+3. **Length validation**: Verifies address is exactly 20 bytes when decoded
+
+```go
+func (bc Bech32Codec) StringToBytes(text string) ([]byte, error) {
+ hrp, bz, err := bech32.DecodeAndConvert(text)
+ if err != nil {
+ return nil, err
+ }
+
+ if hrp != bc.Bech32Prefix {
+ return nil, fmt.Errorf("invalid prefix")
+ }
+
+ return bz, sdk.VerifyAddressFormat(bz) // Checks length = 20 bytes
+}
+```
+
+## Module Addresses
+
+Module accounts use deterministic address derivation defined in [ADR-028](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-028-public-key-addresses.md):
+
+```go
+// Module address without derivation keys
+func Module(moduleName string) []byte {
+ return crypto.AddressHash([]byte(moduleName))
+}
+
+// Module address with derivation keys (new method)
+func Module(moduleName string, derivationKeys ...[]byte) []byte {
+ mKey := append([]byte(moduleName), 0) // Null byte separator
+ addr := Hash("module", append(mKey, derivationKeys[0]...))
+ return addr // 32 bytes (not 20 bytes like user addresses)
+}
+```
+
+Module addresses are longer (32 bytes vs 20 bytes) to reduce collision probability.
+
+## Validator Address Relationships
+
+A validator has three related addresses:
+
+1. **Operator Address** (`cosmosvaloper1...`): The validator's operational identity, derived from the operator's account key
+2. **Consensus Address** (`cosmosvalcons1...`): Derived from the validator's consensus public key (Ed25519), used for block signing
+3. **Account Address** (`cosmos1...`): The operator's account for receiving rewards
+
+```go
+// Validator stores its consensus pubkey
+type Validator struct {
+ OperatorAddress string // cosmosvaloper1... (from operator's account)
+ ConsensusPubkey *Any // Ed25519 public key for signing
+ // ...
+}
+
+// Consensus address is derived from the consensus pubkey
+func (v Validator) GetConsAddr() ([]byte, error) {
+ pk := v.ConsensusPubkey.GetCachedValue().(cryptotypes.PubKey)
+ return pk.Address().Bytes(), nil // SHA-256-20 of Ed25519 pubkey
+}
+```
+
+## Performance: Address Caching
+
+The SDK caches Bech32-encoded addresses to optimize repeated conversions:
+
+```go
+var (
+ accAddrCache *simplelru.LRU // 60,000 entries
+ valAddrCache *simplelru.LRU // 500 entries
+ consAddrCache *simplelru.LRU // 500 entries
+)
+```
+
+When `Address.String()` is called, the SDK:
+1. Checks the LRU cache for the encoded address
+2. Returns cached value if found
+3. Otherwise, performs Bech32 encoding and caches the result
+
+This significantly improves performance during block processing and state queries.
+
+## Complete Example
+
+Here's the full pipeline for creating an account address:
+
+```go
+// 1. Generate keypair
+privKey := secp256k1.GenPrivKey() // 32 bytes
+pubKey := privKey.PubKey() // 33 bytes (compressed)
+
+// 2. Derive address bytes
+sha := sha256.Sum256(pubKey.Bytes()) // 32 bytes
+ripemd := ripemd160.Sum(sha[:]) // 20 bytes
+addrBytes := ripemd[:]
+
+// 3. Create AccAddress type
+accAddr := sdk.AccAddress(addrBytes)
+
+// 4. Convert to Bech32 string
+// Internally: bech32.ConvertAndEncode("cosmos", addrBytes)
+addressStr := accAddr.String()
+// Result: "cosmos1r5v5srda7xfth3uckstjst6k05kmeyzptewwdk"
+
+// 5. Use in account
+account := auth.NewBaseAccount(accAddr, pubKey, accountNumber, sequence)
+```
+
+## Related Concepts
+
+- [Accounts](/sdk/v0.53/learn/beginner/accounts) - Understanding account types and management
+- [Store](/sdk/v0.53/learn/advanced/store) - How addresses are used as keys in state storage
+- [Transactions](/sdk/v0.53/learn/advanced/transactions) - How addresses are used in transaction signing
diff --git a/sdk/next/build/spec/store/interblock-cache.mdx b/sdk/next/build/spec/store/interblock-cache.mdx
new file mode 100644
index 000000000..358521e27
--- /dev/null
+++ b/sdk/next/build/spec/store/interblock-cache.mdx
@@ -0,0 +1,314 @@
+---
+title: Inter-block Cache
+---
+
+* [Inter-block Cache](#inter-block-cache)
+ * [Synopsis](#synopsis)
+ * [Overview and basic concepts](#overview-and-basic-concepts)
+ * [Motivation](#motivation)
+ * [Definitions](#definitions)
+ * [System model and properties](#system-model-and-properties)
+ * [Assumptions](#assumptions)
+ * [Properties](#properties)
+ * [Thread safety](#thread-safety)
+ * [Crash recovery](#crash-recovery)
+ * [Iteration](#iteration)
+ * [Technical specification](#technical-specification)
+ * [General design](#general-design)
+ * [API](#api)
+ * [CommitKVCacheManager](#commitkvcachemanager)
+ * [CommitKVStoreCache](#commitkvstorecache)
+ * [Implementation details](#implementation-details)
+ * [History](#history)
+ * [Copyright](#copyright)
+
+## Synopsis
+
+The inter-block cache is an in-memory cache storing (in-most-cases) immutable state that modules need to read in between blocks. When enabled, all sub-stores of a multi store, e.g., `rootmulti`, are wrapped.
+
+## Overview and basic concepts
+
+### Motivation
+
+The goal of the inter-block cache is to allow SDK modules to have fast access to data that is typically queried during the execution of every block. This is data that does not change often, e.g. module parameters. The inter-block cache wraps each `CommitKVStore` of a multi store such as `rootmulti` with a fixed size, write-through cache. Caches are not cleared after a block is committed, as opposed to other caching layers such as `cachekv`.
+
+### Definitions
+
+* `Store key` uniquely identifies a store.
+* `KVCache` is a `CommitKVStore` wrapped with a cache.
+* `Cache manager` is a key component of the inter-block cache responsible for maintaining a map from `store keys` to `KVCaches`.
+
+## System model and properties
+
+### Assumptions
+
+This specification assumes that there exists a cache implementation accessible to the inter-block cache feature.
+
+> The implementation uses an adaptive replacement cache (ARC), an enhancement over the standard least-recently-used (LRU) cache in that it tracks both frequency and recency of use.
+
+The inter-block cache requires the cache implementation to provide methods to create a cache, add a key/value pair, remove a key/value pair and retrieve the value associated to a key. In this specification, we assume that a `Cache` feature offers this functionality through the following methods:
+
+* `NewCache(size int)` creates a new cache with `size` capacity and returns it.
+* `Get(key string)` attempts to retrieve a key/value pair from `Cache`. It returns `(value []byte, success bool)`. If `Cache` contains the key, then `value` contains the associated value and `success=true`. Otherwise, `success=false` and `value` should be ignored.
+* `Add(key string, value []byte)` inserts a key/value pair into the `Cache`.
+* `Remove(key string)` removes the key/value pair identified by `key` from `Cache`.
+
+The specification also assumes that `CommitKVStore` offers the following API:
+
+* `Get(key string)` attempts to retrieve a key/value pair from `CommitKVStore`.
+* `Set(key string, value []byte)` inserts a key/value pair into the `CommitKVStore`.
+* `Delete(key string)` removes the key/value pair identified by `key` from `CommitKVStore`.
+
+> Ideally, both `Cache` and `CommitKVStore` should be specified in a different document and referenced here.
+
+### Properties
+
+#### Thread safety
+
+Accessing the `cache manager` or a `KVCache` is not thread-safe: no method is guarded with a lock.
+Note that this is true even if the cache implementation is thread-safe.
+
+> For instance, assume that two `Set` operations are executed concurrently on the same key, each writing a different value. After both are executed, the cache and the underlying store may be inconsistent, each storing a different value under the same key.
+
+#### Crash recovery
+
+The inter-block cache transparently delegates `Commit()` to its aggregate `CommitKVStore`. If the
+aggregate `CommitKVStore` supports atomic writes and uses them to guarantee that the store is always in a consistent state on disk, the inter-block cache can be transparently moved to a consistent state when a failure occurs.
+
+> Note that this is the case for `IAVLStore`, the preferred `CommitKVStore`. On commit, it calls `SaveVersion()` on the underlying `MutableTree`. `SaveVersion` writes to disk are atomic via batching. This means that only consistent versions of the store (the tree) are written to the disk. Thus, in case of a failure during a `SaveVersion` call, on recovery from disk, the version of the store will be consistent.
+
+#### Iteration
+
+Iteration over each wrapped store is supported via the embedded `CommitKVStore` interface.
+
+## Technical specification
+
+### General design
+
+The inter-block cache feature is composed of two components: `CommitKVCacheManager` and `CommitKVCache`.
+
+`CommitKVCacheManager` implements the cache manager. It maintains a mapping from a store key to a `KVStore`.
+
+```go
+type CommitKVStoreCacheManager interface{
+ cacheSize uint
+ caches map[string]CommitKVStore
+}
+```
+
+`CommitKVStoreCache` implements a `KVStore`: a write-through cache that wraps a `CommitKVStore`. This means that deletes and writes always happen to both the cache and the underlying `CommitKVStore`. Reads on the other hand first hit the internal cache. During a cache miss, the read is delegated to the underlying `CommitKVStore` and cached.
+
+```go
+type CommitKVStoreCache interface{
+ store CommitKVStore
+ cache Cache
+}
+```
+
+To enable inter-block cache on `rootmulti`, one needs to instantiate a `CommitKVCacheManager` and set it by calling `SetInterBlockCache()` before calling one of `LoadLatestVersion()`, `LoadLatestVersionAndUpgrade(...)`, `LoadVersionAndUpgrade(...)` and `LoadVersion(version)`.
+
+### API
+
+#### CommitKVCacheManager
+
+The method `NewCommitKVStoreCacheManager` creates a new cache manager and returns it.
+
+| Name | Type | Description |
+| ---- | ------- | ------------------------------------------------------------------------ |
+| size | integer | Determines the capacity of each of the KVCache maintained by the manager |
+
+```go
+func NewCommitKVStoreCacheManager(size uint)
+
+CommitKVStoreCacheManager {
+ manager = CommitKVStoreCacheManager{
+ size, make(map[string]CommitKVStore)
+}
+
+return manager
+}
+```
+
+`GetStoreCache` returns a cache from the CommitStoreCacheManager for a given store key. If no cache exists for the store key, then one is created and set.
+
+| Name | Type | Description |
+| -------- | --------------------------- | -------------------------------------------------------------------------------------- |
+| manager | `CommitKVStoreCacheManager` | The cache manager |
+| storeKey | string | The store key of the store being retrieved |
+| store | `CommitKVStore` | The store that is cached in case the manager does not have any in its map of caches |
+
+```go expandable
+func GetStoreCache(
+ manager CommitKVStoreCacheManager,
+ storeKey string,
+ store CommitKVStore)
+
+CommitKVStore {
+ if manager.caches.has(storeKey) {
+ return manager.caches.get(storeKey)
+}
+
+else {
+ cache = CommitKVStoreCache{
+ store, manager.cacheSize
+}
+
+manager.set(storeKey, cache)
+
+return cache
+}
+}
+```
+
+`Unwrap` returns the underlying CommitKVStore for a given store key.
+
+| Name | Type | Description |
+| -------- | --------------------------- | ------------------------------------------ |
+| manager | `CommitKVStoreCacheManager` | The cache manager |
+| storeKey | string | The store key of the store being unwrapped |
+
+```go expandable
+func Unwrap(
+ manager CommitKVStoreCacheManager,
+ storeKey string)
+
+CommitKVStore {
+ if manager.caches.has(storeKey) {
+ cache = manager.caches.get(storeKey)
+
+return cache.store
+}
+
+else {
+ return nil
+}
+}
+```
+
+`Reset` resets the manager's map of caches.
+
+| Name | Type | Description |
+| ------- | --------------------------- | ----------------- |
+| manager | `CommitKVStoreCacheManager` | The cache manager |
+
+```go
+func Reset(manager CommitKVStoreCacheManager) {
+ for storeKey := range manager.caches {
+ delete(manager.caches, storeKey)
+}
+```
+
+#### CommitKVStoreCache
+
+`NewCommitKVStoreCache` creates a new `CommitKVStoreCache` and returns it.
+
+| Name | Type | Description |
+| ----- | ------------- | -------------------------------------------------- |
+| store | CommitKVStore | The store to be cached |
+| size | integer | Determines the capacity of the cache being created |
+
+```go
+func NewCommitKVStoreCache(
+ store CommitKVStore,
+ size uint)
+
+CommitKVStoreCache {
+ KVCache = CommitKVStoreCache{
+ store, NewCache(size)
+}
+
+return KVCache
+}
+```
+
+`Get` retrieves a value by key. It first looks in the cache. If the key is not in the cache, the query is delegated to the underlying `CommitKVStore`. In the latter case, the key/value pair is cached. The method returns the value.
+
+| Name | Type | Description |
+| ------- | -------------------- | ------------------------------------------------------------------- |
+| KVCache | `CommitKVStoreCache` | The `CommitKVStoreCache` from which the key/value pair is retrieved |
+| key | string | Key of the key/value pair being retrieved |
+
+```go expandable
+func Get(
+ KVCache CommitKVStoreCache,
+ key string) []byte {
+ valueCache, success := KVCache.cache.Get(key)
+ if success {
+ // cache hit
+ return valueCache
+}
+
+else {
+ // cache miss
+ valueStore = KVCache.store.Get(key)
+
+KVCache.cache.Add(key, valueStore)
+
+return valueStore
+}
+}
+```
+
+`Set` inserts a key/value pair into both the write-through cache and the underlying `CommitKVStore`.
+
+| Name | Type | Description |
+| ------- | -------------------- | ---------------------------------------------------------------- |
+| KVCache | `CommitKVStoreCache` | The `CommitKVStoreCache` to which the key/value pair is inserted |
+| key | string | Key of the key/value pair being inserted |
+| value | \[]byte | Value of the key/value pair being inserted |
+
+```go
+func Set(
+ KVCache CommitKVStoreCache,
+ key string,
+ value []byte) {
+ KVCache.cache.Add(key, value)
+
+KVCache.store.Set(key, value)
+}
+```
+
+`Delete` removes a key/value pair from both the write-through cache and the underlying `CommitKVStore`.
+
+| Name | Type | Description |
+| ------- | -------------------- | ----------------------------------------------------------------- |
+| KVCache | `CommitKVStoreCache` | The `CommitKVStoreCache` from which the key/value pair is deleted |
+| key | string | Key of the key/value pair being deleted |
+
+```go
+func Delete(
+ KVCache CommitKVStoreCache,
+ key string) {
+ KVCache.cache.Remove(key)
+
+KVCache.store.Delete(key)
+}
+```
+
+`CacheWrap` wraps a `CommitKVStoreCache` with another caching layer (`CacheKV`).
+
+> It is unclear whether there is a use case for `CacheWrap`.
+
+| Name | Type | Description |
+| ------- | -------------------- | -------------------------------------- |
+| KVCache | `CommitKVStoreCache` | The `CommitKVStoreCache` being wrapped |
+
+```go
+func CacheWrap(
+ KVCache CommitKVStoreCache) {
+ return CacheKV.NewStore(KVCache)
+}
+```
+
+### Implementation details
+
+The inter-block cache implementation uses a fixed-sized adaptive replacement cache (ARC) as cache. [The ARC implementation](https://github.com/hashicorp/golang-lru/blob/main/arc/arc.go) is thread-safe. ARC is an enhancement over the standard LRU cache in that it tracks both frequency and recency of use. This avoids a burst in access to new entries from evicting the frequently used older entries. It adds some additional tracking overhead to a standard LRU cache, computationally it is roughly `2x` the cost, and the extra memory overhead is linear with the size of the cache. The default cache size is `1000`.
+
+## History
+
+Dec 20, 2022 - Initial draft finished and submitted as a PR
+
+## Copyright
+
+All content herein is licensed under [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0).
diff --git a/sdk/next/build/spec/store/store.mdx b/sdk/next/build/spec/store/store.mdx
new file mode 100644
index 000000000..80e2732b8
--- /dev/null
+++ b/sdk/next/build/spec/store/store.mdx
@@ -0,0 +1,242 @@
+---
+title: Store
+---
+
+The store package defines the interfaces, types and abstractions for Cosmos SDK
+modules to read and write to Merkleized state within a Cosmos SDK application.
+The store package provides many primitives for developers to use in order to
+work with both state storage and state commitment. Below we describe the various
+abstractions.
+
+## Types
+
+### `Store`
+
+The bulk of the store interfaces are defined [here](https://github.com/cosmos/cosmos-sdk/blob/main/store/types/store.go),
+where the base primitive interface, for which other interfaces build off of, is
+the `Store` type. The `Store` interface defines the ability to tell the type of
+the implementing store and the ability to cache wrap via the `CacheWrapper` interface.
+
+### `CacheWrapper` & `CacheWrap`
+
+One of the most important capabilities a store can provide is the ability to
+cache wrap. Cache wrapping is essentially the underlying store wrapping
+itself within another store type that performs caching for both reads and writes
+with the ability to flush writes via `Write()`.
+
+### `KVStore` & `CacheKVStore`
+
+One of the most important interfaces that both developers and modules interface
+with, which also provides the basis of most state storage and commitment operations,
+is the `KVStore`. The `KVStore` interface provides basic CRUD abilities and
+prefix-based iteration, including reverse iteration.
+
+Typically, each module has its own dedicated `KVStore` instance, which it can
+get access to via the `sdk.Context` and the use of a pointer-based named key --
+`KVStoreKey`. The `KVStoreKey` provides pseudo-OCAP. How exactly a `KVStoreKey`
+maps to a `KVStore` will be illustrated below through the `CommitMultiStore`.
+
+Note, a `KVStore` cannot directly commit state. Instead, a `KVStore` can be wrapped
+by a `CacheKVStore` which extends a `KVStore` and provides the ability for the
+caller to execute `Write()` which commits state to the underlying state storage.
+Note, this doesn't actually flush writes to disk as writes are held in memory
+until `Commit()` is called on the `CommitMultiStore`.
+
+### `CommitMultiStore`
+
+The `CommitMultiStore` interface exposes the top-level interface that is used
+to manage state commitment and storage by an SDK application and abstracts the
+concept of multiple `KVStore`s which are used by multiple modules. Specifically,
+it supports the following high-level primitives:
+
+* Allows for a caller to retrieve a `KVStore` by providing a `KVStoreKey`.
+* Exposes pruning mechanisms to remove state pinned against a specific height/version
+ in the past.
+* Allows for loading state storage at a particular height/version in the past to
+ provide current head and historical queries.
+* Provides the ability to rollback state to a previous height/version.
+* Provides the ability to load state storage at a particular height/version
+ while also performing store upgrades, which are used during live hard-fork
+ application state migrations.
+* Provides the ability to commit all current accumulated state to disk and performs
+ Merkle commitment.
+
+## Implementation Details
+
+While there are many interfaces that the `store` package provides, there is
+typically a core implementation for each main interface that modules and
+developers interact with that are defined in the Cosmos SDK.
+
+### `iavl.Store`
+
+The `iavl.Store` provides the core implementation for state storage and commitment
+by implementing the following interfaces:
+
+* `KVStore`
+* `CommitStore`
+* `CommitKVStore`
+* `Queryable`
+* `StoreWithInitialVersion`
+
+It allows for all CRUD operations to be performed along with allowing current
+and historical state queries, prefix iteration, and state commitment along with
+Merkle proof operations. The `iavl.Store` also provides the ability to remove
+historical state from the state commitment layer.
+
+An overview of the IAVL implementation can be found [here](https://github.com/cosmos/iavl/blob/master/docs/overview.md).
+It is important to note that the IAVL store provides both state commitment and
+logical storage operations, which comes with drawbacks as there are various
+performance impacts, some of which are very drastic, when it comes to the
+operations mentioned above.
+
+When dealing with state management in modules and clients, the Cosmos SDK provides
+various layers of abstractions or "store wrapping", where the `iavl.Store` is the
+bottom most layer. When requesting a store to perform reads or writes in a module,
+the typical abstraction layer in order is defined as follows:
+
+```text
+iavl.Store <- cachekv.Store <- gaskv.Store <- cachemulti.Store <- rootmulti.Store
+```
+
+### Concurrent use of IAVL store
+
+The tree under `iavl.Store` is not safe for concurrent use. It is the
+responsibility of the caller to ensure that concurrent access to the store is
+not performed.
+
+The main issue with concurrent use is when data is written at the same time as
+it's being iterated over. Doing so will cause an irrecoverable fatal error because
+of concurrent reads and writes to an internal map.
+
+Although it's not recommended, you can iterate through values while writing to
+it by disabling "FastNode" **without guarantees that the values being written will
+be returned during the iteration** (if you need this, you might want to reconsider
+the design of your application). This is done by setting `iavl-disable-fastnode`
+to `true` in the config TOML file.
+
+### `cachekv.Store`
+
+The `cachekv.Store` store wraps an underlying `KVStore`, typically a `iavl.Store`
+and contains an in-memory cache for storing pending writes to underlying `KVStore`.
+`Set` and `Delete` calls are executed on the in-memory cache, whereas `Has` calls
+are proxied to the underlying `KVStore`.
+
+One of the most important calls to a `cachekv.Store` is `Write()`, which ensures
+that key-value pairs are written to the underlying `KVStore` in a deterministic
+and ordered manner by sorting the keys first. The store keeps track of "dirty"
+keys and uses these to determine what keys to sort. In addition, it also keeps
+track of deleted keys and ensures these are also removed from the underlying
+`KVStore`.
+
+The `cachekv.Store` also provides the ability to perform iteration and reverse
+iteration. Iteration is performed through the `cacheMergeIterator` type and uses
+both the dirty cache and underlying `KVStore` to iterate over key-value pairs.
+
+Note, all calls to CRUD and iteration operations on a `cachekv.Store` are thread-safe.
+
+### `gaskv.Store`
+
+The `gaskv.Store` store provides a simple implementation of a `KVStore`.
+Specifically, it just wraps an existing `KVStore`, such as a cache-wrapped
+`iavl.Store`, and incurs configurable gas costs for CRUD operations via
+`ConsumeGas()` calls defined on the `GasMeter` which exists in a `sdk.Context`
+and then proxies the underlying CRUD call to the underlying store. Note, the
+`GasMeter` is reset on each block.
+
+### `cachemulti.Store` & `rootmulti.Store`
+
+The `rootmulti.Store` acts as an abstraction around a series of stores. Namely,
+it implements the `CommitMultiStore` and `Queryable` interfaces. Through the
+`rootmulti.Store`, an SDK module can request access to a `KVStore` to perform
+state CRUD operations and queries by holding access to a unique `KVStoreKey`.
+
+The `rootmulti.Store` ensures these queries and state operations are performed
+through cached-wrapped instances of `cachekv.Store` which is described above. The
+`rootmulti.Store` implementation is also responsible for committing all accumulated
+state from each `KVStore` to disk and returning an application state Merkle root.
+
+Queries can be performed to return state data along with associated state
+commitment proofs for both previous heights/versions and the current state root.
+Queries are routed based on store name, i.e. a module, along with other parameters
+which are defined in `abci.RequestQuery`.
+
+The `rootmulti.Store` also provides primitives for pruning data at a given
+height/version from state storage. When a height is committed, the `rootmulti.Store`
+will determine if other previous heights should be considered for removal based
+on the operator's pruning settings defined by `PruningOptions`, which defines
+how many recent versions to keep on disk and the interval at which to remove
+"staged" pruned heights from disk. During each interval, the staged heights are
+removed from each `KVStore`. Note, it is up to the underlying `KVStore`
+implementation to determine how pruning is actually performed. The `PruningOptions`
+are defined as follows:
+
+```go
+type PruningOptions struct {
+ // KeepRecent defines how many recent heights to keep on disk.
+ KeepRecent uint64
+
+ // Interval defines when the pruned heights are removed from disk.
+ Interval uint64
+
+ // Strategy defines the kind of pruning strategy. See below for more information on each.
+ Strategy PruningStrategy
+}
+```
+
+The Cosmos SDK defines a preset number of pruning "strategies": `default`, `everything`,
+`nothing`, and `custom`.
+
+It is important to note that the `rootmulti.Store` considers each `KVStore` as a
+separate logical store. In other words, they do not share a Merkle tree or
+comparable data structure. This means that when state is committed via
+`rootmulti.Store`, each store is committed in sequence and thus is not atomic.
+
+In terms of store construction and wiring, each Cosmos SDK application contains
+a `BaseApp` instance which internally has a reference to a `CommitMultiStore`
+that is implemented by a `rootmulti.Store`. The application then registers one or
+more `KVStoreKey` that pertain to a unique module and thus a `KVStore`. Through
+the use of an `sdk.Context` and a `KVStoreKey`, each module can get direct access
+to its respective `KVStore` instance.
+
+Example:
+
+```go expandable
+func NewApp(...)
+
+Application {
+ // ...
+ bApp := baseapp.NewBaseApp(appName, logger, db, txConfig.TxDecoder(), baseAppOptions...)
+
+bApp.SetCommitMultiStoreTracer(traceStore)
+
+bApp.SetVersion(version.Version)
+
+bApp.SetInterfaceRegistry(interfaceRegistry)
+
+ // ...
+ keys := sdk.NewKVStoreKeys(...)
+ transientKeys := sdk.NewTransientStoreKeys(...)
+ memKeys := sdk.NewMemoryStoreKeys(...)
+
+ // ...
+
+ // initialize stores
+ app.MountKVStores(keys)
+
+app.MountTransientStores(transientKeys)
+
+app.MountMemoryStores(memKeys)
+
+ // ...
+}
+```
+
+The `rootmulti.Store` itself can be cache-wrapped which returns an instance of a
+`cachemulti.Store`. For each block, `BaseApp` ensures that the proper abstractions
+are created on the `CommitMultiStore`, i.e. ensuring that the `rootmulti.Store`
+is cached-wrapped and uses the resulting `cachemulti.Store` to be set on the
+`sdk.Context` which is then used for block and transaction execution. As a result,
+all state mutations due to block and transaction execution are actually held
+ephemerally until `Commit()` is called by the ABCI client. This concept is further
+expanded upon when the AnteHandler is executed per transaction to ensure state
+is not committed for transactions that failed CheckTx.
diff --git a/sdk/next/build/tooling.mdx b/sdk/next/build/tooling.mdx
new file mode 100644
index 000000000..22a932ee5
--- /dev/null
+++ b/sdk/next/build/tooling.mdx
@@ -0,0 +1,27 @@
+---
+title: "Tool Guide"
+description: "Version: v0.53"
+---
+
+Essential tools for Cosmos SDK development, from code generation to configuration management. These utilities streamline your development workflow and help maintain production-ready applications.
+
+
+
+ Set up protobuf code generation with Docker images, Makefile commands, and Buf configuration.
+
+
+ Migrate and manage configuration files across SDK versions with automatic migration tooling.
+
+
+ Official JavaScript/TypeScript library for building clients, frontends, and scripts that interact with Cosmos chains.
+
+
+
+## Related Tools
+
+Looking for other SDK tools? Check out these sections:
+
+- **Cosmovisor** - Automated upgrade management → [Run a Node](/sdk/v0.53/build/tooling/cosmovisor)
+- **AutoCLI** - Automatic CLI generation → [In-Depth Concepts](/sdk/v0.53/learn/advanced/autocli)
+- **Depinject** - Dependency injection framework → [SDK Packages](/sdk/v0.53/build/packages/depinject)
+- **Simulation** - Blockchain fuzz testing → [Build a Chain](/sdk/v0.53/learn/advanced/simulation)
diff --git a/sdk/next/build/tooling/confix.mdx b/sdk/next/build/tooling/confix.mdx
new file mode 100644
index 000000000..6913785c7
--- /dev/null
+++ b/sdk/next/build/tooling/confix.mdx
@@ -0,0 +1,157 @@
+---
+title: Confix
+description: >-
+ Confix is a configuration management tool that allows you to manage your
+ configuration via CLI.
+---
+
+`Confix` is a configuration management tool that allows you to manage your configuration via CLI.
+
+It is based on the [CometBFT RFC 019](https://github.com/cometbft/cometbft/blob/5013bc3f4a6d64dcc2bf02ccc002ebc9881c62e4/docs/rfc/rfc-019-config-version.md).
+
+## Installation
+
+### Add Config Command
+
+To add the confix tool, it's required to add the `ConfigCommand` to your application's root command file (e.g. `/cmd/root.go`).
+
+Import the `confixcmd` package:
+
+```go
+import "cosmossdk.io/tools/confix/cmd"
+```
+
+Find the following line:
+
+```go
+initRootCmd(rootCmd, moduleManager)
+```
+
+After that line, add the following:
+
+```go
+rootCmd.AddCommand(
+ confixcmd.ConfigCommand(),
+)
+```
+
+The `ConfigCommand` function builds the `config` root command and is defined in the `confixcmd` package (`cosmossdk.io/tools/confix/cmd`).
+An implementation example can be found in `simapp`.
+
+The command will be available as `simd config`.
+
+
+Using confix directly in the application can have less features than using it standalone.
+This is because confix is versioned with the SDK, while `latest` is the standalone version.
+
+
+### Using Confix Standalone
+
+To use Confix standalone, without having to add it in your application, install it with the following command:
+
+```bash
+go install cosmossdk.io/tools/confix/cmd/confix@latest
+```
+
+Alternatively, for building from source, simply run `make confix`. The binary will be located in `tools/confix`.
+
+## Usage
+
+Use standalone:
+
+```shell
+confix --help
+```
+
+Use in simd:
+
+```shell
+simd config fix --help
+```
+
+### Get
+
+Get a configuration value, e.g.:
+
+```shell
+simd config get app pruning # gets the value pruning from app.toml
+simd config get client chain-id # gets the value chain-id from client.toml
+```
+
+```shell
+confix get ~/.simapp/config/app.toml pruning # gets the value pruning from app.toml
+confix get ~/.simapp/config/client.toml chain-id # gets the value chain-id from client.toml
+```
+
+### Set
+
+Set a configuration value, e.g.:
+
+```shell
+simd config set app pruning "enabled" # sets the value pruning from app.toml
+simd config set client chain-id "foo-1" # sets the value chain-id from client.toml
+```
+
+```shell
+confix set ~/.simapp/config/app.toml pruning "enabled" # sets the value pruning from app.toml
+confix set ~/.simapp/config/client.toml chain-id "foo-1" # sets the value chain-id from client.toml
+```
+
+### Migrate
+
+Migrate a configuration file to a new version. The config type defaults to `app.toml`; to migrate `client.toml` instead, indicate it by adding the optional `--client` parameter, e.g.:
+
+```shell
+simd config migrate v0.50 # migrates defaultHome/config/app.toml to the latest v0.50 config
+simd config migrate v0.50 --client # migrates defaultHome/config/client.toml to the latest v0.50 config
+```
+
+```shell
+confix migrate v0.50 ~/.simapp/config/app.toml # migrate ~/.simapp/config/app.toml to the latest v0.50 config
+confix migrate v0.50 ~/.simapp/config/client.toml --client # migrate ~/.simapp/config/client.toml to the latest v0.50 config
+```
+
+### Diff
+
+Get the diff between a given configuration file and the default configuration file, e.g.:
+
+```shell
+simd config diff v0.47 # gets the diff between defaultHome/config/app.toml and the latest v0.47 config
+simd config diff v0.47 --client # gets the diff between defaultHome/config/client.toml and the latest v0.47 config
+```
+
+```shell
+confix diff v0.47 ~/.simapp/config/app.toml # gets the diff between ~/.simapp/config/app.toml and the latest v0.47 config
+confix diff v0.47 ~/.simapp/config/client.toml --client # gets the diff between ~/.simapp/config/client.toml and the latest v0.47 config
+```
+
+### View
+
+View a configuration file, e.g.:
+
+```shell
+simd config view client # views the current app client config
+```
+
+```shell
+confix view ~/.simapp/config/client.toml # views the current app client config
+```
+
+### Maintainer
+
+At each SDK modification of the default configuration, add the default SDK config under `data/vXX-app.toml`.
+This allows users to use the tool standalone.
+
+### Compatibility
+
+The recommended standalone version is `latest`, which uses the latest development version of Confix.
+
+| SDK Version | Confix Version |
+| ----------- | -------------- |
+| v0.50 | v0.1.x |
+| v0.52 | v0.2.x |
+| v2 | v0.2.x |
+
+## Credits
+
+This project is based on the [CometBFT RFC 019](https://github.com/cometbft/cometbft/blob/5013bc3f4a6d64dcc2bf02ccc002ebc9881c62e4/docs/rfc/rfc-019-config-version.md) and their own never-released implementation of [confix](https://github.com/cometbft/cometbft/blob/v0.36.x/scripts/confix/confix.go).
diff --git a/sdk/next/build/tooling/cosmovisor.mdx b/sdk/next/build/tooling/cosmovisor.mdx
new file mode 100644
index 000000000..96c67d9cf
--- /dev/null
+++ b/sdk/next/build/tooling/cosmovisor.mdx
@@ -0,0 +1,409 @@
+---
+title: Cosmovisor
+---
+
+`cosmovisor` is a process manager for Cosmos SDK application binaries that automates application binary switch at chain upgrades.
+It polls the `upgrade-info.json` file that is created by the x/upgrade module at upgrade height, and then can automatically download the new binary, stop the current binary, switch from the old binary to the new one, and finally restart the node with the new binary.
+
+* [Design](#design)
+* [Contributing](#contributing)
+* [Setup](#setup)
+ * [Installation](#installation)
+ * [Command Line Arguments And Environment Variables](#command-line-arguments-and-environment-variables)
+ * [Folder Layout](#folder-layout)
+* [Usage](#usage)
+ * [Initialization](#initialization)
+ * [Detecting Upgrades](#detecting-upgrades)
+ * [Adding Upgrade Binary](#adding-upgrade-binary)
+ * [Auto-Download](#auto-download)
+ * [Preparing for an Upgrade](#preparing-for-an-upgrade)
+* [Example: SimApp Upgrade](#example-simapp-upgrade)
+ * [Chain Setup](#chain-setup)
+ * [Prepare Cosmovisor and Start the Chain](#prepare-cosmovisor-and-start-the-chain)
+ * [Update App](#update-app)
+
+## Design
+
+Cosmovisor is designed to be used as a wrapper for a `Cosmos SDK` app:
+
+* it will pass arguments to the associated app (configured by `DAEMON_NAME` env variable).
+ Running `cosmovisor run arg1 arg2 ....` will run `app arg1 arg2 ...`;
+* it will manage an app by restarting and upgrading if needed;
+* it is configured using environment variables, not positional arguments.
+
+*Note: If new versions of the application are not set up to run in-place store migrations, migrations will need to be run manually before restarting `cosmovisor` with the new binary. For this reason, we recommend applications adopt in-place store migrations.*
+
+
+Only the latest version of cosmovisor is actively developed/maintained.
+
+
+
+Versions prior to v1.0.0 have a vulnerability that could lead to a DOS. Please upgrade to the latest version.
+
+
+## Contributing
+
+Cosmovisor is part of the Cosmos SDK monorepo, but it's a separate module with its own release schedule.
+
+Release branches have the following format `release/cosmovisor/vA.B.x`, where A and B are a number (e.g. `release/cosmovisor/v1.3.x`). Releases are tagged using the following format: `cosmovisor/vA.B.C`.
+
+## Setup
+
+### Installation
+
+You can download Cosmovisor from the [GitHub releases](https://github.com/cosmos/cosmos-sdk/releases/tag/cosmovisor%2Fv1.5.0).
+
+To install the latest version of `cosmovisor`, run the following command:
+
+```shell
+go install cosmossdk.io/tools/cosmovisor/cmd/cosmovisor@latest
+```
+
+To install a specific version, you can specify the version:
+
+```shell
+go install cosmossdk.io/tools/cosmovisor/cmd/cosmovisor@v1.5.0
+```
+
+Run `cosmovisor version` to check the cosmovisor version.
+
+Alternatively, for building from source, simply run `make cosmovisor`. The binary will be located in `tools/cosmovisor`.
+
+
+Installing cosmovisor using `go install` will display the correct `cosmovisor` version.
+Building from source (`make cosmovisor`) or installing `cosmovisor` by other means won't display the correct version.
+
+
+### Command Line Arguments And Environment Variables
+
+The first argument passed to `cosmovisor` is the action for `cosmovisor` to take. Options are:
+
+* `help`, `--help`, or `-h` - Output `cosmovisor` help information and check your `cosmovisor` configuration.
+* `run` - Run the configured binary using the rest of the provided arguments.
+* `version` - Output the `cosmovisor` version and also run the binary with the `version` argument.
+* `config` - Display the current `cosmovisor` configuration, that means displaying the environment variables value that `cosmovisor` is using.
+* `add-upgrade` - Add an upgrade manually to `cosmovisor`. This command allows you to easily add the binary corresponding to an upgrade in cosmovisor.
+
+All arguments passed to `cosmovisor run` will be passed to the application binary (as a subprocess). `cosmovisor` will return `/dev/stdout` and `/dev/stderr` of the subprocess as its own. For this reason, `cosmovisor run` cannot accept any command-line arguments other than those available to the application binary.
+
+`cosmovisor` reads its configuration from environment variables, or its configuration file (use `--cosmovisor-config `):
+
+* `DAEMON_HOME` is the location where the `cosmovisor/` directory is kept that contains the genesis binary, the upgrade binaries, and any additional auxiliary files associated with each binary (e.g. `$HOME/.gaiad`, `$HOME/.regend`, `$HOME/.simd`, etc.).
+* `DAEMON_NAME` is the name of the binary itself (e.g. `gaiad`, `regend`, `simd`, etc.).
+* `DAEMON_ALLOW_DOWNLOAD_BINARIES` (*optional*), if set to `true`, will enable auto-downloading of new binaries (for security reasons, this is intended for full nodes rather than validators). By default, `cosmovisor` will not auto-download new binaries.
+* `DAEMON_DOWNLOAD_MUST_HAVE_CHECKSUM` (*optional*, default = `false`), if `true` cosmovisor will require that a checksum is provided in the upgrade plan for the binary to be downloaded. If `false`, cosmovisor will not require a checksum to be provided, but still check the checksum if one is provided.
+* `DAEMON_RESTART_AFTER_UPGRADE` (*optional*, default = `true`), if `true`, restarts the subprocess with the same command-line arguments and flags (but with the new binary) after a successful upgrade. Otherwise (`false`), `cosmovisor` stops running after an upgrade and requires the system administrator to manually restart it. Note restart is only after the upgrade and does not auto-restart the subprocess after an error occurs.
+* `DAEMON_RESTART_DELAY` (*optional*, default none), allow a node operator to define a delay between the node halt (for upgrade) and backup by the specified time. The value must be a duration (e.g. `1s`).
+* `DAEMON_SHUTDOWN_GRACE` (*optional*, default none), if set, send interrupt to binary and wait the specified time to allow for cleanup/cache flush to disk before sending the kill signal. The value must be a duration (e.g. `1s`).
+* `DAEMON_POLL_INTERVAL` (*optional*, default 300 milliseconds), is the interval length for polling the upgrade plan file. The value must be a duration (e.g. `1s`).
+* `DAEMON_DATA_BACKUP_DIR` option to set a custom backup directory. If not set, `DAEMON_HOME` is used.
+* `UNSAFE_SKIP_BACKUP` (defaults to `false`), if set to `true`, upgrades directly without performing a backup. Otherwise (`false`, default) backs up the data before trying the upgrade. The default value of false is useful and recommended in case of failures and when a backup needed to rollback. We recommend using the default backup option `UNSAFE_SKIP_BACKUP=false`.
+* `DAEMON_PREUPGRADE_MAX_RETRIES` (defaults to `0`). The maximum number of times to call [`pre-upgrade`](/sdk/v0.53/build/building-apps/app-upgrade#pre-upgrade-handling) in the application after exit status of `31`. After the maximum number of retries, Cosmovisor fails the upgrade.
+* `COSMOVISOR_DISABLE_LOGS` (defaults to `false`). If set to true, this will disable Cosmovisor logs (but not the underlying process) completely. This may be useful, for example, when a Cosmovisor subcommand you are executing returns a valid JSON you are then parsing, as logs added by Cosmovisor make this output not a valid JSON.
+* `COSMOVISOR_COLOR_LOGS` (defaults to `true`). If set to true, this will colorize Cosmovisor logs (but not the underlying process).
+* `COSMOVISOR_TIMEFORMAT_LOGS` (defaults to `kitchen`). If set to a value (`layout|ansic|unixdate|rubydate|rfc822|rfc822z|rfc850|rfc1123|rfc1123z|rfc3339|rfc3339nano|kitchen`), this will add timestamp prefix to Cosmovisor logs (but not the underlying process).
+* `COSMOVISOR_CUSTOM_PREUPGRADE` (defaults to \`\`). If set, this will run $DAEMON\_HOME/cosmovisor/$COSMOVISOR\_CUSTOM\_PREUPGRADE prior to upgrade with the arguments \[ upgrade.Name, upgrade.Height ]. Executes a custom script (separate and prior to the chain daemon pre-upgrade command)
+* `COSMOVISOR_DISABLE_RECASE` (defaults to `false`). If set to true, the upgrade directory will be expected to match the upgrade plan name without any case changes.
+
+### Folder Layout
+
+`$DAEMON_HOME/cosmovisor` is expected to belong completely to `cosmovisor` and the subprocesses that are controlled by it. The folder content is organized as follows:
+
+```text expandable
+.
+├── current -> genesis or upgrades/
+├── genesis
+│ └── bin
+│ └── $DAEMON_NAME
+└── upgrades
+│ └──
+│ ├── bin
+│ │ └── $DAEMON_NAME
+│ └── upgrade-info.json
+└── preupgrade.sh (optional)
+```
+
+The `cosmovisor/` directory includes a subdirectory for each version of the application (i.e. `genesis` or `upgrades/`). Within each subdirectory is the application binary (i.e. `bin/$DAEMON_NAME`) and any additional auxiliary files associated with each binary. `current` is a symbolic link to the currently active directory (i.e. `genesis` or `upgrades/`). The `name` variable in `upgrades/` is the lowercased URI-encoded name of the upgrade as specified in the upgrade module plan. Note that upgrade name paths are normalized to lowercase: for instance, `MyUpgrade` is normalized to `myupgrade`, and its path is `upgrades/myupgrade`.
+
+Please note that `$DAEMON_HOME/cosmovisor` only stores the *application binaries*. The `cosmovisor` binary itself can be stored in any typical location (e.g. `/usr/local/bin`). The application will continue to store its data in the default data directory (e.g. `$HOME/.simapp`) or the data directory specified with the `--home` flag. `$DAEMON_HOME` is dependent on the data directory and must be set to the same directory as the data directory. You will end up with a configuration like the following:
+
+```text
+.simapp
+├── config
+├── data
+└── cosmovisor
+```
+
+## Usage
+
+The system administrator is responsible for:
+
+* installing the `cosmovisor` binary
+* configuring the host's init system (e.g. `systemd`, `launchd`, etc.)
+* appropriately setting the environmental variables
+* creating the `/cosmovisor` directory
+* creating the `/cosmovisor/genesis/bin` folder
+* creating the `/cosmovisor/upgrades//bin` folders
+* placing the different versions of the `` executable in the appropriate `bin` folders.
+
+`cosmovisor` will set the `current` link to point to `genesis` at first start (i.e. when no `current` link exists) and then handle switching binaries at the correct points in time so that the system administrator can prepare days in advance and relax at upgrade time.
+
+In order to support downloadable binaries, a tarball for each upgrade binary will need to be packaged up and made available through a canonical URL. Additionally, a tarball that includes the genesis binary and all available upgrade binaries can be packaged up and made available so that all the necessary binaries required to sync a fullnode from start can be easily downloaded.
+
+The `DAEMON` specific code and operations (e.g. CometBFT config, the application db, syncing blocks, etc.) all work as expected. The application binaries' directives such as command-line flags and environment variables also work as expected.
+
+### Initialization
+
+The `cosmovisor init ` command creates the folder structure required for using cosmovisor.
+
+It does the following:
+
+* creates the `/cosmovisor` folder if it doesn't yet exist
+* creates the `/cosmovisor/genesis/bin` folder if it doesn't yet exist
+* copies the provided executable file to `/cosmovisor/genesis/bin/`
+* creates the `current` link, pointing to the `genesis` folder
+
+It uses the `DAEMON_HOME` and `DAEMON_NAME` environment variables for folder location and executable name.
+
+The `cosmovisor init` command is specifically for initializing cosmovisor, and should not be confused with a chain's `init` command (e.g. `cosmovisor run init`).
+
+### Detecting Upgrades
+
+`cosmovisor` is polling the `$DAEMON_HOME/data/upgrade-info.json` file for new upgrade instructions. The file is created by the x/upgrade module in `BeginBlocker` when an upgrade is detected and the blockchain reaches the upgrade height.
+The following heuristic is applied to detect the upgrade:
+
+* When starting, `cosmovisor` doesn't know much about the currently running upgrade, except the binary which is `current/bin/`. It tries to read the `current/upgrade-info.json` file to get information about the current upgrade name.
+* If neither `cosmovisor/current/upgrade-info.json` nor `data/upgrade-info.json` exist, then `cosmovisor` will wait for `data/upgrade-info.json` file to trigger an upgrade.
+* If `cosmovisor/current/upgrade-info.json` doesn't exist but `data/upgrade-info.json` exists, then `cosmovisor` assumes that whatever is in `data/upgrade-info.json` is a valid upgrade request. In this case `cosmovisor` tries immediately to make an upgrade according to the `name` attribute in `data/upgrade-info.json`.
+* Otherwise, `cosmovisor` waits for changes in `upgrade-info.json`. As soon as a new upgrade name is recorded in the file, `cosmovisor` will trigger an upgrade mechanism.
+
+When the upgrade mechanism is triggered, `cosmovisor` will:
+
+1. if `DAEMON_ALLOW_DOWNLOAD_BINARIES` is enabled, start by auto-downloading a new binary into `cosmovisor//bin` (where `` is the `upgrade-info.json:name` attribute);
+2. update the `current` symbolic link to point to the new directory and save `data/upgrade-info.json` to `cosmovisor/current/upgrade-info.json`.
+
+### Adding Upgrade Binary
+
+`cosmovisor` has an `add-upgrade` command that allows to easily link a binary to an upgrade. It creates a new folder in `cosmovisor/upgrades/` and copies the provided executable file to `cosmovisor/upgrades//bin/`.
+
+Using the `--upgrade-height` flag allows you to specify at which height the binary should be switched, without going via a governance proposal.
+This enables support for emergency coordinated upgrades where the binary must be switched at a specific height, but there is no time to go through a governance proposal.
+
+
+`--upgrade-height` creates an `upgrade-info.json` file. This means if a chain upgrade via governance proposal is executed before the specified height with `--upgrade-height`, the governance proposal will overwrite the `upgrade-info.json` plan created by `add-upgrade --upgrade-height `.
+Take this into consideration when using `--upgrade-height`.
+
+
+### Auto-Download
+
+Generally, `cosmovisor` requires that the system administrator place all relevant binaries on disk before the upgrade happens. However, for people who don't need such control and want an automated setup (maybe they are syncing a non-validating fullnode and want to do little maintenance), there is another option.
+
+**NOTE: we don't recommend using auto-download** because it doesn't verify in advance if a binary is available. If there will be any issue with downloading a binary, the cosmovisor will stop and won't restart an App (which could lead to a chain halt).
+
+If `DAEMON_ALLOW_DOWNLOAD_BINARIES` is set to `true`, and no local binary can be found when an upgrade is triggered, `cosmovisor` will attempt to download and install the binary itself based on the instructions in the `info` attribute in the `data/upgrade-info.json` file. The file is constructed by the x/upgrade module and contains data from the upgrade `Plan` object. The `Plan` has an info field that is expected to have one of the following two valid formats to specify a download:
+
+1. Store an os/architecture -> binary URI map in the upgrade plan info field as JSON under the `"binaries"` key. For example:
+
+ ```json
+ {
+ "binaries": {
+ "linux/amd64": "https://example.com/gaia.zip?checksum=sha256:aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f"
+ }
+ }
+ ```
+
+ You can include multiple binaries at once to ensure more than one environment will receive the correct binaries:
+
+ ```json
+ {
+ "binaries": {
+ "linux/amd64": "https://example.com/gaia.zip?checksum=sha256:aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f",
+ "linux/arm64": "https://example.com/gaia.zip?checksum=sha256:aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f",
+ "darwin/amd64": "https://example.com/gaia.zip?checksum=sha256:aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f"
+ }
+ }
+ ```
+
+ When submitting this as a proposal ensure there are no spaces. An example command using `gaiad` could look like:
+
+ ```shell expandable
+ > gaiad tx upgrade software-upgrade Vega \
+ --title Vega \
+ --deposit 100uatom \
+ --upgrade-height 7368420 \
+ --upgrade-info '{"binaries":{"linux/amd64":"https://github.com/cosmos/gaia/releases/download/v6.0.0-rc1/gaiad-v6.0.0-rc1-linux-amd64","linux/arm64":"https://github.com/cosmos/gaia/releases/download/v6.0.0-rc1/gaiad-v6.0.0-rc1-linux-arm64","darwin/amd64":"https://github.com/cosmos/gaia/releases/download/v6.0.0-rc1/gaiad-v6.0.0-rc1-darwin-amd64"}}' \
+ --summary "upgrade to Vega" \
+ --gas 400000 \
+ --from user \
+ --chain-id test \
+ --home test/val2 \
+ --node tcp://localhost:36657 \
+ --yes
+ ```
+
+2. Store a link to a file that contains all information in the above format (e.g. if you want to specify lots of binaries, changelog info, etc. without filling up the blockchain). For example:
+
+ ```text
+ https://example.com/testnet-1001-info.json?checksum=sha256:deaaa99fda9407c4dbe1d04bd49bab0cc3c1dd76fa392cd55a9425be074af01e
+ ```
+
+When `cosmovisor` is triggered to download the new binary, `cosmovisor` will parse the `"binaries"` field, download the new binary with [go-getter](https://github.com/hashicorp/go-getter), and unpack the new binary in the `upgrades/` folder so that it can be run as if it was installed manually.
+
+Note that for this mechanism to provide strong security guarantees, all URLs should include a SHA 256/512 checksum. This ensures that no false binary is run, even if someone hacks the server or hijacks the DNS. `go-getter` will always ensure the downloaded file matches the checksum if it is provided. `go-getter` will also handle unpacking archives into directories (in this case the download link should point to a `zip` file of all data in the `bin` directory).
+
+To properly create a sha256 checksum on linux, you can use the `sha256sum` utility. For example:
+
+```shell
+sha256sum ./testdata/repo/zip_directory/autod.zip
+```
+
+The result will look something like the following: `29139e1381b8177aec909fab9a75d11381cab5adf7d3af0c05ff1c9c117743a7`.
+
+You can also use `sha512sum` if you would prefer to use longer hashes, or `md5sum` if you would prefer to use broken hashes. Whichever you choose, make sure to set the hash algorithm properly in the checksum argument to the URL.
+
+### Preparing for an Upgrade
+
+To prepare for an upgrade, use the `prepare-upgrade` command:
+
+```shell
+cosmovisor prepare-upgrade
+```
+
+This command performs the following actions:
+
+1. Retrieves upgrade information directly from the blockchain about the next scheduled upgrade.
+2. Downloads the new binary specified in the upgrade plan.
+3. Verifies the binary's checksum (if required by configuration).
+4. Places the new binary in the appropriate directory for Cosmovisor to use during the upgrade.
+
+The `prepare-upgrade` command provides detailed logging throughout the process, including:
+
+* The name and height of the upcoming upgrade
+* The URL from which the new binary is being downloaded
+* Confirmation of successful download and verification
+* The path where the new binary has been placed
+
+Example output:
+
+```bash
+INFO Preparing for upgrade name=v1.0.0 height=1000000
+INFO Downloading upgrade binary url=https://example.com/binary/v1.0.0?checksum=sha256:339911508de5e20b573ce902c500ee670589073485216bee8b045e853f24bce8
+INFO Upgrade preparation complete name=v1.0.0 height=1000000
+```
+
+*Note: The current way of downloading manually and placing the binary at the right place would still work.*
+
+## Example: SimApp Upgrade
+
+The following instructions provide a demonstration of `cosmovisor` using the simulation application (`simapp`) shipped with the Cosmos SDK's source code. The following commands are to be run from within the `cosmos-sdk` repository.
+
+### Chain Setup
+
+Let's create a new chain using the `v0.47.4` version of simapp (the Cosmos SDK demo app):
+
+```shell
+git checkout v0.47.4
+make build
+```
+
+Clean `~/.simapp` (never do this in a production environment):
+
+```shell
+./build/simd tendermint unsafe-reset-all
+```
+
+Set up app config:
+
+```shell
+./build/simd config chain-id test
+./build/simd config keyring-backend test
+./build/simd config broadcast-mode sync
+```
+
+Initialize the node and overwrite any previous genesis file (never do this in a production environment):
+
+```shell
+./build/simd init test --chain-id test --overwrite
+```
+
+For the sake of this demonstration, amend `voting_period` in `genesis.json` to a reduced time of 20 seconds (`20s`):
+
+```shell
+cat <<< $(jq '.app_state.gov.params.voting_period = "20s"' $HOME/.simapp/config/genesis.json) > $HOME/.simapp/config/genesis.json
+```
+
+Create a validator, and setup genesis transaction:
+
+```shell
+./build/simd keys add validator
+./build/simd genesis add-genesis-account validator 1000000000stake --keyring-backend test
+./build/simd genesis gentx validator 1000000stake --chain-id test
+./build/simd genesis collect-gentxs
+```
+
+#### Prepare Cosmovisor and Start the Chain
+
+Set the required environment variables:
+
+```shell
+export DAEMON_NAME=simd
+export DAEMON_HOME=$HOME/.simapp
+```
+
+Set the optional environment variable to trigger an automatic app restart:
+
+```shell
+export DAEMON_RESTART_AFTER_UPGRADE=true
+```
+
+Initialize cosmovisor with the current binary:
+
+```shell
+cosmovisor init ./build/simd
+```
+
+Now you can run cosmovisor with simapp v0.47.4:
+
+```shell
+cosmovisor run start
+```
+
+### Update App
+
+Update app to the latest version (e.g. v0.50.0).
+
+
+
+Migration plans are defined using the `x/upgrade` module and described in [In-Place Store Migrations](https://github.com/cosmos/cosmos-sdk/blob/main/docs/learn/advanced/15-upgrade.md). Migrations can perform any deterministic state change.
+
+The migration plan to upgrade the simapp from v0.47 to v0.50 is defined in `simapp/upgrade.go`.
+
+
+
+Build the new version `simd` binary:
+
+```shell
+make build
+```
+
+Add the new `simd` binary and the upgrade name:
+
+
+
+The migration name must match the one defined in the migration plan.
+
+
+
+```shell
+cosmovisor add-upgrade v047-to-v050 ./build/simd
+```
+
+Open a new terminal window and submit an upgrade proposal along with a deposit and a vote (these commands must be run within 20 seconds of each other):
+
+```shell
+./build/simd tx upgrade software-upgrade v047-to-v050 --title upgrade --summary upgrade --upgrade-height 200 --upgrade-info "{}" --no-validate --from validator --yes
+./build/simd tx gov deposit 1 10000000stake --from validator --yes
+./build/simd tx gov vote 1 yes --from validator --yes
+```
+
+The upgrade will occur automatically at height 200. Note: you may need to change the upgrade height in the snippet above if your test play takes more time.
diff --git a/sdk/next/build/tooling/protobuf.mdx b/sdk/next/build/tooling/protobuf.mdx
new file mode 100644
index 000000000..a151dfdb9
--- /dev/null
+++ b/sdk/next/build/tooling/protobuf.mdx
@@ -0,0 +1,808 @@
+---
+title: Protocol Buffers
+description: >-
+  The Cosmos SDK uses protocol buffers extensively. This document provides
+  a guide on how they are used in the cosmos-sdk.
+---
+
+The Cosmos SDK uses protocol buffers extensively. This document provides a guide on how they are used in the cosmos-sdk.
+
+To generate the proto files, the Cosmos SDK uses a Docker image. This image is available for anyone to use. The latest version is `ghcr.io/cosmos/proto-builder:0.12.x`
+
+Below is an example of the Cosmos SDK's commands for generating, linting, and formatting protobuf files, which can be reused in any application's Makefile.
+
+```make expandable
+#!/usr/bin/make -f
+
+PACKAGES_NOSIMULATION=$(shell go list ./... | grep -v '/simulation')
+
+PACKAGES_SIMTEST=$(shell go list ./... | grep '/simulation')
+
+export VERSION := $(shell echo $(shell git describe --tags --always --match "v*") | sed 's/^v//')
+
+export CMTVERSION := $(shell go list -m github.com/cometbft/cometbft | sed 's:.* ::')
+
+export COMMIT := $(shell git log -1 --format='%H')
+
+LEDGER_ENABLED ?= true
+BINDIR ?= $(GOPATH)/bin
+BUILDDIR ?= $(CURDIR)/build
+SIMAPP = ./simapp
+MOCKS_DIR = $(CURDIR)/tests/mocks
+HTTPS_GIT := https://github.com/cosmos/cosmos-sdk.git
+DOCKER := $(shell which docker)
+
+PROJECT_NAME = $(shell git remote get-url origin | xargs basename -s .git)
+
+# process build tags
+build_tags = netgo
+ ifeq ($(LEDGER_ENABLED),true)
+ ifeq ($(OS),Windows_NT)
+
+GCCEXE = $(shell where gcc.exe 2> NUL)
+ ifeq ($(GCCEXE),)
+ $(error gcc.exe not installed for ledger support, please install or set LEDGER_ENABLED=false)
+
+else
+ build_tags += ledger
+ endif
+ else
+ UNAME_S = $(shell uname -s)
+ ifeq ($(UNAME_S),OpenBSD)
+ $(warning OpenBSD detected, disabling ledger support (https://github.com/cosmos/cosmos-sdk/issues/1988))
+
+else
+ GCC = $(shell command -v gcc 2> /dev/null)
+ ifeq ($(GCC),)
+ $(error gcc not installed for ledger support, please install or set LEDGER_ENABLED=false)
+
+else
+ build_tags += ledger
+ endif
+ endif
+ endif
+endif
+ ifeq (secp,$(findstring secp,$(COSMOS_BUILD_OPTIONS)))
+
+build_tags += libsecp256k1_sdk
+endif
+ ifeq (legacy,$(findstring legacy,$(COSMOS_BUILD_OPTIONS)))
+
+build_tags += app_v1
+endif
+ whitespace :=
+whitespace += $(whitespace)
+ comma := ,
+build_tags_comma_sep := $(subst $(whitespace),$(comma),$(build_tags))
+
+# process linker flags
+
+ldflags = -X github.com/cosmos/cosmos-sdk/version.Name=sim \
+ -X github.com/cosmos/cosmos-sdk/version.AppName=simd \
+ -X github.com/cosmos/cosmos-sdk/version.Version=$(VERSION) \
+ -X github.com/cosmos/cosmos-sdk/version.Commit=$(COMMIT) \
+ -X "github.com/cosmos/cosmos-sdk/version.BuildTags=$(build_tags_comma_sep)" \
+ -X github.com/cometbft/cometbft/version.TMCoreSemVer=$(CMTVERSION)
+
+# DB backend selection
+ ifeq (cleveldb,$(findstring cleveldb,$(COSMOS_BUILD_OPTIONS)))
+
+build_tags += gcc
+endif
+ ifeq (badgerdb,$(findstring badgerdb,$(COSMOS_BUILD_OPTIONS)))
+
+build_tags += badgerdb
+endif
+# handle rocksdb
+ ifeq (rocksdb,$(findstring rocksdb,$(COSMOS_BUILD_OPTIONS)))
+
+CGO_ENABLED=1
+ build_tags += rocksdb
+endif
+# handle boltdb
+ ifeq (boltdb,$(findstring boltdb,$(COSMOS_BUILD_OPTIONS)))
+
+build_tags += boltdb
+endif
+ ifeq (,$(findstring nostrip,$(COSMOS_BUILD_OPTIONS)))
+
+ldflags += -w -s
+endif
+ldflags += $(LDFLAGS)
+ ldflags := $(strip $(ldflags))
+
+build_tags += $(BUILD_TAGS)
+
+build_tags := $(strip $(build_tags))
+
+BUILD_FLAGS := -tags "$(build_tags)" -ldflags '$(ldflags)'
+# check for nostrip option
+ ifeq (,$(findstring nostrip,$(COSMOS_BUILD_OPTIONS)))
+
+BUILD_FLAGS += -trimpath
+endif
+
+# Check for debug option
+ ifeq (debug,$(findstring debug,$(COSMOS_BUILD_OPTIONS)))
+
+BUILD_FLAGS += -gcflags "all=-N -l"
+endif
+
+all: tools build lint test vulncheck
+
+# The below include contains the tools and runsim targets.
+include contrib/devtools/Makefile
+
+###############################################################################
+### Build ###
+###############################################################################
+
+BUILD_TARGETS := build install
+
+build: BUILD_ARGS=-o $(BUILDDIR)/
+
+build-linux-amd64:
+ GOOS=linux GOARCH=amd64 LEDGER_ENABLED=false $(MAKE)
+
+build
+
+build-linux-arm64:
+ GOOS=linux GOARCH=arm64 LEDGER_ENABLED=false $(MAKE)
+
+build
+
+$(BUILD_TARGETS): go.sum $(BUILDDIR)/
+ cd ${
+ CURRENT_DIR
+}/simapp && go $@ -mod=readonly $(BUILD_FLAGS) $(BUILD_ARGS) ./...
+
+$(BUILDDIR)/:
+ mkdir -p $(BUILDDIR)/
+
+cosmovisor:
+ $(MAKE) -C tools/cosmovisor cosmovisor
+
+rosetta:
+ $(MAKE) -C tools/rosetta rosetta
+
+confix:
+ $(MAKE) -C tools/confix confix
+
+hubl:
+ $(MAKE) -C tools/hubl hubl
+
+.PHONY: build build-linux-amd64 build-linux-arm64 cosmovisor rosetta confix
+
+mocks: $(MOCKS_DIR)
+ @go install github.com/golang/mock/mockgen@v1.6.0
+ sh ./scripts/mockgen.sh
+.PHONY: mocks
+
+vulncheck: $(BUILDDIR)/
+ GOBIN=$(BUILDDIR)
+
+go install golang.org/x/vuln/cmd/govulncheck@latest
+ $(BUILDDIR)/govulncheck ./...
+
+$(MOCKS_DIR):
+ mkdir -p $(MOCKS_DIR)
+
+distclean: clean tools-clean
+clean:
+ rm -rf \
+ $(BUILDDIR)/ \
+ artifacts/ \
+ tmp-swagger-gen/ \
+ .testnets
+
+.PHONY: distclean clean
+
+###############################################################################
+### Tools & Dependencies ###
+###############################################################################
+
+go.sum: go.mod
+ echo "Ensure dependencies have not been modified ..." >&2
+ go mod verify
+ go mod tidy
+
+###############################################################################
+### Documentation ###
+###############################################################################
+
+godocs:
+ @echo "--> Wait a few seconds and visit http://localhost:6060/pkg/github.com/cosmos/cosmos-sdk/types"
+ go install golang.org/x/tools/cmd/godoc@latest
+ godoc -http=:6060
+
+build-docs:
+ @cd docs && DOCS_DOMAIN=docs.cosmos.network sh ./build-all.sh
+
+.PHONY: build-docs
+
+###############################################################################
+### Tests & Simulation ###
+###############################################################################
+
+# make init-simapp initializes a single local node network
+# it is useful for testing and development
+# Usage: make install && make init-simapp && simd start
+# Warning: make init-simapp will remove all data in simapp home directory
+init-simapp:
+ ./scripts/init-simapp.sh
+
+test: test-unit
+test-e2e:
+ $(MAKE) -C tests test-e2e
+test-e2e-cov:
+ $(MAKE) -C tests test-e2e-cov
+test-integration:
+ $(MAKE) -C tests test-integration
+test-integration-cov:
+ $(MAKE) -C tests test-integration-cov
+test-all: test-unit test-e2e test-integration test-ledger-mock test-race
+
+TEST_PACKAGES=./...
+TEST_TARGETS := test-unit test-unit-amino test-unit-proto test-ledger-mock test-race test-ledger test-race
+
+# Test runs-specific rules. To add a new test target, just add
+# a new rule, customize ARGS or TEST_PACKAGES ad libitum, and
+# append the new rule to the TEST_TARGETS list.
+test-unit: test_tags += cgo ledger test_ledger_mock norace
+test-unit-amino: test_tags += ledger test_ledger_mock test_amino norace
+test-ledger: test_tags += cgo ledger norace
+test-ledger-mock: test_tags += ledger test_ledger_mock norace
+test-race: test_tags += cgo ledger test_ledger_mock
+test-race: ARGS=-race
+test-race: TEST_PACKAGES=$(PACKAGES_NOSIMULATION)
+$(TEST_TARGETS): run-tests
+
+# check-* compiles and collects tests without running them
+# note: go test -c doesn't support multiple packages yet (https://github.com/golang/go/issues/15513)
+
+CHECK_TEST_TARGETS := check-test-unit check-test-unit-amino
+check-test-unit: test_tags += cgo ledger test_ledger_mock norace
+check-test-unit-amino: test_tags += ledger test_ledger_mock test_amino norace
+$(CHECK_TEST_TARGETS): EXTRA_ARGS=-run=none
+$(CHECK_TEST_TARGETS): run-tests
+
+ARGS += -tags "$(test_tags)"
+SUB_MODULES = $(shell find . -type f -name 'go.mod' -print0 | xargs -0 -n1 dirname | sort)
+
+CURRENT_DIR = $(shell pwd)
+
+run-tests:
+ ifneq (,$(shell which tparse 2>/dev/null))
+ @echo "Starting unit tests"; \
+ finalec=0; \
+ for module in $(SUB_MODULES); do \
+ cd ${
+ CURRENT_DIR
+}/$module; \
+ echo "Running unit tests for $(grep '^module' go.mod)"; \
+ go test -mod=readonly -json $(ARGS) $(TEST_PACKAGES) ./... | tparse; \
+ ec=$?; \
+ if [ "$ec" -ne '0' ]; then finalec=$ec; fi; \
+ done; \
+ exit $finalec
+else
+ @echo "Starting unit tests"; \
+ finalec=0; \
+ for module in $(SUB_MODULES); do \
+ cd ${
+ CURRENT_DIR
+}/$module; \
+ echo "Running unit tests for $(grep '^module' go.mod)"; \
+ go test -mod=readonly $(ARGS) $(TEST_PACKAGES) ./... ; \
+ ec=$?; \
+ if [ "$ec" -ne '0' ]; then finalec=$ec; fi; \
+ done; \
+ exit $finalec
+endif
+
+.PHONY: run-tests test test-all $(TEST_TARGETS)
+
+test-sim-nondeterminism:
+ @echo "Running non-determinism test..."
+ @cd ${
+ CURRENT_DIR
+}/simapp && go test -mod=readonly -run TestAppStateDeterminism -Enabled=true \
+ -NumBlocks=100 -BlockSize=200 -Commit=true -Period=0 -v -timeout 24h
+
+# Requires an exported plugin. See store/streaming/README.md for documentation.
+#
+# example:
+# export COSMOS_SDK_ABCI_V1=
+# make test-sim-nondeterminism-streaming
+#
+# Using the built-in examples:
+# export COSMOS_SDK_ABCI_V1=/store/streaming/abci/examples/file/file
+# make test-sim-nondeterminism-streaming
+test-sim-nondeterminism-streaming:
+ @echo "Running non-determinism-streaming test..."
+ @cd ${
+ CURRENT_DIR
+}/simapp && go test -mod=readonly -run TestAppStateDeterminism -Enabled=true \
+ -NumBlocks=100 -BlockSize=200 -Commit=true -Period=0 -v -timeout 24h -EnableStreaming=true
+
+test-sim-custom-genesis-fast:
+ @echo "Running custom genesis simulation..."
+ @echo "By default, ${
+ HOME
+}/.gaiad/config/genesis.json will be used."
+ @cd ${
+ CURRENT_DIR
+}/simapp && go test -mod=readonly -run TestFullAppSimulation -Genesis=${
+ HOME
+}/.gaiad/config/genesis.json \
+ -Enabled=true -NumBlocks=100 -BlockSize=200 -Commit=true -Seed=99 -Period=5 -v -timeout 24h
+
+test-sim-import-export: runsim
+ @echo "Running application import/export simulation. This may take several minutes..."
+ @cd ${
+ CURRENT_DIR
+}/simapp && $(BINDIR)/runsim -Jobs=4 -SimAppPkg=. -ExitOnFail 50 5 TestAppImportExport
+
+test-sim-after-import: runsim
+ @echo "Running application simulation-after-import. This may take several minutes..."
+ @cd ${
+ CURRENT_DIR
+}/simapp && $(BINDIR)/runsim -Jobs=4 -SimAppPkg=. -ExitOnFail 50 5 TestAppSimulationAfterImport
+
+test-sim-custom-genesis-multi-seed: runsim
+ @echo "Running multi-seed custom genesis simulation..."
+ @echo "By default, ${
+ HOME
+}/.gaiad/config/genesis.json will be used."
+ @cd ${
+ CURRENT_DIR
+}/simapp && $(BINDIR)/runsim -Genesis=${
+ HOME
+}/.gaiad/config/genesis.json -SimAppPkg=. -ExitOnFail 400 5 TestFullAppSimulation
+
+test-sim-multi-seed-long: runsim
+ @echo "Running long multi-seed application simulation. This may take awhile!"
+ @cd ${
+ CURRENT_DIR
+}/simapp && $(BINDIR)/runsim -Jobs=4 -SimAppPkg=. -ExitOnFail 500 50 TestFullAppSimulation
+
+test-sim-multi-seed-short: runsim
+ @echo "Running short multi-seed application simulation. This may take awhile!"
+ @cd ${
+ CURRENT_DIR
+}/simapp && $(BINDIR)/runsim -Jobs=4 -SimAppPkg=. -ExitOnFail 50 10 TestFullAppSimulation
+
+test-sim-benchmark-invariants:
+ @echo "Running simulation invariant benchmarks..."
+ cd ${
+ CURRENT_DIR
+}/simapp && @go test -mod=readonly -benchmem -bench=BenchmarkInvariants -run=^$ \
+ -Enabled=true -NumBlocks=1000 -BlockSize=200 \
+ -Period=1 -Commit=true -Seed=57 -v -timeout 24h
+
+.PHONY: \
+test-sim-nondeterminism \
+test-sim-nondeterminism-streaming \
+test-sim-custom-genesis-fast \
+test-sim-import-export \
+test-sim-after-import \
+test-sim-custom-genesis-multi-seed \
+test-sim-multi-seed-short \
+test-sim-multi-seed-long \
+test-sim-benchmark-invariants
+
+SIM_NUM_BLOCKS ?= 500
+SIM_BLOCK_SIZE ?= 200
+SIM_COMMIT ?= true
+
+test-sim-benchmark:
+ @echo "Running application benchmark for numBlocks=$(SIM_NUM_BLOCKS), blockSize=$(SIM_BLOCK_SIZE). This may take awhile!"
+ @cd ${
+ CURRENT_DIR
+}/simapp && go test -mod=readonly -run=^$ $(.) -bench ^BenchmarkFullAppSimulation$ \
+ -Enabled=true -NumBlocks=$(SIM_NUM_BLOCKS) -BlockSize=$(SIM_BLOCK_SIZE) -Commit=$(SIM_COMMIT) -timeout 24h
+
+# Requires an exported plugin. See store/streaming/README.md for documentation.
+#
+# example:
+# export COSMOS_SDK_ABCI_V1=
+# make test-sim-benchmark-streaming
+#
+# Using the built-in examples:
+# export COSMOS_SDK_ABCI_V1=/store/streaming/abci/examples/file/file
+# make test-sim-benchmark-streaming
+test-sim-benchmark-streaming:
+ @echo "Running application benchmark for numBlocks=$(SIM_NUM_BLOCKS), blockSize=$(SIM_BLOCK_SIZE). This may take awhile!"
+ @cd ${
+ CURRENT_DIR
+}/simapp && go test -mod=readonly -run=^$ $(.) -bench ^BenchmarkFullAppSimulation$ \
+ -Enabled=true -NumBlocks=$(SIM_NUM_BLOCKS) -BlockSize=$(SIM_BLOCK_SIZE) -Commit=$(SIM_COMMIT) -timeout 24h -EnableStreaming=true
+
+test-sim-profile:
+ @echo "Running application benchmark for numBlocks=$(SIM_NUM_BLOCKS), blockSize=$(SIM_BLOCK_SIZE). This may take awhile!"
+ @cd ${
+ CURRENT_DIR
+}/simapp && go test -mod=readonly -benchmem -run=^$ $(.) -bench ^BenchmarkFullAppSimulation$ \
+ -Enabled=true -NumBlocks=$(SIM_NUM_BLOCKS) -BlockSize=$(SIM_BLOCK_SIZE) -Commit=$(SIM_COMMIT) -timeout 24h -cpuprofile cpu.out -memprofile mem.out
+
+# Requires an exported plugin. See store/streaming/README.md for documentation.
+#
+# example:
+# export COSMOS_SDK_ABCI_V1=
+# make test-sim-profile-streaming
+#
+# Using the built-in examples:
+# export COSMOS_SDK_ABCI_V1=/store/streaming/abci/examples/file/file
+# make test-sim-profile-streaming
+test-sim-profile-streaming:
+ @echo "Running application benchmark for numBlocks=$(SIM_NUM_BLOCKS), blockSize=$(SIM_BLOCK_SIZE). This may take awhile!"
+ @cd ${
+ CURRENT_DIR
+}/simapp && go test -mod=readonly -benchmem -run=^$ $(.) -bench ^BenchmarkFullAppSimulation$ \
+ -Enabled=true -NumBlocks=$(SIM_NUM_BLOCKS) -BlockSize=$(SIM_BLOCK_SIZE) -Commit=$(SIM_COMMIT) -timeout 24h -cpuprofile cpu.out -memprofile mem.out -EnableStreaming=true
+
+.PHONY: test-sim-profile test-sim-benchmark
+
+test-rosetta:
+ docker build -t rosetta-ci:latest -f contrib/rosetta/rosetta-ci/Dockerfile .
+ docker-compose -f contrib/rosetta/docker-compose.yaml up --abort-on-container-exit --exit-code-from test_rosetta --build
+.PHONY: test-rosetta
+
+benchmark:
+ @go test -mod=readonly -bench=. $(PACKAGES_NOSIMULATION)
+.PHONY: benchmark
+
+###############################################################################
+### Linting ###
+###############################################################################
+
+golangci_lint_cmd=golangci-lint
+golangci_version=v1.51.2
+
+lint:
+ @echo "--> Running linter"
+ @go install github.com/golangci/golangci-lint/cmd/golangci-lint@$(golangci_version)
+ @./scripts/go-lint-all.bash --timeout=15m
+
+lint-fix:
+ @echo "--> Running linter"
+ @go install github.com/golangci/golangci-lint/cmd/golangci-lint@$(golangci_version)
+ @./scripts/go-lint-all.bash --fix
+
+.PHONY: lint lint-fix
+
+###############################################################################
+### Protobuf ###
+###############################################################################
+
+protoVer=0.13.2
+protoImageName=ghcr.io/cosmos/proto-builder:$(protoVer)
+
+protoImage=$(DOCKER)
+
+run --rm -v $(CURDIR):/workspace --workdir /workspace $(protoImageName)
+
+proto-all: proto-format proto-lint proto-gen
+
+proto-gen:
+ @echo "Generating Protobuf files"
+ @$(protoImage)
+
+sh ./scripts/protocgen.sh
+
+proto-swagger-gen:
+ @echo "Generating Protobuf Swagger"
+ @$(protoImage)
+
+sh ./scripts/protoc-swagger-gen.sh
+
+proto-format:
+ @$(protoImage)
+
+find ./ -name "*.proto" -exec clang-format -i {
+} \;
+
+proto-lint:
+ @$(protoImage)
+
+buf lint --error-format=json
+
+proto-check-breaking:
+ @$(protoImage)
+
+buf breaking --against $(HTTPS_GIT)#branch=main
+
+CMT_URL = https://raw.githubusercontent.com/cometbft/cometbft/v0.38.0-alpha.2/proto/tendermint
+
+CMT_CRYPTO_TYPES = proto/tendermint/crypto
+CMT_ABCI_TYPES = proto/tendermint/abci
+CMT_TYPES = proto/tendermint/types
+CMT_VERSION = proto/tendermint/version
+CMT_LIBS = proto/tendermint/libs/bits
+CMT_P2P = proto/tendermint/p2p
+
+proto-update-deps:
+ @echo "Updating Protobuf dependencies"
+
+ @mkdir -p $(CMT_ABCI_TYPES)
+ @curl -sSL $(CMT_URL)/abci/types.proto > $(CMT_ABCI_TYPES)/types.proto
+
+ @mkdir -p $(CMT_VERSION)
+ @curl -sSL $(CMT_URL)/version/types.proto > $(CMT_VERSION)/types.proto
+
+ @mkdir -p $(CMT_TYPES)
+ @curl -sSL $(CMT_URL)/types/types.proto > $(CMT_TYPES)/types.proto
+ @curl -sSL $(CMT_URL)/types/evidence.proto > $(CMT_TYPES)/evidence.proto
+ @curl -sSL $(CMT_URL)/types/params.proto > $(CMT_TYPES)/params.proto
+ @curl -sSL $(CMT_URL)/types/validator.proto > $(CMT_TYPES)/validator.proto
+ @curl -sSL $(CMT_URL)/types/block.proto > $(CMT_TYPES)/block.proto
+
+ @mkdir -p $(CMT_CRYPTO_TYPES)
+ @curl -sSL $(CMT_URL)/crypto/proof.proto > $(CMT_CRYPTO_TYPES)/proof.proto
+ @curl -sSL $(CMT_URL)/crypto/keys.proto > $(CMT_CRYPTO_TYPES)/keys.proto
+
+ @mkdir -p $(CMT_LIBS)
+ @curl -sSL $(CMT_URL)/libs/bits/types.proto > $(CMT_LIBS)/types.proto
+
+ @mkdir -p $(CMT_P2P)
+ @curl -sSL $(CMT_URL)/p2p/types.proto > $(CMT_P2P)/types.proto
+
+ $(DOCKER)
+
+run --rm -v $(CURDIR)/proto:/workspace --workdir /workspace $(protoImageName)
+
+buf mod update
+
+.PHONY: proto-all proto-gen proto-swagger-gen proto-format proto-lint proto-check-breaking proto-update-deps
+
+###############################################################################
+### Localnet ###
+###############################################################################
+
+localnet-build-env:
+ $(MAKE) -C contrib/images simd-env
+localnet-build-dlv:
+ $(MAKE) -C contrib/images simd-dlv
+
+localnet-build-nodes:
+ $(DOCKER)
+
+run --rm -v $(CURDIR)/.testnets:/data cosmossdk/simd \
+ testnet init-files --v 4 -o /data --starting-ip-address 192.168.10.2 --keyring-backend=test
+ docker-compose up -d
+
+localnet-stop:
+ docker-compose down
+
+# localnet-start will run a 4-node testnet locally. The nodes are
+# based off the docker images in: ./contrib/images/simd-env
+localnet-start: localnet-stop localnet-build-env localnet-build-nodes
+
+# localnet-debug will run a 4-node testnet locally in debug mode
+# you can read more about the debug mode here: ./contrib/images/simd-dlv/README.md
+localnet-debug: localnet-stop localnet-build-dlv localnet-build-nodes
+
+.PHONY: localnet-start localnet-stop localnet-debug localnet-build-env localnet-build-dlv localnet-build-nodes
+
+###############################################################################
+### rosetta ###
+###############################################################################
+# builds rosetta test data dir
+rosetta-data:
+ -docker container rm data_dir_build
+ docker build -t rosetta-ci:latest -f contrib/rosetta/rosetta-ci/Dockerfile .
+ docker run --name data_dir_build -t rosetta-ci:latest sh /rosetta/data.sh
+ docker cp data_dir_build:/tmp/data.tar.gz "$(CURDIR)/contrib/rosetta/rosetta-ci/data.tar.gz"
+ docker container rm data_dir_build
+.PHONY: rosetta-data
+```
+
+The script used to generate the protobuf files can be found in the `scripts/` directory.
+
+```shell
+# Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/scripts/protocgen.sh
+```
+
+## Buf
+
+[Buf](https://buf.build) is a protobuf tool that abstracts the need to use the complicated `protoc` toolchain, on top of various other things that ensure you are using protobuf in accordance with the majority of the ecosystem. Within the cosmos-sdk repository there are a few files that have a buf prefix. Let's start with the top level and then dive into the various directories.
+
+### Workspace
+
+At the root level directory a workspace is defined using [buf workspaces](https://docs.buf.build/configuration/v1/buf-work-yaml). This helps if there are one or more protobuf containing directories in your project.
+
+Cosmos SDK example:
+
+```yaml
+version: v1
+directories:
+ - proto
+```
+
+### Proto Directory
+
+Next is the `proto/` directory where all of our protobuf files live. In here there are many different buf files defined each serving a different purpose.
+
+```bash
+├── README.md
+├── buf.gen.gogo.yaml
+├── buf.gen.pulsar.yaml
+├── buf.gen.swagger.yaml
+├── buf.lock
+├── buf.md
+├── buf.yaml
+├── cosmos
+└── tendermint
+```
+
+The above diagram shows all the files and directories within the Cosmos SDK `proto/` directory.
+
+#### `buf.gen.gogo.yaml`
+
+`buf.gen.gogo.yaml` defines how the protobuf files should be generated for use within the module. This file uses [gogoproto](https://github.com/gogo/protobuf), a separate generator from the google go-proto generator that makes working with various objects more ergonomic, and it has more performant encode and decode steps.
+
+```yaml
+version: v1
+plugins:
+ - name: gocosmos
+ out: ..
+ opt: plugins=grpc,Mgoogle/protobuf/any.proto=github.com/cosmos/gogoproto/types/any
+ - name: grpc-gateway
+ out: ..
+ opt: logtostderr=true,allow_colon_final_segments=true
+```
+
+
+Example of how to define `gen` files can be found [here](https://docs.buf.build/tour/generate-go-code)
+
+
+#### `buf.gen.pulsar.yaml`
+
+`buf.gen.pulsar.yaml` defines how protobuf files should be generated using the [new golang apiv2 of protobuf](https://go.dev/blog/protobuf-apiv2). This generator is used instead of the google go-proto generator because it has some extra helpers for Cosmos SDK applications and will have more performant encode and decode than the google go-proto generator. You can follow the development of this generator [here](https://github.com/cosmos/cosmos-proto).
+
+```yaml expandable
+version: v1
+managed:
+ enabled: true
+ go_package_prefix:
+ default: cosmossdk.io/api
+ except:
+ - buf.build/googleapis/googleapis
+ - buf.build/cosmos/gogo-proto
+ - buf.build/cosmos/cosmos-proto
+ override:
+plugins:
+ - name: go-pulsar
+ out: ../api
+ opt: paths=source_relative
+ - name: go-grpc
+ out: ../api
+ opt: paths=source_relative
+```
+
+
+Example of how to define `gen` files can be found [here](https://docs.buf.build/tour/generate-go-code)
+
+
+#### `buf.gen.swagger.yaml`
+
+`buf.gen.swagger.yaml` generates the swagger documentation for the query and messages of the chain. This will only define the REST API end points that were defined in the query and msg servers. You can find examples of this [here](https://github.com/cosmos/cosmos-sdk/blob/main/proto/cosmos/bank/v1beta1/query.proto#L19)
+
+```yaml
+version: v1
+plugins:
+ - name: swagger
+ out: ../tmp-swagger-gen
+ opt: logtostderr=true,fqn_for_swagger_name=true,simple_operation_ids=true
+```
+
+
+Example of how to define `gen` files can be found [here](https://docs.buf.build/tour/generate-go-code)
+
+
+#### `buf.lock`
+
+This is an autogenerated file based on the dependencies required by the `.gen` files. There is no need to copy the current one. If you depend on cosmos-sdk proto definitions, a new entry for the Cosmos SDK will need to be provided. The dependency you will need to use is `buf.build/cosmos/cosmos-sdk`.
+
+```yaml expandable
+# Generated by buf. DO NOT EDIT.
+version: v1
+deps:
+ - remote: buf.build
+ owner: cosmos
+ repository: cosmos-proto
+ commit: 04467658e59e44bbb22fe568206e1f70
+ digest: shake256:73a640bd60e0c523b0f8237ff34eab67c45a38b64bbbde1d80224819d272dbf316ac183526bd245f994af6608b025f5130483d0133c5edd385531326b5990466
+ - remote: buf.build
+ owner: cosmos
+ repository: gogo-proto
+ commit: 88ef6483f90f478fb938c37dde52ece3
+ digest: shake256:89c45df2aa11e0cff97b0d695436713db3d993d76792e9f8dc1ae90e6ab9a9bec55503d48ceedd6b86069ab07d3041b32001b2bfe0227fa725dd515ff381e5ba
+ - remote: buf.build
+ owner: googleapis
+ repository: googleapis
+ commit: 751cbe31638d43a9bfb6162cd2352e67
+ digest: shake256:87f55470d9d124e2d1dedfe0231221f4ed7efbc55bc5268917c678e2d9b9c41573a7f9a557f6d8539044524d9fc5ca8fbb7db05eb81379d168285d76b57eb8a4
+ - remote: buf.build
+ owner: protocolbuffers
+ repository: wellknowntypes
+ commit: 3ddd61d1f53d485abd3d3a2b47a62b8e
+ digest: shake256:9e6799d56700d0470c3723a2fd027e8b4a41a07085a0c90c58e05f6c0038fac9b7a0170acd7692707a849983b1b8189aa33e7b73f91d68157f7136823115546b
+```
+
+#### `buf.yaml`
+
+`buf.yaml` defines the [name of your package](https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.yaml#L3), which [breakage checker](https://docs.buf.build/tour/detect-breaking-changes) to use and how to [lint your protobuf files](https://buf.build/docs/tutorials/getting-started-with-buf-cli#lint-your-api).
+
+```yaml expandable
+# This module represents buf.build/cosmos/cosmos-sdk
+version: v1
+name: buf.build/cosmos/cosmos-sdk
+deps:
+ - buf.build/cosmos/cosmos-proto
+ - buf.build/cosmos/gogo-proto
+ - buf.build/googleapis/googleapis
+ - buf.build/protocolbuffers/wellknowntypes
+breaking:
+ use:
+ - FILE
+ ignore:
+ - testpb
+lint:
+ use:
+ - STANDARD
+ - COMMENTS
+ - FILE_LOWER_SNAKE_CASE
+ except:
+ - UNARY_RPC
+ - COMMENT_FIELD
+ - SERVICE_SUFFIX
+ - PACKAGE_VERSION_SUFFIX
+ - RPC_REQUEST_STANDARD_NAME
+ ignore:
+ - tendermint
+```
+
+We use a variety of linters for the Cosmos SDK protobuf files. The repo also checks this in CI.
+
+A reference to the github actions can be found [here](https://github.com/cosmos/cosmos-sdk/blob/main/.github/workflows/proto.yml#L1-L32)
+
+```yaml expandable
+name: Protobuf
+# Protobuf runs buf (https://buf.build/)
+
+lint and check-breakage
+# This workflow is only run when a .proto file has been changed
+on:
+ pull_request:
+ paths:
+ - "proto/**"
+
+permissions:
+ contents: read
+
+jobs:
+ lint:
+ runs-on: depot-ubuntu-22.04-4
+ timeout-minutes: 5
+ steps:
+ - uses: actions/checkout@v5
+ - uses: bufbuild/buf-setup-action@v1.50.0
+ - uses: bufbuild/buf-lint-action@v1
+ with:
+ input: "proto"
+
+ break-check:
+ runs-on: depot-ubuntu-22.04-4
+ steps:
+ - uses: actions/checkout@v5
+ - uses: bufbuild/buf-setup-action@v1.50.0
+ - uses: bufbuild/buf-breaking-action@v1
+ with:
+ input: "proto"
+ against: "https://github.com/${{
+ github.repository
+}}.git#branch=${{
+ github.event.pull_request.base.ref
+}},ref=HEAD~1,subdir=proto"
+```
diff --git a/sdk/next/changelog/release-notes.mdx b/sdk/next/changelog/release-notes.mdx
index b2a091d2f..b648e8ab4 100644
--- a/sdk/next/changelog/release-notes.mdx
+++ b/sdk/next/changelog/release-notes.mdx
@@ -4,631 +4,158 @@ description: "Release history and changelog for Cosmos SDK"
mode: "wide"
---
-
- This page tracks all releases and changes from the [cosmos/cosmos-sdk](https://github.com/cosmos/cosmos-sdk) repository.
- For the latest development updates, see the [UNRELEASED](https://github.com/cosmos/cosmos-sdk/blob/main/CHANGELOG.md#unreleased) section.
-
-
-
-## Features
+
+## BREAKING CHANGES
+
+- (x/staking) [#25724](https://github.com/cosmos/cosmos-sdk/issues/25724) Validate `BondDenom` in `MsgUpdateParams` to prevent setting non-existent or zero-supply denoms.
+- [#25778](https://github.com/cosmos/cosmos-sdk/pull/25778) Update `log` to log v2.
+- [#25090](https://github.com/cosmos/cosmos-sdk/pull/25090) Moved deprecated modules to `./contrib`. These modules are still available but will no longer be actively maintained or supported in the Cosmos SDK Bug Bounty program.
+ - `x/group`
+ - `x/nft`
+ - `x/circuit`
+ - `x/crisis`
+- (crypto) [#24414](https://github.com/cosmos/cosmos-sdk/pull/24414) Remove sr25519 support, since it was removed in CometBFT v1.x.
+- (x/gov) [#25615](https://github.com/cosmos/cosmos-sdk/pull/25615) Decouple `x/gov` from `x/staking` by making `CalculateVoteResultsAndVotingPowerFn` a required parameter to `keeper.NewKeeper` instead of `StakingKeeper`. `BondedTokens` has been renamed to `ValidatorPower` and `TotalBondedTokens` has been renamed to `TotalValidatorPower` to allow for multiple validator power representations.
+- (x/gov) [#25617](https://github.com/cosmos/cosmos-sdk/pull/25617) `AfterProposalSubmission` hook now includes proposer address as a parameter.
+- (x/gov) [#25616](https://github.com/cosmos/cosmos-sdk/pull/25616) `DistrKeeper` `x/distribution` is now optional. Genesis validation ensures `distrKeeper` is set if distribution module is used as proposal cancel destination.
+
+## FEATURES
+
+- [#25471](https://github.com/cosmos/cosmos-sdk/pull/25471) Full BLS 12-381 support enabled.
+- [#24872](https://github.com/cosmos/cosmos-sdk/pull/24872) Support BLS 12-381 for cli `init`, `gentx`, `collect-gentx`.
+- (crypto) [#24919](https://github.com/cosmos/cosmos-sdk/pull/24919) Add `NewPubKeyFromBytes` function to the `secp256r1` package to create `PubKey` from bytes.
+- (server) [#24720](https://github.com/cosmos/cosmos-sdk/pull/24720) Add `verbose_log_level` flag for configuring the log level when switching to verbose logging mode during sensitive operations such as chain upgrades.
+- (crypto) [#24861](https://github.com/cosmos/cosmos-sdk/pull/24861) Add `PubKeyFromCometTypeAndBytes` helper function to convert from `comet/v2` PubKeys to the `cryptotypes.Pubkey` interface.
+- (abci_utils) [#25008](https://github.com/cosmos/cosmos-sdk/pull/25008) Add the ability to assign a custom signer extraction adapter in `DefaultProposalHandler`.
+- (crypto/ledger) [#25435](https://github.com/cosmos/cosmos-sdk/pull/25435) Add SetDERConversion to reset skipDERConversion and App name for ledger.
+- (gRPC) [#25565](https://github.com/cosmos/cosmos-sdk/pull/25565) Support for multi gRPC query clients serve with historical binaries to serve proper historical state.
+- (blockstm) [#25600](https://github.com/cosmos/cosmos-sdk/pull/25600) Allow dynamic retrieval of the coin denomination from multi store at runtime.
+- (x/distribution) [#25650](https://github.com/cosmos/cosmos-sdk/pull/25650) Add new gRPC query endpoints and CLI commands for `DelegatorStartingInfo`, `ValidatorHistoricalRewards`, and `ValidatorCurrentRewards`.
+- [#25516](https://github.com/cosmos/cosmos-sdk/pull/25516) Support automatic configuration of OpenTelemetry via OpenTelemetry declarative configuration and add OpenTelemetry instrumentation of BaseApp.
+- [#25745](https://github.com/cosmos/cosmos-sdk/pull/25745) Add DiskIO telemetry via gopsutil.
+- (grpc) [#25648](https://github.com/cosmos/cosmos-sdk/pull/25648) Add `earliest_block_height` and `latest_block_height` fields to `GetSyncingResponse`.
+- (collections/codec) [#25827](https://github.com/cosmos/cosmos-sdk/pull/25827) Add `TimeValue` (`ValueCodec[time.Time]`) to collections/codec.
+- (enterprise/poa) [#25838](https://github.com/cosmos/cosmos-sdk/pull/25838) Add the `poa` module under the `enterprise` directory.
+
+## IMPROVEMENTS
+
+- (types) [#25342](https://github.com/cosmos/cosmos-sdk/pull/25342) Undeprecated `EmitEvent` and `EmitEvents` on the `EventManager`. These functions will continue to be maintained.
+- (types) [#25877](https://github.com/cosmos/cosmos-sdk/pull/25877) Add `OverrideEvents` to `EventManagerI`.
+- (types) [#24668](https://github.com/cosmos/cosmos-sdk/pull/24668) Scope the global config to a particular binary so that multiple SDK binaries can be properly run on the same machine.
+- (baseapp) [#24655](https://github.com/cosmos/cosmos-sdk/pull/24655) Add mutex locks for `state` and make `lastCommitInfo` atomic to prevent race conditions between `Commit` and `CreateQueryContext`.
+- (proto) [#24161](https://github.com/cosmos/cosmos-sdk/pull/24161) Remove unnecessary annotations from `x/staking` authz proto.
+- (x/bank) [#24660](https://github.com/cosmos/cosmos-sdk/pull/24660) Improve performance of the `GetAllBalances` and `GetAccountsBalances` keeper methods.
+- (collections) [#25464](https://github.com/cosmos/cosmos-sdk/pull/25464) Add `IterateRaw` method to `Multi` index type to satisfy query `Collection` interface.
+- (x/mint) [#25562](https://github.com/cosmos/cosmos-sdk/pull/25562) Improve and test `x/mint` params validation.
+- (api) [#25613](https://github.com/cosmos/cosmos-sdk/pull/25613) Separated deprecated modules into the contrib directory, distinct from api, to enable and unblock new proto changes without affecting legacy code.
+- (server) [#25632](https://github.com/cosmos/cosmos-sdk/pull/25632) Add missing call to close the app on shutdown.
+- (server) [#25740](https://github.com/cosmos/cosmos-sdk/pull/25740) Add variadic `grpc.DialOption` parameter to `StartGrpcServer` for custom gRPC client connection options.
+- (blockstm) [#25765](https://github.com/cosmos/cosmos-sdk/pull/25765) Minor code readability improvement in block-stm.
+- (server/config) [#25807](https://github.com/cosmos/cosmos-sdk/pull/25807) Reject overlapping historical gRPC block ranges.
+- [#25857](https://github.com/cosmos/cosmos-sdk/pull/25857) Reduce scope of mutex in `PriorityNonceMempool.Remove`.
+- (baseapp) [#25862](https://github.com/cosmos/cosmos-sdk/pull/25862) Skip validateBasic for rechecking txs to improve performance.
+- (grpc) [#25850](https://github.com/cosmos/cosmos-sdk/pull/25850) Add `GetBlockResults` and `GetLatestBlockResults` gRPC endpoints to expose CometBFT block results including finalize_block_events.
+
+## BUG FIXES
+
+- (types/query) [#25665](https://github.com/cosmos/cosmos-sdk/issues/25665) Fix pagination offset when querying a collection with predicate function.
+- (x/staking) [#25649](https://github.com/cosmos/cosmos-sdk/pull/25649) Add missing `defer iterator.Close()` calls in `IterateDelegatorRedelegations` and `GetRedelegations` to prevent resource leaks.
+- (mempool) [#25563](https://github.com/cosmos/cosmos-sdk/pull/25563) Cleanup sender indices in case of tx replacement.
+- (x/epochs) [#25425](https://github.com/cosmos/cosmos-sdk/pull/25425) Fix `InvokeSetHooks` being called with a nil keeper and `AppModule` containing a copy instead of a pointer.
+- (client, client/rpc, x/auth/tx) [#24551](https://github.com/cosmos/cosmos-sdk/pull/24551) Handle cancellation properly when supplying context to client methods.
+- (x/authz) [#24638](https://github.com/cosmos/cosmos-sdk/pull/24638) Fixed a minor bug where the grant key was cast as a string and dumped directly into the error message leading to an error string possibly containing invalid UTF-8.
+- (x/epochs) [#24770](https://github.com/cosmos/cosmos-sdk/pull/24770) Fix register of epoch hooks in `InvokeSetHooks`.
+- (x/epochs) [#25087](https://github.com/cosmos/cosmos-sdk/pull/25087) Remove redundant error check in BeginBlocker.
+- [GHSA-p22h-3m2v-cmgh](https://github.com/cosmos/cosmos-sdk/security/advisories/GHSA-p22h-3m2v-cmgh) Fix x/distribution can halt when historical rewards overflow.
+- (x/staking) [#25258](https://github.com/cosmos/cosmos-sdk/pull/25258) Add delegator address to redelegate event.
+- (cli) [#25485](https://github.com/cosmos/cosmos-sdk/pull/25485) Avoid failed to convert address field in `withdraw-validator-commission` cmd.
+- (baseapp) [#25642](https://github.com/cosmos/cosmos-sdk/pull/25642) Mark pre-block events for indexing based on local configuration.
+- (x/bank) [#25751](https://github.com/cosmos/cosmos-sdk/pull/25751) Fix recipient address in events.
+- (client) [#25811](https://github.com/cosmos/cosmos-sdk/pull/25811) Fix file handle leaks in snapshot commands.
+- (server/config) [#25806](https://github.com/cosmos/cosmos-sdk/pull/25806) Add missing commas in historical gRPC config template.
+- (client) [#25804](https://github.com/cosmos/cosmos-sdk/pull/25804) Add `GetHeightFromMetadataStrict` API to `grpc` client for better error handling.
+- (x/auth) [#25828](https://github.com/cosmos/cosmos-sdk/pull/25828) Limits pagination at default for values that exceed it.
+- (x/staking) [#25829](https://github.com/cosmos/cosmos-sdk/pull/25829) Validates case-sensitivity on authz grants in x/staking.
+
+## DEPRECATED
+
+- (x/nft) [#24575](https://github.com/cosmos/cosmos-sdk/pull/24575) Deprecate the `x/nft` module in the Cosmos SDK repository. This module will not be maintained to the extent that our core modules will and will be kept in a legacy repo.
+- (x/group) [#24571](https://github.com/cosmos/cosmos-sdk/pull/24571) Deprecate the `x/group` module in the Cosmos SDK repository. This module will not be maintained to the extent that our core modules will and will be kept in a legacy repo.
+- (types) [#24664](https://github.com/cosmos/cosmos-sdk/pull/24664) Deprecate the `Invariant` type in the Cosmos SDK.
+- [#25516](https://github.com/cosmos/cosmos-sdk/pull/25516) Deprecate all existing methods and types in the `telemetry` package, usage of `github.com/hashicorp/go-metrics` and the `telemetry` configuration section. New instrumentation should use the official OpenTelemetry go API and Cosmos SDK applications can automatically expose OpenTelemetry metrics, traces and logs via OpenTelemetry declarative configuration.
+
-- (simsx) [#24062](https://github.com/cosmos/cosmos-sdk/pull/24062) [#24145](https://github.com/cosmos/cosmos-sdk/pull/24145) Add new simsx framework on top of simulations for better module dev experience.
-- (baseapp) [#24069](https://github.com/cosmos/cosmos-sdk/pull/24069) Create CheckTxHandler to allow extending the logic of CheckTx.
-- (types) [#24093](https://github.com/cosmos/cosmos-sdk/pull/24093) Added a new method, `IsGT`, for `types.Coin`. This method is used to check if a `types.Coin` is greater than another `types.Coin`.
-- (client/keys) [#24071](https://github.com/cosmos/cosmos-sdk/pull/24071) Add support for importing hex key using standard input.
-- (types) [#23780](https://github.com/cosmos/cosmos-sdk/pull/23780) Add a ValueCodec for the math.Uint type that can be used in collections maps.
-- (perf)[#24045](https://github.com/cosmos/cosmos-sdk/pull/24045) Sims: Replace runsim command with Go stdlib testing. CLI: `Commit` default true, `Lean`, `SimulateEveryOperation`, `PrintAllInvariants`, `DBBackend` params removed
-- (crypto/keyring) [#24040](https://github.com/cosmos/cosmos-sdk/pull/24040) Expose the db keyring used in the keystore.
-- (types) [#23919](https://github.com/cosmos/cosmos-sdk/pull/23919) Add MustValAddressFromBech32 function.
-- (all) [#23708](https://github.com/cosmos/cosmos-sdk/pull/23708) Add unordered transaction support.
+
+## FEATURES
+
+- [#24062](https://github.com/cosmos/cosmos-sdk/pull/24062) [#24145](https://github.com/cosmos/cosmos-sdk/pull/24145) (simsx) Add new simsx framework on top of simulations for better module dev experience.
+- [#24069](https://github.com/cosmos/cosmos-sdk/pull/24069) (baseapp) Create CheckTxHandler to allow extending the logic of CheckTx.
+- [#24093](https://github.com/cosmos/cosmos-sdk/pull/24093) (types) Added a new method, `IsGT`, for `types.Coin`. This method is used to check if a `types.Coin` is greater than another `types.Coin`.
+- [#24071](https://github.com/cosmos/cosmos-sdk/pull/24071) (client/keys) Add support for importing hex key using standard input.
+- [#23780](https://github.com/cosmos/cosmos-sdk/pull/23780) (types) Add a ValueCodec for the math.Uint type that can be used in collections maps.
+- [#24045](https://github.com/cosmos/cosmos-sdk/pull/24045) (perf) Sims: Replace runsim command with Go stdlib testing. CLI: `Commit` default true, `Lean`, `SimulateEveryOperation`, `PrintAllInvariants`, `DBBackend` params removed
+- [#24040](https://github.com/cosmos/cosmos-sdk/pull/24040) (crypto/keyring) Expose the db keyring used in the keystore.
+- [#23919](https://github.com/cosmos/cosmos-sdk/pull/23919) (types) Add MustValAddressFromBech32 function.
+- [#23708](https://github.com/cosmos/cosmos-sdk/pull/23708) (all) Add unordered transaction support.
- Adds a `--timeout-timestamp` flag that allows users to specify a block time at which the unordered transactions should expire from the mempool.
-- (x/epochs) [#23815](https://github.com/cosmos/cosmos-sdk/pull/23815) Upstream `x/epochs` from Osmosis
-- (client) [#23811](https://github.com/cosmos/cosmos-sdk/pull/23811) Add auto cli for node service.
-- (genutil) [#24018](https://github.com/cosmos/cosmos-sdk/pull/24018) Allow manually setting the consensus key type in genesis
-- (client) [#18557](https://github.com/cosmos/cosmos-sdk/pull/18557) Add `--qrcode` flag to `keys show` command to support displaying keys address QR code.
-- (x/auth) [#24030](https://github.com/cosmos/cosmos-sdk/pull/24030) Allow usage of ed25519 keys for transaction signing.
-- (baseapp) [#24163](https://github.com/cosmos/cosmos-sdk/pull/24163) Add `StreamingManager` to baseapp to extend the abci listeners.
-- (x/protocolpool) [#23933](https://github.com/cosmos/cosmos-sdk/pull/23933) Add x/protocolpool module.
+- [#23815](https://github.com/cosmos/cosmos-sdk/pull/23815) (x/epochs) Upstream `x/epochs` from Osmosis
+- [#23811](https://github.com/cosmos/cosmos-sdk/pull/23811) (client) Add auto cli for node service.
+- [#24018](https://github.com/cosmos/cosmos-sdk/pull/24018) (genutil) Allow manually setting the consensus key type in genesis
+- [#18557](https://github.com/cosmos/cosmos-sdk/pull/18557) (client) Add `--qrcode` flag to `keys show` command to support displaying key address QR code.
+- [#24030](https://github.com/cosmos/cosmos-sdk/pull/24030) (x/auth) Allow usage of ed25519 keys for transaction signing.
+- [#24163](https://github.com/cosmos/cosmos-sdk/pull/24163) (baseapp) Add `StreamingManager` to baseapp to extend the abci listeners.
+- [#23933](https://github.com/cosmos/cosmos-sdk/pull/23933) (x/protocolpool) Add x/protocolpool module.
- x/distribution can now utilize an externally managed community pool. NOTE: this will make the message handlers for FundCommunityPool and CommunityPoolSpend error, as well as the query handler for CommunityPool.
-- (client) [#18101](https://github.com/cosmos/cosmos-sdk/pull/18101) Add a `keyring-default-keyname` in `client.toml` for specifying a default key name, and skip the need to use the `--from` flag when signing transactions.
-- (x/gov) [#24355](https://github.com/cosmos/cosmos-sdk/pull/24355) Allow users to set a custom CalculateVoteResultsAndVotingPower function to be used in govkeeper.Tally.
-- (x/mint) [#24436](https://github.com/cosmos/cosmos-sdk/pull/24436) Allow users to set a custom minting function used in the `x/mint` begin blocker.
-- The `InflationCalculationFn` argument to `mint.NewAppModule()` is now ignored and must be nil. To set a custom `InflationCalculationFn` on the default minter, use `mintkeeper.WithMintFn(mintkeeper.DefaultMintFn(customInflationFn))`.
-- (api) [#24428](https://github.com/cosmos/cosmos-sdk/pull/24428) Add block height to response headers
-
-## Improvements
-
-- (x/feegrant) [24461](https://github.com/cosmos/cosmos-sdk/pull/24461) Use collections for `FeeAllowance`, `FeeAllowanceQueue`.
-- (client) [#24561](https://github.com/cosmos/cosmos-sdk/pull/24561) TimeoutTimestamp flag has been changed to TimeoutDuration, which now sets the timeout timestamp of unordered transactions to the current time + duration passed.
-- (telemetry) [#24541](https://github.com/cosmos/cosmos-sdk/pull/24541) Telemetry now includes a pre_blocker metric key. x/upgrade should migrate to this key in v0.54.0.
-- (x/auth) [#24541](https://github.com/cosmos/cosmos-sdk/pull/24541) x/auth's PreBlocker now emits telemetry under the pre_blocker metric key.
-- (x/bank) [#24431](https://github.com/cosmos/cosmos-sdk/pull/24431) Reduce the number of `ValidateDenom` calls in `bank.SendCoins` and `Coin`.
-- The `AmountOf()` method on`sdk.Coins` no longer will `panic` if given an invalid denom and will instead return a zero value.
-- (x/staking) [#24391](https://github.com/cosmos/cosmos-sdk/pull/24391) Replace panics with error results; more verbose error messages
-- (x/staking) [#24354](https://github.com/cosmos/cosmos-sdk/pull/24354) Optimize validator endblock by reducing bech32 conversions, resulting in significant performance improvement
-- (client/keys) [#18950](https://github.com/cosmos/cosmos-sdk/pull/18950) Improve ` keys add`, ` keys import` and ` keys rename` by checking name validation.
-- (client/keys) [#18703](https://github.com/cosmos/cosmos-sdk/pull/18703) Improve ` keys add` and ` keys show` by checking whether there are duplicate keys in the multisig case.
-- (client/keys) [#18745](https://github.com/cosmos/cosmos-sdk/pull/18745) Improve ` keys export` and ` keys mnemonic` by adding --yes option to skip interactive confirmation.
-- (x/bank) [#24106](https://github.com/cosmos/cosmos-sdk/pull/24106) `SendCoins` now checks for `SendRestrictions` before instead of after deducting coins using `subUnlockedCoins`.
-- (crypto/ledger) [#24036](https://github.com/cosmos/cosmos-sdk/pull/24036) Improve error message when deriving paths using index > 100
-- (gRPC) [#23844](https://github.com/cosmos/cosmos-sdk/pull/23844) Add debug log prints for each gRPC request.
-- (gRPC) [#24073](https://github.com/cosmos/cosmos-sdk/pull/24073) Adds error handling for out-of-gas panics in grpc query handlers.
-- (server) [#24072](https://github.com/cosmos/cosmos-sdk/pull/24072) Return BlockHeader by shallow copy in server Context.
-- (x/bank) [#24053](https://github.com/cosmos/cosmos-sdk/pull/24053) Resolve a foot-gun by swapping send restrictions check in `InputOutputCoins` before coin deduction.
-- (codec/types) [#24336](https://github.com/cosmos/cosmos-sdk/pull/24336) Most types definitions were moved to `github.com/cosmos/gogoproto/types/any` with aliases to these left in `codec/types` so that there should be no breakage to existing code. This allows protobuf generated code to optionally reference the SDK's custom `Any` type without a direct dependency on the SDK. This can be done by changing the `protoc` `M` parameter for `any.proto` to `Mgoogle/protobuf/any.proto=github.com/cosmos/gogoproto/types/any`.
-
-## Bug Fixes
-
-- (server)[#24583](https://github.com/cosmos/cosmos-sdk/pull/24583) Fix height calculation in pruning manager and better restart handling.
-- (x/gov)[#24460](https://github.com/cosmos/cosmos-sdk/pull/24460) Do not call Remove during Walk in defaultCalculateVoteResultsAndVotingPower.
+- [#18101](https://github.com/cosmos/cosmos-sdk/pull/18101) (client) Add a `keyring-default-keyname` in `client.toml` for specifying a default key name, and skip the need to use the `--from` flag when signing transactions.
+- [#24355](https://github.com/cosmos/cosmos-sdk/pull/24355) (x/gov) Allow users to set a custom CalculateVoteResultsAndVotingPower function to be used in govkeeper.Tally.
+- [#24436](https://github.com/cosmos/cosmos-sdk/pull/24436) (x/mint) Allow users to set a custom minting function used in the `x/mint` begin blocker.
+- The `InflationCalculationFn` argument to `mint.NewAppModule()` is now ignored and must be nil. To set a custom `InflationCalculationFn` for the default minter, use `mintkeeper.WithMintFn(mintkeeper.DefaultMintFn(customInflationFn))`.
+- [#24428](https://github.com/cosmos/cosmos-sdk/pull/24428) (api) Add block height to response headers
+
+## IMPROVEMENTS
+
+- [#24561](https://github.com/cosmos/cosmos-sdk/pull/24561) (client) TimeoutTimestamp flag has been changed to TimeoutDuration, which now sets the timeout timestamp of unordered transactions to the current time + duration passed.
+- [#24541](https://github.com/cosmos/cosmos-sdk/pull/24541) (telemetry) Telemetry now includes a pre_blocker metric key. x/upgrade should migrate to this key in v0.54.0.
+- [#24541](https://github.com/cosmos/cosmos-sdk/pull/24541) (x/auth) x/auth's PreBlocker now emits telemetry under the pre_blocker metric key.
+- [#24431](https://github.com/cosmos/cosmos-sdk/pull/24431) (x/bank) Reduce the number of `ValidateDenom` calls in `bank.SendCoins` and `Coin`.
+- The `AmountOf()` method on `sdk.Coins` will no longer `panic` if given an invalid denom and will instead return a zero value.
+- [#24391](https://github.com/cosmos/cosmos-sdk/pull/24391) (x/staking) Replace panics with error results; more verbose error messages
+- [#24354](https://github.com/cosmos/cosmos-sdk/pull/24354) (x/staking) Optimize validator endblock by reducing bech32 conversions, resulting in significant performance improvement
+- [#18950](https://github.com/cosmos/cosmos-sdk/pull/18950) (client/keys) Improve ` keys add`, ` keys import` and ` keys rename` by checking name validation.
+- [#18703](https://github.com/cosmos/cosmos-sdk/pull/18703) (client/keys) Improve ` keys add` and ` keys show` by checking whether there are duplicate keys in the multisig case.
+- [#18745](https://github.com/cosmos/cosmos-sdk/pull/18745) (client/keys) Improve ` keys export` and ` keys mnemonic` by adding --yes option to skip interactive confirmation.
+- [#24106](https://github.com/cosmos/cosmos-sdk/pull/24106) (x/bank) `SendCoins` now checks for `SendRestrictions` before instead of after deducting coins using `subUnlockedCoins`.
+- [#24036](https://github.com/cosmos/cosmos-sdk/pull/24036) (crypto/ledger) Improve error message when deriving paths using index > 100
+- [#23844](https://github.com/cosmos/cosmos-sdk/pull/23844) (gRPC) Add debug log prints for each gRPC request.
+- [#24073](https://github.com/cosmos/cosmos-sdk/pull/24073) (gRPC) Adds error handling for out-of-gas panics in grpc query handlers.
+- [#24072](https://github.com/cosmos/cosmos-sdk/pull/24072) (server) Return BlockHeader by shallow copy in server Context.
+- [#24053](https://github.com/cosmos/cosmos-sdk/pull/24053) (x/bank) Resolve a foot-gun by swapping send restrictions check in `InputOutputCoins` before coin deduction.
+- [#24336](https://github.com/cosmos/cosmos-sdk/pull/24336) (codec/types) Most types definitions were moved to `github.com/cosmos/gogoproto/types/any` with aliases to these left in `codec/types` so that there should be no breakage to existing code. This allows protobuf generated code to optionally reference the SDK's custom `Any` type without a direct dependency on the SDK. This can be done by changing the `protoc` `M` parameter for `any.proto` to `Mgoogle/protobuf/any.proto=github.com/cosmos/gogoproto/types/any`.
+
+## BUG FIXES
+
+- [#24460](https://github.com/cosmos/cosmos-sdk/pull/24460) (x/gov) Do not call Remove during Walk in defaultCalculateVoteResultsAndVotingPower.
- (baseapp) [24261](https://github.com/cosmos/cosmos-sdk/pull/24261) Fix post handler error always results in code 1
-- (server) [#24068](https://github.com/cosmos/cosmos-sdk/pull/24068) Allow align block header with skip check header in grpc server.
-- (x/gov) [#24044](https://github.com/cosmos/cosmos-sdk/pull/24044) Fix some places in which we call Remove inside a Walk (x/gov).
-- (baseapp) [#24042](https://github.com/cosmos/cosmos-sdk/pull/24042) Fixed a data race inside BaseApp.getContext, found by end-to-end (e2e) tests.
-- (client/server) [#24059](https://github.com/cosmos/cosmos-sdk/pull/24059) Consistently set viper prefix in client and server. It defaults for the binary name for both client and server.
-- (client/keys) [#24041](https://github.com/cosmos/cosmos-sdk/pull/24041) `keys delete` won't terminate when a key is not found, but will log the error.
-- (baseapp) [#24027](https://github.com/cosmos/cosmos-sdk/pull/24027) Ensure that `BaseApp.Init` checks that the commit multistore is set to protect against nil dereferences.
-- (x/group) [GHSA-47ww-ff84-4jrg](https://github.com/cosmos/cosmos-sdk/security/advisories/GHSA-47ww-ff84-4jrg) Fix x/group can halt when erroring in EndBlocker
-- (x/distribution) [#23934](https://github.com/cosmos/cosmos-sdk/pull/23934) Fix vulnerability in `incrementReferenceCount` in distribution.
-- (baseapp) [#23879](https://github.com/cosmos/cosmos-sdk/pull/23879) Ensure finalize block response is not empty in the defer check of FinalizeBlock to avoid panic by nil pointer.
-- (query) [#23883](https://github.com/cosmos/cosmos-sdk/pull/23883) Fix NPE in query pagination.
-- (client) [#23860](https://github.com/cosmos/cosmos-sdk/pull/23860) Add missing `unordered` field for legacy amino signing of tx body.
-- (x/bank) [#23836](https://github.com/cosmos/cosmos-sdk/pull/23836) Fix `DenomMetadata` rpc allow value with slashes.
+- [#24068](https://github.com/cosmos/cosmos-sdk/pull/24068) (server) Allow aligning block header with skip check header in gRPC server.
+- [#24044](https://github.com/cosmos/cosmos-sdk/pull/24044) (x/gov) Fix some places in which we call Remove inside a Walk (x/gov).
+- [#24042](https://github.com/cosmos/cosmos-sdk/pull/24042) (baseapp) Fixed a data race inside BaseApp.getContext, found by end-to-end (e2e) tests.
+- [#24059](https://github.com/cosmos/cosmos-sdk/pull/24059) (client/server) Consistently set viper prefix in client and server. It defaults to the binary name for both client and server.
+- [#24041](https://github.com/cosmos/cosmos-sdk/pull/24041) (client/keys) `keys delete` won't terminate when a key is not found, but will log the error.
+- [#24027](https://github.com/cosmos/cosmos-sdk/pull/24027) (baseapp) Ensure that `BaseApp.Init` checks that the commit multistore is set to protect against nil dereferences.
+- [GHSA-47ww-ff84-4jrg](https://github.com/cosmos/cosmos-sdk/security/advisories/GHSA-47ww-ff84-4jrg) (x/group) Fix x/group halting when erroring in EndBlocker
+- [#23934](https://github.com/cosmos/cosmos-sdk/pull/23934) (x/distribution) Fix vulnerability in `incrementReferenceCount` in distribution.
+- [#23879](https://github.com/cosmos/cosmos-sdk/pull/23879) (baseapp) Ensure finalize block response is not empty in the defer check of FinalizeBlock to avoid panic by nil pointer.
+- [#23883](https://github.com/cosmos/cosmos-sdk/pull/23883) (query) Fix NPE in query pagination.
+- [#23860](https://github.com/cosmos/cosmos-sdk/pull/23860) (client) Add missing `unordered` field for legacy amino signing of tx body.
+- [#23836](https://github.com/cosmos/cosmos-sdk/pull/23836) (x/bank) Fix `DenomMetadata` RPC to allow values with slashes.
- (query) [87d3a43](https://github.com/cosmos/cosmos-sdk/commit/87d3a432af95f4cf96aa02351ed5fcc51cca6e7b) Fix collection filtered pagination.
-- (sims) [#23952](https://github.com/cosmos/cosmos-sdk/pull/23952) Use liveness matrix for validator sign status in sims
-- (baseapp) [#24055](https://github.com/cosmos/cosmos-sdk/pull/24055) Align block header when query with latest height.
-- (baseapp) [#24074](https://github.com/cosmos/cosmos-sdk/pull/24074) Use CometBFT's ComputeProtoSizeForTxs in defaultTxSelector.SelectTxForProposal for consistency.
-- (cli) [#24090](https://github.com/cosmos/cosmos-sdk/pull/24090) Prune cmd should disable async pruning.
-- (x/auth) [#19239](https://github.com/cosmos/cosmos-sdk/pull/19239) Sets from flag in multi-sign command to avoid no key name provided error.
-- (x/auth) [#23741](https://github.com/cosmos/cosmos-sdk/pull/23741) Support legacy global AccountNumber for legacy compatibility.
-- (baseapp) [#24526](https://github.com/cosmos/cosmos-sdk/pull/24526) Fix incorrect retention height when `commitHeight` equals `minRetainBlocks`.
-- (x/protocolpool) [#24594](https://github.com/cosmos/cosmos-sdk/pull/24594) Fix NPE when initializing module via depinject.
-- (x/epochs) [#24610](https://github.com/cosmos/cosmos-sdk/pull/24610) Fix semantics of `CurrentEpochStartHeight` being set before epoch has started.
-
-
-
-## Bug Fixes
-
-- [GHSA-47ww-ff84-4jrg](https://github.com/cosmos/cosmos-sdk/security/advisories/GHSA-47ww-ff84-4jrg) Fix x/group can halt when erroring in EndBlocker
-
-
-
-## Bug Fixes
-
-- [GHSA-x5vx-95h7-rv4p](https://github.com/cosmos/cosmos-sdk/security/advisories/GHSA-x5vx-95h7-rv4p) Fix Group module can halt chain when handling a malicious proposal.
-
-
-
-## Features
-
-- (crypto/keyring) [#21653](https://github.com/cosmos/cosmos-sdk/pull/21653) New Linux-only backend that adds Linux kernel's `keyctl` support.
-
-## Improvements
-
-- (server) [#21941](https://github.com/cosmos/cosmos-sdk/pull/21941) Regenerate addrbook.json for in place testnet.
-
-## Bug Fixes
-
-- Fix [ABS-0043/ABS-0044](https://github.com/cosmos/cosmos-sdk/security/advisories/GHSA-8wcc-m6j2-qxvm) Limit recursion depth for unknown field detection and unpack any
-- (server) [#22564](https://github.com/cosmos/cosmos-sdk/pull/22564) Fix fallback genesis path in server
-- (x/group) [#22425](https://github.com/cosmos/cosmos-sdk/pull/22425) Proper address rendering in error
-- (sims) [#21906](https://github.com/cosmos/cosmos-sdk/pull/21906) Skip sims test when running dry on validators
-- (cli) [#21919](https://github.com/cosmos/cosmos-sdk/pull/21919) Query address-by-acc-num by account_id instead of id.
-- (x/group) [#22229](https://github.com/cosmos/cosmos-sdk/pull/22229) Accept `1` and `try` in CLI for group proposal exec.
-
-
-
-## Features
-
-- (cli) [#20779](https://github.com/cosmos/cosmos-sdk/pull/20779) Added `module-hash-by-height` command to query and retrieve module hashes at a specified blockchain height, enhancing debugging capabilities.
-- (cli) [#21372](https://github.com/cosmos/cosmos-sdk/pull/21372) Added a `bulk-add-genesis-account` genesis command to add many genesis accounts at once.
-- (types/collections) [#21724](https://github.com/cosmos/cosmos-sdk/pull/21724) Added `LegacyDec` collection value.
-
-## Improvements
-
-- (x/bank) [#21460](https://github.com/cosmos/cosmos-sdk/pull/21460) Added `Sender` attribute in `MsgMultiSend` event.
-- (genutil) [#21701](https://github.com/cosmos/cosmos-sdk/pull/21701) Improved error messages for genesis validation.
-- (testutil/integration) [#21816](https://github.com/cosmos/cosmos-sdk/pull/21816) Allow to pass baseapp options in `NewIntegrationApp`.
-
-## Bug Fixes
-
-- (runtime) [#21769](https://github.com/cosmos/cosmos-sdk/pull/21769) Fix baseapp options ordering to avoid overwriting options set by modules.
-- (x/consensus) [#21493](https://github.com/cosmos/cosmos-sdk/pull/21493) Fix regression that prevented to upgrade to > v0.50.7 without consensus version params.
-- (baseapp) [#21256](https://github.com/cosmos/cosmos-sdk/pull/21256) Halt height will not commit the block indicated, meaning that if halt-height is set to 10, only blocks until 9 (included) will be committed. This is to go back to the original behavior before a change was introduced in v0.50.0.
-- (baseapp) [#21444](https://github.com/cosmos/cosmos-sdk/pull/21444) Follow-up, Return PreBlocker events in FinalizeBlockResponse.
-- (baseapp) [#21413](https://github.com/cosmos/cosmos-sdk/pull/21413) Fix data race in sdk mempool.
-
-
-
-## Changes
-
-- (baseapp) [#21159](https://github.com/cosmos/cosmos-sdk/pull/21159) Return PreBlocker events in FinalizeBlockResponse.
-- [#20939](https://github.com/cosmos/cosmos-sdk/pull/20939) Fix collection reverse iterator to include `pagination.key` in the result.
-- (client/grpc) [#20969](https://github.com/cosmos/cosmos-sdk/pull/20969) Fix `node.NewQueryServer` method not setting `cfg`.
-- (testutil/integration) [#21006](https://github.com/cosmos/cosmos-sdk/pull/21006) Fix `NewIntegrationApp` method not writing default genesis to state.
-- (runtime) [#21080](https://github.com/cosmos/cosmos-sdk/pull/21080) Fix `app.yaml` / `app.json` incompatibility with `depinject v1.0.0`.
-
-
-
-## Changes
-
-- (client) [#20690](https://github.com/cosmos/cosmos-sdk/pull/20690) Import mnemonic from file
-- (x/authz,x/feegrant) [#20590](https://github.com/cosmos/cosmos-sdk/pull/20590) Provide updated keeper in depinject for authz and feegrant modules.
-- [#20631](https://github.com/cosmos/cosmos-sdk/pull/20631) Fix json parsing in the wait-tx command.
-- (x/auth) [#20438](https://github.com/cosmos/cosmos-sdk/pull/20438) Add `--skip-signature-verification` flag to multisign command to allow nested multisigs.
-
-
-
-## Improvements
-
-- (debug) [#20328](https://github.com/cosmos/cosmos-sdk/pull/20328) Add consensus address for debug cmd.
-- (runtime) [#20264](https://github.com/cosmos/cosmos-sdk/pull/20264) Expose grpc query router via depinject.
-- (x/consensus) [#20381](https://github.com/cosmos/cosmos-sdk/pull/20381) Use Comet utility for consensus module consensus param updates.
-- (client) [#20356](https://github.com/cosmos/cosmos-sdk/pull/20356) Overwrite client context when available in `SetCmdClientContext`.
-
-## Bug Fixes
-
-- (simulation) [#17911](https://github.com/cosmos/cosmos-sdk/pull/17911) Fix all problems with executing command `make test-sim-custom-genesis-fast` for simulation test.
-- (simulation) [#18196](https://github.com/cosmos/cosmos-sdk/pull/18196) Fix the problem of `validator set is empty after InitGenesis` in simulation test.
-- (baseapp) [#20346](https://github.com/cosmos/cosmos-sdk/pull/20346) Correctly assign `execModeSimulate` to context for `simulateTx`.
-- (baseapp) [#20144](https://github.com/cosmos/cosmos-sdk/pull/20144) Remove txs from mempool when AnteHandler fails in recheck.
-- (baseapp) [#20107](https://github.com/cosmos/cosmos-sdk/pull/20107) Avoid header height overwrite block height.
-- (cli) [#20020](https://github.com/cosmos/cosmos-sdk/pull/20020) Make bootstrap-state command support both new and legacy genesis format.
-- (testutil/sims) [#20151](https://github.com/cosmos/cosmos-sdk/pull/20151) Set all signatures and don't overwrite the previous one in `GenSignedMockTx`.
-
-
-
-## Features
-
-- (types) [#19759](https://github.com/cosmos/cosmos-sdk/pull/19759) Align SignerExtractionAdapter in PriorityNonceMempool Remove.
-- (client) [#19870](https://github.com/cosmos/cosmos-sdk/pull/19870) Add new query command `wait-tx`. Alias `event-query-tx-for` to `wait-tx` for backward compatibility.
-
-## Improvements
-
-- (telemetry) [#19903](https://github.com/cosmos/cosmos-sdk/pull/19903) Conditionally emit metrics based on enablement.
-- **Introduction of `Now` Function**: Added a new function called `Now` to the telemetry package. It returns the current system time if telemetry is enabled, or a zero time if telemetry is not enabled.
-- **Atomic Global Variable**: Implemented an atomic global variable to manage the state of telemetry's enablement. This ensures thread safety for the telemetry state.
-- **Conditional Telemetry Emission**: All telemetry functions have been updated to emit metrics only when telemetry is enabled. They perform a check with `isTelemetryEnabled()` and return early if telemetry is disabled, minimizing unnecessary operations and overhead.
-- (deps) [#19810](https://github.com/cosmos/cosmos-sdk/pull/19810) Upgrade prometheus version and fix API breaking change due to prometheus bump.
-- (deps) [#19810](https://github.com/cosmos/cosmos-sdk/pull/19810) Bump `cosmossdk.io/store` to v1.1.0.
-- (server) [#19884](https://github.com/cosmos/cosmos-sdk/pull/19884) Add start customizability to start command options.
-- (x/gov) [#19853](https://github.com/cosmos/cosmos-sdk/pull/19853) Emit `depositor` in `EventTypeProposalDeposit`.
-- (x/gov) [#19844](https://github.com/cosmos/cosmos-sdk/pull/19844) Emit the proposer of governance proposals.
-- (baseapp) [#19616](https://github.com/cosmos/cosmos-sdk/pull/19616) Don't share gas meter in tx execution.
-- (x/authz) [#20114](https://github.com/cosmos/cosmos-sdk/pull/20114) Follow up of [GHSA-4j93-fm92-rp4m](https://github.com/cosmos/cosmos-sdk/security/advisories/GHSA-4j93-fm92-rp4m) for `x/authz`.
-- (crypto) [#19691](https://github.com/cosmos/cosmos-sdk/pull/19745) Fix tx sign doesn't throw an error when incorrect Ledger is used.
-- (baseapp) [#19970](https://github.com/cosmos/cosmos-sdk/pull/19970) Fix default config values to use no-op mempool as default.
-- (crypto) [#20027](https://github.com/cosmos/cosmos-sdk/pull/20027) secp256r1 keys now implement gogoproto's customtype interface.
-- (x/bank) [#20028](https://github.com/cosmos/cosmos-sdk/pull/20028) Align query with multi denoms for send-enabled.
-
-
-
-## Features
-
-- (baseapp) [#19626](https://github.com/cosmos/cosmos-sdk/pull/19626) Add `DisableBlockGasMeter` option to `BaseApp`, which removes the block gas meter during transaction execution.
-
-## Improvements
-
-- (x/distribution) [#19707](https://github.com/cosmos/cosmos-sdk/pull/19707) Add autocli config for `DelegationTotalRewards` for CLI consistency with `q rewards` commands in previous versions.
-- (x/auth) [#19651](https://github.com/cosmos/cosmos-sdk/pull/19651) Allow empty public keys in `GetSignBytesAdapter`.
-
-## Bug Fixes
-
-- (x/gov) [#19725](https://github.com/cosmos/cosmos-sdk/pull/19725) Fetch a failed proposal tally from proposal.FinalTallyResult in the gprc query.
-- (types) [#19709](https://github.com/cosmos/cosmos-sdk/pull/19709) Fix skip staking genesis export when using `CoreAppModuleAdaptor` / `CoreAppModuleBasicAdaptor` for it.
-- (x/auth) [#19549](https://github.com/cosmos/cosmos-sdk/pull/19549) Accept custom get signers when injecting `x/auth/tx`.
-- (x/staking) Fix a possible bypass of delegator slashing: [GHSA-86h5-xcpx-cfqc](https://github.com/cosmos/cosmos-sdk/security/advisories/GHSA-86h5-xcpx-cfqc)
-- (baseapp) Fix a bug in `baseapp.ValidateVoteExtensions` helper ([GHSA-95rx-m9m5-m94v](https://github.com/cosmos/cosmos-sdk/security/advisories/GHSA-95rx-m9m5-m94v)). The helper has been fixed and for avoiding API breaking changes `currentHeight` and `chainID` arguments are ignored. Those arguments are removed from the helper in v0.51+.
-
-
-
-## Features
-
-- (server) [#19280](https://github.com/cosmos/cosmos-sdk/pull/19280) Adds in-place testnet CLI command.
-
-## Improvements
-
-- (client) [#19393](https://github.com/cosmos/cosmos-sdk/pull/19393/) Add `ReadDefaultValuesFromDefaultClientConfig` to populate the default values from the default client config in client.Context without creating a app folder.
-
-## Bug Fixes
-
-- (x/auth/vesting) [GHSA-4j93-fm92-rp4m](#bug-fixes) Add `BlockedAddr` check in `CreatePeriodicVestingAccount`.
-- (baseapp) [#19338](https://github.com/cosmos/cosmos-sdk/pull/19338) Set HeaderInfo in context when calling `setState`.
-- (baseapp): [#19200](https://github.com/cosmos/cosmos-sdk/pull/19200) Ensure that sdk side ve math matches cometbft.
-- [#19106](https://github.com/cosmos/cosmos-sdk/pull/19106) Allow empty public keys when setting signatures. Public keys aren't needed for every transaction.
-- (baseapp) [#19198](https://github.com/cosmos/cosmos-sdk/pull/19198) Remove usage of pointers in logs in all optimistic execution goroutines.
-- (baseapp) [#19177](https://github.com/cosmos/cosmos-sdk/pull/19177) Fix baseapp `DefaultProposalHandler` same-sender non-sequential sequence.
-- (crypto) [#19371](https://github.com/cosmos/cosmos-sdk/pull/19371) Avoid CLI redundant log in stdout, log to stderr instead.
+- [#23952](https://github.com/cosmos/cosmos-sdk/pull/23952) (sims) Use liveness matrix for validator sign status in sims.
+- [#24055](https://github.com/cosmos/cosmos-sdk/pull/24055) (baseapp) Align block header when querying with latest height.
+- [#24074](https://github.com/cosmos/cosmos-sdk/pull/24074) (baseapp) Use CometBFT's ComputeProtoSizeForTxs in defaultTxSelector.SelectTxForProposal for consistency.
+- [#24090](https://github.com/cosmos/cosmos-sdk/pull/24090) (cli) Prune cmd should disable async pruning.
+- [#19239](https://github.com/cosmos/cosmos-sdk/pull/19239) (x/auth) Set the `--from` flag in the multi-sign command to avoid a "no key name provided" error.
+- [#23741](https://github.com/cosmos/cosmos-sdk/pull/23741) (x/auth) Support legacy global AccountNumber for legacy compatibility.
+- [#24526](https://github.com/cosmos/cosmos-sdk/pull/24526) (baseapp) Fix incorrect retention height when `commitHeight` equals `minRetainBlocks`.
+- [#24594](https://github.com/cosmos/cosmos-sdk/pull/24594) (x/protocolpool) Fix NPE when initializing module via depinject.
+- [#24610](https://github.com/cosmos/cosmos-sdk/pull/24610) (x/epochs) Fix semantics of `CurrentEpochStartHeight` being set before epoch has started.
-
-
-## Features
-
-- (types) [#18991](https://github.com/cosmos/cosmos-sdk/pull/18991) Add SignerExtractionAdapter to PriorityNonceMempool/Config and provide Default implementation matching existing behavior.
-- (gRPC) [#19043](https://github.com/cosmos/cosmos-sdk/pull/19043) Add `halt_height` to the gRPC `/cosmos/base/node/v1beta1/config` request.
-
-## Improvements
-
-- (x/bank) [#18956](https://github.com/cosmos/cosmos-sdk/pull/18956) Introduced a new `DenomOwnersByQuery` query method for `DenomOwners`, which accepts the denom value as a query string parameter, resolving issues with denoms containing slashes.
-- (x/gov) [#18707](https://github.com/cosmos/cosmos-sdk/pull/18707) Improve genesis validation.
-- (x/auth/tx) [#18772](https://github.com/cosmos/cosmos-sdk/pull/18772) Remove misleading gas wanted from tx simulation failure log.
-- (client/tx) [#18852](https://github.com/cosmos/cosmos-sdk/pull/18852) Add `WithFromName` to tx factory.
-- (types) [#18888](https://github.com/cosmos/cosmos-sdk/pull/18888) Speedup DecCoin.Sort() if len(coins) <= 1
-- (types) [#18875](https://github.com/cosmos/cosmos-sdk/pull/18875) Speedup coins.Sort() if len(coins) <= 1
-- (baseapp) [#18915](https://github.com/cosmos/cosmos-sdk/pull/18915) Add a new `ExecModeVerifyVoteExtension` exec mode and ensure it's populated in the `Context` during `VerifyVoteExtension` execution.
-- (testutil) [#18930](https://github.com/cosmos/cosmos-sdk/pull/18930) Add NodeURI for clientCtx.
-
-## Bug Fixes
-
-- (baseapp) [#19058](https://github.com/cosmos/cosmos-sdk/pull/19058) Fix baseapp posthandler branch would fail if the `runMsgs` had returned an error.
-- (baseapp) [#18609](https://github.com/cosmos/cosmos-sdk/issues/18609) Fixed accounting in the block gas meter after module's beginBlock and before DeliverTx, ensuring transaction processing always starts with the expected zeroed out block gas meter.
-- (baseapp) [#18895](https://github.com/cosmos/cosmos-sdk/pull/18895) Fix de-duplicating vote extensions during validation in ValidateVoteExtensions.
-
-
-
-## Features
-
-- (debug) [#18219](https://github.com/cosmos/cosmos-sdk/pull/18219) Add debug commands for application codec types.
-- (client/keys) [#17639](https://github.com/cosmos/cosmos-sdk/pull/17639) Allows using and saving public keys encoded as base64.
-- (server) [#17094](https://github.com/cosmos/cosmos-sdk/pull/17094) Add a `shutdown-grace` flag for waiting a given time before exit.
-
-## Improvements
-
-- (telemetry) [#18646] (https://github.com/cosmos/cosmos-sdk/pull/18646) Enable statsd and dogstatsd telemetry sinks.
-- (server) [#18478](https://github.com/cosmos/cosmos-sdk/pull/18478) Add command flag to disable colored logs.
-- (x/gov) [#18025](https://github.com/cosmos/cosmos-sdk/pull/18025) Improve ` q gov proposer` by querying directly a proposal instead of tx events. It is an alias of `q gov proposal` as the proposer is a field of the proposal.
-- (version) [#18063](https://github.com/cosmos/cosmos-sdk/pull/18063) Allow to define extra info to be displayed in ` version --long` command.
-- (codec/unknownproto)[#18541](https://github.com/cosmos/cosmos-sdk/pull/18541) Remove the use of "protoc-gen-gogo/descriptor" in favour of using the official protobuf descriptorpb types inside unknownproto.
-
-## Bug Fixes
-
-- (x/auth) [#18564](https://github.com/cosmos/cosmos-sdk/pull/18564) Fix total fees calculation when batch signing.
-- (server) [#18537](https://github.com/cosmos/cosmos-sdk/pull/18537) Fix panic when defining minimum gas config as `100stake;100uatom`. Use a `,` delimiter instead of `;`. Fixes the server config getter to use the correct delimiter.
-- [#18531](https://github.com/cosmos/cosmos-sdk/pull/18531) Baseapp's `GetConsensusParams` returns an empty struct instead of panicking if no params are found.
-- (client/tx) [#18472](https://github.com/cosmos/cosmos-sdk/pull/18472) Utilizes the correct Pubkey when simulating a transaction.
-- (baseapp) [#18486](https://github.com/cosmos/cosmos-sdk/pull/18486) Fixed FinalizeBlock calls not being passed to ABCIListeners.
-- (baseapp) [#18627](https://github.com/cosmos/cosmos-sdk/pull/18627) Post handlers are run on non successful transaction executions too.
-- (baseapp) [#18654](https://github.com/cosmos/cosmos-sdk/pull/18654) Fixes an issue in which `gogoproto.Merge` does not work with gogoproto messages with custom types.
-
-
-
-## Features
-
-- (baseapp) [#18071](https://github.com/cosmos/cosmos-sdk/pull/18071) Add hybrid handlers to `MsgServiceRouter`.
-- (server) [#18162](https://github.com/cosmos/cosmos-sdk/pull/18162) Start gRPC & API server in standalone mode.
-- (baseapp & types) [#17712](https://github.com/cosmos/cosmos-sdk/pull/17712) Introduce `PreBlock`, which runs before begin blocker other modules, and allows to modify consensus parameters, and the changes are visible to the following state machine logics. Additionally it can be used for vote extensions.
-- (genutil) [#17571](https://github.com/cosmos/cosmos-sdk/pull/17571) Allow creation of `AppGenesis` without a file lookup.
-- (codec) [#17042](https://github.com/cosmos/cosmos-sdk/pull/17042) Add `CollValueV2` which supports encoding of protov2 messages in collections.
-- (x/gov) [#16976](https://github.com/cosmos/cosmos-sdk/pull/16976) Add `failed_reason` field to `Proposal` under `x/gov` to indicate the reason for a failed proposal. Referenced from [#238](https://github.com/bnb-chain/greenfield-cosmos-sdk/pull/238) under `bnb-chain/greenfield-cosmos-sdk`.
-- (baseapp) [#16898](https://github.com/cosmos/cosmos-sdk/pull/16898) Add `preFinalizeBlockHook` to allow vote extensions persistence.
-- (cli) [#16887](https://github.com/cosmos/cosmos-sdk/pull/16887) Add two new CLI commands: ` tx simulate` for simulating a transaction; ` query block-results` for querying CometBFT RPC for block results.
-- (x/bank) [#16852](https://github.com/cosmos/cosmos-sdk/pull/16852) Add `DenomMetadataByQueryString` query in bank module to support metadata query by query string.
-- (baseapp) [#16581](https://github.com/cosmos/cosmos-sdk/pull/16581) Implement Optimistic Execution as an experimental feature (not enabled by default).
-- (types) [#16257](https://github.com/cosmos/cosmos-sdk/pull/16257) Allow setting the base denom in the denom registry.
-- (baseapp) [#16239](https://github.com/cosmos/cosmos-sdk/pull/16239) Add Gas Limits to allow node operators to resource bound queries.
-- (cli) [#16209](https://github.com/cosmos/cosmos-sdk/pull/16209) Make `StartCmd` more customizable.
-- (types/simulation) [#16074](https://github.com/cosmos/cosmos-sdk/pull/16074) Add generic SimulationStoreDecoder for modules using collections.
-- (genutil) [#16046](https://github.com/cosmos/cosmos-sdk/pull/16046) Add "module-name" flag to genutil `add-genesis-account` to enable initializing module accounts at genesis.* [#15970](https://github.com/cosmos/cosmos-sdk/pull/15970) Enable SIGN_MODE_TEXTUAL.
-- (types) [#15958](https://github.com/cosmos/cosmos-sdk/pull/15958) Add `module.NewBasicManagerFromManager` for creating a basic module manager from a module manager.
-- (types/module) [#15829](https://github.com/cosmos/cosmos-sdk/pull/15829) Add new endblocker interface to handle valset updates.
-- (runtime) [#15818](https://github.com/cosmos/cosmos-sdk/pull/15818) Provide logger through `depinject` instead of appBuilder.
-- (types) [#15735](https://github.com/cosmos/cosmos-sdk/pull/15735) Make `ValidateBasic() error` method of `Msg` interface optional. Modules should validate messages directly in their message handlers ([RFC 001](/sdk/v0.50/learn/advanced/baseapp)).
-- (x/genutil) [#15679](https://github.com/cosmos/cosmos-sdk/pull/15679) Allow applications to specify a custom genesis migration function for the `genesis migrate` command.
-- (telemetry) [#15657](https://github.com/cosmos/cosmos-sdk/pull/15657) Emit more data (go version, sdk version, upgrade height) in prom metrics.
-- (client) [#15597](https://github.com/cosmos/cosmos-sdk/pull/15597) Add status endpoint for clients.
-- (testutil/integration) [#15556](https://github.com/cosmos/cosmos-sdk/pull/15556) Introduce `testutil/integration` package for module integration testing.
-- (runtime) [#15547](https://github.com/cosmos/cosmos-sdk/pull/15547) Allow runtime to pass event core api service to modules.
-- (client) [#15458](https://github.com/cosmos/cosmos-sdk/pull/15458) Add a `CmdContext` field to client.Context initialized to cobra command's context.
-- (x/genutil) [#15301](https://github.com/cosmos/cosmos-sdk/pull/15031) Add application genesis. The genesis is now entirely managed by the application and passed to CometBFT at note instantiation. Functions that were taking a `cmttypes.GenesisDoc{}` now takes a `genutiltypes.AppGenesis{}`.
-- (core) [#15133](https://github.com/cosmos/cosmos-sdk/pull/15133) Implement RegisterServices in the module manager.
-- (x/bank) [#14894](https://github.com/cosmos/cosmos-sdk/pull/14894) Return a human readable denomination for IBC vouchers when querying bank balances. Added a `ResolveDenom` parameter to `types.QueryAllBalancesRequest` and `--resolve-denom` flag to `GetBalancesCmd()`.
-- (core) [#14860](https://github.com/cosmos/cosmos-sdk/pull/14860) Add `Precommit` and `PrepareCheckState` AppModule callbacks.
-- (x/gov) [#14720](https://github.com/cosmos/cosmos-sdk/pull/14720) Upstream expedited proposals from Osmosis.
-- (cli) [#14659](https://github.com/cosmos/cosmos-sdk/pull/14659) Added ability to query blocks by events with queries directly passed to Tendermint, which will allow for full query operator support, e.g. `>`.
-- (x/auth) [#14650](https://github.com/cosmos/cosmos-sdk/pull/14650) Add Textual SignModeHandler. Enable `SIGN_MODE_TEXTUAL` by following the [UPGRADING.mdx](https://github.com/cosmos/cosmos-sdk/blob/main/UPGRADING.mdx) instructions.
-- (x/crisis) [#14588](https://github.com/cosmos/cosmos-sdk/pull/14588) Use CacheContext() in AssertInvariants().
-- (mempool) [#14484](https://github.com/cosmos/cosmos-sdk/pull/14484) Add priority nonce mempool option for transaction replacement.
-- (query) [#14468](https://github.com/cosmos/cosmos-sdk/pull/14468) Implement pagination for collections.
-- (x/gov) [#14373](https://github.com/cosmos/cosmos-sdk/pull/14373) Add new proto field `constitution` of type `string` to gov module genesis state, which allows chain builders to lay a strong foundation by specifying purpose.
-- (client) [#14342](https://github.com/cosmos/cosmos-sdk/pull/14342) Add ` config` command is now a sub-command, for setting, getting and migrating Cosmos SDK configuration files.
-- (x/distribution) [#14322](https://github.com/cosmos/cosmos-sdk/pull/14322) Introduce a new gRPC message handler, `DepositValidatorRewardsPool`, that allows explicit funding of a validator's reward pool.
-- (x/bank) [#14224](https://github.com/cosmos/cosmos-sdk/pull/14224) Allow injection of restrictions on transfers using `AppendSendRestriction` or `PrependSendRestriction`.
-
-## Improvements
-
-- (x/gov) [#18189](https://github.com/cosmos/cosmos-sdk/pull/18189) Limit the accepted deposit coins for a proposal to the minimum proposal deposit denoms.
-- (x/staking) [#18049](https://github.com/cosmos/cosmos-sdk/pull/18049) Return early if Slash encounters zero tokens to burn.
-- (x/staking) [#18035](https://github.com/cosmos/cosmos-sdk/pull/18035) Hoisted out of the redelegation loop, the non-changing validator and delegator addresses parsing.
-- (keyring) [#17913](https://github.com/cosmos/cosmos-sdk/pull/17913) Add `NewAutoCLIKeyring` for creating an AutoCLI keyring from a SDK keyring.
-- (x/consensus) [#18041](https://github.com/cosmos/cosmos-sdk/pull/18041) Let `ToProtoConsensusParams()` return an error.
-- (x/gov) [#17780](https://github.com/cosmos/cosmos-sdk/pull/17780) Recover panics and turn them into errors when executing x/gov proposals.
-- (baseapp) [#17667](https://github.com/cosmos/cosmos-sdk/pull/17667) Close databases opened by SDK in `baseApp.Close()`.
-- (types/module) [#17554](https://github.com/cosmos/cosmos-sdk/pull/17554) Introduce `HasABCIGenesis` which is implemented by a module only when a validatorset update needs to be returned.
-- (cli) [#17389](https://github.com/cosmos/cosmos-sdk/pull/17389) gRPC CometBFT commands have been added under ` q consensus comet`. CometBFT commands placement in the SDK has been simplified. See the exhaustive list below.
-- `client/rpc.StatusCommand()` is now at `server.StatusCommand()`
-- (testutil) [#17216](https://github.com/cosmos/cosmos-sdk/issues/17216) Add `DefaultContextWithKeys` to `testutil` package.
-- (cli) [#17187](https://github.com/cosmos/cosmos-sdk/pull/17187) Do not use `ctx.PrintObjectLegacy` in commands anymore.
-- ` q gov proposer [proposal-id]` now returns a proposal id as int instead of string.
-- (x/staking) [#17164](https://github.com/cosmos/cosmos-sdk/pull/17164) Add `BondedTokensAndPubKeyByConsAddr` to the keeper to enable vote extension verification.
-- (x/group, x/gov) [#17109](https://github.com/cosmos/cosmos-sdk/pull/17109) Let proposal summary be 40x longer than metadata limit.
-- (version) [#17096](https://github.com/cosmos/cosmos-sdk/pull/17096) Improve `getSDKVersion()` to handle module replacements.
-- (types) [#16890](https://github.com/cosmos/cosmos-sdk/pull/16890) Remove `GetTxCmd() *cobra.Command` and `GetQueryCmd() *cobra.Command` from `module.AppModuleBasic` interface.
-- (x/authz) [#16869](https://github.com/cosmos/cosmos-sdk/pull/16869) Improve error message when grant not found.
-- (all) [#16497](https://github.com/cosmos/cosmos-sdk/pull/16497) Removed all exported vestiges of `sdk.MustSortJSON` and `sdk.SortJSON`.
-- (server) [#16238](https://github.com/cosmos/cosmos-sdk/pull/16238) Don't setup p2p node keys if starting a node in GRPC only mode.
-- (cli) [#16206](https://github.com/cosmos/cosmos-sdk/pull/16206) Make ABCI handshake profileable.
-- (types) [#16076](https://github.com/cosmos/cosmos-sdk/pull/16076) Optimize `ChainAnteDecorators`/`ChainPostDecorators` to instantiate the functions once instead of on every invocation of the returned `AnteHandler`/`PostHandler`.
-- (server) [#16071](https://github.com/cosmos/cosmos-sdk/pull/16071) When `mempool.max-txs` is set to a negative value, use a no-op mempool (effectively disable the app mempool).
-- (types/query) [#16041](https://github.com/cosmos/cosmos-sdk/pull/16041) Change pagination max limit to a variable in order to be modified by application devs.
-- (simapp) [#15958](https://github.com/cosmos/cosmos-sdk/pull/15958) Refactor SimApp for removing the global basic manager.
-- (all modules) [#15901](https://github.com/cosmos/cosmos-sdk/issues/15901) All core Cosmos SDK modules query commands have migrated to [AutoCLI](/sdk/v0.53/learn/advanced/autocli), ensuring parity between gRPC and CLI queries.
-- (x/auth) [#15867](https://github.com/cosmos/cosmos-sdk/pull/15867) Support better logging for signature verification failure.
-- (store/cachekv) [#15767](https://github.com/cosmos/cosmos-sdk/pull/15767) Reduce peak RAM usage during and after `InitGenesis`.
-- (x/bank) [#15764](https://github.com/cosmos/cosmos-sdk/pull/15764) Speedup x/bank `InitGenesis`.
-- (x/slashing) [#15580](https://github.com/cosmos/cosmos-sdk/pull/15580) Refactor the validator's missed block signing window to be a chunked bitmap instead of a "logical" bitmap, significantly reducing the storage footprint.
-- (x/gov) [#15554](https://github.com/cosmos/cosmos-sdk/pull/15554) Add proposal result log in `active_proposal` event. When a proposal passes but fails to execute, the proposal result is logged in the `active_proposal` event.
-- (x/consensus) [#15553](https://github.com/cosmos/cosmos-sdk/pull/15553) Migrate consensus module to use collections.
-- (server) [#15358](https://github.com/cosmos/cosmos-sdk/pull/15358) Add `server.InterceptConfigsAndCreateContext` as alternative to `server.InterceptConfigsPreRunHandler` which does not set the server context and the default SDK logger.
-- (mempool) [#15328](https://github.com/cosmos/cosmos-sdk/pull/15328) Improve the `PriorityNonceMempool`:
-- Support generic transaction prioritization, instead of `ctx.Priority()`
-- Improve construction through the use of a single `PriorityNonceMempoolConfig` instead of option functions
-- (x/authz) [#15164](https://github.com/cosmos/cosmos-sdk/pull/15164) Add `MsgCancelUnbondingDelegation` to staking authorization.
-- (server) [#15041](https://github.com/cosmos/cosmos-sdk/pull/15041) Remove unnecessary sleeps from gRPC and API server initiation. The servers will start and accept requests as soon as they're ready.
-- (baseapp) [#15023](https://github.com/cosmos/cosmos-sdk/pull/15023) & [#15213](https://github.com/cosmos/cosmos-sdk/pull/15213) Add `MessageRouter` interface to baseapp and pass it to authz, gov and groups instead of concrete type.
-- [#15011](https://github.com/cosmos/cosmos-sdk/pull/15011) Introduce `cosmossdk.io/log` package to provide a consistent logging interface through the SDK. CometBFT logger is now replaced by `cosmossdk.io/log.Logger`.
-- (x/staking) [#14864](https://github.com/cosmos/cosmos-sdk/pull/14864) ` tx staking create-validator` CLI command now takes a json file as an arg instead of using required flags.
-- (x/auth) [#14758](https://github.com/cosmos/cosmos-sdk/pull/14758) Allow transaction event queries to directly passed to Tendermint, which will allow for full query operator support, e.g. `>`.
-- (x/evidence) [#14757](https://github.com/cosmos/cosmos-sdk/pull/14757) Evidence messages do not need to implement a `.Type()` anymore.
-- (x/auth/tx) [#14751](https://github.com/cosmos/cosmos-sdk/pull/14751) Remove `.Type()` and `Route()` methods from all msgs and `legacytx.LegacyMsg` interface.
-- (cli) [#14659](https://github.com/cosmos/cosmos-sdk/pull/14659) Added ability to query blocks by either height/hash ` q block --type=height|hash `.
-- (x/staking) [#14590](https://github.com/cosmos/cosmos-sdk/pull/14590) Return undelegate amount in MsgUndelegateResponse.
-- [#14529](https://github.com/cosmos/cosmos-sdk/pull/14529) Add new property `BondDenom` to `SimulationState` struct.
-- (store) [#14439](https://github.com/cosmos/cosmos-sdk/pull/14439) Remove global metric gatherer from store.
-- By default store has a no op metric gatherer, the application developer must set another metric gatherer or us the provided one in `store/metrics`.
-- (store) [#14438](https://github.com/cosmos/cosmos-sdk/pull/14438) Pass logger from baseapp to store.
-- (baseapp) [#14417](https://github.com/cosmos/cosmos-sdk/pull/14417) The store package no longer has a dependency on baseapp.
-- (module) [#14415](https://github.com/cosmos/cosmos-sdk/pull/14415) Loosen assertions in SetOrderBeginBlockers() and SetOrderEndBlockers().
-- (store) [#14410](https://github.com/cosmos/cosmos-sdk/pull/14410) `rootmulti.Store.loadVersion` has validation to check if all the module stores' height is correct, it will error if any module store has incorrect height.
-- [#14406](https://github.com/cosmos/cosmos-sdk/issues/14406) Migrate usage of `types/store.go` to `store/types/..`.
-- (context)[#14384](https://github.com/cosmos/cosmos-sdk/pull/14384) Refactor(context): Pass EventManager to the context as an interface.
-- (types) [#14354](https://github.com/cosmos/cosmos-sdk/pull/14354) Improve performance on Context.KVStore and Context.TransientStore by 40%.
-- (crypto/keyring) [#14151](https://github.com/cosmos/cosmos-sdk/pull/14151) Move keys presentation from `crypto/keyring` to `client/keys`
-- (signing) [#14087](https://github.com/cosmos/cosmos-sdk/pull/14087) Add SignModeHandlerWithContext interface with a new `GetSignBytesWithContext` to get the sign bytes using `context.Context` as an argument to access state.
-- (server) [#14062](https://github.com/cosmos/cosmos-sdk/pull/14062) Remove rosetta from server start.
-- (crypto) [#3129](https://github.com/cosmos/cosmos-sdk/pull/3129) New armor and keyring key derivation uses aead and encryption uses chacha20poly.
-
-## State Machine Breaking
-
-- (x/gov) [#18146](https://github.com/cosmos/cosmos-sdk/pull/18146) Add denom check to reject denoms outside of those listed in `MinDeposit`. A new `MinDepositRatio` param is added (with a default value of `0.001`) and now deposits are required to be at least `MinDepositRatio*MinDeposit` to be accepted.
-- (x/group,x/gov) [#16235](https://github.com/cosmos/cosmos-sdk/pull/16235) A group and gov proposal is rejected if the proposal metadata title and summary do not match the proposal title and summary.
-- (baseapp) [#15930](https://github.com/cosmos/cosmos-sdk/pull/15930) change vote info provided by prepare and process proposal to the one in the block.
-- (x/staking) [#15731](https://github.com/cosmos/cosmos-sdk/pull/15731) Introducing a new index to retrieve the delegations by validator efficiently.
-- (x/staking) [#15701](https://github.com/cosmos/cosmos-sdk/pull/15701) The `HistoricalInfoKey` has been updated to use a binary format.
-- (x/slashing) [#15580](https://github.com/cosmos/cosmos-sdk/pull/15580) The validator slashing window now stores "chunked" bitmap entries for each validator's signing window instead of a single boolean entry per signing window index.
-- (x/staking) [#14590](https://github.com/cosmos/cosmos-sdk/pull/14590) `MsgUndelegateResponse` now includes undelegated amount. `x/staking` module's `keeper.Undelegate` now returns 3 values (completionTime,undelegateAmount,error) instead of 2.
-- (x/feegrant) [#14294](https://github.com/cosmos/cosmos-sdk/pull/14294) Moved the logic of rejecting duplicate grant from `msg_server` to `keeper` method.
-
-## API Breaking Changes
-
-- (x/auth) [#17787](https://github.com/cosmos/cosmos-sdk/pull/17787) Remove Tip functionality.
-- (types) `module.EndBlockAppModule` has been replaced by Core API `appmodule.HasEndBlocker` or `module.HasABCIEndBlock` when needing validator updates.
-- (types) `module.BeginBlockAppModule` has been replaced by Core API `appmodule.HasBeginBlocker`.
-- (types) [#17358](https://github.com/cosmos/cosmos-sdk/pull/17358) Remove deprecated `sdk.Handler`, use `baseapp.MsgServiceHandler` instead.
-- (client) [#17197](https://github.com/cosmos/cosmos-sdk/pull/17197) `keys.Commands` does not take a home directory anymore. It is inferred from the root command.
-- (x/staking) [#17157](https://github.com/cosmos/cosmos-sdk/pull/17157) `GetValidatorsByPowerIndexKey` and `ValidateBasic` for historical info takes a validator address codec in order to be able to decode/encode addresses.
-- `GetOperator()` now returns the address as it is represented in state, by default this is an encoded address
-- `GetConsAddr() ([]byte, error)` returns `[]byte` instead of sdk.ConsAddres.
-- `FromABCIEvidence` & `GetConsensusAddress(consAc address.Codec)` now take a consensus address codec to be able to decode the incoming address.
-- (x/distribution) `Delegate` & `SlashValidator` helper function added the mock staking keeper as a parameter passed to the function
-- (x/staking) [#17098](https://github.com/cosmos/cosmos-sdk/pull/17098) `NewMsgCreateValidator`, `NewValidator`, `NewMsgCancelUnbondingDelegation`, `NewMsgUndelegate`, `NewMsgBeginRedelegate`, `NewMsgDelegate` and `NewMsgEditValidator` takes a string instead of `sdk.ValAddress` or `sdk.AccAddress`:
-- `NewRedelegation` and `NewUnbondingDelegation` takes a validatorAddressCodec and a delegatorAddressCodec in order to decode the addresses.
-- `NewRedelegationResponse` takes a string instead of `sdk.ValAddress` or `sdk.AccAddress`.
-- `NewMsgCreateValidator.Validate()` takes an address codec in order to decode the address.
-- `BuildCreateValidatorMsg` takes a ValidatorAddressCodec in order to decode addresses.
-- (x/slashing) [#17098](https://github.com/cosmos/cosmos-sdk/pull/17098) `NewMsgUnjail` takes a string instead of `sdk.ValAddress`
-- (x/genutil) [#17098](https://github.com/cosmos/cosmos-sdk/pull/17098) `GenAppStateFromConfig`, AddGenesisAccountCmd and `GenTxCmd` takes an addresscodec to decode addresses.
-- (x/distribution) [#17098](https://github.com/cosmos/cosmos-sdk/pull/17098) `NewMsgDepositValidatorRewardsPool`, `NewMsgFundCommunityPool`, `NewMsgWithdrawValidatorCommission` and `NewMsgWithdrawDelegatorReward` takes a string instead of `sdk.ValAddress` or `sdk.AccAddress`.
-- (x/staking) [#16959](https://github.com/cosmos/cosmos-sdk/pull/16959) Add validator and consensus address codec as staking keeper arguments.
-- (x/staking) [#16958](https://github.com/cosmos/cosmos-sdk/pull/16958) DelegationI interface `GetDelegatorAddr` & `GetValidatorAddr` have been migrated to return string instead of sdk.AccAddress and sdk.ValAddress respectively. stakingtypes.NewDelegation takes a string instead of sdk.AccAddress and sdk.ValAddress.
-- (testutil) [#16899](https://github.com/cosmos/cosmos-sdk/pull/16899) The *cli testutil* `QueryBalancesExec` has been removed. Use the gRPC or REST query instead.
-- (x/staking) [#16795](https://github.com/cosmos/cosmos-sdk/pull/16795) `DelegationToDelegationResponse`, `DelegationsToDelegationResponses`, `RedelegationsToRedelegationResponses` are no longer exported.
-- (x/auth/vesting) [#16741](https://github.com/cosmos/cosmos-sdk/pull/16741) Vesting account constructor now return an error with the result of their validate function.
-- (x/auth) [#16650](https://github.com/cosmos/cosmos-sdk/pull/16650) The *cli testutil* `QueryAccountExec` has been removed. Use the gRPC or REST query instead.
-- (x/auth) [#16621](https://github.com/cosmos/cosmos-sdk/pull/16621) Pass address codec to auth new keeper constructor.
-- (x/auth) [#16423](https://github.com/cosmos/cosmos-sdk/pull/16423) `helpers.AddGenesisAccount` has been moved to `x/genutil` to remove the cyclic dependency between `x/auth` and `x/genutil`.
-- (baseapp) [#16342](https://github.com/cosmos/cosmos-sdk/pull/16342) NewContext was renamed to NewContextLegacy. The replacement (NewContext) now does not take a header, instead you should set the header via `WithHeaderInfo` or `WithBlockHeight`. Note that `WithBlockHeight` will soon be deprecated and its recommended to use `WithHeaderInfo`.
-- (x/mint) [#16329](https://github.com/cosmos/cosmos-sdk/pull/16329) Use collections for state management:
-- Removed: keeper `GetParams`, `SetParams`, `GetMinter`, `SetMinter`.
-- (x/crisis) [#16328](https://github.com/cosmos/cosmos-sdk/pull/16328) Use collections for state management:
-- Removed: keeper `GetConstantFee`, `SetConstantFee`
-- (x/staking) [#16324](https://github.com/cosmos/cosmos-sdk/pull/16324) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey`, and methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context` and return an `error`. Notable changes:
-- `Validator` method now returns `types.ErrNoValidatorFound` instead of `nil` when not found.
-- (x/distribution) [#16302](https://github.com/cosmos/cosmos-sdk/pull/16302) Use collections for FeePool state management.
-- Removed: keeper `GetFeePool`, `SetFeePool`, `GetFeePoolCommunityCoins`
-- (types) [#16272](https://github.com/cosmos/cosmos-sdk/pull/16272) `FeeGranter` in the `FeeTx` interface returns `[]byte` instead of `string`.
-- (x/gov) [#16268](https://github.com/cosmos/cosmos-sdk/pull/16268) Use collections for proposal state management (part 2):
-- this finalizes the gov collections migration
-- Removed: types all the key related functions
-- Removed: keeper `InsertActiveProposalsQueue`, `RemoveActiveProposalsQueue`, `InsertInactiveProposalsQueue`, `RemoveInactiveProposalsQueue`, `IterateInactiveProposalsQueue`, `IterateActiveProposalsQueue`, `ActiveProposalsQueueIterator`, `InactiveProposalsQueueIterator`
-- (x/slashing) [#16246](https://github.com/cosmos/cosmos-sdk/issues/16246) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey`, and methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context` and return an `error`. `GetValidatorSigningInfo` now returns an error instead of a `found bool`, the error can be `nil` (found), `ErrNoSigningInfoFound` (not found) and any other error.
-- (module) [#16227](https://github.com/cosmos/cosmos-sdk/issues/16227) `manager.RunMigrations()` now take a `context.Context` instead of a `sdk.Context`.
-- (x/crisis) [#16216](https://github.com/cosmos/cosmos-sdk/issues/16216) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey`, methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context` and return an `error` instead of panicking.
-- (x/distribution) [#16211](https://github.com/cosmos/cosmos-sdk/pull/16211) Use collections for params state management.
-- (cli) [#16209](https://github.com/cosmos/cosmos-sdk/pull/16209) Add API `StartCmdWithOptions` to create customized start command.
-- (x/mint) [#16179](https://github.com/cosmos/cosmos-sdk/issues/16179) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey`, and methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context` and return an `error`.
-- (x/gov) [#16171](https://github.com/cosmos/cosmos-sdk/pull/16171) Use collections for proposal state management (part 1):
-- Removed: keeper: `GetProposal`, `UnmarshalProposal`, `MarshalProposal`, `IterateProposal`, `GetProposal`, `GetProposalFiltered`, `GetProposals`, `GetProposalID`, `SetProposalID`
-- Removed: errors unused errors
-- (x/gov) [#16164](https://github.com/cosmos/cosmos-sdk/pull/16164) Use collections for vote state management:
-- Removed: types `VoteKey`, `VoteKeys`
-- Removed: keeper `IterateVotes`, `IterateAllVotes`, `GetVotes`, `GetVote`, `SetVote`
-- (sims) [#16155](https://github.com/cosmos/cosmos-sdk/pull/16155)
-- `simulation.NewOperationMsg` now marshals the operation msg as proto bytes instead of legacy amino JSON bytes.
-- `simulation.NewOperationMsg` is now 2-arity instead of 3-arity with the obsolete argument `codec.ProtoCodec` removed.
-- The field `OperationMsg.Msg` is now of type `[]byte` instead of `json.RawMessage`.
-- (x/gov) [#16127](https://github.com/cosmos/cosmos-sdk/pull/16127) Use collections for deposit state management:
-- The following methods are removed from the gov keeper: `GetDeposit`, `GetAllDeposits`, `IterateAllDeposits`.
-- The following functions are removed from the gov types: `DepositKey`, `DepositsKey`.
-- (x/gov) [#16118](https://github.com/cosmos/cosmos-sdk/pull/16118/) Use collections for constitution and params state management.
-- (x/gov) [#16106](https://github.com/cosmos/cosmos-sdk/pull/16106) Remove gRPC query methods from gov keeper.
-- (x/*all*) [#16052](https://github.com/cosmos/cosmos-sdk/pull/16062) `GetSignBytes` implementations on messages and global legacy amino codec definitions have been removed from all modules.
-- (sims) [#16052](https://github.com/cosmos/cosmos-sdk/pull/16062) `GetOrGenerate` no longer requires a codec argument is now 4-arity instead of 5-arity.
-- (types/math) [#16040](https://github.com/cosmos/cosmos-sdk/pull/16798) Remove aliases in `types/math.go` (part 2).
-- (types/math) [#16040](https://github.com/cosmos/cosmos-sdk/pull/16040) Remove aliases in `types/math.go` (part 1).
-- (x/auth) [#16016](https://github.com/cosmos/cosmos-sdk/pull/16016) Use collections for accounts state management:
-- removed: keeper `HasAccountByID`, `AccountAddressByID`, `SetParams
-- (x/genutil) [#15999](https://github.com/cosmos/cosmos-sdk/pull/15999) Genutil now takes the `GenesisTxHanlder` interface instead of deliverTx. The interface is implemented on baseapp
-- (x/gov) [#15988](https://github.com/cosmos/cosmos-sdk/issues/15988) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey`, methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context` and return an `error` (instead of panicking or returning a `found bool`). Iterators callback functions now return an error instead of a `bool`.
-- (x/auth) [#15985](https://github.com/cosmos/cosmos-sdk/pull/15985) The `AccountKeeper` does not expose the `QueryServer` and `MsgServer` APIs anymore.
-- (x/authz) [#15962](https://github.com/cosmos/cosmos-sdk/issues/15962) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey`, methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context`. The `Authorization` interface's `Accept` method now takes a `context.Context` instead of a `sdk.Context`.
-- (x/distribution) [#15948](https://github.com/cosmos/cosmos-sdk/issues/15948) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey` and methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context`. Keeper methods also now return an `error`.
-- (x/bank) [#15891](https://github.com/cosmos/cosmos-sdk/issues/15891) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey` and methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context`. Also `FundAccount` and `FundModuleAccount` from the `testutil` package accept a `context.Context` instead of a `sdk.Context`, and it's position was moved to the first place.
-- (x/slashing) [#15875](https://github.com/cosmos/cosmos-sdk/pull/15875) `x/slashing.NewAppModule` now requires an `InterfaceRegistry` parameter.
-- (x/crisis) [#15852](https://github.com/cosmos/cosmos-sdk/pull/15852) Crisis keeper now takes a instance of the address codec to be able to decode user addresses
-- (x/auth) [#15822](https://github.com/cosmos/cosmos-sdk/pull/15822) The type of struct field `ante.HandlerOptions.SignModeHandler` has been changed to `x/tx/signing.HandlerMap`.
-- (client) [#15822](https://github.com/cosmos/cosmos-sdk/pull/15822) The return type of the interface method `TxConfig.SignModeHandler` has been changed to `x/tx/signing.HandlerMap`.
-- The signature of `VerifySignature` has been changed to accept a `x/tx/signing.HandlerMap` and other structs from `x/tx` as arguments.
-- The signature of `NewTxConfigWithTextual` has been deprecated and its signature changed to accept a `SignModeOptions`.
-- The signature of `NewSigVerificationDecorator` has been changed to accept a `x/tx/signing.HandlerMap`.
-- (x/bank) [#15818](https://github.com/cosmos/cosmos-sdk/issues/15818) `BaseViewKeeper`'s `Logger` method now doesn't require a context. `NewBaseKeeper`, `NewBaseSendKeeper` and `NewBaseViewKeeper` now also require a `log.Logger` to be passed in.
-- (x/genutil) [#15679](https://github.com/cosmos/cosmos-sdk/pull/15679) `MigrateGenesisCmd` now takes a `MigrationMap` instead of having the SDK genesis migration hardcoded.
-- (client) [#15673](https://github.com/cosmos/cosmos-sdk/pull/15673) Move `client/keys.OutputFormatJSON` and `client/keys.OutputFormatText` to `client/flags` package.
-- (x/*all*) [#15648](https://github.com/cosmos/cosmos-sdk/issues/15648) Make `SetParams` consistent across all modules and validate the params at the message handling instead of `SetParams` method.
-- (codec) [#15600](https://github.com/cosmos/cosmos-sdk/pull/15600) [#15873](https://github.com/cosmos/cosmos-sdk/pull/15873) add support for getting signers to `codec.Codec` and `InterfaceRegistry`:
-- `InterfaceRegistry` is has unexported methods and implements `protodesc.Resolver` plus the `RangeFiles` and `SigningContext` methods. All implementations of `InterfaceRegistry` by other users must now embed the official implementation.
-- `Codec` has new methods `InterfaceRegistry`, `GetMsgAnySigners`, `GetMsgV1Signers`, and `GetMsgV2Signers` as well as unexported methods. All implementations of `Codec` by other users must now embed an official implementation from the `codec` package.
-- `AminoCodec` is marked as deprecated and no longer implements `Codec.
-- (client) [#15597](https://github.com/cosmos/cosmos-sdk/pull/15597) `RegisterNodeService` now requires a config parameter.
-- (x/nft) [#15588](https://github.com/cosmos/cosmos-sdk/pull/15588) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey` and methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context`.
-- (baseapp) [#15568](https://github.com/cosmos/cosmos-sdk/pull/15568) `SetIAVLLazyLoading` is removed from baseapp.
-- (x/genutil) [#15567](https://github.com/cosmos/cosmos-sdk/pull/15567) `CollectGenTxsCmd` & `GenTxCmd` takes a address.Codec to be able to decode addresses.
-- (x/bank) [#15567](https://github.com/cosmos/cosmos-sdk/pull/15567) `GenesisBalance.GetAddress` now returns a string instead of `sdk.AccAddress`
-- `MsgSendExec` test helper function now takes a address.Codec
-- (x/auth) [#15520](https://github.com/cosmos/cosmos-sdk/pull/15520) `NewAccountKeeper` now takes a `KVStoreService` instead of a `StoreKey` and methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context`.
-- (baseapp) [#15519](https://github.com/cosmos/cosmos-sdk/pull/15519/files) `runTxMode`s were renamed to `execMode`. `ModeDeliver` as changed to `ModeFinalize` and a new `ModeVoteExtension` was added for vote extensions.
-- (baseapp) [#15519](https://github.com/cosmos/cosmos-sdk/pull/15519/files) Writing of state to the multistore was moved to `FinalizeBlock`. `Commit` still handles the committing values to disk.
-- (baseapp) [#15519](https://github.com/cosmos/cosmos-sdk/pull/15519/files) Calls to BeginBlock and EndBlock have been replaced with core api beginblock & endblock.
-- (baseapp) [#15519](https://github.com/cosmos/cosmos-sdk/pull/15519/files) BeginBlock and EndBlock are now internal to baseapp. For testing, user must call `FinalizeBlock`. BeginBlock and EndBlock calls are internal to Baseapp.
-- (baseapp) [#15519](https://github.com/cosmos/cosmos-sdk/pull/15519/files) All calls to ABCI methods now accept a pointer of the abci request and response types
-- (x/consensus) [#15517](https://github.com/cosmos/cosmos-sdk/pull/15517) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey`.
-- (x/bank) [#15477](https://github.com/cosmos/cosmos-sdk/pull/15477) `banktypes.NewMsgMultiSend` and `keeper.InputOutputCoins` only accept one input.
-- (server) [#15358](https://github.com/cosmos/cosmos-sdk/pull/15358) Remove `server.ErrorCode` that was not used anywhere.
-- (x/capability) [#15344](https://github.com/cosmos/cosmos-sdk/pull/15344) Capability module was removed and is now housed in [IBC-GO](https://github.com/cosmos/ibc-go).
-- (mempool) [#15328](https://github.com/cosmos/cosmos-sdk/pull/15328) The `PriorityNonceMempool` is now generic over type `C comparable` and takes a single `PriorityNonceMempoolConfig[C]` argument. See `DefaultPriorityNonceMempoolConfig` for how to construct the configuration and a `TxPriority` type.
-- [#15299](https://github.com/cosmos/cosmos-sdk/pull/15299) Remove `StdTx` transaction and signing APIs. No SDK version has actually supported `StdTx` since before Stargate.
-- [#15284](https://github.com/cosmos/cosmos-sdk/pull/15284)
-- (x/gov) [#15284](https://github.com/cosmos/cosmos-sdk/pull/15284) `NewKeeper` now requires `codec.Codec`.
-- (x/authx) [#15284](https://github.com/cosmos/cosmos-sdk/pull/15284) `NewKeeper` now requires `codec.Codec`.
-- `types/tx.Tx` no longer implements `sdk.Tx`.
-- `sdk.Tx` now requires a new method `GetMsgsV2()`.
-- `sdk.Msg.GetSigners` was deprecated and is no longer supported. Use the `cosmos.msg.v1.signer` protobuf annotation instead.
-- `TxConfig` has a new method `SigningContext() *signing.Context`.
-- `SigVerifiableTx.GetSigners()` now returns `([][]byte, error)` instead of `[]sdk.AccAddress`.
-- `AccountKeeper` now has an `AddressCodec() address.Codec` method and the expected `AccountKeeper` for `x/auth/ante` expects this method.
-- [#15211](https://github.com/cosmos/cosmos-sdk/pull/15211) Remove usage of `github.com/cometbft/cometbft/libs/bytes.HexBytes` in favor of `[]byte` thorough the SDK.
-- (crypto) [#15070](https://github.com/cosmos/cosmos-sdk/pull/15070) `GenerateFromPassword` and `Cost` from `bcrypt.go` now take a `uint32` instead of a `int` type.
-- (types) [#15067](https://github.com/cosmos/cosmos-sdk/pull/15067) Remove deprecated alias from `types/errors`. Use `cosmossdk.io/errors` instead.
-- (server) [#15041](https://github.com/cosmos/cosmos-sdk/pull/15041) Refactor how gRPC and API servers are started to remove unnecessary sleeps:
-- `api.Server#Start` now accepts a `context.Context`. The caller is responsible for ensuring that the context is canceled such that the API server can gracefully exit. The caller does not need to stop the server.
-- To start the gRPC server you must first create the server via `NewGRPCServer`, after which you can start the gRPC server via `StartGRPCServer` which accepts a `context.Context`. The caller is responsible for ensuring that the context is canceled such that the gRPC server can gracefully exit. The caller does not need to stop the server.
-- Rename `WaitForQuitSignals` to `ListenForQuitSignals`. Note, this function is no longer blocking. Thus the caller is expected to provide a `context.CancelFunc` which indicates that when a signal is caught, that any spawned processes can gracefully exit.
-- Remove `ServerStartTime` constant.
-- [#15011](https://github.com/cosmos/cosmos-sdk/pull/15011) All functions that were taking a CometBFT logger, now take `cosmossdk.io/log.Logger` instead.
-- (simapp) [#14977](https://github.com/cosmos/cosmos-sdk/pull/14977) Move simulation helpers functions (`AppStateFn` and `AppStateRandomizedFn`) to `testutil/sims`. These takes an extra genesisState argument which is the default state of the app.
-- (x/bank) [#14894](https://github.com/cosmos/cosmos-sdk/pull/14894) Allow a human readable denomination for coins when querying bank balances. Added a `ResolveDenom` parameter to `types.QueryAllBalancesRequest`.
-- [#14847](https://github.com/cosmos/cosmos-sdk/pull/14847) App and ModuleManager methods `InitGenesis`, `ExportGenesis`, `BeginBlock` and `EndBlock` now also return an error.
-- (x/upgrade) [#14764](https://github.com/cosmos/cosmos-sdk/pull/14764) The `x/upgrade` module is extracted to have a separate go.mod file which allows it to be a standalone module.
-- (x/auth) [#14758](https://github.com/cosmos/cosmos-sdk/pull/14758) Refactor transaction searching:
-- Refactor `QueryTxsByEvents` to accept a `query` of type `string` instead of `events` of type `[]string`
-- Refactor CLI methods to accept `--query` flag instead of `--events`
-- Pass `prove=false` to Tendermint's `TxSearch` RPC method
-- (simulation) [#14751](https://github.com/cosmos/cosmos-sdk/pull/14751) Remove the `MsgType` field from `simulation.OperationInput` struct.
-- (store) [#14746](https://github.com/cosmos/cosmos-sdk/pull/14746) Extract Store in its own go.mod and rename the package to `cosmossdk.io/store`.
-- (x/nft) [#14725](https://github.com/cosmos/cosmos-sdk/pull/14725) Extract NFT in its own go.mod and rename the package to `cosmossdk.io/x/nft`.
-- (x/gov) [#14720](https://github.com/cosmos/cosmos-sdk/pull/14720) Add an expedited field in the gov v1 proposal and `MsgNewMsgProposal`.
-- (x/feegrant) [#14649](https://github.com/cosmos/cosmos-sdk/pull/14649) Extract Feegrant in its own go.mod and rename the package to `cosmossdk.io/x/feegrant`.
-- (tx) [#14634](https://github.com/cosmos/cosmos-sdk/pull/14634) Move the `tx` go module to `x/tx`.
-- (store/streaming)[#14603](https://github.com/cosmos/cosmos-sdk/pull/14603) `StoreDecoderRegistry` moved from store to `types/simulations` this breaks the `AppModuleSimulation` interface.
-- (snapshots) [#14597](https://github.com/cosmos/cosmos-sdk/pull/14597) Move `snapshots` to `store/snapshots`, rename and bump proto package to v1.
-- (x/staking) [#14590](https://github.com/cosmos/cosmos-sdk/pull/14590) `MsgUndelegateResponse` now includes undelegated amount. `x/staking` module's `keeper.Undelegate` now returns 3 values (completionTime,undelegateAmount,error) instead of 2.
-- (crypto/keyring) [#14151](https://github.com/cosmos/cosmos-sdk/pull/14151) Move keys presentation from `crypto/keyring` to `client/keys`
-- (baseapp) [#14050](https://github.com/cosmos/cosmos-sdk/pull/14050) Refactor `ABCIListener` interface to accept Go contexts.
-- (x/auth) [#13850](https://github.com/cosmos/cosmos-sdk/pull/13850/) Remove `MarshalYAML` methods from module (`x/...`) types.
-- (modules) [#13850](https://github.com/cosmos/cosmos-sdk/pull/13850) and [#14046](https://github.com/cosmos/cosmos-sdk/pull/14046) Remove gogoproto stringer annotations. This removes the custom `String()` methods on all types that were using the annotations.
-- (x/evidence) [14724](https://github.com/cosmos/cosmos-sdk/pull/14724) Extract Evidence in its own go.mod and rename the package to `cosmossdk.io/x/evidence`.
-- (crypto/keyring) [#13734](https://github.com/cosmos/cosmos-sdk/pull/13834) The keyring's `Sign` method now takes a new `signMode` argument. It is only used if the signing key is a Ledger hardware device. You can set it to 0 in all other cases.
-- (snapshots) [14048](https://github.com/cosmos/cosmos-sdk/pull/14048) Move the Snapshot package to the store package. This is done in an effort group all storage related logic under one package.
-- (signing) [#13701](https://github.com/cosmos/cosmos-sdk/pull/) Add `context.Context` as an argument `x/auth/signing.VerifySignature`.
-- (store) [#11825](https://github.com/cosmos/cosmos-sdk/pull/11825) Make extension snapshotter interface safer to use, renamed the util function `WriteExtensionItem` to `WriteExtensionPayload`.
-
-## Client Breaking Changes
-
-- (x/gov) [#17910](https://github.com/cosmos/cosmos-sdk/pull/17910) Remove telemetry for counting votes and proposals. It was incorrectly counting votes. Use alternatives, such as state streaming.
-- (abci) [#15845](https://github.com/cosmos/cosmos-sdk/pull/15845) Remove duplicating events in `logs`.
-- (abci) [#15845](https://github.com/cosmos/cosmos-sdk/pull/15845) Add `msg_index` to all event attributes to associate events and messages.
-- (x/staking) [#15701](https://github.com/cosmos/cosmos-sdk/pull/15701) `HistoricalInfoKey` now has a binary format.
-- (store/streaming) [#15519](https://github.com/cosmos/cosmos-sdk/pull/15519/files) State Streaming removed emitting of beginblock, endblock and delivertx in favour of emitting FinalizeBlock.
-- (baseapp) [#15519](https://github.com/cosmos/cosmos-sdk/pull/15519/files) BeginBlock & EndBlock events have begin or endblock in the events in order to identify which stage they are emitted from since they are returned to comet as FinalizeBlock events.
-- (grpc-web) [#14652](https://github.com/cosmos/cosmos-sdk/pull/14652) Use same port for gRPC-Web and the API server.
-
-## CLI Breaking Changes
-
-- (all) The migration of modules to [AutoCLI](/sdk/v0.53/learn/advanced/autocli) led to no changes in UX but a [small change in CLI outputs](https://github.com/cosmos/cosmos-sdk/issues/16651) where results can be nested.
-- (all) Query pagination flags have been renamed with the migration to AutoCLI:
-- `--reverse` -> `--page-reverse`
-- `--offset` -> `--page-offset`
-- `--limit` -> `--page-limit`
-- `--count-total` -> `--page-count-total`
-- (cli) [#17184](https://github.com/cosmos/cosmos-sdk/pull/17184) All json keys returned by the `status` command are now snake case instead of pascal case.
-- (server) [#17177](https://github.com/cosmos/cosmos-sdk/pull/17177) Remove `iavl-lazy-loading` configuration.
-- (x/gov) [#16987](https://github.com/cosmos/cosmos-sdk/pull/16987) In ` query gov proposals` the proposal status flag have renamed from `--status` to `--proposal-status`. Additionally, that flags now uses the ENUM values: `PROPOSAL_STATUS_DEPOSIT_PERIOD`, `PROPOSAL_STATUS_VOTING_PERIOD`, `PROPOSAL_STATUS_PASSED`, `PROPOSAL_STATUS_REJECTED`, `PROPOSAL_STATUS_FAILED`.
-- (x/bank) [#16899](https://github.com/cosmos/cosmos-sdk/pull/16899) With the migration to AutoCLI some bank commands have been split in two:
-- Use `total-supply` (or `total`) for querying the total supply and `total-supply-of` for querying the supply of a specific denom.
-- Use `denoms-metadata` for querying all denom metadata and `denom-metadata` for querying a specific denom metadata.
-- (rosetta) [#16276](https://github.com/cosmos/cosmos-sdk/issues/16276) Rosetta migration to standalone repo.
-- (cli) [#15826](https://github.com/cosmos/cosmos-sdk/pull/15826) Remove ` q account` command. Use ` q auth account` instead.
-- (cli) [#15299](https://github.com/cosmos/cosmos-sdk/pull/15299) Remove `--amino` flag from `sign` and `multi-sign` commands. Amino `StdTx` has been deprecated for a while. Amino JSON signing still works as expected.
-- (x/gov) [#14880](https://github.com/cosmos/cosmos-sdk/pull/14880) Remove ` tx gov submit-legacy-proposal cancel-software-upgrade` and `software-upgrade` commands. These commands are now in the `x/upgrade` module and using gov v1. Use `tx upgrade software-upgrade` instead.
-- (x/staking) [#14864](https://github.com/cosmos/cosmos-sdk/pull/14864) ` tx staking create-validator` CLI command now takes a json file as an arg instead of using required flags.
-- (cli) [#14659](https://github.com/cosmos/cosmos-sdk/pull/14659) ` q block ` is removed as it just output json. The new command allows either height/hash and is ` q block --type=height|hash `.
-- (grpc-web) [#14652](https://github.com/cosmos/cosmos-sdk/pull/14652) Remove `grpc-web.address` flag.
-- (client) [#14342](https://github.com/cosmos/cosmos-sdk/pull/14342) ` config` command is now a sub-command using Confix. Use ` config --help` to learn more.
-
-## Bug Fixes
-
-- (server) [#18254](https://github.com/cosmos/cosmos-sdk/pull/18254) Don't hardcode gRPC address to localhost.
-- (x/gov) [#18173](https://github.com/cosmos/cosmos-sdk/pull/18173) Gov hooks now return an error and are *blocking* when they fail. Expect for `AfterProposalFailedMinDeposit` and `AfterProposalVotingPeriodEnded` which log the error and continue.
-- (x/gov) [#17873](https://github.com/cosmos/cosmos-sdk/pull/17873) Fail any inactive and active proposals that cannot be decoded.
-- (x/slashing) [#18016](https://github.com/cosmos/cosmos-sdk/pull/18016) Fixed builder function for missed blocks key (`validatorMissedBlockBitArrayPrefixKey`) in slashing/migration/v4.
-- (x/bank) [#18107](https://github.com/cosmos/cosmos-sdk/pull/18107) Add missing keypair of SendEnabled to restore legacy param set before migration.
-- (baseapp) [#17769](https://github.com/cosmos/cosmos-sdk/pull/17769) Ensure we respect block size constraints in the `DefaultProposalHandler`'s `PrepareProposal` handler when a nil or no-op mempool is used. We provide a `TxSelector` type to assist in making transaction selection generalized. We also fix a comparison bug in tx selection when `req.maxTxBytes` is reached.
-- (mempool) [#17668](https://github.com/cosmos/cosmos-sdk/pull/17668) Fix `PriorityNonceIterator.Next()` nil pointer ref for min priority at the end of iteration.
-- (config) [#17649](https://github.com/cosmos/cosmos-sdk/pull/17649) Fix `mempool.max-txs` configuration is invalid in `app.config`.
-- (baseapp) [#17518](https://github.com/cosmos/cosmos-sdk/pull/17518) Utilizing voting power from vote extensions (CometBFT) instead of the current bonded tokens (x/staking) to determine if a set of vote extensions are valid.
-- (baseapp) [#17251](https://github.com/cosmos/cosmos-sdk/pull/17251) VerifyVoteExtensions and ExtendVote initialize their own contexts/states, allowing VerifyVoteExtensions being called without ExtendVote.
-- (x/distribution) [#17236](https://github.com/cosmos/cosmos-sdk/pull/17236) Using "validateCommunityTax" in "Params.ValidateBasic", preventing panic when field "CommunityTax" is nil.
-- (x/bank) [#17170](https://github.com/cosmos/cosmos-sdk/pull/17170) Avoid empty spendable error message on send coins.
-- (x/group) [#17146](https://github.com/cosmos/cosmos-sdk/pull/17146) Rename x/group legacy ORM package's error codespace from "orm" to "legacy_orm", preventing collisions with ORM's error codespace "orm".
-- (types/query) [#16905](https://github.com/cosmos/cosmos-sdk/pull/16905) Collections Pagination now applies proper count when filtering results.
-- (x/bank) [#16841](https://github.com/cosmos/cosmos-sdk/pull/16841) Correctly process legacy `DenomAddressIndex` values.
-- (x/auth/vesting) [#16733](https://github.com/cosmos/cosmos-sdk/pull/16733) Panic on overflowing and negative EndTimes when creating a PeriodicVestingAccount.
-- (x/consensus) [#16713](https://github.com/cosmos/cosmos-sdk/pull/16713) Add missing ABCI param in `MsgUpdateParams`.
-- (baseapp) [#16700](https://github.com/cosmos/cosmos-sdk/pull/16700) Fix consensus failure in returning no response to malformed transactions.
-- [#16639](https://github.com/cosmos/cosmos-sdk/pull/16639) Make sure we don't execute blocks beyond the halt height.
-- (baseapp) [#16613](https://github.com/cosmos/cosmos-sdk/pull/16613) Ensure each message in a transaction has a registered handler, otherwise `CheckTx` will fail.
-- (baseapp) [#16596](https://github.com/cosmos/cosmos-sdk/pull/16596) Return error during `ExtendVote` and `VerifyVoteExtension` if the request height is earlier than `VoteExtensionsEnableHeight`.
-- (baseapp) [#16259](https://github.com/cosmos/cosmos-sdk/pull/16259) Ensure the `Context` block height is correct after `InitChain` and prior to the second block.
-- (x/gov) [#16231](https://github.com/cosmos/cosmos-sdk/pull/16231) Fix Rawlog JSON formatting of proposal_vote option field.* (cli) [#16138](https://github.com/cosmos/cosmos-sdk/pull/16138) Fix snapshot commands panic if snapshot don't exists.
-- (x/staking) [#16043](https://github.com/cosmos/cosmos-sdk/pull/16043) Call `AfterUnbondingInitiated` hook for new unbonding entries only and fix `UnbondingDelegation` entries handling. This is a behavior change compared to Cosmos SDK v0.47.x, now the hook is called only for new unbonding entries.
-- (types) [#16010](https://github.com/cosmos/cosmos-sdk/pull/16010) Let `module.CoreAppModuleBasicAdaptor` fallback to legacy genesis handling.
-- (types) [#15691](https://github.com/cosmos/cosmos-sdk/pull/15691) Make `Coin.Validate()` check that `.Amount` is not nil.
-- (x/crypto) [#15258](https://github.com/cosmos/cosmos-sdk/pull/15258) Write keyhash file with permissions 0600 instead of 0555.
-- (x/auth) [#15059](https://github.com/cosmos/cosmos-sdk/pull/15059) `ante.CountSubKeys` returns 0 when passing a nil `Pubkey`.
-- (x/capability) [#15030](https://github.com/cosmos/cosmos-sdk/pull/15030) Prevent `x/capability` from consuming `GasMeter` gas during `InitMemStore`
-- (types/coin) [#14739](https://github.com/cosmos/cosmos-sdk/pull/14739) Deprecate the method `Coin.IsEqual` in favour of `Coin.Equal`. The difference between the two methods is that the first one results in a panic when denoms are not equal. This panic lead to unexpected behavior.
-
-## Deprecated
-
-- (types) [#16980](https://github.com/cosmos/cosmos-sdk/pull/16980) Deprecate `IntProto` and `DecProto`. Instead, `math.Int` and `math.LegacyDec` should be used respectively. Both types support `Marshal` and `Unmarshal` for binary serialization.
-- (x/staking) [#14567](https://github.com/cosmos/cosmos-sdk/pull/14567) The `delegator_address` field of `MsgCreateValidator` has been deprecated.
-
\ No newline at end of file
diff --git a/sdk/next/coming-soon.mdx b/sdk/next/coming-soon.mdx
deleted file mode 100644
index 08708a407..000000000
--- a/sdk/next/coming-soon.mdx
+++ /dev/null
@@ -1,3 +0,0 @@
----
-title: Coming Soon
----
\ No newline at end of file
diff --git a/sdk/next/learn.mdx b/sdk/next/learn.mdx
new file mode 100644
index 000000000..6febd0410
--- /dev/null
+++ b/sdk/next/learn.mdx
@@ -0,0 +1,27 @@
+---
+title: "Cosmos SDK Documentation"
+description: "Version: v0.53"
+---
+
+The Cosmos SDK is the most widely adopted, battle-tested Layer 1 blockchain stack, trusted by 200+ chains live in production. This modular framework enables you to build secure, high-performance blockchains with comprehensive guides covering everything from core concepts to advanced implementation patterns.
+
+
+
+ Understand the fundamentals of Cosmos SDK, application-specific blockchains, and the SDK's architecture.
+
+
+ Learn essential concepts including application anatomy, transaction lifecycles, accounts, and gas mechanics.
+
+
+ Explore core components, security models, execution patterns, and advanced architectural concepts.
+
+
+ Create blockchain applications with guides on app development, ABCI, testing, and SDK packages.
+
+
+ Develop custom modules with comprehensive guides on module architecture, message handling, and state management.
+
+
+ Set up, configure, and maintain nodes from local development environments to production deployments.
+
+
diff --git a/sdk/next/learn/advanced/autocli.mdx b/sdk/next/learn/advanced/autocli.mdx
new file mode 100644
index 000000000..2aa5902bb
--- /dev/null
+++ b/sdk/next/learn/advanced/autocli.mdx
@@ -0,0 +1,730 @@
+---
+title: AutoCLI
+---
+
+
+**Synopsis**
+This document details how to build CLI and REST interfaces for a module. Examples from various Cosmos SDK modules are included.
+
+
+
+**Prerequisite Readings**
+
+* [CLI](/sdk/v0.53/learn/advanced/cli)
+
+
+
+The `autocli` (also known as `client/v2`) package is a [Go library](https://pkg.go.dev/cosmossdk.io/client/v2/autocli) for generating CLI (command line interface) interfaces for Cosmos SDK-based applications. It provides a simple way to add CLI commands to your application by generating them automatically based on your gRPC service definitions. Autocli generates CLI commands and flags directly from your protobuf messages, including options, input parameters, and output parameters. This means that you can easily add a CLI interface to your application without having to manually create and manage commands.
+
+## Overview
+
+`autocli` generates CLI commands and flags for each method defined in your gRPC service. By default, it generates commands for each gRPC service. The commands are named based on the name of the service method.
+
+For example, given the following protobuf definition for a service:
+
+```protobuf
+service MyService {
+ rpc MyMethod(MyRequest) returns (MyResponse) {}
+}
+```
+
+For instance, `autocli` would generate a command named `my-method` for the `MyMethod` method. The command will have flags for each field in the `MyRequest` message.
+
+It is possible to customize the generation of transactions and queries by defining options for each service.
+
+## Application Wiring
+
+Here are the steps to use AutoCLI:
+
+1. Ensure your app's modules implement the `appmodule.AppModule` interface.
+2. (optional) Configure how `autocli` command generation behaves by implementing the `func (am AppModule) AutoCLIOptions() *autocliv1.ModuleOptions` method on the module.
+3. Use the `autocli.AppOptions` struct to specify the modules you defined. If you are using `depinject`, it can automatically create an instance of `autocli.AppOptions` based on your app's configuration.
+4. Use the `EnhanceRootCommand()` method provided by `autocli` to add the CLI commands for the specified modules to your root command.
+
+
+AutoCLI is additive only, meaning *enhancing* the root command will only add subcommands that are not already registered. This means that you can use AutoCLI alongside other custom commands within your app.
+
+
+Here's an example of how to use `autocli` in your app:
+
+```go expandable
+// Define your app's modules
+ testModules := map[string]appmodule.AppModule{
+ "testModule": &TestModule{
+},
+}
+
+// Define the autocli AppOptions
+ autoCliOpts := autocli.AppOptions{
+ Modules: testModules,
+}
+
+// Create the root command
+ rootCmd := &cobra.Command{
+ Use: "app",
+}
+ if err := appOptions.EnhanceRootCommand(rootCmd); err != nil {
+ return err
+}
+
+// Run the root command
+ if err := rootCmd.Execute(); err != nil {
+ return err
+}
+```
+
+### Keyring
+
+`autocli` uses a keyring for resolving key names and signing transactions.
+
+
+AutoCLI provides a better UX than normal CLI as it allows resolving key names directly from the keyring in all transactions and commands.
+
+```sh
+ q bank balances alice
+ tx bank send alice bob 1000denom
+```
+
+
+
+The keyring used for resolving names and signing transactions is provided via the `client.Context`.
+The keyring is then converted to the `client/v2/autocli/keyring` interface.
+If no keyring is provided, the `autocli` generated command will not be able to sign transactions, but will still be able to query the chain.
+
+
+The Cosmos SDK keyring and Hubl keyring both implement the `client/v2/autocli/keyring` interface, thanks to the following wrapper:
+
+```go
+keyring.NewAutoCLIKeyring(kb)
+```
+
+
+
+## Signing
+
+`autocli` supports signing transactions with the keyring.
+The [`cosmos.msg.v1.signer` protobuf annotation](/sdk/v0.53/build/building-modules/protobuf-annotations) defines the signer field of the message.
+This field is automatically filled when using the `--from` flag or defining the signer as a positional argument.
+
+
+AutoCLI currently supports only one signer per transaction.
+
+
+## Module wiring & Customization
+
+The `AutoCLIOptions()` method on your module allows specifying custom commands, sub-commands, or flags for each service, as if it were a `cobra.Command` instance, within the `RpcCommandOptions` struct. Defining such options will customize the behavior of the `autocli` command generation, which by default generates a command for each method in your gRPC service.
+
+```go
+*autocliv1.RpcCommandOptions{
+ RpcMethod: "Params", // The name of the gRPC service
+ Use: "params", // Command usage that is displayed in the help
+ Short: "Query the parameters of the governance process", // Short description of the command
+ Long: "Query the parameters of the governance process. Specify specific param types (voting|tallying|deposit)
+
+to filter results.", // Long description of the command
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{
+ {
+ ProtoField: "params_type",
+ Optional: true
+}, // Transform a flag into a positional argument
+},
+}
+```
+
+
+AutoCLI can create a gov proposal of any tx by simply setting the `GovProposal` field to `true` in the `autocli.RpcCommandOptions` struct.
+Users can however use the `--no-proposal` flag to disable the proposal creation (which is useful if the authority isn't the gov module on a chain).
+
+
+### Specifying Subcommands
+
+By default, `autocli` generates a command for each method in your gRPC service. However, you can specify subcommands to group related commands together. To specify subcommands, use the `autocliv1.ServiceCommandDescriptor` struct.
+
+This example shows how to use the `autocliv1.ServiceCommandDescriptor` struct to group related commands together and specify subcommands in your gRPC service by defining an instance of `autocliv1.ModuleOptions` in your `autocli.go`.
+
+```go expandable
+package gov
+
+import (
+
+ "fmt"
+
+ autocliv1 "cosmossdk.io/api/cosmos/autocli/v1"
+ govv1 "cosmossdk.io/api/cosmos/gov/v1"
+ govv1beta1 "cosmossdk.io/api/cosmos/gov/v1beta1"
+ "github.com/cosmos/cosmos-sdk/version"
+)
+
+// AutoCLIOptions implements the autocli.HasAutoCLIConfig interface.
+func (am AppModule)
+
+AutoCLIOptions() *autocliv1.ModuleOptions {
+ return &autocliv1.ModuleOptions{
+ Query: &autocliv1.ServiceCommandDescriptor{
+ Service: govv1.Query_ServiceDesc.ServiceName,
+ RpcCommandOptions: []*autocliv1.RpcCommandOptions{
+ {
+ RpcMethod: "Params",
+ Use: "params",
+ Short: "Query the parameters of the governance process",
+ Long: "Query the parameters of the governance process. Specify specific param types (voting|tallying|deposit)
+
+to filter results.",
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{
+ {
+ ProtoField: "params_type",
+ Optional: true
+},
+},
+},
+ {
+ RpcMethod: "Proposals",
+ Use: "proposals",
+ Short: "Query proposals with optional filters",
+ Example: fmt.Sprintf("%[1]s query gov proposals --depositor cosmos1...\n%[1]s query gov proposals --voter cosmos1...\n%[1]s query gov proposals --proposal-status (PROPOSAL_STATUS_DEPOSIT_PERIOD|PROPOSAL_STATUS_VOTING_PERIOD|PROPOSAL_STATUS_PASSED|PROPOSAL_STATUS_REJECTED|PROPOSAL_STATUS_FAILED)", version.AppName),
+},
+ {
+ RpcMethod: "Proposal",
+ Use: "proposal [proposal-id]",
+ Short: "Query details of a single proposal",
+ Example: fmt.Sprintf("%s query gov proposal 1", version.AppName),
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{
+ {
+ ProtoField: "proposal_id"
+},
+},
+},
+ {
+ RpcMethod: "Vote",
+ Use: "vote [proposal-id] [voter-addr]",
+ Short: "Query details of a single vote",
+ Example: fmt.Sprintf("%s query gov vote 1 cosmos1...", version.AppName),
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{
+ {
+ ProtoField: "proposal_id"
+},
+ {
+ ProtoField: "voter"
+},
+},
+},
+ {
+ RpcMethod: "Votes",
+ Use: "votes [proposal-id]",
+ Short: "Query votes of a single proposal",
+ Example: fmt.Sprintf("%s query gov votes 1", version.AppName),
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{
+ {
+ ProtoField: "proposal_id"
+},
+},
+},
+ {
+ RpcMethod: "Deposit",
+ Use: "deposit [proposal-id] [depositer-addr]",
+ Short: "Query details of a deposit",
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{
+ {
+ ProtoField: "proposal_id"
+},
+ {
+ ProtoField: "depositor"
+},
+},
+},
+ {
+ RpcMethod: "Deposits",
+ Use: "deposits [proposal-id]",
+ Short: "Query deposits on a proposal",
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{
+ {
+ ProtoField: "proposal_id"
+},
+},
+},
+ {
+ RpcMethod: "TallyResult",
+ Use: "tally [proposal-id]",
+ Short: "Query the tally of a proposal vote",
+ Example: fmt.Sprintf("%s query gov tally 1", version.AppName),
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{
+ {
+ ProtoField: "proposal_id"
+},
+},
+},
+ {
+ RpcMethod: "Constitution",
+ Use: "constitution",
+ Short: "Query the current chain constitution",
+},
+},
+ // map v1beta1 as a sub-command
+ SubCommands: map[string]*autocliv1.ServiceCommandDescriptor{
+ "v1beta1": {
+ Service: govv1beta1.Query_ServiceDesc.ServiceName
+},
+},
+},
+ Tx: &autocliv1.ServiceCommandDescriptor{
+ Service: govv1.Msg_ServiceDesc.ServiceName,
+ // map v1beta1 as a sub-command
+ SubCommands: map[string]*autocliv1.ServiceCommandDescriptor{
+ "v1beta1": {
+ Service: govv1beta1.Msg_ServiceDesc.ServiceName
+},
+},
+},
+}
+}
+```
+
+### Positional Arguments
+
+By default `autocli` generates a flag for each field in your protobuf message. However, you can choose to use positional arguments instead of flags for certain fields.
+
+To add positional arguments to a command, use the `autocliv1.PositionalArgDescriptor` struct, as seen in the example below. Specify the `ProtoField` parameter, which is the name of the protobuf field that should be used as the positional argument. In addition, if the parameter is a variable-length argument, you can specify the `Varargs` parameter as `true`. This can only be applied to the last positional parameter, and the `ProtoField` must be a repeated field.
+
+Here's an example of how to define a positional argument for the `Account` method of the `auth` service:
+
+```go expandable
+package auth
+
+import (
+
+ "fmt"
+
+ authv1beta1 "cosmossdk.io/api/cosmos/auth/v1beta1"
+ autocliv1 "cosmossdk.io/api/cosmos/autocli/v1"
+ _ "cosmossdk.io/api/cosmos/crypto/secp256k1" // register to that it shows up in protoregistry.GlobalTypes
+ _ "cosmossdk.io/api/cosmos/crypto/secp256r1" // register to that it shows up in protoregistry.GlobalTypes
+
+ "github.com/cosmos/cosmos-sdk/version"
+)
+
+// AutoCLIOptions implements the autocli.HasAutoCLIConfig interface.
+func (am AppModule)
+
+AutoCLIOptions() *autocliv1.ModuleOptions {
+ return &autocliv1.ModuleOptions{
+ Query: &autocliv1.ServiceCommandDescriptor{
+ Service: authv1beta1.Query_ServiceDesc.ServiceName,
+ RpcCommandOptions: []*autocliv1.RpcCommandOptions{
+ {
+ RpcMethod: "Accounts",
+ Use: "accounts",
+ Short: "Query all the accounts",
+},
+ {
+ RpcMethod: "Account",
+ Use: "account [address]",
+ Short: "Query account by address",
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{{
+ ProtoField: "address"
+}},
+},
+ {
+ RpcMethod: "AccountInfo",
+ Use: "account-info [address]",
+ Short: "Query account info which is common to all account types.",
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{{
+ ProtoField: "address"
+}},
+},
+ {
+ RpcMethod: "AccountAddressByID",
+ Use: "address-by-acc-num [acc-num]",
+ Short: "Query account address by account number",
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{{
+ ProtoField: "id"
+}},
+},
+ {
+ RpcMethod: "ModuleAccounts",
+ Use: "module-accounts",
+ Short: "Query all module accounts",
+},
+ {
+ RpcMethod: "ModuleAccountByName",
+ Use: "module-account [module-name]",
+ Short: "Query module account info by module name",
+ Example: fmt.Sprintf("%s q auth module-account gov", version.AppName),
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{{
+ ProtoField: "name"
+}},
+},
+ {
+ RpcMethod: "AddressBytesToString",
+ Use: "address-bytes-to-string [address-bytes]",
+ Short: "Transform an address bytes to string",
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{{
+ ProtoField: "address_bytes"
+}},
+},
+ {
+ RpcMethod: "AddressStringToBytes",
+ Use: "address-string-to-bytes [address-string]",
+ Short: "Transform an address string to bytes",
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{{
+ ProtoField: "address_string"
+}},
+},
+ {
+ RpcMethod: "Bech32Prefix",
+ Use: "bech32-prefix",
+ Short: "Query the chain bech32 prefix (if applicable)",
+},
+ {
+ RpcMethod: "Params",
+ Use: "params",
+ Short: "Query the current auth parameters",
+},
+},
+},
+ // Tx is purposely left empty, as the only tx is MsgUpdateParams which is gov gated.
+}
+}
+```
+
+Then the command can be used as follows, instead of having to specify the `--address` flag:
+
+```bash
+ query auth account cosmos1abcd...xyz
+```
+
+#### Flattened Fields in Positional Arguments
+
+AutoCLI also supports flattening nested message fields as positional arguments. This means you can access nested fields
+using dot notation in the `ProtoField` parameter. This is particularly useful when you want to directly set nested
+message fields as positional arguments.
+
+For example, if you have a nested message structure like this:
+
+```protobuf
+message Permissions {
+ string level = 1;
+ repeated string limit_type_urls = 2;
+}
+
+message MsgAuthorizeCircuitBreaker {
+ string grantee = 1;
+ Permissions permissions = 2;
+}
+```
+
+You can flatten the fields in your AutoCLI configuration:
+
+```go
+{
+ RpcMethod: "AuthorizeCircuitBreaker",
+ Use: "authorize ",
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{
+ {
+ ProtoField: "grantee"
+},
+ {
+ ProtoField: "permissions.level"
+},
+ {
+ ProtoField: "permissions.limit_type_urls"
+},
+},
+}
+```
+
+This allows users to provide values for nested fields directly as positional arguments:
+
+```bash
+ tx circuit authorize cosmos1... super-admin "/cosmos.bank.v1beta1.MsgSend,/cosmos.bank.v1beta1.MsgMultiSend"
+```
+
+Instead of having to provide a complex JSON structure for nested fields, flattening makes the CLI more user-friendly by allowing direct access to nested fields.
+
+#### Customizing Flag Names
+
+By default, `autocli` generates flag names based on the names of the fields in your protobuf message. However, you can customize the flag names by providing a `FlagOptions`. This parameter allows you to specify custom names for flags based on the names of the message fields.
+
+For example, if you have a message with the fields `test` and `test1`, you can use the following naming options to customize the flags:
+
+```go
+autocliv1.RpcCommandOptions{
+ FlagOptions: map[string]*autocliv1.FlagOptions{
+ "test": {
+ Name: "custom_name",
+},
+ "test1": {
+ Name: "other_name",
+},
+},
+}
+```
+
+`FlagOptions` is defined like sub-commands in the `AutoCLIOptions()` method on your module.
+
+### Combining AutoCLI with Other Commands Within A Module
+
+AutoCLI can be used alongside other commands within a module. For example, the `gov` module uses AutoCLI to generate commands for the `query` subcommand, but also defines custom commands for the `proposer` subcommands.
+
+In order to enable this behavior, set in `AutoCLIOptions()` the `EnhanceCustomCommand` field to `true`, for the command type (queries and/or transactions) you want to enhance.
+
+```go expandable
+package gov
+
+import (
+
+ "fmt"
+
+ autocliv1 "cosmossdk.io/api/cosmos/autocli/v1"
+ govv1 "cosmossdk.io/api/cosmos/gov/v1"
+ govv1beta1 "cosmossdk.io/api/cosmos/gov/v1beta1"
+ "github.com/cosmos/cosmos-sdk/version"
+)
+
+// AutoCLIOptions implements the autocli.HasAutoCLIConfig interface.
+func (am AppModule)
+
+AutoCLIOptions() *autocliv1.ModuleOptions {
+ return &autocliv1.ModuleOptions{
+ Query: &autocliv1.ServiceCommandDescriptor{
+ Service: govv1.Query_ServiceDesc.ServiceName,
+ RpcCommandOptions: []*autocliv1.RpcCommandOptions{
+ {
+ RpcMethod: "Params",
+ Use: "params",
+ Short: "Query the parameters of the governance process",
+ Long: "Query the parameters of the governance process. Specify specific param types (voting|tallying|deposit)
+
+to filter results.",
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{
+ {
+ ProtoField: "params_type",
+ Optional: true
+},
+},
+},
+ {
+ RpcMethod: "Proposals",
+ Use: "proposals",
+ Short: "Query proposals with optional filters",
+ Example: fmt.Sprintf("%[1]s query gov proposals --depositor cosmos1...\n%[1]s query gov proposals --voter cosmos1...\n%[1]s query gov proposals --proposal-status (PROPOSAL_STATUS_DEPOSIT_PERIOD|PROPOSAL_STATUS_VOTING_PERIOD|PROPOSAL_STATUS_PASSED|PROPOSAL_STATUS_REJECTED|PROPOSAL_STATUS_FAILED)", version.AppName),
+},
+ {
+ RpcMethod: "Proposal",
+ Use: "proposal [proposal-id]",
+ Short: "Query details of a single proposal",
+ Example: fmt.Sprintf("%s query gov proposal 1", version.AppName),
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{
+ {
+ ProtoField: "proposal_id"
+},
+},
+},
+ {
+ RpcMethod: "Vote",
+ Use: "vote [proposal-id] [voter-addr]",
+ Short: "Query details of a single vote",
+ Example: fmt.Sprintf("%s query gov vote 1 cosmos1...", version.AppName),
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{
+ {
+ ProtoField: "proposal_id"
+},
+ {
+ ProtoField: "voter"
+},
+},
+},
+ {
+ RpcMethod: "Votes",
+ Use: "votes [proposal-id]",
+ Short: "Query votes of a single proposal",
+ Example: fmt.Sprintf("%s query gov votes 1", version.AppName),
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{
+ {
+ ProtoField: "proposal_id"
+},
+},
+},
+ {
+ RpcMethod: "Deposit",
+ Use: "deposit [proposal-id] [depositer-addr]",
+ Short: "Query details of a deposit",
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{
+ {
+ ProtoField: "proposal_id"
+},
+ {
+ ProtoField: "depositor"
+},
+},
+},
+ {
+ RpcMethod: "Deposits",
+ Use: "deposits [proposal-id]",
+ Short: "Query deposits on a proposal",
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{
+ {
+ ProtoField: "proposal_id"
+},
+},
+},
+ {
+ RpcMethod: "TallyResult",
+ Use: "tally [proposal-id]",
+ Short: "Query the tally of a proposal vote",
+ Example: fmt.Sprintf("%s query gov tally 1", version.AppName),
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{
+ {
+ ProtoField: "proposal_id"
+},
+},
+},
+ {
+ RpcMethod: "Constitution",
+ Use: "constitution",
+ Short: "Query the current chain constitution",
+},
+},
+ // map v1beta1 as a sub-command
+ SubCommands: map[string]*autocliv1.ServiceCommandDescriptor{
+ "v1beta1": {
+ Service: govv1beta1.Query_ServiceDesc.ServiceName
+},
+},
+ EnhanceCustomCommand: true, // We still have manual commands in gov that we want to keep
+},
+ Tx: &autocliv1.ServiceCommandDescriptor{
+ Service: govv1.Msg_ServiceDesc.ServiceName,
+ // map v1beta1 as a sub-command
+ SubCommands: map[string]*autocliv1.ServiceCommandDescriptor{
+ "v1beta1": {
+ Service: govv1beta1.Msg_ServiceDesc.ServiceName
+},
+},
+},
+}
+}
+```
+
+If not set to true, `AutoCLI` will not generate commands for the module if there are already commands registered for the module (when `GetTxCmd()` or `GetQueryCmd()` are defined).
+
+### Skip a command
+
+AutoCLI automatically skips unsupported commands when [`cosmos_proto.method_added_in` protobuf annotation](/sdk/v0.53/build/building-modules/protobuf-annotations) is present.
+
+Additionally, a command can be manually skipped using the `autocliv1.RpcCommandOptions`:
+
+```go
+*autocliv1.RpcCommandOptions{
+ RpcMethod: "Params", // The name of the gRPC service
+ Skip: true,
+}
+```
+
+### Use AutoCLI for non module commands
+
+It is possible to use `AutoCLI` for non module commands. The trick is still to implement the `appmodule.Module` interface and append it to the `appOptions.ModuleOptions` map.
+
+For example, here is how the SDK does it for `cometbft` gRPC commands:
+
+```go expandable
+package cmtservice
+
+import (
+
+ autocliv1 "cosmossdk.io/api/cosmos/autocli/v1"
+ cmtv1beta1 "cosmossdk.io/api/cosmos/base/tendermint/v1beta1"
+)
+
+var CometBFTAutoCLIDescriptor = &autocliv1.ServiceCommandDescriptor{
+ Service: cmtv1beta1.Service_ServiceDesc.ServiceName,
+ RpcCommandOptions: []*autocliv1.RpcCommandOptions{
+ {
+ RpcMethod: "GetNodeInfo",
+ Use: "node-info",
+ Short: "Query the current node info",
+},
+ {
+ RpcMethod: "GetSyncing",
+ Use: "syncing",
+ Short: "Query node syncing status",
+},
+ {
+ RpcMethod: "GetLatestBlock",
+ Use: "block-latest",
+ Short: "Query for the latest committed block",
+},
+ {
+ RpcMethod: "GetBlockByHeight",
+ Use: "block-by-height [height]",
+ Short: "Query for a committed block by height",
+ Long: "Query for a specific committed block using the CometBFT RPC `block_by_height` method",
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{{
+ ProtoField: "height"
+}},
+},
+ {
+ RpcMethod: "GetLatestValidatorSet",
+ Use: "validator-set",
+ Alias: []string{"validator-set-latest", "comet-validator-set", "cometbft-validator-set", "tendermint-validator-set"
+},
+ Short: "Query for the latest validator set",
+},
+ {
+ RpcMethod: "GetValidatorSetByHeight",
+ Use: "validator-set-by-height [height]",
+ Short: "Query for a validator set by height",
+ PositionalArgs: []*autocliv1.PositionalArgDescriptor{{
+ ProtoField: "height"
+}},
+},
+ {
+ RpcMethod: "ABCIQuery",
+ Skip: true,
+},
+},
+}
+
+// NewCometBFTCommands is a fake `appmodule.Module` to be considered as a module
+// and be added in AutoCLI.
+func NewCometBFTCommands() *cometModule { //nolint:revive // fake module and limiting import of core
+ return &cometModule{
+}
+}
+
+type cometModule struct{
+}
+
+func (m cometModule)
+
+IsOnePerModuleType() {
+}
+
+func (m cometModule)
+
+IsAppModule() {
+}
+
+func (m cometModule)
+
+Name()
+
+string {
+ return "comet"
+}
+
+func (m cometModule)
+
+AutoCLIOptions() *autocliv1.ModuleOptions {
+ return &autocliv1.ModuleOptions{
+ Query: CometBFTAutoCLIDescriptor,
+}
+}
+```
+
+## Summary
+
+`autocli` lets you generate CLI for your Cosmos SDK-based applications without any cobra boilerplate. It allows you to easily generate CLI commands and flags from your protobuf messages, and provides many options for customizing the behavior of your CLI application.
+
+To further enhance your CLI experience with Cosmos SDK-based blockchains, you can use `hubl`. `hubl` is a tool that allows you to query any Cosmos SDK-based blockchain using the new AutoCLI feature of the Cosmos SDK. With `hubl`, you can easily configure a new chain and query modules with just a few simple commands.
+
+For more information on `hubl`, including how to configure a new chain and query a module, see the [Hubl documentation](/sdk/v0.53/build/tooling/hubl).
diff --git a/sdk/next/learn/advanced/baseapp.mdx b/sdk/next/learn/advanced/baseapp.mdx
new file mode 100644
index 000000000..ab648b6f5
--- /dev/null
+++ b/sdk/next/learn/advanced/baseapp.mdx
@@ -0,0 +1,11309 @@
+---
+title: BaseApp
+---
+
+
+**Synopsis**
+This document describes `BaseApp`, the abstraction that implements the core functionalities of a Cosmos SDK application.
+
+
+
+**Prerequisite Readings**
+
+* [Anatomy of a Cosmos SDK application](/sdk/next/learn/beginner/app-anatomy)
+* [Lifecycle of a Cosmos SDK transaction](/sdk/next/learn/beginner/tx-lifecycle)
+
+
+
+## Introduction
+
+`BaseApp` is a base type that implements the core of a Cosmos SDK application, namely:
+
+* The [Application Blockchain Interface](#main-abci-messages), for the state-machine to communicate with the underlying consensus engine (e.g. CometBFT).
+* [Service Routers](#service-routers), to route messages and queries to the appropriate module.
+* Different [states](#state-updates), as the state-machine can have different volatile states updated based on the ABCI message received.
+
+The goal of `BaseApp` is to provide the fundamental layer of a Cosmos SDK application
+that developers can easily extend to build their own custom application. Usually,
+developers will create a custom type for their application, like so:
+
+```go
+type App struct {
+ // reference to a BaseApp
+ *baseapp.BaseApp
+
+ // list of application store keys
+
+ // list of application keepers
+
+ // module manager
+}
+```
+
+Extending the application with `BaseApp` gives the former access to all of `BaseApp`'s methods.
+This allows developers to compose their custom application with the modules they want, while not
+having to concern themselves with the hard work of implementing the ABCI, the service routers and state
+management logic.
+
+## Type Definition
+
+The `BaseApp` type holds many important parameters for any Cosmos SDK based application.
+
+```go expandable
+package baseapp
+
+import (
+
+ "context"
+ "fmt"
+ "maps"
+ "math"
+ "slices"
+ "strconv"
+ "sync"
+ "github.com/cockroachdb/errors"
+ abci "github.com/cometbft/cometbft/abci/types"
+ "github.com/cometbft/cometbft/crypto/tmhash"
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ dbm "github.com/cosmos/cosmos-db"
+ "github.com/cosmos/gogoproto/proto"
+ protov2 "google.golang.org/protobuf/proto"
+ "cosmossdk.io/core/header"
+ errorsmod "cosmossdk.io/errors"
+ "cosmossdk.io/log"
+ "cosmossdk.io/store"
+ storemetrics "cosmossdk.io/store/metrics"
+ "cosmossdk.io/store/snapshots"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/baseapp/oe"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ servertypes "github.com/cosmos/cosmos-sdk/server/types"
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/types/mempool"
+ "github.com/cosmos/cosmos-sdk/types/msgservice"
+)
+
+type (
+    execMode uint8
+
+    // StoreLoader defines a customizable function to control how we load the
+    // CommitMultiStore from disk. This is useful for state migration, when
+    // loading a datastore written with an older version of the software. In
+    // particular, if a module changed the substore key name (or removed a substore)
+    // between two versions of the software.
+    StoreLoader func(ms storetypes.CommitMultiStore) error
+)
+
+const (
+    execModeCheck               execMode = iota // Check a transaction
+    execModeReCheck                             // Recheck a (pending) transaction after a commit
+    execModeSimulate                            // Simulate a transaction
+    execModePrepareProposal                     // Prepare a block proposal
+    execModeProcessProposal                     // Process a block proposal
+    execModeVoteExtension                       // Extend or verify a pre-commit vote
+    execModeVerifyVoteExtension                 // Verify a vote extension
+    execModeFinalize                            // Finalize a block proposal
+)
+
+var _ servertypes.ABCI = (*BaseApp)(nil)
+
+// BaseApp reflects the ABCI application implementation.
+type BaseApp struct {
+    // initialized on creation
+    mu          sync.Mutex // mu protects the fields below.
+    logger      log.Logger
+    name        string                      // application name from abci.BlockInfo
+    db          dbm.DB                      // common DB backend
+    cms         storetypes.CommitMultiStore // Main (uncached) state
+    qms         storetypes.MultiStore       // Optional alternative multistore for querying only.
+    storeLoader StoreLoader                 // function to handle store loading, may be overridden with SetStoreLoader()
+
+    grpcQueryRouter   *GRPCQueryRouter  // router for redirecting gRPC query calls
+    msgServiceRouter  *MsgServiceRouter // router for redirecting Msg service messages
+    interfaceRegistry codectypes.InterfaceRegistry
+    txDecoder         sdk.TxDecoder // unmarshal []byte into sdk.Tx
+    txEncoder         sdk.TxEncoder // marshal sdk.Tx into []byte
+
+    mempool     mempool.Mempool // application side mempool
+    anteHandler sdk.AnteHandler // ante handler for fee and auth
+    postHandler sdk.PostHandler // post handler, optional
+
+    checkTxHandler     sdk.CheckTxHandler             // ABCI CheckTx handler
+    initChainer        sdk.InitChainer                // ABCI InitChain handler
+    preBlocker         sdk.PreBlocker                 // logic to run before BeginBlocker
+    beginBlocker       sdk.BeginBlocker               // (legacy ABCI) BeginBlock handler
+    endBlocker         sdk.EndBlocker                 // (legacy ABCI) EndBlock handler
+    processProposal    sdk.ProcessProposalHandler     // ABCI ProcessProposal handler
+    prepareProposal    sdk.PrepareProposalHandler     // ABCI PrepareProposal
+    extendVote         sdk.ExtendVoteHandler          // ABCI ExtendVote handler
+    verifyVoteExt      sdk.VerifyVoteExtensionHandler // ABCI VerifyVoteExtension handler
+    prepareCheckStater sdk.PrepareCheckStater         // logic to run during commit using the checkState
+    precommiter        sdk.Precommiter                // logic to run during commit using the deliverState
+
+    addrPeerFilter sdk.PeerFilter // filter peers by address and port
+    idPeerFilter   sdk.PeerFilter // filter peers by node ID
+    fauxMerkleMode bool           // if true, IAVL MountStores uses MountStoresDB for simulation speed.
+    sigverifyTx    bool           // in the simulation test, since the account does not have a private key, we have to ignore the tx sigverify.
+
+    // manages snapshots, i.e. dumps of app state at certain intervals
+    snapshotManager *snapshots.Manager
+
+    // volatile states:
+    //
+    // - checkState is set on InitChain and reset on Commit
+    // - finalizeBlockState is set on InitChain and FinalizeBlock and set to nil
+    //   on Commit.
+    //
+    // - checkState: Used for CheckTx, which is set based on the previous block's
+    //   state. This state is never committed.
+    //
+    // - prepareProposalState: Used for PrepareProposal, which is set based on the
+    //   previous block's state. This state is never committed. In case of multiple
+    //   consensus rounds, the state is always reset to the previous block's state.
+    //
+    // - processProposalState: Used for ProcessProposal, which is set based on the
+    //   the previous block's state. This state is never committed. In case of
+    //   multiple rounds, the state is always reset to the previous block's state.
+    //
+    // - finalizeBlockState: Used for FinalizeBlock, which is set based on the
+    //   previous block's state. This state is committed.
+    checkState           *state
+    prepareProposalState *state
+    processProposalState *state
+    finalizeBlockState   *state
+
+    // An inter-block write-through cache provided to the context during the ABCI
+    // FinalizeBlock call.
+    interBlockCache storetypes.MultiStorePersistentCache
+
+    // paramStore is used to query for ABCI consensus parameters from an
+    // application parameter store.
+    paramStore ParamStore
+
+    // queryGasLimit defines the maximum gas for queries; unbounded if 0.
+    queryGasLimit uint64
+
+    // The minimum gas prices a validator is willing to accept for processing a
+    // transaction. This is mainly used for DoS and spam prevention.
+    minGasPrices sdk.DecCoins
+
+    // initialHeight is the initial height at which we start the BaseApp
+    initialHeight int64
+
+    // flag for sealing options and parameters to a BaseApp
+    sealed bool
+
+    // block height at which to halt the chain and gracefully shutdown
+    haltHeight uint64
+
+    // minimum block time (in Unix seconds) at which to halt the chain and
+    // gracefully shutdown
+    haltTime uint64
+
+    // minRetainBlocks defines the minimum block height offset from the current
+    // block being committed, such that all blocks past this offset are pruned
+    // from CometBFT. It is used as part of the process of determining the
+    // ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates
+    // that no blocks should be pruned.
+    //
+    // Note: CometBFT block pruning is dependant on this parameter in conjunction
+    // with the unbonding (safety threshold) period, state pruning and state sync
+    // snapshot parameters to determine the correct minimum value of
+    // ResponseCommit.RetainHeight.
+    minRetainBlocks uint64
+
+    // application's version string
+    version string
+
+    // application's protocol version that increments on every upgrade
+    // if BaseApp is passed to the upgrade keeper's NewKeeper method.
+    appVersion uint64
+
+    // recovery handler for app.runTx method
+    runTxRecoveryMiddleware recoveryMiddleware
+
+    // trace set will return full stack traces for errors in ABCI Log field
+    trace bool
+
+    // indexEvents defines the set of events in the form {eventType}.{attributeKey},
+    // which informs CometBFT what to index. If empty, all events will be indexed.
+    indexEvents map[string]struct{}
+
+    // streamingManager for managing instances and configuration of ABCIListener services
+    streamingManager storetypes.StreamingManager
+
+    chainID string
+
+    cdc codec.Codec
+
+    // optimisticExec contains the context required for Optimistic Execution,
+    // including the goroutine handling. This is experimental and must be enabled
+    // by developers.
+    optimisticExec *oe.OptimisticExecution
+
+    // disableBlockGasMeter will disable the block gas meter if true, block gas meter is tricky to support
+    // when executing transactions in parallel.
+    // when disabled, the block gas meter in context is a noop one.
+    //
+    // SAFETY: it's safe to do if validators validate the total gas wanted in the `ProcessProposal`, which is the case in the default handler.
+    disableBlockGasMeter bool
+}
+
+// NewBaseApp returns a reference to an initialized BaseApp. It accepts a
+// variadic number of option functions, which act on the BaseApp to set
+// configuration choices.
+func NewBaseApp(
+    name string, logger log.Logger, db dbm.DB, txDecoder sdk.TxDecoder, options ...func(*BaseApp),
+) *BaseApp {
+    app := &BaseApp{
+        logger:           logger.With(log.ModuleKey, "baseapp"),
+        name:             name,
+        db:               db,
+        cms:              store.NewCommitMultiStore(db, logger, storemetrics.NewNoOpMetrics()), // by default we use a no-op metric gather in store
+        storeLoader:      DefaultStoreLoader,
+        grpcQueryRouter:  NewGRPCQueryRouter(),
+        msgServiceRouter: NewMsgServiceRouter(),
+        txDecoder:        txDecoder,
+        fauxMerkleMode:   false,
+        sigverifyTx:      true,
+        queryGasLimit:    math.MaxUint64,
+    }
+
+    for _, option := range options {
+        option(app)
+    }
+
+    if app.mempool == nil {
+        app.SetMempool(mempool.NoOpMempool{})
+    }
+
+    abciProposalHandler := NewDefaultProposalHandler(app.mempool, app)
+
+    if app.prepareProposal == nil {
+        app.SetPrepareProposal(abciProposalHandler.PrepareProposalHandler())
+    }
+    if app.processProposal == nil {
+        app.SetProcessProposal(abciProposalHandler.ProcessProposalHandler())
+    }
+    if app.extendVote == nil {
+        app.SetExtendVoteHandler(NoOpExtendVote())
+    }
+    if app.verifyVoteExt == nil {
+        app.SetVerifyVoteExtensionHandler(NoOpVerifyVoteExtensionHandler())
+    }
+    if app.interBlockCache != nil {
+        app.cms.SetInterBlockCache(app.interBlockCache)
+    }
+
+    app.runTxRecoveryMiddleware = newDefaultRecoveryMiddleware()
+
+    // Initialize with an empty interface registry to avoid nil pointer dereference.
+    // Unless SetInterfaceRegistry is called with an interface registry with proper address codecs baseapp will panic.
+    app.cdc = codec.NewProtoCodec(codectypes.NewInterfaceRegistry())
+
+    protoFiles, err := proto.MergedRegistry()
+    if err != nil {
+        logger.Warn("error creating merged proto registry", "error", err)
+    } else {
+        err = msgservice.ValidateProtoAnnotations(protoFiles)
+        if err != nil {
+            // Once we switch to using protoreflect-based antehandlers, we might
+            // want to panic here instead of logging a warning.
+            logger.Warn("error validating merged proto registry annotations", "error", err)
+        }
+    }
+
+    return app
+}
+
+// Name returns the name of the BaseApp.
+func (app *BaseApp) Name() string {
+    return app.name
+}
+
+// AppVersion returns the application's protocol version.
+func (app *BaseApp) AppVersion() uint64 {
+    return app.appVersion
+}
+
+// Version returns the application's version string.
+func (app *BaseApp) Version() string {
+    return app.version
+}
+
+// Logger returns the logger of the BaseApp.
+func (app *BaseApp) Logger() log.Logger {
+    return app.logger
+}
+
+// Trace returns the boolean value for logging error stack traces.
+func (app *BaseApp) Trace() bool {
+    return app.trace
+}
+
+// MsgServiceRouter returns the MsgServiceRouter of a BaseApp.
+func (app *BaseApp) MsgServiceRouter() *MsgServiceRouter {
+    return app.msgServiceRouter
+}
+
+// GRPCQueryRouter returns the GRPCQueryRouter of a BaseApp.
+func (app *BaseApp) GRPCQueryRouter() *GRPCQueryRouter {
+    return app.grpcQueryRouter
+}
+
+// MountStores mounts all IAVL or DB stores to the provided keys in the BaseApp
+// multistore.
+func (app *BaseApp) MountStores(keys ...storetypes.StoreKey) {
+    for _, key := range keys {
+        switch key.(type) {
+        case *storetypes.KVStoreKey:
+            if !app.fauxMerkleMode {
+                app.MountStore(key, storetypes.StoreTypeIAVL)
+            } else {
+                // StoreTypeDB doesn't do anything upon commit, and it doesn't
+                // retain history, but it's useful for faster simulation.
+                app.MountStore(key, storetypes.StoreTypeDB)
+            }
+        case *storetypes.TransientStoreKey:
+            app.MountStore(key, storetypes.StoreTypeTransient)
+        case *storetypes.MemoryStoreKey:
+            app.MountStore(key, storetypes.StoreTypeMemory)
+        default:
+            panic(fmt.Sprintf("Unrecognized store key type :%T", key))
+        }
+    }
+}
+
+// MountKVStores mounts all IAVL or DB stores to the provided keys in the
+// BaseApp multistore.
+func (app *BaseApp) MountKVStores(keys map[string]*storetypes.KVStoreKey) {
+    for _, key := range keys {
+        if !app.fauxMerkleMode {
+            app.MountStore(key, storetypes.StoreTypeIAVL)
+        } else {
+            // StoreTypeDB doesn't do anything upon commit, and it doesn't
+            // retain history, but it's useful for faster simulation.
+            app.MountStore(key, storetypes.StoreTypeDB)
+        }
+    }
+}
+
+// MountTransientStores mounts all transient stores to the provided keys in
+// the BaseApp multistore.
+func (app *BaseApp) MountTransientStores(keys map[string]*storetypes.TransientStoreKey) {
+    for _, key := range keys {
+        app.MountStore(key, storetypes.StoreTypeTransient)
+    }
+}
+
+// MountMemoryStores mounts all in-memory KVStores with the BaseApp's internal
+// commit multi-store. Keys are mounted in sorted order for determinism.
+func (app *BaseApp) MountMemoryStores(keys map[string]*storetypes.MemoryStoreKey) {
+    skeys := slices.Sorted(maps.Keys(keys))
+    for _, key := range skeys {
+        memKey := keys[key]
+        app.MountStore(memKey, storetypes.StoreTypeMemory)
+    }
+}
+
+// MountStore mounts a store to the provided key in the BaseApp multistore,
+// using the default DB.
+func (app *BaseApp) MountStore(key storetypes.StoreKey, typ storetypes.StoreType) {
+    app.cms.MountStoreWithDB(key, typ, nil)
+}
+
+// LoadLatestVersion loads the latest application version. It will panic if
+// called more than once on a running BaseApp.
+func (app *BaseApp) LoadLatestVersion() error {
+    err := app.storeLoader(app.cms)
+    if err != nil {
+        return fmt.Errorf("failed to load latest version: %w", err)
+    }
+
+    return app.Init()
+}
+
+// DefaultStoreLoader will be used by default and loads the latest version
+func DefaultStoreLoader(ms storetypes.CommitMultiStore) error {
+    return ms.LoadLatestVersion()
+}
+
+// CommitMultiStore returns the root multi-store.
+// App constructor can use this to access the `cms`.
+// UNSAFE: must not be used during the abci life cycle.
+func (app *BaseApp) CommitMultiStore() storetypes.CommitMultiStore {
+    return app.cms
+}
+
+// SnapshotManager returns the snapshot manager.
+// application use this to register extra extension snapshotters.
+func (app *BaseApp) SnapshotManager() *snapshots.Manager {
+    return app.snapshotManager
+}
+
+// LoadVersion loads the BaseApp application version. It will panic if called
+// more than once on a running baseapp.
+func (app *BaseApp) LoadVersion(version int64) error {
+    app.logger.Info("NOTICE: this could take a long time to migrate IAVL store to fastnode if you enable Fast Node.\n")
+
+    err := app.cms.LoadVersion(version)
+    if err != nil {
+        return fmt.Errorf("failed to load version %d: %w", version, err)
+    }
+
+    return app.Init()
+}
+
+// LastCommitID returns the last CommitID of the multistore.
+func (app *BaseApp) LastCommitID() storetypes.CommitID {
+    return app.cms.LastCommitID()
+}
+
+// LastBlockHeight returns the last committed block height.
+func (app *BaseApp) LastBlockHeight() int64 {
+    return app.cms.LastCommitID().Version
+}
+
+// ChainID returns the chainID of the app.
+func (app *BaseApp) ChainID() string {
+    return app.chainID
+}
+
+// AnteHandler returns the AnteHandler of the app.
+func (app *BaseApp) AnteHandler() sdk.AnteHandler {
+    return app.anteHandler
+}
+
+// Mempool returns the Mempool of the app.
+func (app *BaseApp) Mempool() mempool.Mempool {
+    return app.mempool
+}
+
+// Init initializes the app. It seals the app, preventing any
+// further modifications. In addition, it validates the app against
+// the earlier provided settings. Returns an error if validation fails.
+// nil otherwise. Panics if the app is already sealed.
+func (app *BaseApp) Init() error {
+    if app.sealed {
+        panic("cannot call initFromMainStore: baseapp already sealed")
+    }
+
+    if app.cms == nil {
+        return errors.New("commit multi-store must not be nil")
+    }
+
+    emptyHeader := cmtproto.Header{ChainID: app.chainID}
+
+    // needed for the export command which inits from store but never calls initchain
+    app.setState(execModeCheck, emptyHeader)
+    app.Seal()
+
+    return app.cms.GetPruning().Validate()
+}
+
+// setMinGasPrices sets the minimum gas prices accepted for CheckTx.
+func (app *BaseApp) setMinGasPrices(gasPrices sdk.DecCoins) {
+    app.minGasPrices = gasPrices
+}
+
+// setHaltHeight sets the block height at which to halt the chain.
+func (app *BaseApp) setHaltHeight(haltHeight uint64) {
+    app.haltHeight = haltHeight
+}
+
+// setHaltTime sets the minimum block time (in Unix seconds) at which to halt the chain.
+func (app *BaseApp) setHaltTime(haltTime uint64) {
+    app.haltTime = haltTime
+}
+
+// setMinRetainBlocks sets the minimum block retention offset used for pruning.
+func (app *BaseApp) setMinRetainBlocks(minRetainBlocks uint64) {
+    app.minRetainBlocks = minRetainBlocks
+}
+
+// setInterBlockCache sets the inter-block write-through cache.
+func (app *BaseApp) setInterBlockCache(cache storetypes.MultiStorePersistentCache) {
+    app.interBlockCache = cache
+}
+
+// setTrace toggles full stack traces for errors in the ABCI Log field.
+func (app *BaseApp) setTrace(trace bool) {
+    app.trace = trace
+}
+
+// setIndexEvents records the set of events to be indexed by CometBFT.
+func (app *BaseApp) setIndexEvents(ie []string) {
+    app.indexEvents = make(map[string]struct{})
+    for _, e := range ie {
+        app.indexEvents[e] = struct{}{}
+    }
+}
+
+// Seal seals a BaseApp. It prohibits any further modifications to a BaseApp.
+func (app *BaseApp) Seal() {
+    app.sealed = true
+}
+
+// IsSealed returns true if the BaseApp is sealed and false otherwise.
+func (app *BaseApp) IsSealed() bool {
+    return app.sealed
+}
+
+// setState sets the BaseApp's state for the corresponding mode with a branched
+// multi-store (i.e. a CacheMultiStore) and a new Context with the same
+// multi-store branch, and provided header.
+func (app *BaseApp) setState(mode execMode, h cmtproto.Header) {
+    ms := app.cms.CacheMultiStore()
+    headerInfo := header.Info{
+        Height:  h.Height,
+        Time:    h.Time,
+        ChainID: h.ChainID,
+        AppHash: h.AppHash,
+    }
+    baseState := &state{
+        ms: ms,
+        ctx: sdk.NewContext(ms, h, false, app.logger).
+            WithStreamingManager(app.streamingManager).
+            WithHeaderInfo(headerInfo),
+    }
+
+    switch mode {
+    case execModeCheck:
+        baseState.SetContext(baseState.Context().WithIsCheckTx(true).WithMinGasPrices(app.minGasPrices))
+        app.checkState = baseState
+    case execModePrepareProposal:
+        app.prepareProposalState = baseState
+    case execModeProcessProposal:
+        app.processProposalState = baseState
+    case execModeFinalize:
+        app.finalizeBlockState = baseState
+    default:
+        panic(fmt.Sprintf("invalid runTxMode for setState: %d", mode))
+    }
+}
+
+// SetCircuitBreaker sets the circuit breaker for the BaseApp.
+// The circuit breaker is checked on every message execution to verify if a transaction should be executed or not.
+func (app *BaseApp) SetCircuitBreaker(cb CircuitBreaker) {
+    if app.msgServiceRouter == nil {
+        panic("cannot set circuit breaker with no msg service router set")
+    }
+
+    app.msgServiceRouter.SetCircuit(cb)
+}
+
+// GetConsensusParams returns the current consensus parameters from the BaseApp's
+// ParamStore. If the BaseApp has no ParamStore defined, an empty struct is returned.
+func (app *BaseApp) GetConsensusParams(ctx sdk.Context) cmtproto.ConsensusParams {
+    if app.paramStore == nil {
+        return cmtproto.ConsensusParams{}
+    }
+
+    cp, err := app.paramStore.Get(ctx)
+    if err != nil {
+        // This could happen while migrating from v0.45/v0.46 to v0.50, we should
+        // allow it to happen so during preblock the upgrade plan can be executed
+        // and the consensus params set for the first time in the new format.
+        app.logger.Error("failed to get consensus params", "err", err)
+        return cmtproto.ConsensusParams{}
+    }
+
+    return cp
+}
+
+// StoreConsensusParams sets the consensus parameters to the BaseApp's param
+// store.
+//
+// NOTE: We're explicitly not storing the CometBFT app_version in the param store.
+// It's stored instead in the x/upgrade store, with its own bump logic.
+func (app *BaseApp) StoreConsensusParams(ctx sdk.Context, cp cmtproto.ConsensusParams) error {
+    if app.paramStore == nil {
+        return errors.New("cannot store consensus params with no params store set")
+    }
+
+    return app.paramStore.Set(ctx, cp)
+}
+
+// AddRunTxRecoveryHandler adds custom app.runTx method panic handlers.
+func (app *BaseApp) AddRunTxRecoveryHandler(handlers ...RecoveryHandler) {
+    for _, h := range handlers {
+        app.runTxRecoveryMiddleware = newRecoveryMiddleware(h, app.runTxRecoveryMiddleware)
+    }
+}
+
+// GetMaximumBlockGas gets the maximum gas from the consensus params. It panics
+// if maximum block gas is less than negative one and returns zero if negative
+// one.
+func (app *BaseApp) GetMaximumBlockGas(ctx sdk.Context) uint64 {
+    cp := app.GetConsensusParams(ctx)
+    if cp.Block == nil {
+        return 0
+    }
+
+    maxGas := cp.Block.MaxGas
+
+    switch {
+    case maxGas < -1:
+        panic(fmt.Sprintf("invalid maximum block gas: %d", maxGas))
+    case maxGas == -1:
+        return 0
+    default:
+        return uint64(maxGas)
+    }
+}
+
+// validateFinalizeBlockHeight checks that the height of a FinalizeBlock
+// request matches the next expected block height.
+func (app *BaseApp) validateFinalizeBlockHeight(req *abci.RequestFinalizeBlock) error {
+    if req.Height < 1 {
+        return fmt.Errorf("invalid height: %d", req.Height)
+    }
+
+    lastBlockHeight := app.LastBlockHeight()
+
+    // expectedHeight holds the expected height to validate
+    var expectedHeight int64
+    if lastBlockHeight == 0 && app.initialHeight > 1 {
+        // In this case, we're validating the first block of the chain, i.e no
+        // previous commit. The height we're expecting is the initial height.
+        expectedHeight = app.initialHeight
+    } else {
+        // This case can mean two things:
+        //
+        // - Either there was already a previous commit in the store, in which
+        //   case we increment the version from there.
+        // - Or there was no previous commit, in which case we start at version 1.
+        expectedHeight = lastBlockHeight + 1
+    }
+
+    if req.Height != expectedHeight {
+        return fmt.Errorf("invalid height: %d; expected: %d", req.Height, expectedHeight)
+    }
+
+    return nil
+}
+
+// validateBasicTxMsgs executes basic validator calls for messages.
+func validateBasicTxMsgs(msgs []sdk.Msg) error {
+    if len(msgs) == 0 {
+        return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "must contain at least one message")
+    }
+
+    for _, msg := range msgs {
+        m, ok := msg.(sdk.HasValidateBasic)
+        if !ok {
+            continue
+        }
+
+        if err := m.ValidateBasic(); err != nil {
+            return err
+        }
+    }
+
+    return nil
+}
+
+// getState returns the volatile state for the given execution mode;
+// checkState is the fallback for all unhandled modes.
+func (app *BaseApp) getState(mode execMode) *state {
+    switch mode {
+    case execModeFinalize:
+        return app.finalizeBlockState
+    case execModePrepareProposal:
+        return app.prepareProposalState
+    case execModeProcessProposal:
+        return app.processProposalState
+    default:
+        return app.checkState
+    }
+}
+
+// getBlockGasMeter returns the gas meter used to track block gas consumption;
+// a no-op meter is returned when the block gas meter is disabled.
+func (app *BaseApp) getBlockGasMeter(ctx sdk.Context) storetypes.GasMeter {
+    if app.disableBlockGasMeter {
+        return noopGasMeter{}
+    }
+
+    if maxGas := app.GetMaximumBlockGas(ctx); maxGas > 0 {
+        return storetypes.NewGasMeter(maxGas)
+    }
+
+    return storetypes.NewInfiniteGasMeter()
+}
+
+// getContextForTx retrieves the context for the tx w/ txBytes and other memoized values.
+func (app *BaseApp) getContextForTx(mode execMode, txBytes []byte) sdk.Context {
+    app.mu.Lock()
+    defer app.mu.Unlock()
+
+    modeState := app.getState(mode)
+    if modeState == nil {
+        panic(fmt.Sprintf("state is nil for mode %v", mode))
+    }
+
+    ctx := modeState.Context().
+        WithTxBytes(txBytes).
+        WithGasMeter(storetypes.NewInfiniteGasMeter())
+    // WithVoteInfos(app.voteInfos) // TODO: identify if this is needed
+
+    ctx = ctx.WithIsSigverifyTx(app.sigverifyTx)
+    ctx = ctx.WithConsensusParams(app.GetConsensusParams(ctx))
+
+    if mode == execModeReCheck {
+        ctx = ctx.WithIsReCheckTx(true)
+    }
+    if mode == execModeSimulate {
+        ctx, _ = ctx.CacheContext()
+        ctx = ctx.WithExecMode(sdk.ExecMode(execModeSimulate))
+    }
+
+    return ctx
+}
+
+// cacheTxContext returns a new context based off of the provided context with
+// a branched multi-store.
+func (app *BaseApp) cacheTxContext(ctx sdk.Context, txBytes []byte) (sdk.Context, storetypes.CacheMultiStore) {
+    ms := ctx.MultiStore()
+    msCache := ms.CacheMultiStore()
+    if msCache.TracingEnabled() {
+        msCache = msCache.SetTracingContext(
+            storetypes.TraceContext(
+                map[string]any{
+                    "txHash": fmt.Sprintf("%X", tmhash.Sum(txBytes)),
+                },
+            ),
+        ).(storetypes.CacheMultiStore)
+    }
+
+    return ctx.WithMultiStore(msCache), msCache
+}
+
+// preBlock runs the application's PreBlocker, if set, and returns the events
+// it emitted. When the PreBlocker reports changed consensus params, the
+// finalizeBlockState context is refreshed with the updated params and gas meter.
+func (app *BaseApp) preBlock(req *abci.RequestFinalizeBlock) ([]abci.Event, error) {
+    var events []abci.Event
+    if app.preBlocker != nil {
+        ctx := app.finalizeBlockState.Context().WithEventManager(sdk.NewEventManager())
+        rsp, err := app.preBlocker(ctx, req)
+        if err != nil {
+            return nil, err
+        }
+
+        // rsp.ConsensusParamsChanged is true from preBlocker means ConsensusParams in store get changed
+        // write the consensus parameters in store to context
+        if rsp.ConsensusParamsChanged {
+            ctx = ctx.WithConsensusParams(app.GetConsensusParams(ctx))
+
+            // GasMeter must be set after we get a context with updated consensus params.
+            gasMeter := app.getBlockGasMeter(ctx)
+            ctx = ctx.WithBlockGasMeter(gasMeter)
+            app.finalizeBlockState.SetContext(ctx)
+        }
+
+        events = ctx.EventManager().ABCIEvents()
+    }
+
+    return events, nil
+}
+
+// beginBlock runs the application's BeginBlocker, if set, tagging every emitted
+// event with a "mode: BeginBlock" attribute and marking events for indexing.
+func (app *BaseApp) beginBlock(_ *abci.RequestFinalizeBlock) (sdk.BeginBlock, error) {
+    var (
+        resp sdk.BeginBlock
+        err  error
+    )
+
+    if app.beginBlocker != nil {
+        resp, err = app.beginBlocker(app.finalizeBlockState.Context())
+        if err != nil {
+            return resp, err
+        }
+
+        // append BeginBlock attributes to all events in the EndBlock response
+        for i, event := range resp.Events {
+            resp.Events[i].Attributes = append(
+                event.Attributes,
+                abci.EventAttribute{Key: "mode", Value: "BeginBlock"},
+            )
+        }
+
+        resp.Events = sdk.MarkEventsToIndex(resp.Events, app.indexEvents)
+    }
+
+    return resp, nil
+}
+
+// deliverTx executes a single transaction in FinalizeBlock mode and records
+// per-transaction telemetry for the result.
+func (app *BaseApp) deliverTx(tx []byte) *abci.ExecTxResult {
+    gInfo := sdk.GasInfo{}
+    resultStr := "successful"
+
+    var resp *abci.ExecTxResult
+
+    defer func() {
+        telemetry.IncrCounter(1, "tx", "count")
+        telemetry.IncrCounter(1, "tx", resultStr)
+        telemetry.SetGauge(float32(gInfo.GasUsed), "tx", "gas", "used")
+        telemetry.SetGauge(float32(gInfo.GasWanted), "tx", "gas", "wanted")
+    }()
+
+    gInfo, result, anteEvents, err := app.runTx(execModeFinalize, tx, nil)
+    if err != nil {
+        resultStr = "failed"
+        resp = sdkerrors.ResponseExecTxResultWithEvents(
+            err,
+            gInfo.GasWanted,
+            gInfo.GasUsed,
+            sdk.MarkEventsToIndex(anteEvents, app.indexEvents),
+            app.trace,
+        )
+        return resp
+    }
+
+    resp = &abci.ExecTxResult{
+        GasWanted: int64(gInfo.GasWanted),
+        GasUsed:   int64(gInfo.GasUsed),
+        Log:       result.Log,
+        Data:      result.Data,
+        Events:    sdk.MarkEventsToIndex(result.Events, app.indexEvents),
+    }
+
+    return resp
+}
+
+// endBlock is an application-defined function that is called after transactions
+// have been processed in FinalizeBlock.
+func (app *BaseApp) endBlock(_ context.Context) (sdk.EndBlock, error) {
+    var endblock sdk.EndBlock
+
+    if app.endBlocker != nil {
+        eb, err := app.endBlocker(app.finalizeBlockState.Context())
+        if err != nil {
+            return endblock, err
+        }
+
+        // append EndBlock attributes to all events in the EndBlock response
+        for i, event := range eb.Events {
+            eb.Events[i].Attributes = append(
+                event.Attributes,
+                abci.EventAttribute{Key: "mode", Value: "EndBlock"},
+            )
+        }
+
+        eb.Events = sdk.MarkEventsToIndex(eb.Events, app.indexEvents)
+        endblock = eb
+    }
+
+    return endblock, nil
+}
+
+// runTx processes a transaction within a given execution mode, encoded transaction
+// bytes, and the decoded transaction itself. All state transitions occur through
+// a cached Context depending on the mode provided. State only gets persisted
+// if all messages get executed successfully and the execution mode is DeliverTx.
+// Note, gas execution info is always returned. A reference to a Result is
+// returned if the tx does not run out of gas and if all the messages are valid
+// and execute successfully. An error is returned otherwise.
+// both txbytes and the decoded tx are passed to runTx to avoid the state machine encoding the tx and decoding the transaction twice
+// passing the decoded tx to runTX is optional, it will be decoded if the tx is nil
+func (app *BaseApp)
+
+runTx(mode execMode, txBytes []byte, tx sdk.Tx) (gInfo sdk.GasInfo, result *sdk.Result, anteEvents []abci.Event, err error) {
+ // NOTE: GasWanted should be returned by the AnteHandler. GasUsed is
+ // determined by the GasMeter. We need access to the context to get the gas
+ // meter, so we initialize upfront.
+ var gasWanted uint64
+ ctx := app.getContextForTx(mode, txBytes)
+ ms := ctx.MultiStore()
+
+ // only run the tx if there is block gas remaining
+ if mode == execModeFinalize && ctx.BlockGasMeter().IsOutOfGas() {
+ return gInfo, nil, nil, errorsmod.Wrap(sdkerrors.ErrOutOfGas, "no block gas left to run tx")
+}
+
+defer func() {
+ if r := recover(); r != nil {
+ recoveryMW := newOutOfGasRecoveryMiddleware(gasWanted, ctx, app.runTxRecoveryMiddleware)
+
+err, result = processRecovery(r, recoveryMW), nil
+ ctx.Logger().Error("panic recovered in runTx", "err", err)
+}
+
+gInfo = sdk.GasInfo{
+ GasWanted: gasWanted,
+ GasUsed: ctx.GasMeter().GasConsumed()
+}
+
+}()
+ blockGasConsumed := false
+
+ // consumeBlockGas makes sure block gas is consumed at most once. It must
+ // happen after tx processing, and must be executed even if tx processing
+ // fails. Hence, it's execution is deferred.
+ consumeBlockGas := func() {
+ if !blockGasConsumed {
+ blockGasConsumed = true
+ ctx.BlockGasMeter().ConsumeGas(
+ ctx.GasMeter().GasConsumedToLimit(), "block gas meter",
+ )
+}
+
+}
+
+ // If BlockGasMeter()
+
+panics it will be caught by the above recover and will
+ // return an error - in any case BlockGasMeter will consume gas past the limit.
+ //
+ // NOTE: consumeBlockGas must exist in a separate defer function from the
+ // general deferred recovery function to recover from consumeBlockGas as it'll
+ // be executed first (deferred statements are executed as stack).
+ if mode == execModeFinalize {
+ defer consumeBlockGas()
+}
+
+ // if the transaction is not decoded, decode it here
+ if tx == nil {
+ tx, err = app.txDecoder(txBytes)
+ if err != nil {
+ return sdk.GasInfo{
+ GasUsed: 0,
+ GasWanted: 0
+}, nil, nil, sdkerrors.ErrTxDecode.Wrap(err.Error())
+}
+
+}
+ msgs := tx.GetMsgs()
+ if err := validateBasicTxMsgs(msgs); err != nil {
+ return sdk.GasInfo{
+}, nil, nil, err
+}
+ for _, msg := range msgs {
+ handler := app.msgServiceRouter.Handler(msg)
+ if handler == nil {
+ return sdk.GasInfo{
+}, nil, nil, errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "no message handler found for %T", msg)
+}
+
+}
+ if app.anteHandler != nil {
+ var (
+ anteCtx sdk.Context
+ msCache storetypes.CacheMultiStore
+ )
+
+ // Branch context before AnteHandler call in case it aborts.
+ // This is required for both CheckTx and DeliverTx.
+ // Ref: https://github.com/cosmos/cosmos-sdk/issues/2772
+ //
+ // NOTE: Alternatively, we could require that AnteHandler ensures that
+ // writes do not happen if aborted/failed. This may have some
+ // performance benefits, but it'll be more difficult to get right.
+ anteCtx, msCache = app.cacheTxContext(ctx, txBytes)
+
+anteCtx = anteCtx.WithEventManager(sdk.NewEventManager())
+
+newCtx, err := app.anteHandler(anteCtx, tx, mode == execModeSimulate)
+ if !newCtx.IsZero() {
+ // At this point, newCtx.MultiStore()
+
+is a store branch, or something else
+ // replaced by the AnteHandler. We want the original multistore.
+ //
+ // Also, in the case of the tx aborting, we need to track gas consumed via
+ // the instantiated gas meter in the AnteHandler, so we update the context
+ // prior to returning.
+ ctx = newCtx.WithMultiStore(ms)
+}
+ events := ctx.EventManager().Events()
+
+ // GasMeter expected to be set in AnteHandler
+ gasWanted = ctx.GasMeter().Limit()
+ if err != nil {
+ if mode == execModeReCheck {
+ // if the ante handler fails on recheck, we want to remove the tx from the mempool
+ if mempoolErr := app.mempool.Remove(tx); mempoolErr != nil {
+ return gInfo, nil, anteEvents, errors.Join(err, mempoolErr)
+}
+
+}
+
+return gInfo, nil, nil, err
+}
+
+msCache.Write()
+
+anteEvents = events.ToABCIEvents()
+}
+ switch mode {
+ case execModeCheck:
+ err = app.mempool.Insert(ctx, tx)
+ if err != nil {
+ return gInfo, nil, anteEvents, err
+}
+ case execModeFinalize:
+ err = app.mempool.Remove(tx)
+ if err != nil && !errors.Is(err, mempool.ErrTxNotFound) {
+ return gInfo, nil, anteEvents,
+ fmt.Errorf("failed to remove tx from mempool: %w", err)
+}
+
+}
+
+ // Create a new Context based off of the existing Context with a MultiStore branch
+ // in case message processing fails. At this point, the MultiStore
+ // is a branch of a branch.
+ runMsgCtx, msCache := app.cacheTxContext(ctx, txBytes)
+
+ // Attempt to execute all messages and only update state if all messages pass
+ // and we're in DeliverTx. Note, runMsgs will never return a reference to a
+ // Result if any single message fails or does not have a registered Handler.
+ msgsV2, err := tx.GetMsgsV2()
+ if err == nil {
+ result, err = app.runMsgs(runMsgCtx, msgs, msgsV2, mode)
+}
+
+ // Run optional postHandlers (should run regardless of the execution result).
+ //
+ // Note: If the postHandler fails, we also revert the runMsgs state.
+ if app.postHandler != nil {
+ // The runMsgCtx context currently contains events emitted by the ante handler.
+ // We clear this to correctly order events without duplicates.
+ // Note that the state is still preserved.
+ postCtx := runMsgCtx.WithEventManager(sdk.NewEventManager())
+
+newCtx, errPostHandler := app.postHandler(postCtx, tx, mode == execModeSimulate, err == nil)
+ if errPostHandler != nil {
+ if err == nil {
+ // when the msg was handled successfully, return the post handler error only
+ return gInfo, nil, anteEvents, errPostHandler
+}
+ // otherwise append to the msg error so that we keep the original error code for better user experience
+ return gInfo, nil, anteEvents, errorsmod.Wrapf(err, "postHandler: %s", errPostHandler)
+}
+
+ // we don't want runTx to panic if runMsgs has failed earlier
+ if result == nil {
+ result = &sdk.Result{
+}
+
+}
+
+result.Events = append(result.Events, newCtx.EventManager().ABCIEvents()...)
+}
+ if err == nil {
+ if mode == execModeFinalize {
+ // When block gas exceeds, it'll panic and won't commit the cached store.
+ consumeBlockGas()
+
+msCache.Write()
+}
+ if len(anteEvents) > 0 && (mode == execModeFinalize || mode == execModeSimulate) {
+ // append the events in the order of occurrence
+ result.Events = append(anteEvents, result.Events...)
+}
+
+}
+
+return gInfo, result, anteEvents, err
+}
+
+// runMsgs iterates through a list of messages and executes them with the provided
+// Context and execution mode. Messages will only be executed during simulation
+// and DeliverTx. An error is returned if any single message fails or if a
+// Handler does not exist for a given message route. Otherwise, a reference to a
+// Result is returned. The caller must not commit state if an error is returned.
+func (app *BaseApp) runMsgs(ctx sdk.Context, msgs []sdk.Msg, msgsV2 []protov2.Message, mode execMode) (*sdk.Result, error) {
+	events := sdk.EmptyEvents()
+	var msgResponses []*codectypes.Any
+
+	// NOTE: GasWanted is determined by the AnteHandler and GasUsed by the GasMeter.
+	for i, msg := range msgs {
+		if mode != execModeFinalize && mode != execModeSimulate {
+			break
+		}
+
+		handler := app.msgServiceRouter.Handler(msg)
+		if handler == nil {
+			return nil, errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "no message handler found for %T", msg)
+		}
+
+		// ADR 031 request type routing
+		msgResult, err := handler(ctx, msg)
+		if err != nil {
+			return nil, errorsmod.Wrapf(err, "failed to execute message; message index: %d", i)
+		}
+
+		// create message events
+		msgEvents, err := createEvents(app.cdc, msgResult.GetEvents(), msg, msgsV2[i])
+		if err != nil {
+			return nil, errorsmod.Wrapf(err, "failed to create message events; message index: %d", i)
+		}
+
+		// append message events and data
+		//
+		// Note: Each message result's data must be length-prefixed in order to
+		// separate each result.
+		for j, event := range msgEvents {
+			// append message index to all events
+			msgEvents[j] = event.AppendAttributes(sdk.NewAttribute("msg_index", strconv.Itoa(i)))
+		}
+
+		events = events.AppendEvents(msgEvents)
+
+		// Each individual sdk.Result that went through the MsgServiceRouter
+		// (which should represent 99% of the Msgs now, since everyone should
+		// be using protobuf Msgs) has exactly one Msg response, set inside
+		// `WrapServiceResult`. We take that Msg response, and aggregate it
+		// into an array.
+		if len(msgResult.MsgResponses) > 0 {
+			msgResponse := msgResult.MsgResponses[0]
+			if msgResponse == nil {
+				return nil, sdkerrors.ErrLogic.Wrapf("got nil Msg response at index %d for msg %s", i, sdk.MsgTypeURL(msg))
+			}
+			msgResponses = append(msgResponses, msgResponse)
+		}
+	}
+
+	data, err := makeABCIData(msgResponses)
+	if err != nil {
+		return nil, errorsmod.Wrap(err, "failed to marshal tx data")
+	}
+
+	return &sdk.Result{
+		Data:         data,
+		Events:       events.ToABCIEvents(),
+		MsgResponses: msgResponses,
+	}, nil
+}
+
+// makeABCIData generates the Data field to be sent to ABCI Check/DeliverTx.
+// Each aggregated Msg response is wrapped in a TxMsgData and proto-marshaled
+// so that individual results can be recovered by decoders.
+func makeABCIData(msgResponses []*codectypes.Any) ([]byte, error) {
+	return proto.Marshal(&sdk.TxMsgData{MsgResponses: msgResponses})
+}
+
+// createEvents builds the standard message event for msg — carrying the action
+// (fully-qualified Msg type URL), the sender (first signer, when resolvable),
+// and the module attribute (when not already present) — and prepends it to the
+// events emitted by the message handler.
+func createEvents(cdc codec.Codec, events sdk.Events, msg sdk.Msg, msgV2 protov2.Message) (sdk.Events, error) {
+	eventMsgName := sdk.MsgTypeURL(msg)
+	msgEvent := sdk.NewEvent(sdk.EventTypeMessage, sdk.NewAttribute(sdk.AttributeKeyAction, eventMsgName))
+
+	// we set the signer attribute as the sender
+	signers, err := cdc.GetMsgV2Signers(msgV2)
+	if err != nil {
+		return nil, err
+	}
+	if len(signers) > 0 && signers[0] != nil {
+		addrStr, err := cdc.InterfaceRegistry().SigningContext().AddressCodec().BytesToString(signers[0])
+		if err != nil {
+			return nil, err
+		}
+		msgEvent = msgEvent.AppendAttributes(sdk.NewAttribute(sdk.AttributeKeySender, addrStr))
+	}
+
+	// verify that events have no module attribute set
+	if _, found := events.GetAttributes(sdk.AttributeKeyModule); !found {
+		if moduleName := sdk.GetModuleNameFromTypeURL(eventMsgName); moduleName != "" {
+			msgEvent = msgEvent.AppendAttributes(sdk.NewAttribute(sdk.AttributeKeyModule, moduleName))
+		}
+	}
+
+	return sdk.Events{msgEvent}.AppendEvents(events), nil
+}
+
+// PrepareProposalVerifyTx performs transaction verification when a proposer is
+// creating a block proposal during PrepareProposal. Any state committed to the
+// PrepareProposal state internally will be discarded. An error is returned if
+// the transaction cannot be encoded or fails verification; otherwise the raw
+// transaction bytes are returned.
+func (app *BaseApp) PrepareProposalVerifyTx(tx sdk.Tx) ([]byte, error) {
+	bz, err := app.txEncoder(tx)
+	if err != nil {
+		return nil, err
+	}
+
+	// Run the tx through the PrepareProposal execution mode; state writes are
+	// discarded, only the verification outcome matters here.
+	_, _, _, err = app.runTx(execModePrepareProposal, bz, tx)
+	if err != nil {
+		return nil, err
+	}
+
+	return bz, nil
+}
+
+// ProcessProposalVerifyTx performs transaction verification when receiving a
+// block proposal during ProcessProposal. Any state committed to the
+// ProcessProposal state internally will be discarded. An error is returned if
+// the transaction cannot be decoded or fails verification; otherwise the
+// decoded transaction is returned.
+func (app *BaseApp) ProcessProposalVerifyTx(txBz []byte) (sdk.Tx, error) {
+	tx, err := app.txDecoder(txBz)
+	if err != nil {
+		return nil, err
+	}
+
+	// Run the tx through the ProcessProposal execution mode; state writes are
+	// discarded, only the verification outcome matters here.
+	_, _, _, err = app.runTx(execModeProcessProposal, txBz, tx)
+	if err != nil {
+		return nil, err
+	}
+
+	return tx, nil
+}
+
+// TxDecode decodes raw transaction bytes into an sdk.Tx using the app's
+// configured TxDecoder.
+func (app *BaseApp) TxDecode(txBytes []byte) (sdk.Tx, error) {
+	return app.txDecoder(txBytes)
+}
+
+// TxEncode encodes an sdk.Tx into raw transaction bytes using the app's
+// configured TxEncoder.
+func (app *BaseApp) TxEncode(tx sdk.Tx) ([]byte, error) {
+	return app.txEncoder(tx)
+}
+
+// StreamingManager returns the app's configured StreamingManager.
+func (app *BaseApp) StreamingManager() storetypes.StreamingManager {
+	return app.streamingManager
+}
+
+// Close is called in start cmd to gracefully cleanup resources.
+// All close errors are collected and joined so that one failure does not
+// prevent the remaining resources from being closed.
+func (app *BaseApp) Close() error {
+	var errs []error
+
+	// Close app.db (opened by cosmos-sdk/server/start.go call to openDB)
+	if app.db != nil {
+		app.logger.Info("Closing application.db")
+		if err := app.db.Close(); err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	// Close app.snapshotManager
+	// - opened when app chains use cosmos-sdk/server/util.go/DefaultBaseappOptions (boilerplate)
+	// - which calls cosmos-sdk/server/util.go/GetSnapshotStore
+	// - which is passed to baseapp/options.go/SetSnapshot
+	// - to set app.snapshotManager = snapshots.NewManager
+	if app.snapshotManager != nil {
+		app.logger.Info("Closing snapshots/metadata.db")
+		if err := app.snapshotManager.Close(); err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	return errors.Join(errs...)
+}
+
+// GetBaseApp returns the pointer to itself.
+func (app *BaseApp) GetBaseApp() *BaseApp {
+	return app
+}
+```
+
+Let us go through the most important components.
+
+> **Note**: Not all parameters are described, only the most important ones. Refer to the
+> type definition for the full list.
+
+First, the important parameters that are initialized during the bootstrapping of the application:
+
+* [`CommitMultiStore`](/sdk/v0.53/learn/advanced/store#commitmultistore): This is the main store of the application,
+ which holds the canonical state that is committed at the [end of each block](#commit). This store
+ is **not** cached, meaning it is not used to update the application's volatile (un-committed) states.
+ The `CommitMultiStore` is a multi-store, meaning a store of stores. Each module of the application
+ uses one or multiple `KVStores` in the multi-store to persist their subset of the state.
+* Database: The `db` is used by the `CommitMultiStore` to handle data persistence.
+* [`Msg` Service Router](#msg-service-router): The `msgServiceRouter` facilitates the routing of `sdk.Msg` requests to the appropriate
+ module `Msg` service for processing. Here a `sdk.Msg` refers to the transaction component that needs to be
+ processed by a service in order to update the application state, and not to ABCI message which implements
+ the interface between the application and the underlying consensus engine.
+* [gRPC Query Router](#grpc-query-router): The `grpcQueryRouter` facilitates the routing of gRPC queries to the
+ appropriate module for it to be processed. These queries are not ABCI messages themselves, but they
+ are relayed to the relevant module's gRPC `Query` service.
+* [`TxDecoder`](https://pkg.go.dev/github.com/cosmos/cosmos-sdk/types#TxDecoder): It is used to decode
+ raw transaction bytes relayed by the underlying CometBFT engine.
+* [`AnteHandler`](#antehandler): This handler is used to handle signature verification, fee payment,
+ and other pre-message execution checks when a transaction is received. It's executed during
+ [`CheckTx/RecheckTx`](#checktx) and [`FinalizeBlock`](#finalizeblock).
+* [`InitChainer`](/sdk/v0.53/learn/beginner/app-anatomy#initchainer), [`PreBlocker`](/sdk/v0.53/learn/beginner/app-anatomy#preblocker), [`BeginBlocker` and `EndBlocker`](/sdk/v0.53/learn/beginner/app-anatomy#beginblocker-and-endblocker): These are
+ the functions executed when the application receives the `InitChain` and `FinalizeBlock`
+ ABCI messages from the underlying CometBFT engine.
+
+Then, parameters used to define [volatile states](#state-updates) (i.e. cached states):
+
+* `checkState`: This state is updated during [`CheckTx`](#checktx), and reset on [`Commit`](#commit).
+* `finalizeBlockState`: This state is updated during [`FinalizeBlock`](#finalizeblock), and set to `nil` on
+ [`Commit`](#commit) and gets re-initialized on `FinalizeBlock`.
+* `processProposalState`: This state is updated during [`ProcessProposal`](#process-proposal).
+* `prepareProposalState`: This state is updated during [`PrepareProposal`](#prepare-proposal).
+
+Finally, a few more important parameters:
+
+* `voteInfos`: This parameter carries the list of validators whose precommit is missing, either
+ because they did not vote or because the proposer did not include their vote. This information is
+ carried by the [Context](/sdk/v0.53/learn/advanced/context) and can be used by the application for various things like
+ punishing absent validators.
+* `minGasPrices`: This parameter defines the minimum gas prices accepted by the node. This is a
+ **local** parameter, meaning each full-node can set a different `minGasPrices`. It is used in the
+ `AnteHandler` during [`CheckTx`](#checktx), mainly as a spam protection mechanism. The transaction
+ enters the [mempool](https://github.com/cometbft/cometbft/blob/v0.37.x/spec/abci/abci++_basic_concepts.md#mempool-methods)
+ only if the gas prices of the transaction are greater than one of the minimum gas price in
+ `minGasPrices` (e.g. if `minGasPrices == 1uatom,1photon`, the `gas-price` of the transaction must be
+ greater than `1uatom` OR `1photon`).
+* `appVersion`: Version of the application. It is set in the
+ [application's constructor function](/sdk/v0.53/learn/beginner/app-anatomy#constructor-function).
+
+## Constructor
+
+```go
+func NewBaseApp(
+ name string, logger log.Logger, db dbm.DB, txDecoder sdk.TxDecoder, options ...func(*BaseApp),
+) *BaseApp {
+
+ // ...
+}
+```
+
+The `BaseApp` constructor function is pretty straightforward. The only thing worth noting is the
+possibility to provide additional [`options`](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0/baseapp/options.go)
+to the `BaseApp`, which will execute them in order. The `options` are generally `setter` functions
+for important parameters, like `SetPruning()` to set pruning options or `SetMinGasPrices()` to set
+the node's `min-gas-prices`.
+
+Naturally, developers can add additional `options` based on their application's needs.
+
+## State Updates
+
+The `BaseApp` maintains four primary volatile states and a root or main state. The main state
+is the canonical state of the application and the volatile states, `checkState`, `prepareProposalState`, `processProposalState` and `finalizeBlockState`
+are used to handle state transitions in-between the main state made during [`Commit`](#commit).
+
+Internally, there is only a single `CommitMultiStore` which we refer to as the main or root state.
+From this root state, we derive four volatile states by using a mechanism called *store branching* (performed by `CacheWrap` function).
+The types can be illustrated as follows:
+
+
+
+### InitChain State Updates
+
+During `InitChain`, the four volatile states, `checkState`, `prepareProposalState`, `processProposalState`
+and `finalizeBlockState` are set by branching the root `CommitMultiStore`. Any subsequent reads and writes happen
+on branched versions of the `CommitMultiStore`.
+To avoid unnecessary roundtrip to the main state, all reads to the branched store are cached.
+
+
+
+### CheckTx State Updates
+
+During `CheckTx`, the `checkState`, which is based off of the last committed state from the root
+store, is used for any reads and writes. Here we only execute the `AnteHandler` and verify a service router
+exists for every message in the transaction. Note, when we execute the `AnteHandler`, we branch
+the already branched `checkState`.
+This has the side effect that if the `AnteHandler` fails, the state transitions won't be reflected in the `checkState`
+\-- i.e. `checkState` is only updated on success.
+
+
+
+### PrepareProposal State Updates
+
+During `PrepareProposal`, the `prepareProposalState` is set by branching the root `CommitMultiStore`.
+The `prepareProposalState` is used for any reads and writes that occur during the `PrepareProposal` phase.
+The function uses the `Select()` method of the mempool to iterate over the transactions. `runTx` is then called,
+which encodes and validates each transaction and from there the `AnteHandler` is executed.
+If successful, valid transactions are returned inclusive of the events, tags, and data generated
+during the execution of the proposal.
+The described behavior is that of the default handler, applications have the flexibility to define their own
+[custom mempool handlers](/sdk/v0.53/build/building-apps/app-mempool#custom-mempool-handlers).
+
+
+
+### ProcessProposal State Updates
+
+During `ProcessProposal`, the `processProposalState` is set based off of the last committed state
+from the root store and is used to process a signed proposal received from a validator.
+In this state, `runTx` is called and the `AnteHandler` is executed and the context used in this state is built with information
+from the header and the main state, including the minimum gas prices, which are also set.
+Again we want to highlight that the described behavior is that of the default handler and applications have the flexibility to define their own
+[custom mempool handlers](/sdk/v0.53/build/building-apps/app-mempool#custom-mempool-handlers).
+
+
+
+### FinalizeBlock State Updates
+
+During `FinalizeBlock`, the `finalizeBlockState` is set for use during transaction execution and endblock. The
+`finalizeBlockState` is based off of the last committed state from the root store and is branched.
+Note, the `finalizeBlockState` is set to `nil` on [`Commit`](#commit).
+
+The state flow for transaction execution is nearly identical to `CheckTx` except state transitions occur on
+the `finalizeBlockState` and messages in a transaction are executed. Similarly to `CheckTx`, state transitions
+occur on a doubly branched state -- `finalizeBlockState`. Successful message execution results in
+writes being committed to `finalizeBlockState`. Note, if message execution fails, state transitions from
+the AnteHandler are persisted.
+
+### Commit State Updates
+
+During `Commit` all the state transitions that occurred in the `finalizeBlockState` are finally written to
+the root `CommitMultiStore` which in turn is committed to disk and results in a new application
+root hash. These state transitions are now considered final. Finally, the `checkState` is set to the
+newly committed state and `finalizeBlockState` is set to `nil` to be reset on `FinalizeBlock`.
+
+
+
+## ParamStore
+
+During `InitChain`, the `RequestInitChain` provides `ConsensusParams` which contains parameters
+related to block execution such as maximum gas and size in addition to evidence parameters. If these
+parameters are non-nil, they are set in the BaseApp's `ParamStore`. Behind the scenes, the `ParamStore`
+is managed by an `x/consensus_params` module. This allows the parameters to be tweaked via
+on-chain governance.
+
+## Service Routers
+
+When messages and queries are received by the application, they must be routed to the appropriate module in order to be processed. Routing is done via `BaseApp`, which holds a `msgServiceRouter` for messages, and a `grpcQueryRouter` for queries.
+
+### `Msg` Service Router
+
+[`sdk.Msg`s](/sdk/v0.53/build/building-modules/messages-and-queries#messages) need to be routed after they are extracted from transactions, which are sent from the underlying CometBFT engine via the [`CheckTx`](#checktx) and [`FinalizeBlock`](#finalizeblock) ABCI messages. To do so, `BaseApp` holds a `msgServiceRouter` which maps fully-qualified service methods (`string`, defined in each module's Protobuf `Msg` service) to the appropriate module's `MsgServer` implementation.
+
+The [default `msgServiceRouter` included in `BaseApp`](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0/baseapp/msg_service_router.go) is stateless. However, some applications may want to make use of more stateful routing mechanisms such as allowing governance to disable certain routes or point them to new modules for upgrade purposes. For this reason, the `sdk.Context` is also passed into each [route handler inside `msgServiceRouter`](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0/baseapp/msg_service_router.go#L35-L36). For a stateless router that doesn't want to make use of this, you can just ignore the `ctx`.
+
+The application's `msgServiceRouter` is initialized with all the routes using the application's [module manager](/sdk/v0.53/build/building-modules/module-manager#manager) (via the `RegisterServices` method), which itself is initialized with all the application's modules in the application's [constructor](/sdk/v0.53/learn/beginner/app-anatomy#constructor-function).
+
+### gRPC Query Router
+
+Similar to `sdk.Msg`s, [`queries`](/sdk/v0.53/build/building-modules/messages-and-queries#queries) need to be routed to the appropriate module's [`Query` service](/sdk/v0.53/build/building-modules/query-services). To do so, `BaseApp` holds a `grpcQueryRouter`, which maps modules' fully-qualified service methods (`string`, defined in their Protobuf `Query` gRPC) to their `QueryServer` implementation. The `grpcQueryRouter` is called during the initial stages of query processing, which can be either by directly sending a gRPC query to the gRPC endpoint, or via the [`Query` ABCI message](#query) on the CometBFT RPC endpoint.
+
+Just like the `msgServiceRouter`, the `grpcQueryRouter` is initialized with all the query routes using the application's [module manager](/sdk/v0.53/build/building-modules/module-manager) (via the `RegisterServices` method), which itself is initialized with all the application's modules in the application's [constructor](/sdk/v0.53/learn/beginner/app-anatomy#app-constructor).
+
+## Main ABCI 2.0 Messages
+
+The [Application-Blockchain Interface](https://github.com/cometbft/cometbft/blob/v0.37.x/spec/abci/abci++_basic_concepts.md) (ABCI) is a generic interface that connects a state-machine with a consensus engine to form a functional full-node. It can be wrapped in any language, and needs to be implemented by each application-specific blockchain built on top of an ABCI-compatible consensus engine like CometBFT.
+
+The consensus engine handles two main tasks:
+
+* The networking logic, which mainly consists in gossiping block parts, transactions and consensus votes.
+* The consensus logic, which results in the deterministic ordering of transactions in the form of blocks.
+
+It is **not** the role of the consensus engine to define the state or the validity of transactions. Generally, transactions are handled by the consensus engine in the form of `[]bytes`, and relayed to the application via the ABCI to be decoded and processed. At keys moments in the networking and consensus processes (e.g. beginning of a block, commit of a block, reception of an unconfirmed transaction, ...), the consensus engine emits ABCI messages for the state-machine to act on.
+
+Developers building on top of the Cosmos SDK need not implement the ABCI themselves, as `BaseApp` comes with a built-in implementation of the interface. Let us go through the main ABCI messages that `BaseApp` implements:
+
+* [`Prepare Proposal`](#prepare-proposal)
+* [`Process Proposal`](#process-proposal)
+* [`CheckTx`](#checktx)
+* [`FinalizeBlock`](#finalizeblock)
+* [`ExtendVote`](#extendvote)
+* [`VerifyVoteExtension`](#verifyvoteextension)
+
+### Prepare Proposal
+
+The `PrepareProposal` function is part of the new methods introduced in Application Blockchain Interface (ABCI++) in CometBFT and is an important part of the application's overall governance system. In the Cosmos SDK, it allows the application to have more fine-grained control over the transactions that are processed, and ensures that only valid transactions are committed to the blockchain.
+
+Here is how the `PrepareProposal` function can be implemented:
+
+1. Extract the `sdk.Msg`s from the transaction.
+2. Perform *stateful* checks by calling `Validate()` on each of the `sdk.Msg`'s. This is done after *stateless* checks as *stateful* checks are more computationally expensive. If `Validate()` fails, `PrepareProposal` returns before running further checks, which saves resources.
+3. Perform any additional checks that are specific to the application, such as checking account balances, or ensuring that certain conditions are met before a transaction is proposed. Modify the transactions before they are processed by the consensus engine, if necessary.
+4. Return the updated transactions to be processed by the consensus engine.
+
+Note that, unlike `CheckTx()`, `PrepareProposal` processes `sdk.Msg`s, so it can directly update the state. However, unlike `FinalizeBlock()`, it does not commit the state updates. It's important to exercise caution when using `PrepareProposal` as incorrect coding could affect the overall liveness of the network.
+
+It's important to note that `PrepareProposal` complements the `ProcessProposal` method which is executed after this method. The combination of these two methods means that it is possible to guarantee that no invalid transactions are ever committed. Furthermore, such a setup can give rise to other interesting use cases such as Oracles, threshold decryption and more.
+
+`PrepareProposal` returns a response to the underlying consensus engine of type [`abci.ResponsePrepareProposal`](https://github.com/cometbft/cometbft/blob/v0.37.x/spec/abci/abci++_methods.md#prepareproposal). The response contains:
+
+* `Code (uint32)`: Response Code. `0` if successful.
+* `Data ([]byte)`: Result bytes, if any.
+* `Log (string):` The output of the application's logger. May be non-deterministic.
+* `Info (string):` Additional information. May be non-deterministic.
+
+### Process Proposal
+
+The `ProcessProposal` function is called by the BaseApp as part of the ABCI message flow, and is executed during the `FinalizeBlock` phase of the consensus process. The purpose of this function is to give more control to the application for block validation, allowing it to check all transactions in a proposed block before the validator sends the prevote for the block. It allows a validator to perform application-dependent work in a proposed block, enabling features such as immediate block execution, and allows the Application to reject invalid blocks.
+
+The `ProcessProposal` function performs several key tasks, including:
+
+1. Validating the proposed block by checking all transactions in it.
+2. Checking the proposed block against the current state of the application, to ensure that it is valid and that it can be executed.
+3. Updating the application's state based on the proposal, if it is valid and passes all checks.
+4. Returning a response to CometBFT indicating the result of the proposal processing.
+
+The `ProcessProposal` is an important part of the application's overall governance system. It is used to manage the network's parameters and other key aspects of its operation. It also ensures that the coherence property is adhered to i.e. all honest validators must accept a proposal by an honest proposer.
+
+It's important to note that `ProcessProposal` complements the `PrepareProposal` method which enables the application to have more fine-grained transaction control by allowing it to reorder, drop, delay, modify, and even add transactions as they see necessary. The combination of these two methods means that it is possible to guarantee that no invalid transactions are ever committed. Furthermore, such a setup can give rise to other interesting use cases such as Oracles, threshold decryption and more.
+
+CometBFT calls it when it receives a proposal and the CometBFT algorithm has not locked on a value. The Application cannot modify the proposal at this point but can reject it if it is invalid. If that is the case, CometBFT will prevote `nil` on the proposal, which has strong liveness implications for CometBFT. As a general rule, the Application SHOULD accept a prepared proposal passed via `ProcessProposal`, even if a part of the proposal is invalid (e.g., an invalid transaction); the Application can ignore the invalid part of the prepared proposal at block execution time.
+
+However, developers must exercise greater caution when using these methods. Incorrectly coding these methods could affect liveness as CometBFT is unable to receive 2/3 valid precommits to finalize a block.
+
+`ProcessProposal` returns a response to the underlying consensus engine of type [`abci.ResponseProcessProposal`](https://github.com/cometbft/cometbft/blob/v0.37.x/spec/abci/abci++_methods.md#processproposal). The response contains:
+
+* `Code (uint32)`: Response Code. `0` if successful.
+* `Data ([]byte)`: Result bytes, if any.
+* `Log (string):` The output of the application's logger. May be non-deterministic.
+* `Info (string):` Additional information. May be non-deterministic.
+
+### CheckTx
+
+`CheckTx` is sent by the underlying consensus engine when a new unconfirmed (i.e. not yet included in a valid block)
+transaction is received by a full-node. The role of `CheckTx` is to guard the full-node's mempool
+(where unconfirmed transactions are stored until they are included in a block) from spam transactions.
+Unconfirmed transactions are relayed to peers only if they pass `CheckTx`.
+
+`CheckTx()` can perform both *stateful* and *stateless* checks, but developers should strive to
+make the checks **lightweight** because gas fees are not charged for the resources (CPU, data load...) used during the `CheckTx`.
+
+In the Cosmos SDK, after [decoding transactions](/sdk/v0.53/learn/advanced/encoding), `CheckTx()` is implemented
+to do the following checks:
+
+1. Extract the `sdk.Msg`s from the transaction.
+2. **Optionally** perform *stateless* checks by calling `ValidateBasic()` on each of the `sdk.Msg`s. This is done
+ first, as *stateless* checks are less computationally expensive than *stateful* checks. If
+ `ValidateBasic()` fail, `CheckTx` returns before running *stateful* checks, which saves resources.
+ This check is still performed for messages that have not yet migrated to the new message validation mechanism defined in [RFC 001](/sdk/v0.50/learn/advanced/baseapp) and still have a `ValidateBasic()` method.
+3. Perform non-module related *stateful* checks on the [account](/sdk/v0.53/learn/beginner/accounts). This step is mainly about checking
+ that the `sdk.Msg` signatures are valid, that enough fees are provided and that the sending account
+ has enough funds to pay for said fees. Note that no precise [`gas`](/sdk/v0.53/learn/beginner/gas-fees) counting occurs here,
+ as `sdk.Msg`s are not processed. Usually, the [`AnteHandler`](/sdk/v0.53/learn/beginner/gas-fees#antehandler) will check that the `gas` provided
+ with the transaction is superior to a minimum reference gas amount based on the raw transaction size,
+ in order to avoid spam with transactions that provide 0 gas.
+
+`CheckTx` does **not** process `sdk.Msg`s - they only need to be processed when the canonical state needs to be updated, which happens during `FinalizeBlock`.
+
+Steps 2. and 3. are performed by the [`AnteHandler`](/sdk/v0.53/learn/beginner/gas-fees#antehandler) in the [`RunTx()`](#runtx-antehandler-and-runmsgs)
+function, which `CheckTx()` calls with the `execModeCheck` mode. During each step of `CheckTx()`, a
+special [volatile state](#state-updates) called `checkState` is updated. This state is used to keep
+track of the temporary changes triggered by the `CheckTx()` calls of each transaction without modifying
+the [main canonical state](#main-state). For example, when a transaction goes through `CheckTx()`, the
+transaction's fees are deducted from the sender's account in `checkState`. If a second transaction is
+received from the same account before the first is processed, and the account has consumed all its
+funds in `checkState` during the first transaction, the second transaction will fail `CheckTx()` and
+be rejected. In any case, the sender's account will not actually pay the fees until the transaction
+is actually included in a block, because `checkState` never gets committed to the main state. The
+`checkState` is reset to the latest state of the main state each time a block gets [committed](#commit).
+
+`CheckTx` returns a response to the underlying consensus engine of type [`abci.ResponseCheckTx`](https://github.com/cometbft/cometbft/blob/v0.37.x/spec/abci/abci++_methods.md#checktx).
+The response contains:
+
+* `Code (uint32)`: Response Code. `0` if successful.
+* `Data ([]byte)`: Result bytes, if any.
+* `Log (string)`: The output of the application's logger. May be non-deterministic.
+* `Info (string)`: Additional information. May be non-deterministic.
+* `GasWanted (int64)`: Amount of gas requested for transaction. It is provided by users when they generate the transaction.
+* `GasUsed (int64)`: Amount of gas consumed by transaction. During `CheckTx`, this value is computed by multiplying the standard cost of a transaction byte by the size of the raw transaction. The following is an example:
+
+```go expandable
+package ante
+
+import (
+
+ "slices"
+ "time"
+
+ errorsmod "cosmossdk.io/errors"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/codec/legacy"
+ "github.com/cosmos/cosmos-sdk/crypto/keys/multisig"
+ cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/types/tx/signing"
+ "github.com/cosmos/cosmos-sdk/x/auth/migrations/legacytx"
+ authsigning "github.com/cosmos/cosmos-sdk/x/auth/signing"
+)
+
+// ValidateBasicDecorator will call tx.ValidateBasic and return any non-nil error.
+// If ValidateBasic passes, decorator calls next AnteHandler in chain. Note,
+// ValidateBasicDecorator decorator will not get executed on ReCheckTx since it
+// is not dependent on application state.
+type ValidateBasicDecorator struct{
+}
+
+func NewValidateBasicDecorator()
+
+ValidateBasicDecorator {
+ return ValidateBasicDecorator{
+}
+}
+
+func (vbd ValidateBasicDecorator)
+
+AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bool, next sdk.AnteHandler) (sdk.Context, error) {
+ // no need to validate basic on recheck tx, call next antehandler
+ if ctx.IsReCheckTx() {
+ return next(ctx, tx, simulate)
+}
+ if validateBasic, ok := tx.(sdk.HasValidateBasic); ok {
+ if err := validateBasic.ValidateBasic(); err != nil {
+ return ctx, err
+}
+
+}
+
+return next(ctx, tx, simulate)
+}
+
+// ValidateMemoDecorator will validate memo given the parameters passed in
+// If memo is too large decorator returns with error, otherwise call next AnteHandler
+// CONTRACT: Tx must implement TxWithMemo interface
+type ValidateMemoDecorator struct {
+ ak AccountKeeper
+}
+
+func NewValidateMemoDecorator(ak AccountKeeper)
+
+ValidateMemoDecorator {
+ return ValidateMemoDecorator{
+ ak: ak,
+}
+}
+
+func (vmd ValidateMemoDecorator)
+
+AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bool, next sdk.AnteHandler) (sdk.Context, error) {
+ memoTx, ok := tx.(sdk.TxWithMemo)
+ if !ok {
+ return ctx, errorsmod.Wrap(sdkerrors.ErrTxDecode, "invalid transaction type")
+}
+ memoLength := len(memoTx.GetMemo())
+ if memoLength > 0 {
+ params := vmd.ak.GetParams(ctx)
+ if uint64(memoLength) > params.MaxMemoCharacters {
+ return ctx, errorsmod.Wrapf(sdkerrors.ErrMemoTooLarge,
+ "maximum number of characters is %d but received %d characters",
+ params.MaxMemoCharacters, memoLength,
+ )
+}
+
+}
+
+return next(ctx, tx, simulate)
+}
+
+// ConsumeTxSizeGasDecorator will take in parameters and consume gas proportional
+// to the size of tx before calling next AnteHandler. Note, the gas costs will be
+// slightly over estimated due to the fact that any given signing account may need
+// to be retrieved from state.
+//
+// CONTRACT: If simulate=true, then signatures must either be completely filled
+// in or empty.
+// CONTRACT: To use this decorator, signatures of transaction must be represented
+// as legacytx.StdSignature otherwise simulate mode will incorrectly estimate gas cost.
+type ConsumeTxSizeGasDecorator struct {
+ ak AccountKeeper
+}
+
+func NewConsumeGasForTxSizeDecorator(ak AccountKeeper)
+
+ConsumeTxSizeGasDecorator {
+ return ConsumeTxSizeGasDecorator{
+ ak: ak,
+}
+}
+
+func (cgts ConsumeTxSizeGasDecorator)
+
+AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bool, next sdk.AnteHandler) (sdk.Context, error) {
+ sigTx, ok := tx.(authsigning.SigVerifiableTx)
+ if !ok {
+ return ctx, errorsmod.Wrap(sdkerrors.ErrTxDecode, "invalid tx type")
+}
+ params := cgts.ak.GetParams(ctx)
+
+ctx.GasMeter().ConsumeGas(params.TxSizeCostPerByte*storetypes.Gas(len(ctx.TxBytes())), "txSize")
+
+ // simulate gas cost for signatures in simulate mode
+ if simulate {
+ // in simulate mode, each element should be a nil signature
+ sigs, err := sigTx.GetSignaturesV2()
+ if err != nil {
+ return ctx, err
+}
+ n := len(sigs)
+
+signers, err := sigTx.GetSigners()
+ if err != nil {
+ return sdk.Context{
+}, err
+}
+ for i, signer := range signers {
+ // if signature is already filled in, no need to simulate gas cost
+ if i < n && !isIncompleteSignature(sigs[i].Data) {
+ continue
+}
+
+var pubkey cryptotypes.PubKey
+ acc := cgts.ak.GetAccount(ctx, signer)
+
+ // use placeholder simSecp256k1Pubkey if sig is nil
+ if acc == nil || acc.GetPubKey() == nil {
+ pubkey = simSecp256k1Pubkey
+}
+
+else {
+ pubkey = acc.GetPubKey()
+}
+
+ // use stdsignature to mock the size of a full signature
+ simSig := legacytx.StdSignature{ //nolint:staticcheck // SA1019: legacytx.StdSignature is deprecated
+ Signature: simSecp256k1Sig[:],
+ PubKey: pubkey,
+}
+ sigBz := legacy.Cdc.MustMarshal(simSig)
+ cost := storetypes.Gas(len(sigBz) + 6)
+
+ // If the pubkey is a multi-signature pubkey, then we estimate for the maximum
+ // number of signers.
+ if _, ok := pubkey.(*multisig.LegacyAminoPubKey); ok {
+ cost *= params.TxSigLimit
+}
+
+ctx.GasMeter().ConsumeGas(params.TxSizeCostPerByte*cost, "txSize")
+}
+
+}
+
+return next(ctx, tx, simulate)
+}
+
+// isIncompleteSignature tests whether SignatureData is fully filled in for simulation purposes
+func isIncompleteSignature(data signing.SignatureData)
+
+bool {
+ if data == nil {
+ return true
+}
+ switch data := data.(type) {
+ case *signing.SingleSignatureData:
+ return len(data.Signature) == 0
+ case *signing.MultiSignatureData:
+ if len(data.Signatures) == 0 {
+ return true
+}
+ if slices.ContainsFunc(data.Signatures, isIncompleteSignature) {
+ return true
+}
+
+}
+
+return false
+}
+
+type (
+ // TxTimeoutHeightDecorator defines an AnteHandler decorator that checks for a
+ // tx height timeout.
+ TxTimeoutHeightDecorator struct{
+}
+
+ // TxWithTimeoutHeight defines the interface a tx must implement in order for
+ // TxHeightTimeoutDecorator to process the tx.
+ TxWithTimeoutHeight interface {
+ sdk.Tx
+
+ GetTimeoutHeight()
+
+uint64
+ GetTimeoutTimeStamp()
+
+time.Time
+}
+)
+
+// TxTimeoutHeightDecorator defines an AnteHandler decorator that checks for a
+// tx height timeout.
+func NewTxTimeoutHeightDecorator()
+
+TxTimeoutHeightDecorator {
+ return TxTimeoutHeightDecorator{
+}
+}
+
+// AnteHandle implements an AnteHandler decorator for the TxHeightTimeoutDecorator
+// type where the current block height is checked against the tx's height timeout.
+// If a height timeout is provided (non-zero)
+
+and is less than the current block
+// height, then an error is returned.
+func (txh TxTimeoutHeightDecorator)
+
+AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bool, next sdk.AnteHandler) (sdk.Context, error) {
+ timeoutTx, ok := tx.(TxWithTimeoutHeight)
+ if !ok {
+ return ctx, errorsmod.Wrap(sdkerrors.ErrTxDecode, "expected tx to implement TxWithTimeoutHeight")
+}
+ timeoutHeight := timeoutTx.GetTimeoutHeight()
+ if timeoutHeight > 0 && uint64(ctx.BlockHeight()) > timeoutHeight {
+ return ctx, errorsmod.Wrapf(
+ sdkerrors.ErrTxTimeoutHeight, "block height: %d, timeout height: %d", ctx.BlockHeight(), timeoutHeight,
+ )
+}
+ timeoutTimestamp := timeoutTx.GetTimeoutTimeStamp()
+ blockTime := ctx.BlockHeader().Time
+ if !timeoutTimestamp.IsZero() && timeoutTimestamp.Unix() != 0 && timeoutTimestamp.Before(blockTime) {
+ return ctx, errorsmod.Wrapf(
+ sdkerrors.ErrTxTimeout, "block time: %s, timeout timestamp: %s", blockTime, timeoutTimestamp.String(),
+ )
+}
+
+return next(ctx, tx, simulate)
+}
+```
+
+* `Events ([]cmn.KVPair)`: Key-Value tags for filtering and indexing transactions (eg. by account). See [`event`s](/sdk/v0.53/learn/advanced/events) for more.
+* `Codespace (string)`: Namespace for the Code.
+
+#### RecheckTx
+
+After `Commit`, `CheckTx` is run again on all transactions that remain in the node's local mempool
+excluding the transactions that are included in the block. To prevent the mempool from rechecking all transactions
+every time a block is committed, the configuration option `mempool.recheck=false` can be set. As of
+Tendermint v0.32.1, an additional `Type` parameter is made available to the `CheckTx` function that
+indicates whether an incoming transaction is new (`CheckTxType_New`), or a recheck (`CheckTxType_Recheck`).
+This allows certain checks like signature verification to be skipped during `CheckTxType_Recheck`.
+
+## RunTx, AnteHandler, RunMsgs, PostHandler
+
+### RunTx
+
+`RunTx` is called from `CheckTx`/`FinalizeBlock` to handle the transaction, with `execModeCheck` or `execModeFinalize` as parameter to differentiate between the two modes of execution. Note that when `RunTx` receives a transaction, it has already been decoded.
+
+The first thing `RunTx` does upon being called is to retrieve the `context`'s `CacheMultiStore` by calling the `getContextForTx()` function with the appropriate mode (either `execModeCheck` or `execModeFinalize`). This `CacheMultiStore` is a branch of the main store, with cache functionality (for query requests), instantiated during `FinalizeBlock` for transaction execution and during the `Commit` of the previous block for `CheckTx`. After that, two `defer func()` are called for [`gas`](/sdk/v0.53/learn/beginner/gas-fees) management. They are executed when `runTx` returns and make sure `gas` is actually consumed, and will throw errors, if any.
+
+After that, `RunTx()` calls `ValidateBasic()`, when available and for backward compatibility, on each `sdk.Msg` in the `Tx`, which runs preliminary *stateless* validity checks. If any `sdk.Msg` fails to pass `ValidateBasic()`, `RunTx()` returns with an error.
+
+Then, the [`anteHandler`](#antehandler) of the application is run (if it exists). In preparation of this step, both the `checkState`/`finalizeBlockState`'s `context` and `context`'s `CacheMultiStore` are branched using the `cacheTxContext()` function.
+
+```go expandable
+package baseapp
+
+import (
+
+ "context"
+ "fmt"
+ "maps"
+ "math"
+ "slices"
+ "strconv"
+ "sync"
+ "github.com/cockroachdb/errors"
+ abci "github.com/cometbft/cometbft/abci/types"
+ "github.com/cometbft/cometbft/crypto/tmhash"
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ dbm "github.com/cosmos/cosmos-db"
+ "github.com/cosmos/gogoproto/proto"
+ protov2 "google.golang.org/protobuf/proto"
+ "cosmossdk.io/core/header"
+ errorsmod "cosmossdk.io/errors"
+ "cosmossdk.io/log"
+ "cosmossdk.io/store"
+ storemetrics "cosmossdk.io/store/metrics"
+ "cosmossdk.io/store/snapshots"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/baseapp/oe"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ servertypes "github.com/cosmos/cosmos-sdk/server/types"
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/types/mempool"
+ "github.com/cosmos/cosmos-sdk/types/msgservice"
+)
+
+type (
+ execMode uint8
+
+ // StoreLoader defines a customizable function to control how we load the
+ // CommitMultiStore from disk. This is useful for state migration, when
+ // loading a datastore written with an older version of the software. In
+ // particular, if a module changed the substore key name (or removed a substore)
+ // between two versions of the software.
+ StoreLoader func(ms storetypes.CommitMultiStore)
+
+error
+)
+
+const (
+ execModeCheck execMode = iota // Check a transaction
+ execModeReCheck // Recheck a (pending)
+
+transaction after a commit
+ execModeSimulate // Simulate a transaction
+ execModePrepareProposal // Prepare a block proposal
+ execModeProcessProposal // Process a block proposal
+ execModeVoteExtension // Extend or verify a pre-commit vote
+ execModeVerifyVoteExtension // Verify a vote extension
+ execModeFinalize // Finalize a block proposal
+)
+
+var _ servertypes.ABCI = (*BaseApp)(nil)
+
+// BaseApp reflects the ABCI application implementation.
+type BaseApp struct {
+ // initialized on creation
+ mu sync.Mutex // mu protects the fields below.
+ logger log.Logger
+ name string // application name from abci.BlockInfo
+ db dbm.DB // common DB backend
+ cms storetypes.CommitMultiStore // Main (uncached)
+
+state
+ qms storetypes.MultiStore // Optional alternative multistore for querying only.
+ storeLoader StoreLoader // function to handle store loading, may be overridden with SetStoreLoader()
+
+grpcQueryRouter *GRPCQueryRouter // router for redirecting gRPC query calls
+ msgServiceRouter *MsgServiceRouter // router for redirecting Msg service messages
+ interfaceRegistry codectypes.InterfaceRegistry
+ txDecoder sdk.TxDecoder // unmarshal []byte into sdk.Tx
+ txEncoder sdk.TxEncoder // marshal sdk.Tx into []byte
+
+ mempool mempool.Mempool // application side mempool
+ anteHandler sdk.AnteHandler // ante handler for fee and auth
+ postHandler sdk.PostHandler // post handler, optional
+
+ checkTxHandler sdk.CheckTxHandler // ABCI CheckTx handler
+ initChainer sdk.InitChainer // ABCI InitChain handler
+ preBlocker sdk.PreBlocker // logic to run before BeginBlocker
+ beginBlocker sdk.BeginBlocker // (legacy ABCI)
+
+BeginBlock handler
+ endBlocker sdk.EndBlocker // (legacy ABCI)
+
+EndBlock handler
+ processProposal sdk.ProcessProposalHandler // ABCI ProcessProposal handler
+ prepareProposal sdk.PrepareProposalHandler // ABCI PrepareProposal
+ extendVote sdk.ExtendVoteHandler // ABCI ExtendVote handler
+ verifyVoteExt sdk.VerifyVoteExtensionHandler // ABCI VerifyVoteExtension handler
+ prepareCheckStater sdk.PrepareCheckStater // logic to run during commit using the checkState
+ precommiter sdk.Precommiter // logic to run during commit using the deliverState
+
+ addrPeerFilter sdk.PeerFilter // filter peers by address and port
+ idPeerFilter sdk.PeerFilter // filter peers by node ID
+ fauxMerkleMode bool // if true, IAVL MountStores uses MountStoresDB for simulation speed.
+ sigverifyTx bool // in the simulation test, since the account does not have a private key, we have to ignore the tx sigverify.
+
+ // manages snapshots, i.e. dumps of app state at certain intervals
+ snapshotManager *snapshots.Manager
+
+ // volatile states:
+ //
+ // - checkState is set on InitChain and reset on Commit
+ // - finalizeBlockState is set on InitChain and FinalizeBlock and set to nil
+ // on Commit.
+ //
+ // - checkState: Used for CheckTx, which is set based on the previous block's
+ // state. This state is never committed.
+ //
+ // - prepareProposalState: Used for PrepareProposal, which is set based on the
+ // previous block's state. This state is never committed. In case of multiple
+ // consensus rounds, the state is always reset to the previous block's state.
+ //
+ // - processProposalState: Used for ProcessProposal, which is set based on the
+ // the previous block's state. This state is never committed. In case of
+ // multiple rounds, the state is always reset to the previous block's state.
+ //
+ // - finalizeBlockState: Used for FinalizeBlock, which is set based on the
+ // previous block's state. This state is committed.
+ checkState *state
+ prepareProposalState *state
+ processProposalState *state
+ finalizeBlockState *state
+
+ // An inter-block write-through cache provided to the context during the ABCI
+ // FinalizeBlock call.
+ interBlockCache storetypes.MultiStorePersistentCache
+
+ // paramStore is used to query for ABCI consensus parameters from an
+ // application parameter store.
+ paramStore ParamStore
+
+ // queryGasLimit defines the maximum gas for queries; unbounded if 0.
+ queryGasLimit uint64
+
+ // The minimum gas prices a validator is willing to accept for processing a
+ // transaction. This is mainly used for DoS and spam prevention.
+ minGasPrices sdk.DecCoins
+
+ // initialHeight is the initial height at which we start the BaseApp
+ initialHeight int64
+
+ // flag for sealing options and parameters to a BaseApp
+ sealed bool
+
+ // block height at which to halt the chain and gracefully shutdown
+ haltHeight uint64
+
+ // minimum block time (in Unix seconds)
+
+at which to halt the chain and gracefully shutdown
+ haltTime uint64
+
+ // minRetainBlocks defines the minimum block height offset from the current
+ // block being committed, such that all blocks past this offset are pruned
+ // from CometBFT. It is used as part of the process of determining the
+ // ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates
+ // that no blocks should be pruned.
+ //
+ // Note: CometBFT block pruning is dependant on this parameter in conjunction
+ // with the unbonding (safety threshold)
+
+period, state pruning and state sync
+ // snapshot parameters to determine the correct minimum value of
+ // ResponseCommit.RetainHeight.
+ minRetainBlocks uint64
+
+ // application's version string
+ version string
+
+ // application's protocol version that increments on every upgrade
+ // if BaseApp is passed to the upgrade keeper's NewKeeper method.
+ appVersion uint64
+
+ // recovery handler for app.runTx method
+ runTxRecoveryMiddleware recoveryMiddleware
+
+ // trace set will return full stack traces for errors in ABCI Log field
+ trace bool
+
+ // indexEvents defines the set of events in the form {
+ eventType
+}.{
+ attributeKey
+},
+ // which informs CometBFT what to index. If empty, all events will be indexed.
+ indexEvents map[string]struct{
+}
+
+ // streamingManager for managing instances and configuration of ABCIListener services
+ streamingManager storetypes.StreamingManager
+
+ chainID string
+
+ cdc codec.Codec
+
+ // optimisticExec contains the context required for Optimistic Execution,
+ // including the goroutine handling.This is experimental and must be enabled
+ // by developers.
+ optimisticExec *oe.OptimisticExecution
+
+ // disableBlockGasMeter will disable the block gas meter if true, block gas meter is tricky to support
+ // when executing transactions in parallel.
+ // when disabled, the block gas meter in context is a noop one.
+ //
+ // SAFETY: it's safe to do if validators validate the total gas wanted in the `ProcessProposal`, which is the case in the default handler.
+ disableBlockGasMeter bool
+}
+
+// NewBaseApp returns a reference to an initialized BaseApp. It accepts a
+// variadic number of option functions, which act on the BaseApp to set
+// configuration choices.
+func NewBaseApp(
+ name string, logger log.Logger, db dbm.DB, txDecoder sdk.TxDecoder, options ...func(*BaseApp),
+) *BaseApp {
+ app := &BaseApp{
+ logger: logger.With(log.ModuleKey, "baseapp"),
+ name: name,
+ db: db,
+ cms: store.NewCommitMultiStore(db, logger, storemetrics.NewNoOpMetrics()), // by default we use a no-op metric gather in store
+ storeLoader: DefaultStoreLoader,
+ grpcQueryRouter: NewGRPCQueryRouter(),
+ msgServiceRouter: NewMsgServiceRouter(),
+ txDecoder: txDecoder,
+ fauxMerkleMode: false,
+ sigverifyTx: true,
+ queryGasLimit: math.MaxUint64,
+}
+ for _, option := range options {
+ option(app)
+}
+ if app.mempool == nil {
+ app.SetMempool(mempool.NoOpMempool{
+})
+}
+ abciProposalHandler := NewDefaultProposalHandler(app.mempool, app)
+ if app.prepareProposal == nil {
+ app.SetPrepareProposal(abciProposalHandler.PrepareProposalHandler())
+}
+ if app.processProposal == nil {
+ app.SetProcessProposal(abciProposalHandler.ProcessProposalHandler())
+}
+ if app.extendVote == nil {
+ app.SetExtendVoteHandler(NoOpExtendVote())
+}
+ if app.verifyVoteExt == nil {
+ app.SetVerifyVoteExtensionHandler(NoOpVerifyVoteExtensionHandler())
+}
+ if app.interBlockCache != nil {
+ app.cms.SetInterBlockCache(app.interBlockCache)
+}
+
+app.runTxRecoveryMiddleware = newDefaultRecoveryMiddleware()
+
+ // Initialize with an empty interface registry to avoid nil pointer dereference.
+ // Unless SetInterfaceRegistry is called with an interface registry with proper address codecs baseapp will panic.
+ app.cdc = codec.NewProtoCodec(codectypes.NewInterfaceRegistry())
+
+protoFiles, err := proto.MergedRegistry()
+ if err != nil {
+ logger.Warn("error creating merged proto registry", "error", err)
+}
+
+else {
+ err = msgservice.ValidateProtoAnnotations(protoFiles)
+ if err != nil {
+ // Once we switch to using protoreflect-based antehandlers, we might
+ // want to panic here instead of logging a warning.
+ logger.Warn("error validating merged proto registry annotations", "error", err)
+}
+
+}
+
+return app
+}
+
+// Name returns the name of the BaseApp.
+func (app *BaseApp)
+
+Name()
+
+string {
+ return app.name
+}
+
+// AppVersion returns the application's protocol version.
+func (app *BaseApp)
+
+AppVersion()
+
+uint64 {
+ return app.appVersion
+}
+
+// Version returns the application's version string.
+func (app *BaseApp)
+
+Version()
+
+string {
+ return app.version
+}
+
+// Logger returns the logger of the BaseApp.
+func (app *BaseApp)
+
+Logger()
+
+log.Logger {
+ return app.logger
+}
+
+// Trace returns the boolean value for logging error stack traces.
+func (app *BaseApp)
+
+Trace()
+
+bool {
+ return app.trace
+}
+
+// MsgServiceRouter returns the MsgServiceRouter of a BaseApp.
+func (app *BaseApp)
+
+MsgServiceRouter() *MsgServiceRouter {
+ return app.msgServiceRouter
+}
+
+// GRPCQueryRouter returns the GRPCQueryRouter of a BaseApp.
+func (app *BaseApp)
+
+GRPCQueryRouter() *GRPCQueryRouter {
+ return app.grpcQueryRouter
+}
+
+// MountStores mounts all IAVL or DB stores to the provided keys in the BaseApp
+// multistore.
+func (app *BaseApp)
+
+MountStores(keys ...storetypes.StoreKey) {
+ for _, key := range keys {
+ switch key.(type) {
+ case *storetypes.KVStoreKey:
+ if !app.fauxMerkleMode {
+ app.MountStore(key, storetypes.StoreTypeIAVL)
+}
+
+else {
+ // StoreTypeDB doesn't do anything upon commit, and it doesn't
+ // retain history, but it's useful for faster simulation.
+ app.MountStore(key, storetypes.StoreTypeDB)
+}
+ case *storetypes.TransientStoreKey:
+ app.MountStore(key, storetypes.StoreTypeTransient)
+ case *storetypes.MemoryStoreKey:
+ app.MountStore(key, storetypes.StoreTypeMemory)
+
+default:
+ panic(fmt.Sprintf("Unrecognized store key type :%T", key))
+}
+
+}
+}
+
+// MountKVStores mounts all IAVL or DB stores to the provided keys in the
+// BaseApp multistore.
+func (app *BaseApp)
+
+MountKVStores(keys map[string]*storetypes.KVStoreKey) {
+ for _, key := range keys {
+ if !app.fauxMerkleMode {
+ app.MountStore(key, storetypes.StoreTypeIAVL)
+}
+
+else {
+ // StoreTypeDB doesn't do anything upon commit, and it doesn't
+ // retain history, but it's useful for faster simulation.
+ app.MountStore(key, storetypes.StoreTypeDB)
+}
+
+}
+}
+
+// MountTransientStores mounts all transient stores to the provided keys in
+// the BaseApp multistore.
+func (app *BaseApp)
+
+MountTransientStores(keys map[string]*storetypes.TransientStoreKey) {
+ for _, key := range keys {
+ app.MountStore(key, storetypes.StoreTypeTransient)
+}
+}
+
+// MountMemoryStores mounts all in-memory KVStores with the BaseApp's internal
+// commit multi-store.
+func (app *BaseApp)
+
+MountMemoryStores(keys map[string]*storetypes.MemoryStoreKey) {
+ skeys := slices.Sorted(maps.Keys(keys))
+ for _, key := range skeys {
+ memKey := keys[key]
+ app.MountStore(memKey, storetypes.StoreTypeMemory)
+}
+}
+
+// MountStore mounts a store to the provided key in the BaseApp multistore,
+// using the default DB.
+func (app *BaseApp)
+
+MountStore(key storetypes.StoreKey, typ storetypes.StoreType) {
+ app.cms.MountStoreWithDB(key, typ, nil)
+}
+
+// LoadLatestVersion loads the latest application version. It will panic if
+// called more than once on a running BaseApp.
+func (app *BaseApp)
+
+LoadLatestVersion()
+
+error {
+ err := app.storeLoader(app.cms)
+ if err != nil {
+ return fmt.Errorf("failed to load latest version: %w", err)
+}
+
+return app.Init()
+}
+
+// DefaultStoreLoader will be used by default and loads the latest version
+func DefaultStoreLoader(ms storetypes.CommitMultiStore)
+
+error {
+ return ms.LoadLatestVersion()
+}
+
+// CommitMultiStore returns the root multi-store.
+// App constructor can use this to access the `cms`.
+// UNSAFE: must not be used during the abci life cycle.
+func (app *BaseApp)
+
+CommitMultiStore()
+
+storetypes.CommitMultiStore {
+ return app.cms
+}
+
+// SnapshotManager returns the snapshot manager.
+// application use this to register extra extension snapshotters.
+func (app *BaseApp)
+
+SnapshotManager() *snapshots.Manager {
+ return app.snapshotManager
+}
+
+// LoadVersion loads the BaseApp application version. It will panic if called
+// more than once on a running baseapp.
+func (app *BaseApp)
+
+LoadVersion(version int64)
+
+error {
+ app.logger.Info("NOTICE: this could take a long time to migrate IAVL store to fastnode if you enable Fast Node.\n")
+ err := app.cms.LoadVersion(version)
+ if err != nil {
+ return fmt.Errorf("failed to load version %d: %w", version, err)
+}
+
+return app.Init()
+}
+
+// LastCommitID returns the last CommitID of the multistore.
+func (app *BaseApp)
+
+LastCommitID()
+
+storetypes.CommitID {
+ return app.cms.LastCommitID()
+}
+
+// LastBlockHeight returns the last committed block height.
+func (app *BaseApp)
+
+LastBlockHeight()
+
+int64 {
+ return app.cms.LastCommitID().Version
+}
+
+// ChainID returns the chainID of the app.
+func (app *BaseApp)
+
+ChainID()
+
+string {
+ return app.chainID
+}
+
+// AnteHandler returns the AnteHandler of the app.
+func (app *BaseApp)
+
+AnteHandler()
+
+sdk.AnteHandler {
+ return app.anteHandler
+}
+
+// Mempool returns the Mempool of the app.
+func (app *BaseApp)
+
+Mempool()
+
+mempool.Mempool {
+ return app.mempool
+}
+
+// Init initializes the app. It seals the app, preventing any
+// further modifications. In addition, it validates the app against
+// the earlier provided settings. Returns an error if validation fails.
+// nil otherwise. Panics if the app is already sealed.
+func (app *BaseApp)
+
+Init()
+
+error {
+ if app.sealed {
+ panic("cannot call initFromMainStore: baseapp already sealed")
+}
+ if app.cms == nil {
+ return errors.New("commit multi-store must not be nil")
+}
+ emptyHeader := cmtproto.Header{
+ ChainID: app.chainID
+}
+
+ // needed for the export command which inits from store but never calls initchain
+ app.setState(execModeCheck, emptyHeader)
+
+app.Seal()
+
+return app.cms.GetPruning().Validate()
+}
+
+func (app *BaseApp)
+
+setMinGasPrices(gasPrices sdk.DecCoins) {
+ app.minGasPrices = gasPrices
+}
+
+func (app *BaseApp)
+
+setHaltHeight(haltHeight uint64) {
+ app.haltHeight = haltHeight
+}
+
+func (app *BaseApp)
+
+setHaltTime(haltTime uint64) {
+ app.haltTime = haltTime
+}
+
+func (app *BaseApp)
+
+setMinRetainBlocks(minRetainBlocks uint64) {
+ app.minRetainBlocks = minRetainBlocks
+}
+
+func (app *BaseApp)
+
+setInterBlockCache(cache storetypes.MultiStorePersistentCache) {
+ app.interBlockCache = cache
+}
+
+func (app *BaseApp)
+
+setTrace(trace bool) {
+ app.trace = trace
+}
+
+func (app *BaseApp)
+
+setIndexEvents(ie []string) {
+ app.indexEvents = make(map[string]struct{
+})
+ for _, e := range ie {
+ app.indexEvents[e] = struct{
+}{
+}
+
+}
+}
+
+// Seal seals a BaseApp. It prohibits any further modifications to a BaseApp.
+func (app *BaseApp)
+
+Seal() {
+ app.sealed = true
+}
+
+// IsSealed returns true if the BaseApp is sealed and false otherwise.
+func (app *BaseApp)
+
+IsSealed()
+
+bool {
+ return app.sealed
+}
+
+// setState sets the BaseApp's state for the corresponding mode with a branched
+// multi-store (i.e. a CacheMultiStore)
+
+and a new Context with the same
+// multi-store branch, and provided header.
+func (app *BaseApp)
+
+setState(mode execMode, h cmtproto.Header) {
+ ms := app.cms.CacheMultiStore()
+ headerInfo := header.Info{
+ Height: h.Height,
+ Time: h.Time,
+ ChainID: h.ChainID,
+ AppHash: h.AppHash,
+}
+ baseState := &state{
+ ms: ms,
+ ctx: sdk.NewContext(ms, h, false, app.logger).
+ WithStreamingManager(app.streamingManager).
+ WithHeaderInfo(headerInfo),
+}
+ switch mode {
+ case execModeCheck:
+ baseState.SetContext(baseState.Context().WithIsCheckTx(true).WithMinGasPrices(app.minGasPrices))
+
+app.checkState = baseState
+ case execModePrepareProposal:
+ app.prepareProposalState = baseState
+ case execModeProcessProposal:
+ app.processProposalState = baseState
+ case execModeFinalize:
+ app.finalizeBlockState = baseState
+
+ default:
+ panic(fmt.Sprintf("invalid runTxMode for setState: %d", mode))
+}
+}
+
+// SetCircuitBreaker sets the circuit breaker for the BaseApp.
+// The circuit breaker is checked on every message execution to verify if a transaction should be executed or not.
+func (app *BaseApp)
+
+SetCircuitBreaker(cb CircuitBreaker) {
+ if app.msgServiceRouter == nil {
+ panic("cannot set circuit breaker with no msg service router set")
+}
+
+app.msgServiceRouter.SetCircuit(cb)
+}
+
+// GetConsensusParams returns the current consensus parameters from the BaseApp's
+// ParamStore. If the BaseApp has no ParamStore defined, nil is returned.
+func (app *BaseApp)
+
+GetConsensusParams(ctx sdk.Context)
+
+cmtproto.ConsensusParams {
+ if app.paramStore == nil {
+ return cmtproto.ConsensusParams{
+}
+
+}
+
+cp, err := app.paramStore.Get(ctx)
+ if err != nil {
+ // This could happen while migrating from v0.45/v0.46 to v0.50, we should
+ // allow it to happen so during preblock the upgrade plan can be executed
+ // and the consensus params set for the first time in the new format.
+ app.logger.Error("failed to get consensus params", "err", err)
+
+return cmtproto.ConsensusParams{
+}
+
+}
+
+return cp
+}
+
+// StoreConsensusParams sets the consensus parameters to the BaseApp's param
+// store.
+//
+// NOTE: We're explicitly not storing the CometBFT app_version in the param store.
+// It's stored instead in the x/upgrade store, with its own bump logic.
+func (app *BaseApp) StoreConsensusParams(ctx sdk.Context, cp cmtproto.ConsensusParams) error {
+ if app.paramStore == nil {
+ return errors.New("cannot store consensus params with no params store set")
+}
+
+return app.paramStore.Set(ctx, cp)
+}
+
+// AddRunTxRecoveryHandler adds custom app.runTx method panic handlers.
+func (app *BaseApp) AddRunTxRecoveryHandler(handlers ...RecoveryHandler) {
+ for _, h := range handlers {
+ app.runTxRecoveryMiddleware = newRecoveryMiddleware(h, app.runTxRecoveryMiddleware)
+}
+}
+
+// GetMaximumBlockGas gets the maximum gas from the consensus params. It panics
+// if maximum block gas is less than negative one and returns zero if negative
+// one.
+func (app *BaseApp) GetMaximumBlockGas(ctx sdk.Context) uint64 {
+ cp := app.GetConsensusParams(ctx)
+ if cp.Block == nil {
+ return 0
+}
+ maxGas := cp.Block.MaxGas
+ switch {
+ case maxGas < -1:
+ panic(fmt.Sprintf("invalid maximum block gas: %d", maxGas))
+ case maxGas == -1:
+ return 0
+
+ default:
+ return uint64(maxGas)
+}
+}
+
+func (app *BaseApp) validateFinalizeBlockHeight(req *abci.RequestFinalizeBlock) error {
+ if req.Height < 1 {
+ return fmt.Errorf("invalid height: %d", req.Height)
+}
+ lastBlockHeight := app.LastBlockHeight()
+
+ // expectedHeight holds the expected height to validate
+ var expectedHeight int64
+ if lastBlockHeight == 0 && app.initialHeight > 1 {
+ // In this case, we're validating the first block of the chain, i.e no
+ // previous commit. The height we're expecting is the initial height.
+ expectedHeight = app.initialHeight
+}
+
+else {
+ // This case can mean two things:
+ //
+ // - Either there was already a previous commit in the store, in which
+ // case we increment the version from there.
+ // - Or there was no previous commit, in which case we start at version 1.
+ expectedHeight = lastBlockHeight + 1
+}
+ if req.Height != expectedHeight {
+ return fmt.Errorf("invalid height: %d; expected: %d", req.Height, expectedHeight)
+}
+
+return nil
+}
+
+// validateBasicTxMsgs executes basic validator calls for messages.
+func validateBasicTxMsgs(msgs []sdk.Msg) error {
+ if len(msgs) == 0 {
+ return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "must contain at least one message")
+}
+ for _, msg := range msgs {
+ m, ok := msg.(sdk.HasValidateBasic)
+ if !ok {
+ continue
+}
+ if err := m.ValidateBasic(); err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+func (app *BaseApp) getState(mode execMode) *state {
+ switch mode {
+ case execModeFinalize:
+ return app.finalizeBlockState
+ case execModePrepareProposal:
+ return app.prepareProposalState
+ case execModeProcessProposal:
+ return app.processProposalState
+
+ default:
+ return app.checkState
+}
+}
+
+func (app *BaseApp) getBlockGasMeter(ctx sdk.Context) storetypes.GasMeter {
+ if app.disableBlockGasMeter {
+ return noopGasMeter{
+}
+
+}
+ if maxGas := app.GetMaximumBlockGas(ctx); maxGas > 0 {
+ return storetypes.NewGasMeter(maxGas)
+}
+
+return storetypes.NewInfiniteGasMeter()
+}
+
+// retrieve the context for the tx w/ txBytes and other memoized values.
+func (app *BaseApp) getContextForTx(mode execMode, txBytes []byte) sdk.Context {
+ app.mu.Lock()
+
+defer app.mu.Unlock()
+ modeState := app.getState(mode)
+ if modeState == nil {
+ panic(fmt.Sprintf("state is nil for mode %v", mode))
+}
+ ctx := modeState.Context().
+ WithTxBytes(txBytes).
+ WithGasMeter(storetypes.NewInfiniteGasMeter())
+ // WithVoteInfos(app.voteInfos) // TODO: identify if this is needed
+
+ ctx = ctx.WithIsSigverifyTx(app.sigverifyTx)
+
+ctx = ctx.WithConsensusParams(app.GetConsensusParams(ctx))
+ if mode == execModeReCheck {
+ ctx = ctx.WithIsReCheckTx(true)
+}
+ if mode == execModeSimulate {
+ ctx, _ = ctx.CacheContext()
+
+ctx = ctx.WithExecMode(sdk.ExecMode(execModeSimulate))
+}
+
+return ctx
+}
+
+// cacheTxContext returns a new context based off of the provided context with
+// a branched multi-store.
+func (app *BaseApp) cacheTxContext(ctx sdk.Context, txBytes []byte) (sdk.Context, storetypes.CacheMultiStore) {
+ ms := ctx.MultiStore()
+ msCache := ms.CacheMultiStore()
+ if msCache.TracingEnabled() {
+ msCache = msCache.SetTracingContext(
+ storetypes.TraceContext(
+ map[string]any{
+ "txHash": fmt.Sprintf("%X", tmhash.Sum(txBytes)),
+},
+ ),
+ ).(storetypes.CacheMultiStore)
+}
+
+return ctx.WithMultiStore(msCache), msCache
+}
+
+func (app *BaseApp) preBlock(req *abci.RequestFinalizeBlock) ([]abci.Event, error) {
+ var events []abci.Event
+ if app.preBlocker != nil {
+ ctx := app.finalizeBlockState.Context().WithEventManager(sdk.NewEventManager())
+
+rsp, err := app.preBlocker(ctx, req)
+ if err != nil {
+ return nil, err
+}
+ // rsp.ConsensusParamsChanged is true from preBlocker means ConsensusParams in store get changed
+ // write the consensus parameters in store to context
+ if rsp.ConsensusParamsChanged {
+ ctx = ctx.WithConsensusParams(app.GetConsensusParams(ctx))
+ // GasMeter must be set after we get a context with updated consensus params.
+ gasMeter := app.getBlockGasMeter(ctx)
+
+ctx = ctx.WithBlockGasMeter(gasMeter)
+
+app.finalizeBlockState.SetContext(ctx)
+}
+
+events = ctx.EventManager().ABCIEvents()
+}
+
+return events, nil
+}
+
+func (app *BaseApp) beginBlock(_ *abci.RequestFinalizeBlock) (sdk.BeginBlock, error) {
+ var (
+ resp sdk.BeginBlock
+ err error
+ )
+ if app.beginBlocker != nil {
+ resp, err = app.beginBlocker(app.finalizeBlockState.Context())
+ if err != nil {
+ return resp, err
+}
+
+ // append BeginBlock attributes to all events in the EndBlock response
+ for i, event := range resp.Events {
+ resp.Events[i].Attributes = append(
+ event.Attributes,
+ abci.EventAttribute{
+ Key: "mode",
+ Value: "BeginBlock",
+},
+ )
+}
+
+resp.Events = sdk.MarkEventsToIndex(resp.Events, app.indexEvents)
+}
+
+return resp, nil
+}
+
+func (app *BaseApp) deliverTx(tx []byte) *abci.ExecTxResult {
+ gInfo := sdk.GasInfo{
+}
+ resultStr := "successful"
+
+ var resp *abci.ExecTxResult
+
+ defer func() {
+ telemetry.IncrCounter(1, "tx", "count")
+
+telemetry.IncrCounter(1, "tx", resultStr)
+
+telemetry.SetGauge(float32(gInfo.GasUsed), "tx", "gas", "used")
+
+telemetry.SetGauge(float32(gInfo.GasWanted), "tx", "gas", "wanted")
+}()
+
+gInfo, result, anteEvents, err := app.runTx(execModeFinalize, tx, nil)
+ if err != nil {
+ resultStr = "failed"
+ resp = sdkerrors.ResponseExecTxResultWithEvents(
+ err,
+ gInfo.GasWanted,
+ gInfo.GasUsed,
+ sdk.MarkEventsToIndex(anteEvents, app.indexEvents),
+ app.trace,
+ )
+
+return resp
+}
+
+resp = &abci.ExecTxResult{
+ GasWanted: int64(gInfo.GasWanted),
+ GasUsed: int64(gInfo.GasUsed),
+ Log: result.Log,
+ Data: result.Data,
+ Events: sdk.MarkEventsToIndex(result.Events, app.indexEvents),
+}
+
+return resp
+}
+
+// endBlock is an application-defined function that is called after transactions
+// have been processed in FinalizeBlock.
+func (app *BaseApp) endBlock(_ context.Context) (sdk.EndBlock, error) {
+ var endblock sdk.EndBlock
+ if app.endBlocker != nil {
+ eb, err := app.endBlocker(app.finalizeBlockState.Context())
+ if err != nil {
+ return endblock, err
+}
+
+ // append EndBlock attributes to all events in the EndBlock response
+ for i, event := range eb.Events {
+ eb.Events[i].Attributes = append(
+ event.Attributes,
+ abci.EventAttribute{
+ Key: "mode",
+ Value: "EndBlock",
+},
+ )
+}
+
+eb.Events = sdk.MarkEventsToIndex(eb.Events, app.indexEvents)
+
+endblock = eb
+}
+
+return endblock, nil
+}
+
+// runTx processes a transaction within a given execution mode, encoded transaction
+// bytes, and the decoded transaction itself. All state transitions occur through
+// a cached Context depending on the mode provided. State only gets persisted
+// if all messages get executed successfully and the execution mode is DeliverTx.
+// Note, gas execution info is always returned. A reference to a Result is
+// returned if the tx does not run out of gas and if all the messages are valid
+// and execute successfully. An error is returned otherwise.
+// both txbytes and the decoded tx are passed to runTx to avoid the state machine encoding the tx and decoding the transaction twice
+// passing the decoded tx to runTX is optional, it will be decoded if the tx is nil
+func (app *BaseApp) runTx(mode execMode, txBytes []byte, tx sdk.Tx) (gInfo sdk.GasInfo, result *sdk.Result, anteEvents []abci.Event, err error) {
+ // NOTE: GasWanted should be returned by the AnteHandler. GasUsed is
+ // determined by the GasMeter. We need access to the context to get the gas
+ // meter, so we initialize upfront.
+ var gasWanted uint64
+ ctx := app.getContextForTx(mode, txBytes)
+ ms := ctx.MultiStore()
+
+ // only run the tx if there is block gas remaining
+ if mode == execModeFinalize && ctx.BlockGasMeter().IsOutOfGas() {
+ return gInfo, nil, nil, errorsmod.Wrap(sdkerrors.ErrOutOfGas, "no block gas left to run tx")
+}
+
+defer func() {
+ if r := recover(); r != nil {
+ recoveryMW := newOutOfGasRecoveryMiddleware(gasWanted, ctx, app.runTxRecoveryMiddleware)
+
+err, result = processRecovery(r, recoveryMW), nil
+ ctx.Logger().Error("panic recovered in runTx", "err", err)
+}
+
+gInfo = sdk.GasInfo{
+ GasWanted: gasWanted,
+ GasUsed: ctx.GasMeter().GasConsumed(),
+}
+
+}()
+ blockGasConsumed := false
+
+ // consumeBlockGas makes sure block gas is consumed at most once. It must
+ // happen after tx processing, and must be executed even if tx processing
+ // fails. Hence, it's execution is deferred.
+ consumeBlockGas := func() {
+ if !blockGasConsumed {
+ blockGasConsumed = true
+ ctx.BlockGasMeter().ConsumeGas(
+ ctx.GasMeter().GasConsumedToLimit(), "block gas meter",
+ )
+}
+
+}
+
+ // If BlockGasMeter() panics it will be caught by the above recover and will
+ // return an error - in any case BlockGasMeter will consume gas past the limit.
+ //
+ // NOTE: consumeBlockGas must exist in a separate defer function from the
+ // general deferred recovery function to recover from consumeBlockGas as it'll
+ // be executed first (deferred statements are executed as stack).
+ if mode == execModeFinalize {
+ defer consumeBlockGas()
+}
+
+ // if the transaction is not decoded, decode it here
+ if tx == nil {
+ tx, err = app.txDecoder(txBytes)
+ if err != nil {
+ return sdk.GasInfo{
+ GasUsed: 0,
+ GasWanted: 0,
+}, nil, nil, sdkerrors.ErrTxDecode.Wrap(err.Error())
+}
+
+}
+ msgs := tx.GetMsgs()
+ if err := validateBasicTxMsgs(msgs); err != nil {
+ return sdk.GasInfo{
+}, nil, nil, err
+}
+ for _, msg := range msgs {
+ handler := app.msgServiceRouter.Handler(msg)
+ if handler == nil {
+ return sdk.GasInfo{
+}, nil, nil, errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "no message handler found for %T", msg)
+}
+
+}
+ if app.anteHandler != nil {
+ var (
+ anteCtx sdk.Context
+ msCache storetypes.CacheMultiStore
+ )
+
+ // Branch context before AnteHandler call in case it aborts.
+ // This is required for both CheckTx and DeliverTx.
+ // Ref: https://github.com/cosmos/cosmos-sdk/issues/2772
+ //
+ // NOTE: Alternatively, we could require that AnteHandler ensures that
+ // writes do not happen if aborted/failed. This may have some
+ // performance benefits, but it'll be more difficult to get right.
+ anteCtx, msCache = app.cacheTxContext(ctx, txBytes)
+
+anteCtx = anteCtx.WithEventManager(sdk.NewEventManager())
+
+newCtx, err := app.anteHandler(anteCtx, tx, mode == execModeSimulate)
+ if !newCtx.IsZero() {
+ // At this point, newCtx.MultiStore() is a store branch, or something else
+ // replaced by the AnteHandler. We want the original multistore.
+ //
+ // Also, in the case of the tx aborting, we need to track gas consumed via
+ // the instantiated gas meter in the AnteHandler, so we update the context
+ // prior to returning.
+ ctx = newCtx.WithMultiStore(ms)
+}
+ events := ctx.EventManager().Events()
+
+ // GasMeter expected to be set in AnteHandler
+ gasWanted = ctx.GasMeter().Limit()
+ if err != nil {
+ if mode == execModeReCheck {
+ // if the ante handler fails on recheck, we want to remove the tx from the mempool
+ if mempoolErr := app.mempool.Remove(tx); mempoolErr != nil {
+ return gInfo, nil, anteEvents, errors.Join(err, mempoolErr)
+}
+
+}
+
+return gInfo, nil, nil, err
+}
+
+msCache.Write()
+
+anteEvents = events.ToABCIEvents()
+}
+ switch mode {
+ case execModeCheck:
+ err = app.mempool.Insert(ctx, tx)
+ if err != nil {
+ return gInfo, nil, anteEvents, err
+}
+ case execModeFinalize:
+ err = app.mempool.Remove(tx)
+ if err != nil && !errors.Is(err, mempool.ErrTxNotFound) {
+ return gInfo, nil, anteEvents,
+ fmt.Errorf("failed to remove tx from mempool: %w", err)
+}
+
+}
+
+ // Create a new Context based off of the existing Context with a MultiStore branch
+ // in case message processing fails. At this point, the MultiStore
+ // is a branch of a branch.
+ runMsgCtx, msCache := app.cacheTxContext(ctx, txBytes)
+
+ // Attempt to execute all messages and only update state if all messages pass
+ // and we're in DeliverTx. Note, runMsgs will never return a reference to a
+ // Result if any single message fails or does not have a registered Handler.
+ msgsV2, err := tx.GetMsgsV2()
+ if err == nil {
+ result, err = app.runMsgs(runMsgCtx, msgs, msgsV2, mode)
+}
+
+ // Run optional postHandlers (should run regardless of the execution result).
+ //
+ // Note: If the postHandler fails, we also revert the runMsgs state.
+ if app.postHandler != nil {
+ // The runMsgCtx context currently contains events emitted by the ante handler.
+ // We clear this to correctly order events without duplicates.
+ // Note that the state is still preserved.
+ postCtx := runMsgCtx.WithEventManager(sdk.NewEventManager())
+
+newCtx, errPostHandler := app.postHandler(postCtx, tx, mode == execModeSimulate, err == nil)
+ if errPostHandler != nil {
+ if err == nil {
+ // when the msg was handled successfully, return the post handler error only
+ return gInfo, nil, anteEvents, errPostHandler
+}
+ // otherwise append to the msg error so that we keep the original error code for better user experience
+ return gInfo, nil, anteEvents, errorsmod.Wrapf(err, "postHandler: %s", errPostHandler)
+}
+
+ // we don't want runTx to panic if runMsgs has failed earlier
+ if result == nil {
+ result = &sdk.Result{
+}
+
+}
+
+result.Events = append(result.Events, newCtx.EventManager().ABCIEvents()...)
+}
+ if err == nil {
+ if mode == execModeFinalize {
+ // When block gas exceeds, it'll panic and won't commit the cached store.
+ consumeBlockGas()
+
+msCache.Write()
+}
+ if len(anteEvents) > 0 && (mode == execModeFinalize || mode == execModeSimulate) {
+ // append the events in the order of occurrence
+ result.Events = append(anteEvents, result.Events...)
+}
+
+}
+
+return gInfo, result, anteEvents, err
+}
+
+// runMsgs iterates through a list of messages and executes them with the provided
+// Context and execution mode. Messages will only be executed during simulation
+// and DeliverTx. An error is returned if any single message fails or if a
+// Handler does not exist for a given message route. Otherwise, a reference to a
+// Result is returned. The caller must not commit state if an error is returned.
+func (app *BaseApp) runMsgs(ctx sdk.Context, msgs []sdk.Msg, msgsV2 []protov2.Message, mode execMode) (*sdk.Result, error) {
+ events := sdk.EmptyEvents()
+
+var msgResponses []*codectypes.Any
+
+ // NOTE: GasWanted is determined by the AnteHandler and GasUsed by the GasMeter.
+ for i, msg := range msgs {
+ if mode != execModeFinalize && mode != execModeSimulate {
+ break
+}
+ handler := app.msgServiceRouter.Handler(msg)
+ if handler == nil {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "no message handler found for %T", msg)
+}
+
+ // ADR 031 request type routing
+ msgResult, err := handler(ctx, msg)
+ if err != nil {
+ return nil, errorsmod.Wrapf(err, "failed to execute message; message index: %d", i)
+}
+
+ // create message events
+ msgEvents, err := createEvents(app.cdc, msgResult.GetEvents(), msg, msgsV2[i])
+ if err != nil {
+ return nil, errorsmod.Wrapf(err, "failed to create message events; message index: %d", i)
+}
+
+ // append message events and data
+ //
+ // Note: Each message result's data must be length-prefixed in order to
+ // separate each result.
+ for j, event := range msgEvents {
+ // append message index to all events
+ msgEvents[j] = event.AppendAttributes(sdk.NewAttribute("msg_index", strconv.Itoa(i)))
+}
+
+events = events.AppendEvents(msgEvents)
+
+ // Each individual sdk.Result that went through the MsgServiceRouter
+ // (which should represent 99% of the Msgs now, since everyone should
+ // be using protobuf Msgs) has exactly one Msg response, set inside
+ // `WrapServiceResult`. We take that Msg response, and aggregate it
+ // into an array.
+ if len(msgResult.MsgResponses) > 0 {
+ msgResponse := msgResult.MsgResponses[0]
+ if msgResponse == nil {
+ return nil, sdkerrors.ErrLogic.Wrapf("got nil Msg response at index %d for msg %s", i, sdk.MsgTypeURL(msg))
+}
+
+msgResponses = append(msgResponses, msgResponse)
+}
+
+
+}
+
+data, err := makeABCIData(msgResponses)
+ if err != nil {
+ return nil, errorsmod.Wrap(err, "failed to marshal tx data")
+}
+
+return &sdk.Result{
+ Data: data,
+ Events: events.ToABCIEvents(),
+ MsgResponses: msgResponses,
+}, nil
+}
+
+// makeABCIData generates the Data field to be sent to ABCI Check/DeliverTx.
+func makeABCIData(msgResponses []*codectypes.Any) ([]byte, error) {
+ return proto.Marshal(&sdk.TxMsgData{
+ MsgResponses: msgResponses,
+})
+}
+
+func createEvents(cdc codec.Codec, events sdk.Events, msg sdk.Msg, msgV2 protov2.Message) (sdk.Events, error) {
+ eventMsgName := sdk.MsgTypeURL(msg)
+ msgEvent := sdk.NewEvent(sdk.EventTypeMessage, sdk.NewAttribute(sdk.AttributeKeyAction, eventMsgName))
+
+ // we set the signer attribute as the sender
+ signers, err := cdc.GetMsgV2Signers(msgV2)
+ if err != nil {
+ return nil, err
+}
+ if len(signers) > 0 && signers[0] != nil {
+ addrStr, err := cdc.InterfaceRegistry().SigningContext().AddressCodec().BytesToString(signers[0])
+ if err != nil {
+ return nil, err
+}
+
+msgEvent = msgEvent.AppendAttributes(sdk.NewAttribute(sdk.AttributeKeySender, addrStr))
+}
+
+ // verify that events have no module attribute set
+ if _, found := events.GetAttributes(sdk.AttributeKeyModule); !found {
+ if moduleName := sdk.GetModuleNameFromTypeURL(eventMsgName); moduleName != "" {
+ msgEvent = msgEvent.AppendAttributes(sdk.NewAttribute(sdk.AttributeKeyModule, moduleName))
+}
+
+}
+
+return sdk.Events{
+ msgEvent,
+}.AppendEvents(events), nil
+}
+
+// PrepareProposalVerifyTx performs transaction verification when a proposer is
+// creating a block proposal during PrepareProposal. Any state committed to the
+// PrepareProposal state internally will be discarded. (nil, err) will be
+// returned if the transaction cannot be encoded. (bz, nil) will be returned if
+// the transaction is valid, otherwise (nil, err) will be returned.
+func (app *BaseApp) PrepareProposalVerifyTx(tx sdk.Tx) ([]byte, error) {
+ bz, err := app.txEncoder(tx)
+ if err != nil {
+ return nil, err
+}
+
+ _, _, _, err = app.runTx(execModePrepareProposal, bz, tx)
+ if err != nil {
+ return nil, err
+}
+
+return bz, nil
+}
+
+// ProcessProposalVerifyTx performs transaction verification when receiving a
+// block proposal during ProcessProposal. Any state committed to the
+// ProcessProposal state internally will be discarded. (nil, err) will be
+// returned if the transaction cannot be decoded. (tx, nil) will be returned if
+// the transaction is valid, otherwise (nil, err) will be returned.
+func (app *BaseApp) ProcessProposalVerifyTx(txBz []byte) (sdk.Tx, error) {
+ tx, err := app.txDecoder(txBz)
+ if err != nil {
+ return nil, err
+}
+
+ _, _, _, err = app.runTx(execModeProcessProposal, txBz, tx)
+ if err != nil {
+ return nil, err
+}
+
+return tx, nil
+}
+
+func (app *BaseApp) TxDecode(txBytes []byte) (sdk.Tx, error) {
+ return app.txDecoder(txBytes)
+}
+
+func (app *BaseApp) TxEncode(tx sdk.Tx) ([]byte, error) {
+ return app.txEncoder(tx)
+}
+
+func (app *BaseApp) StreamingManager() storetypes.StreamingManager {
+ return app.streamingManager
+}
+
+// Close is called in start cmd to gracefully cleanup resources.
+func (app *BaseApp) Close() error {
+ var errs []error
+
+ // Close app.db (opened by cosmos-sdk/server/start.go call to openDB)
+ if app.db != nil {
+ app.logger.Info("Closing application.db")
+ if err := app.db.Close(); err != nil {
+ errs = append(errs, err)
+}
+
+}
+
+ // Close app.snapshotManager
+ // - opened when app chains use cosmos-sdk/server/util.go/DefaultBaseappOptions (boilerplate)
+ // - which calls cosmos-sdk/server/util.go/GetSnapshotStore
+ // - which is passed to baseapp/options.go/SetSnapshot
+ // - to set app.snapshotManager = snapshots.NewManager
+ if app.snapshotManager != nil {
+ app.logger.Info("Closing snapshots/metadata.db")
+ if err := app.snapshotManager.Close(); err != nil {
+ errs = append(errs, err)
+}
+
+}
+
+return errors.Join(errs...)
+}
+
+// GetBaseApp returns the pointer to itself.
+func (app *BaseApp) GetBaseApp() *BaseApp {
+ return app
+}
+```
+
+This allows `RunTx` not to commit the changes made to the state during the execution of `anteHandler` if it ends up failing. It also prevents the module implementing the `anteHandler` from writing to state, which is an important part of the [object-capabilities](/sdk/v0.53/learn/advanced/ocap) of the Cosmos SDK.
+
+Finally, the [`RunMsgs()`](#runmsgs) function is called to process the `sdk.Msg`s in the `Tx`. In preparation of this step, just like with the `anteHandler`, both the `checkState`/`finalizeBlockState`'s `context` and `context`'s `CacheMultiStore` are branched using the `cacheTxContext()` function.
+
+### AnteHandler
+
+The `AnteHandler` is a special handler that implements the `AnteHandler` interface and is used to authenticate the transaction before the transaction's internal messages are processed.
+
+```go expandable
+package types
+
+// AnteHandler authenticates transactions, before their internal messages are handled.
+// If newCtx.IsZero(), ctx is used instead.
+type AnteHandler func(ctx Context, tx Tx, simulate bool) (newCtx Context, err error)
+
+// PostHandler like AnteHandler but it executes after RunMsgs. Runs on success
+// or failure and enables use cases like gas refunding.
+type PostHandler func(ctx Context, tx Tx, simulate, success bool) (newCtx Context, err error)
+
+// AnteDecorator wraps the next AnteHandler to perform custom pre-processing.
+type AnteDecorator interface {
+ AnteHandle(ctx Context, tx Tx, simulate bool, next AnteHandler) (newCtx Context, err error)
+}
+
+// PostDecorator wraps the next PostHandler to perform custom post-processing.
+type PostDecorator interface {
+ PostHandle(ctx Context, tx Tx, simulate, success bool, next PostHandler) (newCtx Context, err error)
+}
+
+// ChainAnteDecorators ChainDecorator chains AnteDecorators together with each AnteDecorator
+// wrapping over the decorators further along chain and returns a single AnteHandler.
+//
+// NOTE: The first element is outermost decorator, while the last element is innermost
+// decorator. Decorator ordering is critical since some decorators will expect
+// certain checks and updates to be performed (e.g. the Context) before the decorator
+// is run. These expectations should be documented clearly in a CONTRACT docline
+// in the decorator's godoc.
+//
+// NOTE: Any application that uses GasMeter to limit transaction processing cost
+// MUST set GasMeter with the FIRST AnteDecorator. Failing to do so will cause
+// transactions to be processed with an infinite gasmeter and open a DOS attack vector.
+// Use `ante.SetUpContextDecorator` or a custom Decorator with similar functionality.
+// Returns nil when no AnteDecorator are supplied.
+func ChainAnteDecorators(chain ...AnteDecorator) AnteHandler {
+ if len(chain) == 0 {
+ return nil
+}
+ handlerChain := make([]AnteHandler, len(chain)+1)
+ // set the terminal AnteHandler decorator
+ handlerChain[len(chain)] = func(ctx Context, tx Tx, simulate bool) (Context, error) {
+ return ctx, nil
+}
+ for i := range chain {
+ ii := i
+ handlerChain[ii] = func(ctx Context, tx Tx, simulate bool) (Context, error) {
+ return chain[ii].AnteHandle(ctx, tx, simulate, handlerChain[ii+1])
+}
+
+}
+
+return handlerChain[0]
+}
+
+// ChainPostDecorators chains PostDecorators together with each PostDecorator
+// wrapping over the decorators further along chain and returns a single PostHandler.
+//
+// NOTE: The first element is outermost decorator, while the last element is innermost
+// decorator. Decorator ordering is critical since some decorators will expect
+// certain checks and updates to be performed (e.g. the Context) before the decorator
+// is run. These expectations should be documented clearly in a CONTRACT docline
+// in the decorator's godoc.
+func ChainPostDecorators(chain ...PostDecorator) PostHandler {
+ if len(chain) == 0 {
+ return nil
+}
+ handlerChain := make([]PostHandler, len(chain)+1)
+ // set the terminal PostHandler decorator
+ handlerChain[len(chain)] = func(ctx Context, tx Tx, simulate, success bool) (Context, error) {
+ return ctx, nil
+}
+ for i := range chain {
+ ii := i
+ handlerChain[ii] = func(ctx Context, tx Tx, simulate, success bool) (Context, error) {
+ return chain[ii].PostHandle(ctx, tx, simulate, success, handlerChain[ii+1])
+}
+
+}
+
+return handlerChain[0]
+}
+
+// Terminator AnteDecorator will get added to the chain to simplify decorator code
+// Don't need to check if next == nil further up the chain
+//
+// ______
+// <((((((\\\
+// / .}\
+// ;--..--._|}
+// (\ '--/\--' )
+// \\ | '-' :'|
+// \\ . -==- .-|
+// \\ \.__.' \--._
+// [\\ __.--| // _/'--.
+// \ \\ .'-._ ('-----'/ __/ \
+// \ \\ / __>| | '--. |
+// \ \\ | \ | / / /
+// \ '\ / \ | | _/ /
+// \ \ \ | | / /
+// snd \ \ \ /
+//
+// Deprecated: Terminator is retired (ref https://github.com/cosmos/cosmos-sdk/pull/16076).
+type Terminator struct{}
+
+// AnteHandle returns the provided Context and nil error
+func (t Terminator) AnteHandle(ctx Context, _ Tx, _ bool, _ AnteHandler) (Context, error) {
+ return ctx, nil
+}
+
+// PostHandle returns the provided Context and nil error
+func (t Terminator) PostHandle(ctx Context, _ Tx, _, _ bool, _ PostHandler) (Context, error) {
+ return ctx, nil
+}
+```
+
+The `AnteHandler` is theoretically optional, but still a very important component of public blockchain networks. It serves 3 primary purposes:
+
+* Be a primary line of defense against spam and second line of defense (the first one being the mempool) against transaction replay with fees deduction and [`sequence`](/sdk/v0.53/learn/advanced/transactions#transaction-generation) checking.
+* Perform preliminary *stateful* validity checks like ensuring signatures are valid or that the sender has enough funds to pay for fees.
+* Play a role in the incentivisation of stakeholders via the collection of transaction fees.
+
+`BaseApp` holds an `anteHandler` as parameter that is initialized in the [application's constructor](/sdk/v0.53/learn/beginner/app-anatomy#application-constructor). The most widely used `anteHandler` is the [`auth` module](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0/x/auth/ante/ante.go).
+
+Click [here](/sdk/v0.53/learn/beginner/gas-fees#antehandler) for more on the `anteHandler`.
+
+### RunMsgs
+
+`RunMsgs` is called from `RunTx` with `runTxModeCheck` as parameter to check the existence of a route for each message the transaction, and with `execModeFinalize` to actually process the `sdk.Msg`s.
+
+First, it retrieves the `sdk.Msg`'s fully-qualified type name, by checking the `type_url` of the Protobuf `Any` representing the `sdk.Msg`. Then, using the application's [`msgServiceRouter`](#msg-service-router), it checks for the existence of `Msg` service method related to that `type_url`. At this point, if `mode == runTxModeCheck`, `RunMsgs` returns. Otherwise, if `mode == execModeFinalize`, the [`Msg` service](/sdk/v0.53/build/building-modules/msg-services) RPC is executed, before `RunMsgs` returns.
+
+### PostHandler
+
+`PostHandler` is similar to `AnteHandler`, but it, as the name suggests, executes custom post tx processing logic after [`RunMsgs`](#runmsgs) is called. `PostHandler` receives the `Result` of the `RunMsgs` in order to enable this customizable behavior.
+
+Like `AnteHandler`s, `PostHandler`s are theoretically optional.
+
+Other use cases like unused gas refund can also be enabled by `PostHandler`s.
+
+```go expandable
+package posthandler
+
+import (
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+// HandlerOptions are the options required for constructing a default SDK PostHandler.
+type HandlerOptions struct{
+}
+
+// NewPostHandler returns an empty PostHandler chain.
+func NewPostHandler(_ HandlerOptions) (sdk.PostHandler, error) {
+ postDecorators := []sdk.PostDecorator{
+}
+
+return sdk.ChainPostDecorators(postDecorators...), nil
+}
+```
+
+Note, when `PostHandler`s fail, the state from `runMsgs` is also reverted, effectively making the transaction fail.
+
+## Other ABCI Messages
+
+### InitChain
+
+The [`InitChain` ABCI message](https://github.com/cometbft/cometbft/blob/v0.37.x/spec/abci/abci++_basic_concepts.md#method-overview) is sent from the underlying CometBFT engine when the chain is first started. It is mainly used to initialize parameters and state like:
+
+* [Consensus Parameters](https://github.com/cometbft/cometbft/blob/v0.37.x/spec/abci/abci++_app_requirements.md#consensus-parameters) via `setConsensusParams`.
+* [`checkState` and `finalizeBlockState`](#state-updates) via `setState`.
+* The [block gas meter](/sdk/v0.53/learn/beginner/gas-fees#block-gas-meter), with infinite gas to process genesis transactions.
+
+Finally, the `InitChain(req abci.RequestInitChain)` method of `BaseApp` calls the [`initChainer()`](/sdk/v0.53/learn/beginner/app-anatomy#initchainer) of the application in order to initialize the main state of the application from the `genesis file` and, if defined, call the [`InitGenesis`](/sdk/v0.53/build/building-modules/genesis#initgenesis) function of each of the application's modules.
+
+### FinalizeBlock
+
+The [`FinalizeBlock` ABCI message](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/abci/abci++_basic_concepts.md#method-overview) is sent from the underlying CometBFT engine when a block proposal created by the correct proposer is received. The previous `BeginBlock, DeliverTx and EndBlock` calls are private methods on the BaseApp struct.
+
+```go expandable
+package baseapp
+
+import (
+
+ "context"
+ "fmt"
+ "sort"
+ "strings"
+ "time"
+ "github.com/cockroachdb/errors"
+ abci "github.com/cometbft/cometbft/abci/types"
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ "github.com/cosmos/gogoproto/proto"
+ "google.golang.org/grpc/codes"
+ grpcstatus "google.golang.org/grpc/status"
+
+ coreheader "cosmossdk.io/core/header"
+ errorsmod "cosmossdk.io/errors"
+ "cosmossdk.io/store/rootmulti"
+ snapshottypes "cosmossdk.io/store/snapshots/types"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// Supported ABCI Query prefixes and paths
+const (
+ QueryPathApp = "app"
+ QueryPathCustom = "custom"
+ QueryPathP2P = "p2p"
+ QueryPathStore = "store"
+
+ QueryPathBroadcastTx = "/cosmos.tx.v1beta1.Service/BroadcastTx"
+)
+
+func (app *BaseApp) InitChain(req *abci.RequestInitChain) (*abci.ResponseInitChain, error) {
+ if req.ChainId != app.chainID {
+ return nil, fmt.Errorf("invalid chain-id on InitChain; expected: %s, got: %s", app.chainID, req.ChainId)
+}
+
+ // On a new chain, we consider the init chain block height as 0, even though
+ // req.InitialHeight is 1 by default.
+ initHeader := cmtproto.Header{
+ ChainID: req.ChainId,
+ Time: req.Time
+}
+
+app.logger.Info("InitChain", "initialHeight", req.InitialHeight, "chainID", req.ChainId)
+
+ // Set the initial height, which will be used to determine if we are proposing
+ // or processing the first block or not.
+ app.initialHeight = req.InitialHeight
+ if app.initialHeight == 0 { // If initial height is 0, set it to 1
+ app.initialHeight = 1
+}
+
+ // if req.InitialHeight is > 1, then we set the initial version on all stores
+ if req.InitialHeight > 1 {
+ initHeader.Height = req.InitialHeight
+ if err := app.cms.SetInitialVersion(req.InitialHeight); err != nil {
+ return nil, err
+}
+
+}
+
+ // initialize states with a correct header
+ app.setState(execModeFinalize, initHeader)
+
+app.setState(execModeCheck, initHeader)
+
+ // Store the consensus params in the BaseApp's param store. Note, this must be
+ // done after the finalizeBlockState and context have been set as it's persisted
+ // to state.
+ if req.ConsensusParams != nil {
+ err := app.StoreConsensusParams(app.finalizeBlockState.Context(), *req.ConsensusParams)
+ if err != nil {
+ return nil, err
+}
+
+}
+
+defer func() {
+ // InitChain represents the state of the application BEFORE the first block,
+ // i.e. the genesis block. This means that when processing the app's InitChain
+ // handler, the block height is zero by default. However, after Commit is called
+ // the height needs to reflect the true block height.
+ initHeader.Height = req.InitialHeight
+ app.checkState.SetContext(app.checkState.Context().WithBlockHeader(initHeader).
+ WithHeaderInfo(coreheader.Info{
+ ChainID: req.ChainId,
+ Height: req.InitialHeight,
+ Time: req.Time,
+}))
+
+app.finalizeBlockState.SetContext(app.finalizeBlockState.Context().WithBlockHeader(initHeader).
+ WithHeaderInfo(coreheader.Info{
+ ChainID: req.ChainId,
+ Height: req.InitialHeight,
+ Time: req.Time,
+}))
+}()
+ if app.initChainer == nil {
+ return &abci.ResponseInitChain{
+}, nil
+}
+
+ // add block gas meter for any genesis transactions (allow infinite gas)
+
+app.finalizeBlockState.SetContext(app.finalizeBlockState.Context().WithBlockGasMeter(storetypes.NewInfiniteGasMeter()))
+
+res, err := app.initChainer(app.finalizeBlockState.Context(), req)
+ if err != nil {
+ return nil, err
+}
+ if len(req.Validators) > 0 {
+ if len(req.Validators) != len(res.Validators) {
+ return nil, fmt.Errorf(
+ "len(RequestInitChain.Validators) != len(GenesisValidators) (%d != %d)",
+ len(req.Validators), len(res.Validators),
+ )
+}
+
+sort.Sort(abci.ValidatorUpdates(req.Validators))
+
+sort.Sort(abci.ValidatorUpdates(res.Validators))
+ for i := range res.Validators {
+ if !proto.Equal(&res.Validators[i], &req.Validators[i]) {
+ return nil, fmt.Errorf("genesisValidators[%d] != req.Validators[%d] ", i, i)
+}
+
+}
+
+}
+
+ // NOTE: We don't commit, but FinalizeBlock for block InitialHeight starts from
+ // this FinalizeBlockState.
+ return &abci.ResponseInitChain{
+ ConsensusParams: res.ConsensusParams,
+ Validators: res.Validators,
+ AppHash: app.LastCommitID().Hash,
+}, nil
+}
+
+func (app *BaseApp)
+
+Info(_ *abci.RequestInfo) (*abci.ResponseInfo, error) {
+ lastCommitID := app.cms.LastCommitID()
+
+return &abci.ResponseInfo{
+ Data: app.name,
+ Version: app.version,
+ AppVersion: app.appVersion,
+ LastBlockHeight: lastCommitID.Version,
+ LastBlockAppHash: lastCommitID.Hash,
+}, nil
+}
+
+// Query implements the ABCI interface. It delegates to CommitMultiStore if it
+// implements Queryable.
+func (app *BaseApp)
+
+Query(_ context.Context, req *abci.RequestQuery) (resp *abci.ResponseQuery, err error) {
+ // add panic recovery for all queries
+ //
+ // Ref: https://github.com/cosmos/cosmos-sdk/pull/8039
+ defer func() {
+ if r := recover(); r != nil {
+ resp = sdkerrors.QueryResult(errorsmod.Wrapf(sdkerrors.ErrPanic, "%v", r), app.trace)
+}
+
+}()
+
+ // when a client did not provide a query height, manually inject the latest
+ if req.Height == 0 {
+ req.Height = app.LastBlockHeight()
+}
+
+telemetry.IncrCounter(1, "query", "count")
+
+telemetry.IncrCounter(1, "query", req.Path)
+
+defer telemetry.MeasureSince(telemetry.Now(), req.Path)
+ if req.Path == QueryPathBroadcastTx {
+ return sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "can't route a broadcast tx message"), app.trace), nil
+}
+
+ // handle gRPC routes first rather than calling splitPath because '/' characters
+ // are used as part of gRPC paths
+ if grpcHandler := app.grpcQueryRouter.Route(req.Path); grpcHandler != nil {
+ return app.handleQueryGRPC(grpcHandler, req), nil
+}
+ path := SplitABCIQueryPath(req.Path)
+ if len(path) == 0 {
+ return sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "no query path provided"), app.trace), nil
+}
+ switch path[0] {
+ case QueryPathApp:
+ // "/app" prefix for special application queries
+ resp = handleQueryApp(app, path, req)
+ case QueryPathStore:
+ resp = handleQueryStore(app, path, *req)
+ case QueryPathP2P:
+ resp = handleQueryP2P(app, path)
+
+default:
+ resp = sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "unknown query path"), app.trace)
+}
+
+return resp, nil
+}
+
+// ListSnapshots implements the ABCI interface. It delegates to app.snapshotManager if set.
+func (app *BaseApp)
+
+ListSnapshots(req *abci.RequestListSnapshots) (*abci.ResponseListSnapshots, error) {
+ resp := &abci.ResponseListSnapshots{
+ Snapshots: []*abci.Snapshot{
+}}
+ if app.snapshotManager == nil {
+ return resp, nil
+}
+
+snapshots, err := app.snapshotManager.List()
+ if err != nil {
+ app.logger.Error("failed to list snapshots", "err", err)
+
+return nil, err
+}
+ for _, snapshot := range snapshots {
+ abciSnapshot, err := snapshot.ToABCI()
+ if err != nil {
+ app.logger.Error("failed to convert ABCI snapshots", "err", err)
+
+return nil, err
+}
+
+resp.Snapshots = append(resp.Snapshots, &abciSnapshot)
+}
+
+return resp, nil
+}
+
+// LoadSnapshotChunk implements the ABCI interface. It delegates to app.snapshotManager if set.
+func (app *BaseApp)
+
+LoadSnapshotChunk(req *abci.RequestLoadSnapshotChunk) (*abci.ResponseLoadSnapshotChunk, error) {
+ if app.snapshotManager == nil {
+ return &abci.ResponseLoadSnapshotChunk{
+}, nil
+}
+
+chunk, err := app.snapshotManager.LoadChunk(req.Height, req.Format, req.Chunk)
+ if err != nil {
+ app.logger.Error(
+ "failed to load snapshot chunk",
+ "height", req.Height,
+ "format", req.Format,
+ "chunk", req.Chunk,
+ "err", err,
+ )
+
+return nil, err
+}
+
+return &abci.ResponseLoadSnapshotChunk{
+ Chunk: chunk
+}, nil
+}
+
+// OfferSnapshot implements the ABCI interface. It delegates to app.snapshotManager if set.
+func (app *BaseApp)
+
+OfferSnapshot(req *abci.RequestOfferSnapshot) (*abci.ResponseOfferSnapshot, error) {
+ if app.snapshotManager == nil {
+ app.logger.Error("snapshot manager not configured")
+
+return &abci.ResponseOfferSnapshot{
+ Result: abci.ResponseOfferSnapshot_ABORT
+}, nil
+}
+ if req.Snapshot == nil {
+ app.logger.Error("received nil snapshot")
+
+return &abci.ResponseOfferSnapshot{
+ Result: abci.ResponseOfferSnapshot_REJECT
+}, nil
+}
+
+snapshot, err := snapshottypes.SnapshotFromABCI(req.Snapshot)
+ if err != nil {
+ app.logger.Error("failed to decode snapshot metadata", "err", err)
+
+return &abci.ResponseOfferSnapshot{
+ Result: abci.ResponseOfferSnapshot_REJECT
+}, nil
+}
+
+err = app.snapshotManager.Restore(snapshot)
+ switch {
+ case err == nil:
+ return &abci.ResponseOfferSnapshot{
+ Result: abci.ResponseOfferSnapshot_ACCEPT
+}, nil
+ case errors.Is(err, snapshottypes.ErrUnknownFormat):
+ return &abci.ResponseOfferSnapshot{
+ Result: abci.ResponseOfferSnapshot_REJECT_FORMAT
+}, nil
+ case errors.Is(err, snapshottypes.ErrInvalidMetadata):
+ app.logger.Error(
+ "rejecting invalid snapshot",
+ "height", req.Snapshot.Height,
+ "format", req.Snapshot.Format,
+ "err", err,
+ )
+
+return &abci.ResponseOfferSnapshot{
+ Result: abci.ResponseOfferSnapshot_REJECT
+}, nil
+
+ default:
+ // CometBFT errors are defined here: https://github.com/cometbft/cometbft/blob/main/statesync/syncer.go
+ // It may happen that in case of a CometBFT error, such as a timeout (which occurs after two minutes),
+ // the process is aborted. This is done intentionally because deleting the database programmatically
+ // can lead to more complicated situations.
+ app.logger.Error(
+ "failed to restore snapshot",
+ "height", req.Snapshot.Height,
+ "format", req.Snapshot.Format,
+ "err", err,
+ )
+
+ // We currently don't support resetting the IAVL stores and retrying a
+ // different snapshot, so we ask CometBFT to abort all snapshot restoration.
+ return &abci.ResponseOfferSnapshot{
+ Result: abci.ResponseOfferSnapshot_ABORT
+}, nil
+}
+}
+
+// ApplySnapshotChunk implements the ABCI interface. It delegates to app.snapshotManager if set.
+func (app *BaseApp)
+
+ApplySnapshotChunk(req *abci.RequestApplySnapshotChunk) (*abci.ResponseApplySnapshotChunk, error) {
+ if app.snapshotManager == nil {
+ app.logger.Error("snapshot manager not configured")
+
+return &abci.ResponseApplySnapshotChunk{
+ Result: abci.ResponseApplySnapshotChunk_ABORT
+}, nil
+}
+
+ _, err := app.snapshotManager.RestoreChunk(req.Chunk)
+ switch {
+ case err == nil:
+ return &abci.ResponseApplySnapshotChunk{
+ Result: abci.ResponseApplySnapshotChunk_ACCEPT
+}, nil
+ case errors.Is(err, snapshottypes.ErrChunkHashMismatch):
+ app.logger.Error(
+ "chunk checksum mismatch; rejecting sender and requesting refetch",
+ "chunk", req.Index,
+ "sender", req.Sender,
+ "err", err,
+ )
+
+return &abci.ResponseApplySnapshotChunk{
+ Result: abci.ResponseApplySnapshotChunk_RETRY,
+ RefetchChunks: []uint32{
+ req.Index
+},
+ RejectSenders: []string{
+ req.Sender
+},
+}, nil
+
+ default:
+ app.logger.Error("failed to restore snapshot", "err", err)
+
+return &abci.ResponseApplySnapshotChunk{
+ Result: abci.ResponseApplySnapshotChunk_ABORT
+}, nil
+}
+}
+
+// CheckTx implements the ABCI interface and executes a tx in CheckTx mode. In
+// CheckTx mode, messages are not executed. This means messages are only validated
+// and only the AnteHandler is executed. State is persisted to the BaseApp's
+// internal CheckTx state if the AnteHandler passes. Otherwise, the ResponseCheckTx
+// will contain relevant error information. Regardless of tx execution outcome,
+// the ResponseCheckTx will contain relevant gas execution context.
+func (app *BaseApp)
+
+CheckTx(req *abci.RequestCheckTx) (*abci.ResponseCheckTx, error) {
+ var mode execMode
+ switch req.Type {
+ case abci.CheckTxType_New:
+ mode = execModeCheck
+ case abci.CheckTxType_Recheck:
+ mode = execModeReCheck
+
+ default:
+ return nil, fmt.Errorf("unknown RequestCheckTx type: %s", req.Type)
+}
+ if app.checkTxHandler == nil {
+ gInfo, result, anteEvents, err := app.runTx(mode, req.Tx, nil)
+ if err != nil {
+ return sdkerrors.ResponseCheckTxWithEvents(err, gInfo.GasWanted, gInfo.GasUsed, anteEvents, app.trace), nil
+}
+
+return &abci.ResponseCheckTx{
+ GasWanted: int64(gInfo.GasWanted), // TODO: Should type accept unsigned ints?
+ GasUsed: int64(gInfo.GasUsed), // TODO: Should type accept unsigned ints?
+ Log: result.Log,
+ Data: result.Data,
+ Events: sdk.MarkEventsToIndex(result.Events, app.indexEvents),
+}, nil
+}
+
+ // Create wrapper to avoid users overriding the execution mode
+ runTx := func(txBytes []byte, tx sdk.Tx) (gInfo sdk.GasInfo, result *sdk.Result, anteEvents []abci.Event, err error) {
+ return app.runTx(mode, txBytes, tx)
+}
+
+return app.checkTxHandler(runTx, req)
+}
+
+// PrepareProposal implements the PrepareProposal ABCI method and returns a
+// ResponsePrepareProposal object to the client. The PrepareProposal method is
+// responsible for allowing the block proposer to perform application-dependent
+// work in a block before proposing it.
+//
+// Transactions can be modified, removed, or added by the application. Since the
+// application maintains its own local mempool, it will ignore the transactions
+// provided to it in RequestPrepareProposal. Instead, it will determine which
+// transactions to return based on the mempool's semantics and the MaxTxBytes
+// provided by the client's request.
+//
+// Ref: https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-060-abci-1.0.md
+// Ref: https://github.com/cometbft/cometbft/blob/main/spec/abci/abci%2B%2B_basic_concepts.md
+func (app *BaseApp)
+
+PrepareProposal(req *abci.RequestPrepareProposal) (resp *abci.ResponsePrepareProposal, err error) {
+ if app.prepareProposal == nil {
+ return nil, errors.New("PrepareProposal handler not set")
+}
+
+ // Always reset state given that PrepareProposal can timeout and be called
+ // again in a subsequent round.
+ header := cmtproto.Header{
+ ChainID: app.chainID,
+ Height: req.Height,
+ Time: req.Time,
+ ProposerAddress: req.ProposerAddress,
+ NextValidatorsHash: req.NextValidatorsHash,
+ AppHash: app.LastCommitID().Hash,
+}
+
+app.setState(execModePrepareProposal, header)
+
+ // CometBFT must never call PrepareProposal with a height of 0.
+ //
+ // Ref: https://github.com/cometbft/cometbft/blob/059798a4f5b0c9f52aa8655fa619054a0154088c/spec/core/state.md?plain=1#L37-L38
+ if req.Height < 1 {
+ return nil, errors.New("PrepareProposal called with invalid height")
+}
+
+app.prepareProposalState.SetContext(app.getContextForProposal(app.prepareProposalState.Context(), req.Height).
+ WithVoteInfos(toVoteInfo(req.LocalLastCommit.Votes)). // this is a set of votes that are not finalized yet, wait for commit
+ WithBlockHeight(req.Height).
+ WithBlockTime(req.Time).
+ WithProposer(req.ProposerAddress).
+ WithExecMode(sdk.ExecModePrepareProposal).
+ WithCometInfo(prepareProposalInfo{
+ req
+}).
+ WithHeaderInfo(coreheader.Info{
+ ChainID: app.chainID,
+ Height: req.Height,
+ Time: req.Time,
+}))
+
+app.prepareProposalState.SetContext(app.prepareProposalState.Context().
+ WithConsensusParams(app.GetConsensusParams(app.prepareProposalState.Context())).
+ WithBlockGasMeter(app.getBlockGasMeter(app.prepareProposalState.Context())))
+
+defer func() {
+ if err := recover(); err != nil {
+ app.logger.Error(
+ "panic recovered in PrepareProposal",
+ "height", req.Height,
+ "time", req.Time,
+ "panic", err,
+ )
+
+resp = &abci.ResponsePrepareProposal{
+ Txs: req.Txs
+}
+
+}
+
+}()
+
+resp, err = app.prepareProposal(app.prepareProposalState.Context(), req)
+ if err != nil {
+ app.logger.Error("failed to prepare proposal", "height", req.Height, "time", req.Time, "err", err)
+
+return &abci.ResponsePrepareProposal{
+ Txs: req.Txs
+}, nil
+}
+
+return resp, nil
+}
+
+// ProcessProposal implements the ProcessProposal ABCI method and returns a
+// ResponseProcessProposal object to the client. The ProcessProposal method is
+// responsible for allowing execution of application-dependent work in a proposed
+// block. Note, the application defines the exact implementation details of
+// ProcessProposal. In general, the application must at the very least ensure
+// that all transactions are valid. If all transactions are valid, then we inform
+// CometBFT that the Status is ACCEPT. However, the application is also able
+// to implement optimizations such as executing the entire proposed block
+// immediately.
+//
+// If a panic is detected during execution of an application's ProcessProposal
+// handler, it will be recovered and we will reject the proposal.
+//
+// Ref: https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-060-abci-1.0.md
+// Ref: https://github.com/cometbft/cometbft/blob/main/spec/abci/abci%2B%2B_basic_concepts.md
+func (app *BaseApp)
+
+ProcessProposal(req *abci.RequestProcessProposal) (resp *abci.ResponseProcessProposal, err error) {
+ if app.processProposal == nil {
+ return nil, errors.New("ProcessProposal handler not set")
+}
+
+ // CometBFT must never call ProcessProposal with a height of 0.
+ // Ref: https://github.com/cometbft/cometbft/blob/059798a4f5b0c9f52aa8655fa619054a0154088c/spec/core/state.md?plain=1#L37-L38
+ if req.Height < 1 {
+ return nil, errors.New("ProcessProposal called with invalid height")
+}
+
+ // Always reset state given that ProcessProposal can timeout and be called
+ // again in a subsequent round.
+ header := cmtproto.Header{
+ ChainID: app.chainID,
+ Height: req.Height,
+ Time: req.Time,
+ ProposerAddress: req.ProposerAddress,
+ NextValidatorsHash: req.NextValidatorsHash,
+ AppHash: app.LastCommitID().Hash,
+}
+
+app.setState(execModeProcessProposal, header)
+
+ // Since the application can get access to FinalizeBlock state and write to it,
+ // we must be sure to reset it in case ProcessProposal timeouts and is called
+ // again in a subsequent round. However, we only want to do this after we've
+ // processed the first block, as we want to avoid overwriting the finalizeState
+ // after state changes during InitChain.
+ if req.Height > app.initialHeight {
+ // abort any running OE
+ app.optimisticExec.Abort()
+
+app.setState(execModeFinalize, header)
+}
+
+app.processProposalState.SetContext(app.getContextForProposal(app.processProposalState.Context(), req.Height).
+ WithVoteInfos(req.ProposedLastCommit.Votes). // this is a set of votes that are not finalized yet, wait for commit
+ WithBlockHeight(req.Height).
+ WithBlockTime(req.Time).
+ WithHeaderHash(req.Hash).
+ WithProposer(req.ProposerAddress).
+ WithCometInfo(cometInfo{
+ ProposerAddress: req.ProposerAddress,
+ ValidatorsHash: req.NextValidatorsHash,
+ Misbehavior: req.Misbehavior,
+ LastCommit: req.ProposedLastCommit
+}).
+ WithExecMode(sdk.ExecModeProcessProposal).
+ WithHeaderInfo(coreheader.Info{
+ ChainID: app.chainID,
+ Height: req.Height,
+ Time: req.Time,
+}))
+
+app.processProposalState.SetContext(app.processProposalState.Context().
+ WithConsensusParams(app.GetConsensusParams(app.processProposalState.Context())).
+ WithBlockGasMeter(app.getBlockGasMeter(app.processProposalState.Context())))
+
+defer func() {
+ if err := recover(); err != nil {
+ app.logger.Error(
+ "panic recovered in ProcessProposal",
+ "height", req.Height,
+ "time", req.Time,
+ "hash", fmt.Sprintf("%X", req.Hash),
+ "panic", err,
+ )
+
+resp = &abci.ResponseProcessProposal{
+ Status: abci.ResponseProcessProposal_REJECT
+}
+
+}
+
+}()
+
+resp, err = app.processProposal(app.processProposalState.Context(), req)
+ if err != nil {
+ app.logger.Error("failed to process proposal", "height", req.Height, "time", req.Time, "hash", fmt.Sprintf("%X", req.Hash), "err", err)
+
+return &abci.ResponseProcessProposal{
+ Status: abci.ResponseProcessProposal_REJECT
+}, nil
+}
+
+ // Only execute optimistic execution if the proposal is accepted, OE is
+ // enabled and the block height is greater than the initial height. During
+ // the first block we'll be carrying state from InitChain, so it would be
+ // impossible for us to easily revert.
+ // After the first block has been processed, the next blocks will get executed
+ // optimistically, so that when the ABCI client calls `FinalizeBlock` the app
+ // can have a response ready.
+ if resp.Status == abci.ResponseProcessProposal_ACCEPT &&
+ app.optimisticExec.Enabled() &&
+ req.Height > app.initialHeight {
+ app.optimisticExec.Execute(req)
+}
+
+return resp, nil
+}
+
+// ExtendVote implements the ExtendVote ABCI method and returns a ResponseExtendVote.
+// It calls the application's ExtendVote handler which is responsible for performing
+// application-specific business logic when sending a pre-commit for the NEXT
+// block height. The extensions response may be non-deterministic but must always
+// be returned, even if empty.
+//
+// Agreed upon vote extensions are made available to the proposer of the next
+// height and are committed in the subsequent height, i.e. H+2. An error is
+// returned if vote extensions are not enabled or if extendVote fails or panics.
+func (app *BaseApp)
+
+ExtendVote(_ context.Context, req *abci.RequestExtendVote) (resp *abci.ResponseExtendVote, err error) {
+ // Always reset state given that ExtendVote and VerifyVoteExtension can timeout
+ // and be called again in a subsequent round.
+ var ctx sdk.Context
+
+ // If we're extending the vote for the initial height, we need to use the
+ // finalizeBlockState context, otherwise we don't get the uncommitted data
+ // from InitChain.
+ if req.Height == app.initialHeight {
+ ctx, _ = app.finalizeBlockState.Context().CacheContext()
+}
+
+else {
+ emptyHeader := cmtproto.Header{
+ ChainID: app.chainID,
+ Height: req.Height
+}
+ ms := app.cms.CacheMultiStore()
+
+ctx = sdk.NewContext(ms, emptyHeader, false, app.logger).WithStreamingManager(app.streamingManager)
+}
+ if app.extendVote == nil {
+ return nil, errors.New("application ExtendVote handler not set")
+}
+
+ // If vote extensions are not enabled, as a safety precaution, we return an
+ // error.
+ cp := app.GetConsensusParams(ctx)
+
+ // Note: In this case, we do want to extend vote if the height is equal or
+ // greater than VoteExtensionsEnableHeight. This defers from the check done
+ // in ValidateVoteExtensions and PrepareProposal in which we'll check for
+ // vote extensions on VoteExtensionsEnableHeight+1.
+ extsEnabled := cp.Abci != nil && req.Height >= cp.Abci.VoteExtensionsEnableHeight && cp.Abci.VoteExtensionsEnableHeight != 0
+ if !extsEnabled {
+ return nil, fmt.Errorf("vote extensions are not enabled; unexpected call to ExtendVote at height %d", req.Height)
+}
+
+ctx = ctx.
+ WithConsensusParams(cp).
+ WithBlockGasMeter(storetypes.NewInfiniteGasMeter()).
+ WithBlockHeight(req.Height).
+ WithHeaderHash(req.Hash).
+ WithExecMode(sdk.ExecModeVoteExtension).
+ WithHeaderInfo(coreheader.Info{
+ ChainID: app.chainID,
+ Height: req.Height,
+ Hash: req.Hash,
+})
+
+ // add a deferred recover handler in case extendVote panics
+ defer func() {
+ if r := recover(); r != nil {
+ app.logger.Error(
+ "panic recovered in ExtendVote",
+ "height", req.Height,
+ "hash", fmt.Sprintf("%X", req.Hash),
+ "panic", err,
+ )
+
+err = fmt.Errorf("recovered application panic in ExtendVote: %v", r)
+}
+
+}()
+
+resp, err = app.extendVote(ctx, req)
+ if err != nil {
+ app.logger.Error("failed to extend vote", "height", req.Height, "hash", fmt.Sprintf("%X", req.Hash), "err", err)
+
+return &abci.ResponseExtendVote{
+ VoteExtension: []byte{
+}}, nil
+}
+
+return resp, err
+}
+
+// VerifyVoteExtension implements the VerifyVoteExtension ABCI method and returns
+// a ResponseVerifyVoteExtension. It calls the applications' VerifyVoteExtension
+// handler which is responsible for performing application-specific business
+// logic in verifying a vote extension from another validator during the pre-commit
+// phase. The response MUST be deterministic. An error is returned if vote
+// extensions are not enabled or if verifyVoteExt fails or panics.
+func (app *BaseApp)
+
+VerifyVoteExtension(req *abci.RequestVerifyVoteExtension) (resp *abci.ResponseVerifyVoteExtension, err error) {
+ if app.verifyVoteExt == nil {
+ return nil, errors.New("application VerifyVoteExtension handler not set")
+}
+
+var ctx sdk.Context
+
+ // If we're verifying the vote for the initial height, we need to use the
+ // finalizeBlockState context, otherwise we don't get the uncommitted data
+ // from InitChain.
+ if req.Height == app.initialHeight {
+ ctx, _ = app.finalizeBlockState.Context().CacheContext()
+}
+
+else {
+ emptyHeader := cmtproto.Header{
+ ChainID: app.chainID,
+ Height: req.Height
+}
+ ms := app.cms.CacheMultiStore()
+
+ctx = sdk.NewContext(ms, emptyHeader, false, app.logger).WithStreamingManager(app.streamingManager)
+}
+
+ // If vote extensions are not enabled, as a safety precaution, we return an
+ // error.
+ cp := app.GetConsensusParams(ctx)
+
+ // Note: we verify votes extensions on VoteExtensionsEnableHeight+1. Check
+ // comment in ExtendVote and ValidateVoteExtensions for more details.
+ extsEnabled := cp.Abci != nil && req.Height >= cp.Abci.VoteExtensionsEnableHeight && cp.Abci.VoteExtensionsEnableHeight != 0
+ if !extsEnabled {
+ return nil, fmt.Errorf("vote extensions are not enabled; unexpected call to VerifyVoteExtension at height %d", req.Height)
+}
+
+ // add a deferred recover handler in case verifyVoteExt panics
+ defer func() {
+ if r := recover(); r != nil {
+ app.logger.Error(
+ "panic recovered in VerifyVoteExtension",
+ "height", req.Height,
+ "hash", fmt.Sprintf("%X", req.Hash),
+ "validator", fmt.Sprintf("%X", req.ValidatorAddress),
+ "panic", r,
+ )
+
+err = fmt.Errorf("recovered application panic in VerifyVoteExtension: %v", r)
+}
+
+}()
+
+ctx = ctx.
+ WithConsensusParams(cp).
+ WithBlockGasMeter(storetypes.NewInfiniteGasMeter()).
+ WithBlockHeight(req.Height).
+ WithHeaderHash(req.Hash).
+ WithExecMode(sdk.ExecModeVerifyVoteExtension).
+ WithHeaderInfo(coreheader.Info{
+ ChainID: app.chainID,
+ Height: req.Height,
+ Hash: req.Hash,
+})
+
+resp, err = app.verifyVoteExt(ctx, req)
+ if err != nil {
+ app.logger.Error("failed to verify vote extension", "height", req.Height, "err", err)
+
+return &abci.ResponseVerifyVoteExtension{
+ Status: abci.ResponseVerifyVoteExtension_REJECT
+}, nil
+}
+
+return resp, err
+}
+
+// internalFinalizeBlock executes the block, called by the Optimistic
+// Execution flow or by the FinalizeBlock ABCI method. The context received is
+// only used to handle early cancellation, for anything related to state app.finalizeBlockState.Context()
+// must be used.
+func (app *BaseApp)
+
+internalFinalizeBlock(ctx context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) {
+ var events []abci.Event
+ if err := app.checkHalt(req.Height, req.Time); err != nil {
+ return nil, err
+}
+ if err := app.validateFinalizeBlockHeight(req); err != nil {
+ return nil, err
+}
+ if app.cms.TracingEnabled() {
+ app.cms.SetTracingContext(storetypes.TraceContext(
+ map[string]any{"blockHeight": req.Height
+},
+ ))
+}
+ header := cmtproto.Header{
+ ChainID: app.chainID,
+ Height: req.Height,
+ Time: req.Time,
+ ProposerAddress: req.ProposerAddress,
+ NextValidatorsHash: req.NextValidatorsHash,
+ AppHash: app.LastCommitID().Hash,
+}
+
+ // finalizeBlockState should be set on InitChain or ProcessProposal. If it is
+ // nil, it means we are replaying this block and we need to set the state here
+ // given that during block replay ProcessProposal is not executed by CometBFT.
+ if app.finalizeBlockState == nil {
+ app.setState(execModeFinalize, header)
+}
+
+ // Context is now updated with Header information.
+ app.finalizeBlockState.SetContext(app.finalizeBlockState.Context().
+ WithBlockHeader(header).
+ WithHeaderHash(req.Hash).
+ WithHeaderInfo(coreheader.Info{
+ ChainID: app.chainID,
+ Height: req.Height,
+ Time: req.Time,
+ Hash: req.Hash,
+ AppHash: app.LastCommitID().Hash,
+}).
+ WithConsensusParams(app.GetConsensusParams(app.finalizeBlockState.Context())).
+ WithVoteInfos(req.DecidedLastCommit.Votes).
+ WithExecMode(sdk.ExecModeFinalize).
+ WithCometInfo(cometInfo{
+ Misbehavior: req.Misbehavior,
+ ValidatorsHash: req.NextValidatorsHash,
+ ProposerAddress: req.ProposerAddress,
+ LastCommit: req.DecidedLastCommit,
+}))
+
+ // GasMeter must be set after we get a context with updated consensus params.
+ gasMeter := app.getBlockGasMeter(app.finalizeBlockState.Context())
+
+app.finalizeBlockState.SetContext(app.finalizeBlockState.Context().WithBlockGasMeter(gasMeter))
+ if app.checkState != nil {
+ app.checkState.SetContext(app.checkState.Context().
+ WithBlockGasMeter(gasMeter).
+ WithHeaderHash(req.Hash))
+}
+
+preblockEvents, err := app.preBlock(req)
+ if err != nil {
+ return nil, err
+}
+
+events = append(events, preblockEvents...)
+
+beginBlock, err := app.beginBlock(req)
+ if err != nil {
+ return nil, err
+}
+
+ // First check for an abort signal after beginBlock, as it's the first place
+ // we spend any significant amount of time.
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+
+default:
+ // continue
+}
+
+events = append(events, beginBlock.Events...)
+
+ // Reset the gas meter so that the AnteHandlers aren't required to
+ gasMeter = app.getBlockGasMeter(app.finalizeBlockState.Context())
+
+app.finalizeBlockState.SetContext(app.finalizeBlockState.Context().WithBlockGasMeter(gasMeter))
+
+ // Iterate over all raw transactions in the proposal and attempt to execute
+ // them, gathering the execution results.
+ //
+ // NOTE: Not all raw transactions may adhere to the sdk.Tx interface, e.g.
+ // vote extensions, so skip those.
+ txResults := make([]*abci.ExecTxResult, 0, len(req.Txs))
+ for _, rawTx := range req.Txs {
+ var response *abci.ExecTxResult
+ if _, err := app.txDecoder(rawTx); err == nil {
+ response = app.deliverTx(rawTx)
+}
+
+else {
+ // In the case where a transaction included in a block proposal is malformed,
+ // we still want to return a default response to comet. This is because comet
+ // expects a response for each transaction included in a block proposal.
+ response = sdkerrors.ResponseExecTxResultWithEvents(
+ sdkerrors.ErrTxDecode,
+ 0,
+ 0,
+ nil,
+ false,
+ )
+}
+
+ // check after every tx if we should abort
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+
+default:
+ // continue
+}
+
+txResults = append(txResults, response)
+}
+ if app.finalizeBlockState.ms.TracingEnabled() {
+ app.finalizeBlockState.ms = app.finalizeBlockState.ms.SetTracingContext(nil).(storetypes.CacheMultiStore)
+}
+
+endBlock, err := app.endBlock(app.finalizeBlockState.Context())
+ if err != nil {
+ return nil, err
+}
+
+ // check after endBlock if we should abort, to avoid propagating the result
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+
+default:
+ // continue
+}
+
+events = append(events, endBlock.Events...)
+ cp := app.GetConsensusParams(app.finalizeBlockState.Context())
+
+return &abci.ResponseFinalizeBlock{
+ Events: events,
+ TxResults: txResults,
+ ValidatorUpdates: endBlock.ValidatorUpdates,
+ ConsensusParamUpdates: &cp,
+}, nil
+}
+
+// FinalizeBlock will execute the block proposal provided by RequestFinalizeBlock.
+// Specifically, it will execute an application's BeginBlock (if defined), followed
+// by the transactions in the proposal, finally followed by the application's
+// EndBlock (if defined).
+//
+// For each raw transaction, i.e. a byte slice, BaseApp will only execute it if
+// it adheres to the sdk.Tx interface. Otherwise, the raw transaction will be
+// skipped. This is to support compatibility with proposers injecting vote
+// extensions into the proposal, which should not themselves be executed in cases
+// where they adhere to the sdk.Tx interface.
+func (app *BaseApp)
+
+FinalizeBlock(req *abci.RequestFinalizeBlock) (res *abci.ResponseFinalizeBlock, err error) {
+ defer func() {
+ if res == nil {
+ return
+}
+ // call the streaming service hooks with the FinalizeBlock messages
+ for _, streamingListener := range app.streamingManager.ABCIListeners {
+ if err := streamingListener.ListenFinalizeBlock(app.finalizeBlockState.Context(), *req, *res); err != nil {
+ app.logger.Error("ListenFinalizeBlock listening hook failed", "height", req.Height, "err", err)
+}
+
+}
+
+}()
+ if app.optimisticExec.Initialized() {
+ // check if the hash we got is the same as the one we are executing
+ aborted := app.optimisticExec.AbortIfNeeded(req.Hash)
+ // Wait for the OE to finish, regardless of whether it was aborted or not
+ res, err = app.optimisticExec.WaitResult()
+
+ // only return if we are not aborting
+ if !aborted {
+ if res != nil {
+ res.AppHash = app.workingHash()
+}
+
+return res, err
+}
+
+ // if it was aborted, we need to reset the state
+ app.finalizeBlockState = nil
+ app.optimisticExec.Reset()
+}
+
+ // if no OE is running, just run the block (this is either a block replay or a OE that got aborted)
+
+res, err = app.internalFinalizeBlock(context.Background(), req)
+ if res != nil {
+ res.AppHash = app.workingHash()
+}
+
+return res, err
+}
+
+// checkHalt checks if height or time exceeds halt-height or halt-time respectively.
+func (app *BaseApp) checkHalt(height int64, time time.Time) error {
+    var halt bool
+    switch {
+    case app.haltHeight > 0 && uint64(height) >= app.haltHeight:
+        halt = true
+    case app.haltTime > 0 && time.Unix() >= int64(app.haltTime):
+        halt = true
+    }
+
+    if halt {
+        return fmt.Errorf("halt per configuration height %d time %d", app.haltHeight, app.haltTime)
+    }
+
+    return nil
+}
+
+// Commit implements the ABCI interface. It will commit all state that exists in
+// the deliver state's multi-store and includes the resulting commit ID in the
+// returned abci.ResponseCommit. Commit will set the check state based on the
+// latest header and reset the deliver state. Also, if a non-zero halt height is
+// defined in config, Commit will execute a deferred function call to check
+// against that height and gracefully halt if it matches the latest committed
+// height.
+func (app *BaseApp) Commit() (*abci.ResponseCommit, error) {
+    header := app.finalizeBlockState.Context().BlockHeader()
+    retainHeight := app.GetBlockRetentionHeight(header.Height)
+
+    if app.precommiter != nil {
+        app.precommiter(app.finalizeBlockState.Context())
+    }
+
+    rms, ok := app.cms.(*rootmulti.Store)
+    if ok {
+        rms.SetCommitHeader(header)
+    }
+
+    app.cms.Commit()
+
+    resp := &abci.ResponseCommit{
+        RetainHeight: retainHeight,
+    }
+
+    abciListeners := app.streamingManager.ABCIListeners
+    if len(abciListeners) > 0 {
+        ctx := app.finalizeBlockState.Context()
+        blockHeight := ctx.BlockHeight()
+        changeSet := app.cms.PopStateCache()
+        for _, abciListener := range abciListeners {
+            if err := abciListener.ListenCommit(ctx, *resp, changeSet); err != nil {
+                app.logger.Error("Commit listening hook failed", "height", blockHeight, "err", err)
+            }
+        }
+    }
+
+    // Reset the CheckTx state to the latest committed.
+    //
+    // NOTE: This is safe because CometBFT holds a lock on the mempool for
+    // Commit. Use the header from this latest block.
+    app.setState(execModeCheck, header)
+
+    app.finalizeBlockState = nil
+
+    if app.prepareCheckStater != nil {
+        app.prepareCheckStater(app.checkState.Context())
+    }
+
+    // The SnapshotIfApplicable method will create the snapshot by starting the goroutine
+    app.snapshotManager.SnapshotIfApplicable(header.Height)
+
+    return resp, nil
+}
+
+// workingHash gets the apphash that will be finalized in commit.
+// These writes will be persisted to the root multi-store (app.cms) and flushed to
+// disk in the Commit phase. This means when the ABCI client requests Commit(), the application
+// state transitions will be flushed to disk; as a result, we already have
+// an application Merkle root.
+func (app *BaseApp) workingHash() []byte {
+    // Write the FinalizeBlock state into branched storage and commit the MultiStore.
+    // The write to the FinalizeBlock state writes all state transitions to the root
+    // MultiStore (app.cms) so when Commit() is called it persists those values.
+    app.finalizeBlockState.ms.Write()
+
+    // Get the hash of all writes in order to return the apphash to the comet in finalizeBlock.
+    commitHash := app.cms.WorkingHash()
+    app.logger.Debug("hash of all writes", "workingHash", fmt.Sprintf("%X", commitHash))
+
+    return commitHash
+}
+
+func handleQueryApp(app *BaseApp, path []string, req *abci.RequestQuery) *abci.ResponseQuery {
+    if len(path) >= 2 {
+        switch path[1] {
+        case "simulate":
+            txBytes := req.Data
+
+            gInfo, res, err := app.Simulate(txBytes)
+            if err != nil {
+                return sdkerrors.QueryResult(errorsmod.Wrap(err, "failed to simulate tx"), app.trace)
+            }
+
+            simRes := &sdk.SimulationResponse{
+                GasInfo: gInfo,
+                Result:  res,
+            }
+
+            bz, err := codec.ProtoMarshalJSON(simRes, app.interfaceRegistry)
+            if err != nil {
+                return sdkerrors.QueryResult(errorsmod.Wrap(err, "failed to JSON encode simulation response"), app.trace)
+            }
+
+            return &abci.ResponseQuery{
+                Codespace: sdkerrors.RootCodespace,
+                Height:    req.Height,
+                Value:     bz,
+            }
+
+        case "version":
+            return &abci.ResponseQuery{
+                Codespace: sdkerrors.RootCodespace,
+                Height:    req.Height,
+                Value:     []byte(app.version),
+            }
+
+        default:
+            return sdkerrors.QueryResult(errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "unknown query: %s", path), app.trace)
+        }
+    }
+
+    return sdkerrors.QueryResult(
+        errorsmod.Wrap(
+            sdkerrors.ErrUnknownRequest,
+            "expected second parameter to be either 'simulate' or 'version', neither was present",
+        ), app.trace)
+}
+
+func handleQueryStore(app *BaseApp, path []string, req abci.RequestQuery) *abci.ResponseQuery {
+    // "/store" prefix for store queries
+    queryable, ok := app.cms.(storetypes.Queryable)
+    if !ok {
+        return sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "multi-store does not support queries"), app.trace)
+    }
+
+    req.Path = "/" + strings.Join(path[1:], "/")
+
+    if req.Height <= 1 && req.Prove {
+        return sdkerrors.QueryResult(
+            errorsmod.Wrap(
+                sdkerrors.ErrInvalidRequest,
+                "cannot query with proof when height <= 1; please provide a valid height",
+            ), app.trace)
+    }
+
+    sdkReq := storetypes.RequestQuery(req)
+    resp, err := queryable.Query(&sdkReq)
+    if err != nil {
+        return sdkerrors.QueryResult(err, app.trace)
+    }
+
+    resp.Height = req.Height
+    abciResp := abci.ResponseQuery(*resp)
+
+    return &abciResp
+}
+
+func handleQueryP2P(app *BaseApp, path []string) *abci.ResponseQuery {
+    // "/p2p" prefix for p2p queries
+    if len(path) < 4 {
+        return sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "path should be p2p filter <addr|id> <parameter>"), app.trace)
+    }
+
+    var resp *abci.ResponseQuery
+
+    cmd, typ, arg := path[1], path[2], path[3]
+    switch cmd {
+    case "filter":
+        switch typ {
+        case "addr":
+            resp = app.FilterPeerByAddrPort(arg)
+        case "id":
+            resp = app.FilterPeerByID(arg)
+        }
+    default:
+        resp = sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "expected second parameter to be 'filter'"), app.trace)
+    }
+
+    return resp
+}
+
+// SplitABCIQueryPath splits a string path using the delimiter '/'.
+//
+// e.g. "this/is/funny" becomes []string{"this", "is", "funny"}
+func SplitABCIQueryPath(requestPath string) (path []string) {
+    path = strings.Split(requestPath, "/")
+
+    // first element is empty string
+    if len(path) > 0 && path[0] == "" {
+        path = path[1:]
+    }
+
+    return path
+}
+
+// FilterPeerByAddrPort filters peers by address/port.
+func (app *BaseApp) FilterPeerByAddrPort(info string) *abci.ResponseQuery {
+    if app.addrPeerFilter != nil {
+        return app.addrPeerFilter(info)
+    }
+
+    return &abci.ResponseQuery{}
+}
+
+// FilterPeerByID filters peers by node ID.
+func (app *BaseApp) FilterPeerByID(info string) *abci.ResponseQuery {
+    if app.idPeerFilter != nil {
+        return app.idPeerFilter(info)
+    }
+
+    return &abci.ResponseQuery{}
+}
+
+// getContextForProposal returns the correct Context for PrepareProposal and
+// ProcessProposal. We use finalizeBlockState on the first block to be able to
+// access any state changes made in InitChain.
+func (app *BaseApp) getContextForProposal(ctx sdk.Context, height int64) sdk.Context {
+    if height == app.initialHeight {
+        ctx, _ = app.finalizeBlockState.Context().CacheContext()
+
+        // clear all context data set during InitChain to avoid inconsistent behavior
+        ctx = ctx.WithBlockHeader(cmtproto.Header{}).WithHeaderInfo(coreheader.Info{})
+        return ctx
+    }
+
+    return ctx
+}
+
+func (app *BaseApp) handleQueryGRPC(handler GRPCQueryHandler, req *abci.RequestQuery) *abci.ResponseQuery {
+    ctx, err := app.CreateQueryContext(req.Height, req.Prove)
+    if err != nil {
+        return sdkerrors.QueryResult(err, app.trace)
+    }
+
+    resp, err := handler(ctx, req)
+    if err != nil {
+        resp = sdkerrors.QueryResult(gRPCErrorToSDKError(err), app.trace)
+        resp.Height = req.Height
+        return resp
+    }
+
+    return resp
+}
+
+func gRPCErrorToSDKError(err error) error {
+    status, ok := grpcstatus.FromError(err)
+    if !ok {
+        return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, err.Error())
+    }
+
+    switch status.Code() {
+    case codes.NotFound:
+        return errorsmod.Wrap(sdkerrors.ErrKeyNotFound, err.Error())
+    case codes.InvalidArgument:
+        return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, err.Error())
+    case codes.FailedPrecondition:
+        return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, err.Error())
+    case codes.Unauthenticated:
+        return errorsmod.Wrap(sdkerrors.ErrUnauthorized, err.Error())
+    default:
+        return errorsmod.Wrap(sdkerrors.ErrUnknownRequest, err.Error())
+    }
+}
+
+func checkNegativeHeight(height int64) error {
+    if height < 0 {
+        return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "cannot query with height < 0; please provide a valid height")
+    }
+
+    return nil
+}
+
+// CreateQueryContext creates a new sdk.Context for a query, taking as args
+// the block height and whether the query needs a proof or not.
+func (app *BaseApp) CreateQueryContext(height int64, prove bool) (sdk.Context, error) {
+    return app.CreateQueryContextWithCheckHeader(height, prove, true)
+}
+
+// CreateQueryContextWithCheckHeader creates a new sdk.Context for a query, taking as args
+// the block height, whether the query needs a proof or not, and whether to check the header or not.
+func (app *BaseApp) CreateQueryContextWithCheckHeader(height int64, prove, checkHeader bool) (sdk.Context, error) {
+    if err := checkNegativeHeight(height); err != nil {
+        return sdk.Context{}, err
+    }
+
+    // use custom query multi-store if provided
+    qms := app.qms
+    if qms == nil {
+        qms = app.cms.(storetypes.MultiStore)
+    }
+
+    lastBlockHeight := qms.LatestVersion()
+    if lastBlockHeight == 0 {
+        return sdk.Context{}, errorsmod.Wrapf(sdkerrors.ErrInvalidHeight, "%s is not ready; please wait for first block", app.Name())
+    }
+
+    if height > lastBlockHeight {
+        return sdk.Context{},
+            errorsmod.Wrap(
+                sdkerrors.ErrInvalidHeight,
+                "cannot query with height in the future; please provide a valid height",
+            )
+    }
+
+    if height == 1 && prove {
+        return sdk.Context{},
+            errorsmod.Wrap(
+                sdkerrors.ErrInvalidRequest,
+                "cannot query with proof when height <= 1; please provide a valid height",
+            )
+    }
+
+    var header *cmtproto.Header
+    isLatest := height == 0
+    for _, state := range []*state{
+        app.checkState,
+        app.finalizeBlockState,
+    } {
+        if state != nil {
+            // branch the commit multi-store for safety
+            h := state.Context().BlockHeader()
+            if isLatest {
+                lastBlockHeight = qms.LatestVersion()
+            }
+            if !checkHeader || !isLatest || isLatest && h.Height == lastBlockHeight {
+                header = &h
+                break
+            }
+        }
+    }
+
+    if header == nil {
+        return sdk.Context{},
+            errorsmod.Wrapf(
+                sdkerrors.ErrInvalidHeight,
+                "context did not contain latest block height in either check state or finalize block state (%d)", lastBlockHeight,
+            )
+    }
+
+    // when a client did not provide a query height, manually inject the latest
+    if isLatest {
+        height = lastBlockHeight
+    }
+
+    cacheMS, err := qms.CacheMultiStoreWithVersion(height)
+    if err != nil {
+        return sdk.Context{},
+            errorsmod.Wrapf(
+                sdkerrors.ErrNotFound,
+                "failed to load state at height %d; %s (latest height: %d)", height, err, lastBlockHeight,
+            )
+    }
+
+    // branch the commit multi-store for safety
+    ctx := sdk.NewContext(cacheMS, *header, true, app.logger).
+        WithMinGasPrices(app.minGasPrices).
+        WithGasMeter(storetypes.NewGasMeter(app.queryGasLimit)).
+        WithBlockHeader(*header).
+        WithBlockHeight(height)
+
+    if !isLatest {
+        rms, ok := app.cms.(*rootmulti.Store)
+        if ok {
+            cInfo, err := rms.GetCommitInfo(height)
+            if cInfo != nil && err == nil {
+                ctx = ctx.WithBlockHeight(height).WithBlockTime(cInfo.Timestamp)
+            }
+        }
+    }
+
+    return ctx, nil
+}
+
+// GetBlockRetentionHeight returns the height for which all blocks below this height
+// are pruned from CometBFT. Given a commitment height and a non-zero local
+// minRetainBlocks configuration, the retentionHeight is the smallest height that
+// satisfies:
+//
+// - Unbonding (safety threshold) time: The block interval in which validators
+// can be economically punished for misbehavior. Blocks in this interval must be
+// auditable e.g. by the light client.
+//
+// - Logical store snapshot interval: The block interval at which the underlying
+// logical store database is persisted to disk, e.g. every 10000 heights. Blocks
+// since the last IAVL snapshot must be available for replay on application restart.
+//
+// - State sync snapshots: Blocks since the oldest available snapshot must be
+// available for state sync nodes to catch up (oldest because a node may be
+// restoring an old snapshot while a new snapshot was taken).
+//
+// - Local (minRetainBlocks) config: Archive nodes may want to retain more or
+// all blocks, e.g. via a local config option min-retain-blocks. There may also
+// be a need to vary retention for other nodes, e.g. sentry nodes which do not
+// need historical blocks.
+func (app *BaseApp) GetBlockRetentionHeight(commitHeight int64) int64 {
+    // If minRetainBlocks is zero, pruning is disabled and we return 0.
+    // If commitHeight is less than or equal to minRetainBlocks, return 0 since there are not enough
+    // blocks to trigger pruning yet. This ensures we keep all blocks until we have at least minRetainBlocks.
+    retentionBlockWindow := commitHeight - int64(app.minRetainBlocks)
+    if app.minRetainBlocks == 0 || retentionBlockWindow <= 0 {
+        return 0
+    }
+
+    minNonZero := func(x, y int64) int64 {
+        switch {
+        case x == 0:
+            return y
+        case y == 0:
+            return x
+        case x < y:
+            return x
+        default:
+            return y
+        }
+    }
+
+    // Define retentionHeight as the minimum value that satisfies all non-zero
+    // constraints. All blocks below (commitHeight-retentionHeight) are pruned
+    // from CometBFT.
+    var retentionHeight int64
+
+    // Define the number of blocks needed to protect against misbehaving validators
+    // which allows light clients to operate safely. Note, we piggyback off the
+    // evidence parameters instead of computing an estimated number of blocks based
+    // on the unbonding period and block commitment time as the two should be
+    // equivalent.
+    cp := app.GetConsensusParams(app.finalizeBlockState.Context())
+    if cp.Evidence != nil && cp.Evidence.MaxAgeNumBlocks > 0 {
+        retentionHeight = commitHeight - cp.Evidence.MaxAgeNumBlocks
+    }
+
+    if app.snapshotManager != nil {
+        snapshotRetentionHeights := app.snapshotManager.GetSnapshotBlockRetentionHeights()
+        if snapshotRetentionHeights > 0 {
+            retentionHeight = minNonZero(retentionHeight, commitHeight-snapshotRetentionHeights)
+        }
+    }
+
+    retentionHeight = minNonZero(retentionHeight, retentionBlockWindow)
+
+    if retentionHeight <= 0 {
+        // prune nothing in the case of a non-positive height
+        return 0
+    }
+
+    return retentionHeight
+}
+
+// toVoteInfo converts the new ExtendedVoteInfo to VoteInfo.
+func toVoteInfo(votes []abci.ExtendedVoteInfo) []abci.VoteInfo {
+    legacyVotes := make([]abci.VoteInfo, len(votes))
+    for i, vote := range votes {
+        legacyVotes[i] = abci.VoteInfo{
+            Validator: abci.Validator{
+                Address: vote.Validator.Address,
+                Power:   vote.Validator.Power,
+            },
+            BlockIdFlag: vote.BlockIdFlag,
+        }
+    }
+
+    return legacyVotes
+}
+```
+
+#### PreBlock
+
+* Run the application's [`preBlocker()`](/sdk/v0.53/learn/beginner/app-anatomy#preblocker), which mainly runs the [`PreBlocker()`](/sdk/v0.53/build/building-modules/preblock#preblock) method of each of the modules.
+
+#### BeginBlock
+
+* Initialize [`finalizeBlockState`](#state-updates) with the latest header using the `req abci.RequestFinalizeBlock` passed as parameter via the `setState` function.
+
+ ```go expandable
+ package baseapp
+
+ import (
+
+ "context"
+ "fmt"
+ "maps"
+ "math"
+ "slices"
+ "strconv"
+ "sync"
+ "github.com/cockroachdb/errors"
+ abci "github.com/cometbft/cometbft/abci/types"
+ "github.com/cometbft/cometbft/crypto/tmhash"
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ dbm "github.com/cosmos/cosmos-db"
+ "github.com/cosmos/gogoproto/proto"
+ protov2 "google.golang.org/protobuf/proto"
+ "cosmossdk.io/core/header"
+ errorsmod "cosmossdk.io/errors"
+ "cosmossdk.io/log"
+ "cosmossdk.io/store"
+ storemetrics "cosmossdk.io/store/metrics"
+ "cosmossdk.io/store/snapshots"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/baseapp/oe"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ servertypes "github.com/cosmos/cosmos-sdk/server/types"
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/types/mempool"
+ "github.com/cosmos/cosmos-sdk/types/msgservice"
+ )
+
+ type (
+     execMode uint8
+
+     // StoreLoader defines a customizable function to control how we load the
+     // CommitMultiStore from disk. This is useful for state migration, when
+     // loading a datastore written with an older version of the software. In
+     // particular, if a module changed the substore key name (or removed a substore)
+     // between two versions of the software.
+     StoreLoader func(ms storetypes.CommitMultiStore) error
+ )
+
+ const (
+     execModeCheck               execMode = iota // Check a transaction
+     execModeReCheck                             // Recheck a (pending) transaction after a commit
+     execModeSimulate                            // Simulate a transaction
+     execModePrepareProposal                     // Prepare a block proposal
+     execModeProcessProposal                     // Process a block proposal
+     execModeVoteExtension                       // Extend or verify a pre-commit vote
+     execModeVerifyVoteExtension                 // Verify a vote extension
+     execModeFinalize                            // Finalize a block proposal
+ )
+
+ var _ servertypes.ABCI = (*BaseApp)(nil)
+
+ // BaseApp reflects the ABCI application implementation.
+ type BaseApp struct {
+     // initialized on creation
+     mu          sync.Mutex                  // mu protects the fields below.
+     logger      log.Logger
+     name        string                      // application name from abci.BlockInfo
+     db          dbm.DB                      // common DB backend
+     cms         storetypes.CommitMultiStore // Main (uncached) state
+     qms         storetypes.MultiStore       // Optional alternative multistore for querying only.
+     storeLoader StoreLoader                 // function to handle store loading, may be overridden with SetStoreLoader()
+
+     grpcQueryRouter   *GRPCQueryRouter  // router for redirecting gRPC query calls
+     msgServiceRouter  *MsgServiceRouter // router for redirecting Msg service messages
+     interfaceRegistry codectypes.InterfaceRegistry
+     txDecoder         sdk.TxDecoder // unmarshal []byte into sdk.Tx
+     txEncoder         sdk.TxEncoder // marshal sdk.Tx into []byte
+
+     mempool     mempool.Mempool // application side mempool
+     anteHandler sdk.AnteHandler // ante handler for fee and auth
+     postHandler sdk.PostHandler // post handler, optional
+
+     checkTxHandler     sdk.CheckTxHandler             // ABCI CheckTx handler
+     initChainer        sdk.InitChainer                // ABCI InitChain handler
+     preBlocker         sdk.PreBlocker                 // logic to run before BeginBlocker
+     beginBlocker       sdk.BeginBlocker               // (legacy ABCI) BeginBlock handler
+     endBlocker         sdk.EndBlocker                 // (legacy ABCI) EndBlock handler
+     processProposal    sdk.ProcessProposalHandler     // ABCI ProcessProposal handler
+     prepareProposal    sdk.PrepareProposalHandler     // ABCI PrepareProposal
+     extendVote         sdk.ExtendVoteHandler          // ABCI ExtendVote handler
+     verifyVoteExt      sdk.VerifyVoteExtensionHandler // ABCI VerifyVoteExtension handler
+     prepareCheckStater sdk.PrepareCheckStater         // logic to run during commit using the checkState
+     precommiter        sdk.Precommiter                // logic to run during commit using the deliverState
+
+     addrPeerFilter sdk.PeerFilter // filter peers by address and port
+     idPeerFilter   sdk.PeerFilter // filter peers by node ID
+     fauxMerkleMode bool           // if true, IAVL MountStores uses MountStoresDB for simulation speed.
+     sigverifyTx    bool           // in the simulation test, since the account does not have a private key, we have to ignore the tx sigverify.
+
+     // manages snapshots, i.e. dumps of app state at certain intervals
+     snapshotManager *snapshots.Manager
+
+     // volatile states:
+     //
+     // - checkState is set on InitChain and reset on Commit
+     // - finalizeBlockState is set on InitChain and FinalizeBlock and set to nil
+     //   on Commit.
+     //
+     // - checkState: Used for CheckTx, which is set based on the previous block's
+     //   state. This state is never committed.
+     //
+     // - prepareProposalState: Used for PrepareProposal, which is set based on the
+     //   previous block's state. This state is never committed. In case of multiple
+     //   consensus rounds, the state is always reset to the previous block's state.
+     //
+     // - processProposalState: Used for ProcessProposal, which is set based on the
+     //   the previous block's state. This state is never committed. In case of
+     //   multiple rounds, the state is always reset to the previous block's state.
+     //
+     // - finalizeBlockState: Used for FinalizeBlock, which is set based on the
+     //   previous block's state. This state is committed.
+     checkState           *state
+     prepareProposalState *state
+     processProposalState *state
+     finalizeBlockState   *state
+
+     // An inter-block write-through cache provided to the context during the ABCI
+     // FinalizeBlock call.
+     interBlockCache storetypes.MultiStorePersistentCache
+
+     // paramStore is used to query for ABCI consensus parameters from an
+     // application parameter store.
+     paramStore ParamStore
+
+     // queryGasLimit defines the maximum gas for queries; unbounded if 0.
+     queryGasLimit uint64
+
+     // The minimum gas prices a validator is willing to accept for processing a
+     // transaction. This is mainly used for DoS and spam prevention.
+     minGasPrices sdk.DecCoins
+
+     // initialHeight is the initial height at which we start the BaseApp
+     initialHeight int64
+
+     // flag for sealing options and parameters to a BaseApp
+     sealed bool
+
+     // block height at which to halt the chain and gracefully shutdown
+     haltHeight uint64
+
+     // minimum block time (in Unix seconds) at which to halt the chain and
+     // gracefully shutdown
+     haltTime uint64
+
+     // minRetainBlocks defines the minimum block height offset from the current
+     // block being committed, such that all blocks past this offset are pruned
+     // from CometBFT. It is used as part of the process of determining the
+     // ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates
+     // that no blocks should be pruned.
+     //
+     // Note: CometBFT block pruning is dependent on this parameter in conjunction
+     // with the unbonding (safety threshold) period, state pruning and state sync
+     // snapshot parameters to determine the correct minimum value of
+     // ResponseCommit.RetainHeight.
+     minRetainBlocks uint64
+
+     // application's version string
+     version string
+
+     // application's protocol version that increments on every upgrade
+     // if BaseApp is passed to the upgrade keeper's NewKeeper method.
+     appVersion uint64
+
+     // recovery handler for app.runTx method
+     runTxRecoveryMiddleware recoveryMiddleware
+
+     // trace set will return full stack traces for errors in ABCI Log field
+     trace bool
+
+     // indexEvents defines the set of events in the form {eventType}.{attributeKey},
+     // which informs CometBFT what to index. If empty, all events will be indexed.
+     indexEvents map[string]struct{}
+
+     // streamingManager for managing instances and configuration of ABCIListener services
+     streamingManager storetypes.StreamingManager
+
+     chainID string
+
+     cdc codec.Codec
+
+     // optimisticExec contains the context required for Optimistic Execution,
+     // including the goroutine handling. This is experimental and must be enabled
+     // by developers.
+     optimisticExec *oe.OptimisticExecution
+
+     // disableBlockGasMeter will disable the block gas meter if true, block gas meter is tricky to support
+     // when executing transactions in parallel.
+     // when disabled, the block gas meter in context is a noop one.
+     //
+     // SAFETY: it's safe to do if validators validate the total gas wanted in the `ProcessProposal`, which is the case in the default handler.
+     disableBlockGasMeter bool
+ }
+
+ // NewBaseApp returns a reference to an initialized BaseApp. It accepts a
+ // variadic number of option functions, which act on the BaseApp to set
+ // configuration choices.
+ func NewBaseApp(
+     name string, logger log.Logger, db dbm.DB, txDecoder sdk.TxDecoder, options ...func(*BaseApp),
+ ) *BaseApp {
+     app := &BaseApp{
+         logger:           logger.With(log.ModuleKey, "baseapp"),
+         name:             name,
+         db:               db,
+         cms:              store.NewCommitMultiStore(db, logger, storemetrics.NewNoOpMetrics()), // by default we use a no-op metric gather in store
+         storeLoader:      DefaultStoreLoader,
+         grpcQueryRouter:  NewGRPCQueryRouter(),
+         msgServiceRouter: NewMsgServiceRouter(),
+         txDecoder:        txDecoder,
+         fauxMerkleMode:   false,
+         sigverifyTx:      true,
+         queryGasLimit:    math.MaxUint64,
+     }
+
+     for _, option := range options {
+         option(app)
+     }
+
+     if app.mempool == nil {
+         app.SetMempool(mempool.NoOpMempool{})
+     }
+
+     abciProposalHandler := NewDefaultProposalHandler(app.mempool, app)
+
+     if app.prepareProposal == nil {
+         app.SetPrepareProposal(abciProposalHandler.PrepareProposalHandler())
+     }
+
+     if app.processProposal == nil {
+         app.SetProcessProposal(abciProposalHandler.ProcessProposalHandler())
+     }
+
+     if app.extendVote == nil {
+         app.SetExtendVoteHandler(NoOpExtendVote())
+     }
+
+     if app.verifyVoteExt == nil {
+         app.SetVerifyVoteExtensionHandler(NoOpVerifyVoteExtensionHandler())
+     }
+
+     if app.interBlockCache != nil {
+         app.cms.SetInterBlockCache(app.interBlockCache)
+     }
+
+     app.runTxRecoveryMiddleware = newDefaultRecoveryMiddleware()
+
+     // Initialize with an empty interface registry to avoid nil pointer dereference.
+     // Unless SetInterfaceRegistry is called with an interface registry with proper address codecs baseapp will panic.
+     app.cdc = codec.NewProtoCodec(codectypes.NewInterfaceRegistry())
+
+     protoFiles, err := proto.MergedRegistry()
+     if err != nil {
+         logger.Warn("error creating merged proto registry", "error", err)
+     } else {
+         err = msgservice.ValidateProtoAnnotations(protoFiles)
+         if err != nil {
+             // Once we switch to using protoreflect-based antehandlers, we might
+             // want to panic here instead of logging a warning.
+             logger.Warn("error validating merged proto registry annotations", "error", err)
+         }
+     }
+
+     return app
+ }
+
+ // Name returns the name of the BaseApp.
+ func (app *BaseApp) Name() string {
+     return app.name
+ }
+
+ // AppVersion returns the application's protocol version.
+ func (app *BaseApp) AppVersion() uint64 {
+     return app.appVersion
+ }
+
+ // Version returns the application's version string.
+ func (app *BaseApp) Version() string {
+     return app.version
+ }
+
+ // Logger returns the logger of the BaseApp.
+ func (app *BaseApp) Logger() log.Logger {
+     return app.logger
+ }
+
+ // Trace returns the boolean value for logging error stack traces.
+ func (app *BaseApp) Trace() bool {
+     return app.trace
+ }
+
+ // MsgServiceRouter returns the MsgServiceRouter of a BaseApp.
+ func (app *BaseApp) MsgServiceRouter() *MsgServiceRouter {
+     return app.msgServiceRouter
+ }
+
+ // GRPCQueryRouter returns the GRPCQueryRouter of a BaseApp.
+ func (app *BaseApp) GRPCQueryRouter() *GRPCQueryRouter {
+     return app.grpcQueryRouter
+ }
+
+ // MountStores mounts all IAVL or DB stores to the provided keys in the BaseApp
+ // multistore.
+ func (app *BaseApp) MountStores(keys ...storetypes.StoreKey) {
+     for _, key := range keys {
+         switch key.(type) {
+         case *storetypes.KVStoreKey:
+             if !app.fauxMerkleMode {
+                 app.MountStore(key, storetypes.StoreTypeIAVL)
+             } else {
+                 // StoreTypeDB doesn't do anything upon commit, and it doesn't
+                 // retain history, but it's useful for faster simulation.
+                 app.MountStore(key, storetypes.StoreTypeDB)
+             }
+         case *storetypes.TransientStoreKey:
+             app.MountStore(key, storetypes.StoreTypeTransient)
+         case *storetypes.MemoryStoreKey:
+             app.MountStore(key, storetypes.StoreTypeMemory)
+         default:
+             panic(fmt.Sprintf("Unrecognized store key type :%T", key))
+         }
+     }
+ }
+
+ // MountKVStores mounts all IAVL or DB stores to the provided keys in the
+ // BaseApp multistore.
+ func (app *BaseApp) MountKVStores(keys map[string]*storetypes.KVStoreKey) {
+     for _, key := range keys {
+         if !app.fauxMerkleMode {
+             app.MountStore(key, storetypes.StoreTypeIAVL)
+         } else {
+             // StoreTypeDB doesn't do anything upon commit, and it doesn't
+             // retain history, but it's useful for faster simulation.
+             app.MountStore(key, storetypes.StoreTypeDB)
+         }
+     }
+ }
+
+ // MountTransientStores mounts all transient stores to the provided keys in
+ // the BaseApp multistore.
+ func (app *BaseApp) MountTransientStores(keys map[string]*storetypes.TransientStoreKey) {
+     for _, key := range keys {
+         app.MountStore(key, storetypes.StoreTypeTransient)
+     }
+ }
+
+ // MountMemoryStores mounts all in-memory KVStores with the BaseApp's internal
+ // commit multi-store.
+ func (app *BaseApp) MountMemoryStores(keys map[string]*storetypes.MemoryStoreKey) {
+     skeys := slices.Sorted(maps.Keys(keys))
+     for _, key := range skeys {
+         memKey := keys[key]
+         app.MountStore(memKey, storetypes.StoreTypeMemory)
+     }
+ }
+
+ // MountStore mounts a store to the provided key in the BaseApp multistore,
+ // using the default DB.
+ func (app *BaseApp) MountStore(key storetypes.StoreKey, typ storetypes.StoreType) {
+     app.cms.MountStoreWithDB(key, typ, nil)
+ }
+
+ // LoadLatestVersion loads the latest application version. It will panic if
+ // called more than once on a running BaseApp.
+ func (app *BaseApp) LoadLatestVersion() error {
+     err := app.storeLoader(app.cms)
+     if err != nil {
+         return fmt.Errorf("failed to load latest version: %w", err)
+     }
+
+     return app.Init()
+ }
+
+ // DefaultStoreLoader will be used by default and loads the latest version
+ func DefaultStoreLoader(ms storetypes.CommitMultiStore) error {
+     return ms.LoadLatestVersion()
+ }
+
+ // CommitMultiStore returns the root multi-store.
+ // App constructor can use this to access the `cms`.
+ // UNSAFE: must not be used during the abci life cycle.
+ func (app *BaseApp) CommitMultiStore() storetypes.CommitMultiStore {
+     return app.cms
+ }
+
+ // SnapshotManager returns the snapshot manager.
+ // application use this to register extra extension snapshotters.
+ func (app *BaseApp) SnapshotManager() *snapshots.Manager {
+     return app.snapshotManager
+ }
+
+ // LoadVersion loads the BaseApp application version. It will panic if called
+ // more than once on a running baseapp.
+ func (app *BaseApp) LoadVersion(version int64) error {
+     app.logger.Info("NOTICE: this could take a long time to migrate IAVL store to fastnode if you enable Fast Node.\n")
+     err := app.cms.LoadVersion(version)
+     if err != nil {
+         return fmt.Errorf("failed to load version %d: %w", version, err)
+     }
+
+     return app.Init()
+ }
+
+ // LastCommitID returns the last CommitID of the multistore.
+ func (app *BaseApp) LastCommitID() storetypes.CommitID {
+     return app.cms.LastCommitID()
+ }
+
+ // LastBlockHeight returns the last committed block height.
+ func (app *BaseApp) LastBlockHeight() int64 {
+     return app.cms.LastCommitID().Version
+ }
+
+ // ChainID returns the chainID of the app.
+ func (app *BaseApp) ChainID() string {
+     return app.chainID
+ }
+
+ // AnteHandler returns the AnteHandler of the app.
+ func (app *BaseApp) AnteHandler() sdk.AnteHandler {
+     return app.anteHandler
+ }
+
+ // Mempool returns the Mempool of the app.
+ func (app *BaseApp) Mempool() mempool.Mempool {
+     return app.mempool
+ }
+
+ // Init initializes the app. It seals the app, preventing any
+ // further modifications. In addition, it validates the app against
+ // the earlier provided settings. Returns an error if validation fails.
+ // nil otherwise. Panics if the app is already sealed.
+ func (app *BaseApp) Init() error {
+     if app.sealed {
+         panic("cannot call initFromMainStore: baseapp already sealed")
+     }
+
+     if app.cms == nil {
+         return errors.New("commit multi-store must not be nil")
+     }
+
+     emptyHeader := cmtproto.Header{ChainID: app.chainID}
+
+     // needed for the export command which inits from store but never calls initchain
+     app.setState(execModeCheck, emptyHeader)
+
+     app.Seal()
+
+     return app.cms.GetPruning().Validate()
+ }
+
+ func (app *BaseApp) setMinGasPrices(gasPrices sdk.DecCoins) {
+     app.minGasPrices = gasPrices
+ }
+
+ func (app *BaseApp) setHaltHeight(haltHeight uint64) {
+     app.haltHeight = haltHeight
+ }
+
+ func (app *BaseApp) setHaltTime(haltTime uint64) {
+     app.haltTime = haltTime
+ }
+
+ func (app *BaseApp) setMinRetainBlocks(minRetainBlocks uint64) {
+     app.minRetainBlocks = minRetainBlocks
+ }
+
+ func (app *BaseApp) setInterBlockCache(cache storetypes.MultiStorePersistentCache) {
+     app.interBlockCache = cache
+ }
+
+ func (app *BaseApp) setTrace(trace bool) {
+     app.trace = trace
+ }
+
+ func (app *BaseApp)
+
+ setIndexEvents(ie []string) {
+ app.indexEvents = make(map[string]struct{
+ })
+ for _, e := range ie {
+ app.indexEvents[e] = struct{
+ }{
+ }
+
+ }
+ }
+
+ // Seal seals a BaseApp. It prohibits any further modifications to a BaseApp.
+ func (app *BaseApp) Seal() {
+     app.sealed = true
+ }
+
+ // IsSealed returns true if the BaseApp is sealed and false otherwise.
+ func (app *BaseApp) IsSealed() bool {
+     return app.sealed
+ }
+
+ // setState sets the BaseApp's state for the corresponding mode with a branched
+ // multi-store (i.e. a CacheMultiStore) and a new Context with the same
+ // multi-store branch, and provided header.
+ func (app *BaseApp) setState(mode execMode, h cmtproto.Header) {
+     ms := app.cms.CacheMultiStore()
+     headerInfo := header.Info{
+         Height:  h.Height,
+         Time:    h.Time,
+         ChainID: h.ChainID,
+         AppHash: h.AppHash,
+     }
+     baseState := &state{
+         ms: ms,
+         ctx: sdk.NewContext(ms, h, false, app.logger).
+             WithStreamingManager(app.streamingManager).
+             WithHeaderInfo(headerInfo),
+     }
+
+     switch mode {
+     case execModeCheck:
+         baseState.SetContext(baseState.Context().WithIsCheckTx(true).WithMinGasPrices(app.minGasPrices))
+         app.checkState = baseState
+
+     case execModePrepareProposal:
+         app.prepareProposalState = baseState
+
+     case execModeProcessProposal:
+         app.processProposalState = baseState
+
+     case execModeFinalize:
+         app.finalizeBlockState = baseState
+
+     default:
+         panic(fmt.Sprintf("invalid runTxMode for setState: %d", mode))
+     }
+ }
+
+ // SetCircuitBreaker sets the circuit breaker for the BaseApp.
+ // The circuit breaker is checked on every message execution to verify if a transaction should be executed or not.
+ func (app *BaseApp) SetCircuitBreaker(cb CircuitBreaker) {
+     if app.msgServiceRouter == nil {
+         panic("cannot set circuit breaker with no msg service router set")
+     }
+
+     app.msgServiceRouter.SetCircuit(cb)
+ }
+
+ // GetConsensusParams returns the current consensus parameters from the BaseApp's
+ // ParamStore. If the BaseApp has no ParamStore defined, an empty set is returned.
+ func (app *BaseApp) GetConsensusParams(ctx sdk.Context) cmtproto.ConsensusParams {
+     if app.paramStore == nil {
+         return cmtproto.ConsensusParams{}
+     }
+
+     cp, err := app.paramStore.Get(ctx)
+     if err != nil {
+         // This could happen while migrating from v0.45/v0.46 to v0.50, we should
+         // allow it to happen so during preblock the upgrade plan can be executed
+         // and the consensus params set for the first time in the new format.
+         app.logger.Error("failed to get consensus params", "err", err)
+         return cmtproto.ConsensusParams{}
+     }
+
+     return cp
+ }
+
+ // StoreConsensusParams sets the consensus parameters to the BaseApp's param
+ // store.
+ //
+ // NOTE: We're explicitly not storing the CometBFT app_version in the param store.
+ // It's stored instead in the x/upgrade store, with its own bump logic.
+ func (app *BaseApp) StoreConsensusParams(ctx sdk.Context, cp cmtproto.ConsensusParams) error {
+     if app.paramStore == nil {
+         return errors.New("cannot store consensus params with no params store set")
+     }
+
+     return app.paramStore.Set(ctx, cp)
+ }
+
+ // AddRunTxRecoveryHandler adds custom app.runTx method panic handlers.
+ // Handlers are chained: each new handler wraps the existing middleware.
+ func (app *BaseApp) AddRunTxRecoveryHandler(handlers ...RecoveryHandler) {
+     for _, h := range handlers {
+         app.runTxRecoveryMiddleware = newRecoveryMiddleware(h, app.runTxRecoveryMiddleware)
+     }
+ }
+
+ // GetMaximumBlockGas gets the maximum gas from the consensus params. It panics
+ // if maximum block gas is less than negative one and returns zero if negative
+ // one.
+ func (app *BaseApp) GetMaximumBlockGas(ctx sdk.Context) uint64 {
+     cp := app.GetConsensusParams(ctx)
+     if cp.Block == nil {
+         return 0
+     }
+
+     maxGas := cp.Block.MaxGas
+
+     switch {
+     case maxGas < -1:
+         panic(fmt.Sprintf("invalid maximum block gas: %d", maxGas))
+
+     case maxGas == -1:
+         return 0
+
+     default:
+         return uint64(maxGas)
+     }
+ }
+
+ // validateFinalizeBlockHeight checks that the height in a FinalizeBlock
+ // request matches the height the application expects to process next.
+ func (app *BaseApp) validateFinalizeBlockHeight(req *abci.RequestFinalizeBlock) error {
+     if req.Height < 1 {
+         return fmt.Errorf("invalid height: %d", req.Height)
+     }
+
+     lastBlockHeight := app.LastBlockHeight()
+
+     // expectedHeight holds the expected height to validate
+     var expectedHeight int64
+     if lastBlockHeight == 0 && app.initialHeight > 1 {
+         // In this case, we're validating the first block of the chain, i.e no
+         // previous commit. The height we're expecting is the initial height.
+         expectedHeight = app.initialHeight
+     } else {
+         // This case can mean two things:
+         //
+         // - Either there was already a previous commit in the store, in which
+         // case we increment the version from there.
+         // - Or there was no previous commit, in which case we start at version 1.
+         expectedHeight = lastBlockHeight + 1
+     }
+
+     if req.Height != expectedHeight {
+         return fmt.Errorf("invalid height: %d; expected: %d", req.Height, expectedHeight)
+     }
+
+     return nil
+ }
+
+ // validateBasicTxMsgs executes basic validator calls for messages.
+ // Messages that do not implement sdk.HasValidateBasic are skipped.
+ func validateBasicTxMsgs(msgs []sdk.Msg) error {
+     if len(msgs) == 0 {
+         return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "must contain at least one message")
+     }
+
+     for _, msg := range msgs {
+         m, ok := msg.(sdk.HasValidateBasic)
+         if !ok {
+             continue
+         }
+
+         if err := m.ValidateBasic(); err != nil {
+             return err
+         }
+     }
+
+     return nil
+ }
+
+ // getState returns the volatile state for the given execution mode;
+ // checkState is the fallback for all modes without a dedicated state.
+ func (app *BaseApp) getState(mode execMode) *state {
+     switch mode {
+     case execModeFinalize:
+         return app.finalizeBlockState
+
+     case execModePrepareProposal:
+         return app.prepareProposalState
+
+     case execModeProcessProposal:
+         return app.processProposalState
+
+     default:
+         return app.checkState
+     }
+ }
+
+ // getBlockGasMeter returns the gas meter used to track block gas: a no-op
+ // meter when block gas metering is disabled, a bounded meter when the
+ // consensus params define a positive max gas, and an infinite meter otherwise.
+ func (app *BaseApp) getBlockGasMeter(ctx sdk.Context) storetypes.GasMeter {
+     if app.disableBlockGasMeter {
+         return noopGasMeter{}
+     }
+
+     if maxGas := app.GetMaximumBlockGas(ctx); maxGas > 0 {
+         return storetypes.NewGasMeter(maxGas)
+     }
+
+     return storetypes.NewInfiniteGasMeter()
+ }
+
+ // getContextForTx retrieves the context for the tx w/ txBytes and other memoized values.
+ func (app *BaseApp) getContextForTx(mode execMode, txBytes []byte) sdk.Context {
+     app.mu.Lock()
+     defer app.mu.Unlock()
+
+     modeState := app.getState(mode)
+     if modeState == nil {
+         panic(fmt.Sprintf("state is nil for mode %v", mode))
+     }
+
+     ctx := modeState.Context().
+         WithTxBytes(txBytes).
+         WithGasMeter(storetypes.NewInfiniteGasMeter())
+     // WithVoteInfos(app.voteInfos) // TODO: identify if this is needed
+
+     ctx = ctx.WithIsSigverifyTx(app.sigverifyTx)
+     ctx = ctx.WithConsensusParams(app.GetConsensusParams(ctx))
+
+     if mode == execModeReCheck {
+         ctx = ctx.WithIsReCheckTx(true)
+     }
+
+     if mode == execModeSimulate {
+         ctx, _ = ctx.CacheContext()
+         ctx = ctx.WithExecMode(sdk.ExecMode(execModeSimulate))
+     }
+
+     return ctx
+ }
+
+ // cacheTxContext returns a new context based off of the provided context with
+ // a branched multi-store.
+ func (app *BaseApp) cacheTxContext(ctx sdk.Context, txBytes []byte) (sdk.Context, storetypes.CacheMultiStore) {
+     ms := ctx.MultiStore()
+     msCache := ms.CacheMultiStore()
+     if msCache.TracingEnabled() {
+         msCache = msCache.SetTracingContext(
+             storetypes.TraceContext(
+                 map[string]any{
+                     "txHash": fmt.Sprintf("%X", tmhash.Sum(txBytes)),
+                 },
+             ),
+         ).(storetypes.CacheMultiStore)
+     }
+
+     return ctx.WithMultiStore(msCache), msCache
+ }
+
+ // preBlock runs the application's preBlocker (if set) before any transactions
+ // are executed in FinalizeBlock, and returns the ABCI events it emitted.
+ func (app *BaseApp) preBlock(req *abci.RequestFinalizeBlock) ([]abci.Event, error) {
+     var events []abci.Event
+     if app.preBlocker != nil {
+         ctx := app.finalizeBlockState.Context().WithEventManager(sdk.NewEventManager())
+         rsp, err := app.preBlocker(ctx, req)
+         if err != nil {
+             return nil, err
+         }
+
+         // rsp.ConsensusParamsChanged is true from preBlocker means ConsensusParams in store get changed
+         // write the consensus parameters in store to context
+         if rsp.ConsensusParamsChanged {
+             ctx = ctx.WithConsensusParams(app.GetConsensusParams(ctx))
+             // GasMeter must be set after we get a context with updated consensus params.
+             gasMeter := app.getBlockGasMeter(ctx)
+             ctx = ctx.WithBlockGasMeter(gasMeter)
+             app.finalizeBlockState.SetContext(ctx)
+         }
+
+         events = ctx.EventManager().ABCIEvents()
+     }
+
+     return events, nil
+ }
+
+ // beginBlock runs the application's beginBlocker (if set) and tags every
+ // emitted event with a "mode: BeginBlock" attribute.
+ func (app *BaseApp) beginBlock(_ *abci.RequestFinalizeBlock) (sdk.BeginBlock, error) {
+     var (
+         resp sdk.BeginBlock
+         err  error
+     )
+
+     if app.beginBlocker != nil {
+         resp, err = app.beginBlocker(app.finalizeBlockState.Context())
+         if err != nil {
+             return resp, err
+         }
+
+         // append BeginBlock attributes to all events in the BeginBlock response
+         for i, event := range resp.Events {
+             resp.Events[i].Attributes = append(
+                 event.Attributes,
+                 abci.EventAttribute{Key: "mode", Value: "BeginBlock"},
+             )
+         }
+
+         resp.Events = sdk.MarkEventsToIndex(resp.Events, app.indexEvents)
+     }
+
+     return resp, nil
+ }
+
+ // deliverTx executes a single transaction in FinalizeBlock mode, records
+ // telemetry for the outcome, and converts the result into an ABCI response.
+ func (app *BaseApp) deliverTx(tx []byte) *abci.ExecTxResult {
+     gInfo := sdk.GasInfo{}
+     resultStr := "successful"
+
+     var resp *abci.ExecTxResult
+
+     defer func() {
+         telemetry.IncrCounter(1, "tx", "count")
+         telemetry.IncrCounter(1, "tx", resultStr)
+         telemetry.SetGauge(float32(gInfo.GasUsed), "tx", "gas", "used")
+         telemetry.SetGauge(float32(gInfo.GasWanted), "tx", "gas", "wanted")
+     }()
+
+     gInfo, result, anteEvents, err := app.runTx(execModeFinalize, tx, nil)
+     if err != nil {
+         resultStr = "failed"
+         resp = sdkerrors.ResponseExecTxResultWithEvents(
+             err,
+             gInfo.GasWanted,
+             gInfo.GasUsed,
+             sdk.MarkEventsToIndex(anteEvents, app.indexEvents),
+             app.trace,
+         )
+         return resp
+     }
+
+     resp = &abci.ExecTxResult{
+         GasWanted: int64(gInfo.GasWanted),
+         GasUsed:   int64(gInfo.GasUsed),
+         Log:       result.Log,
+         Data:      result.Data,
+         Events:    sdk.MarkEventsToIndex(result.Events, app.indexEvents),
+     }
+
+     return resp
+ }
+
+ // endBlock is an application-defined function that is called after transactions
+ // have been processed in FinalizeBlock.
+ func (app *BaseApp) endBlock(_ context.Context) (sdk.EndBlock, error) {
+     var endblock sdk.EndBlock
+     if app.endBlocker != nil {
+         eb, err := app.endBlocker(app.finalizeBlockState.Context())
+         if err != nil {
+             return endblock, err
+         }
+
+         // append EndBlock attributes to all events in the EndBlock response
+         for i, event := range eb.Events {
+             eb.Events[i].Attributes = append(
+                 event.Attributes,
+                 abci.EventAttribute{Key: "mode", Value: "EndBlock"},
+             )
+         }
+
+         eb.Events = sdk.MarkEventsToIndex(eb.Events, app.indexEvents)
+         endblock = eb
+     }
+
+     return endblock, nil
+ }
+
+ // runTx processes a transaction within a given execution mode, encoded transaction
+ // bytes, and the decoded transaction itself. All state transitions occur through
+ // a cached Context depending on the mode provided. State only gets persisted
+ // if all messages get executed successfully and the execution mode is DeliverTx.
+ // Note, gas execution info is always returned. A reference to a Result is
+ // returned if the tx does not run out of gas and if all the messages are valid
+ // and execute successfully. An error is returned otherwise.
+ // Both txBytes and the decoded tx are passed to runTx to avoid the state machine
+ // encoding the tx and decoding the transaction twice; passing the decoded tx to
+ // runTx is optional, it will be decoded if the tx is nil.
+ func (app *BaseApp) runTx(mode execMode, txBytes []byte, tx sdk.Tx) (gInfo sdk.GasInfo, result *sdk.Result, anteEvents []abci.Event, err error) {
+     // NOTE: GasWanted should be returned by the AnteHandler. GasUsed is
+     // determined by the GasMeter. We need access to the context to get the gas
+     // meter, so we initialize upfront.
+     var gasWanted uint64
+
+     ctx := app.getContextForTx(mode, txBytes)
+     ms := ctx.MultiStore()
+
+     // only run the tx if there is block gas remaining
+     if mode == execModeFinalize && ctx.BlockGasMeter().IsOutOfGas() {
+         return gInfo, nil, nil, errorsmod.Wrap(sdkerrors.ErrOutOfGas, "no block gas left to run tx")
+     }
+
+     defer func() {
+         if r := recover(); r != nil {
+             recoveryMW := newOutOfGasRecoveryMiddleware(gasWanted, ctx, app.runTxRecoveryMiddleware)
+             err, result = processRecovery(r, recoveryMW), nil
+             ctx.Logger().Error("panic recovered in runTx", "err", err)
+         }
+
+         gInfo = sdk.GasInfo{GasWanted: gasWanted, GasUsed: ctx.GasMeter().GasConsumed()}
+     }()
+
+     blockGasConsumed := false
+
+     // consumeBlockGas makes sure block gas is consumed at most once. It must
+     // happen after tx processing, and must be executed even if tx processing
+     // fails. Hence, its execution is deferred.
+     consumeBlockGas := func() {
+         if !blockGasConsumed {
+             blockGasConsumed = true
+             ctx.BlockGasMeter().ConsumeGas(
+                 ctx.GasMeter().GasConsumedToLimit(), "block gas meter",
+             )
+         }
+     }
+
+     // If BlockGasMeter() panics it will be caught by the above recover and will
+     // return an error - in any case BlockGasMeter will consume gas past the limit.
+     //
+     // NOTE: consumeBlockGas must exist in a separate defer function from the
+     // general deferred recovery function to recover from consumeBlockGas as it'll
+     // be executed first (deferred statements are executed as stack).
+     if mode == execModeFinalize {
+         defer consumeBlockGas()
+     }
+
+     // if the transaction is not decoded, decode it here
+     if tx == nil {
+         tx, err = app.txDecoder(txBytes)
+         if err != nil {
+             return sdk.GasInfo{GasUsed: 0, GasWanted: 0}, nil, nil, sdkerrors.ErrTxDecode.Wrap(err.Error())
+         }
+     }
+
+     msgs := tx.GetMsgs()
+     if err := validateBasicTxMsgs(msgs); err != nil {
+         return sdk.GasInfo{}, nil, nil, err
+     }
+
+     for _, msg := range msgs {
+         handler := app.msgServiceRouter.Handler(msg)
+         if handler == nil {
+             return sdk.GasInfo{}, nil, nil, errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "no message handler found for %T", msg)
+         }
+     }
+
+     if app.anteHandler != nil {
+         var (
+             anteCtx sdk.Context
+             msCache storetypes.CacheMultiStore
+         )
+
+         // Branch context before AnteHandler call in case it aborts.
+         // This is required for both CheckTx and DeliverTx.
+         // Ref: https://github.com/cosmos/cosmos-sdk/issues/2772
+         //
+         // NOTE: Alternatively, we could require that AnteHandler ensures that
+         // writes do not happen if aborted/failed. This may have some
+         // performance benefits, but it'll be more difficult to get right.
+         anteCtx, msCache = app.cacheTxContext(ctx, txBytes)
+         anteCtx = anteCtx.WithEventManager(sdk.NewEventManager())
+
+         newCtx, err := app.anteHandler(anteCtx, tx, mode == execModeSimulate)
+         if !newCtx.IsZero() {
+             // At this point, newCtx.MultiStore() is a store branch, or something else
+             // replaced by the AnteHandler. We want the original multistore.
+             //
+             // Also, in the case of the tx aborting, we need to track gas consumed via
+             // the instantiated gas meter in the AnteHandler, so we update the context
+             // prior to returning.
+             ctx = newCtx.WithMultiStore(ms)
+         }
+
+         events := ctx.EventManager().Events()
+
+         // GasMeter expected to be set in AnteHandler
+         gasWanted = ctx.GasMeter().Limit()
+
+         if err != nil {
+             if mode == execModeReCheck {
+                 // if the ante handler fails on recheck, we want to remove the tx from the mempool
+                 if mempoolErr := app.mempool.Remove(tx); mempoolErr != nil {
+                     return gInfo, nil, anteEvents, errors.Join(err, mempoolErr)
+                 }
+             }
+
+             return gInfo, nil, nil, err
+         }
+
+         msCache.Write()
+         anteEvents = events.ToABCIEvents()
+     }
+
+     switch mode {
+     case execModeCheck:
+         err = app.mempool.Insert(ctx, tx)
+         if err != nil {
+             return gInfo, nil, anteEvents, err
+         }
+
+     case execModeFinalize:
+         err = app.mempool.Remove(tx)
+         if err != nil && !errors.Is(err, mempool.ErrTxNotFound) {
+             return gInfo, nil, anteEvents,
+                 fmt.Errorf("failed to remove tx from mempool: %w", err)
+         }
+     }
+
+     // Create a new Context based off of the existing Context with a MultiStore branch
+     // in case message processing fails. At this point, the MultiStore
+     // is a branch of a branch.
+     runMsgCtx, msCache := app.cacheTxContext(ctx, txBytes)
+
+     // Attempt to execute all messages and only update state if all messages pass
+     // and we're in DeliverTx. Note, runMsgs will never return a reference to a
+     // Result if any single message fails or does not have a registered Handler.
+     msgsV2, err := tx.GetMsgsV2()
+     if err == nil {
+         result, err = app.runMsgs(runMsgCtx, msgs, msgsV2, mode)
+     }
+
+     // Run optional postHandlers (should run regardless of the execution result).
+     //
+     // Note: If the postHandler fails, we also revert the runMsgs state.
+     if app.postHandler != nil {
+         // The runMsgCtx context currently contains events emitted by the ante handler.
+         // We clear this to correctly order events without duplicates.
+         // Note that the state is still preserved.
+         postCtx := runMsgCtx.WithEventManager(sdk.NewEventManager())
+
+         newCtx, errPostHandler := app.postHandler(postCtx, tx, mode == execModeSimulate, err == nil)
+         if errPostHandler != nil {
+             if err == nil {
+                 // when the msg was handled successfully, return the post handler error only
+                 return gInfo, nil, anteEvents, errPostHandler
+             }
+
+             // otherwise append to the msg error so that we keep the original error code for better user experience
+             return gInfo, nil, anteEvents, errorsmod.Wrapf(err, "postHandler: %s", errPostHandler)
+         }
+
+         // we don't want runTx to panic if runMsgs has failed earlier
+         if result == nil {
+             result = &sdk.Result{}
+         }
+
+         result.Events = append(result.Events, newCtx.EventManager().ABCIEvents()...)
+     }
+
+     if err == nil {
+         if mode == execModeFinalize {
+             // When block gas exceeds, it'll panic and won't commit the cached store.
+             consumeBlockGas()
+             msCache.Write()
+         }
+
+         if len(anteEvents) > 0 && (mode == execModeFinalize || mode == execModeSimulate) {
+             // append the events in the order of occurrence
+             result.Events = append(anteEvents, result.Events...)
+         }
+     }
+
+     return gInfo, result, anteEvents, err
+ }
+
+ // runMsgs iterates through a list of messages and executes them with the provided
+ // Context and execution mode. Messages will only be executed during simulation
+ // and DeliverTx. An error is returned if any single message fails or if a
+ // Handler does not exist for a given message route. Otherwise, a reference to a
+ // Result is returned. The caller must not commit state if an error is returned.
+ func (app *BaseApp) runMsgs(ctx sdk.Context, msgs []sdk.Msg, msgsV2 []protov2.Message, mode execMode) (*sdk.Result, error) {
+     events := sdk.EmptyEvents()
+
+     var msgResponses []*codectypes.Any
+
+     // NOTE: GasWanted is determined by the AnteHandler and GasUsed by the GasMeter.
+     for i, msg := range msgs {
+         if mode != execModeFinalize && mode != execModeSimulate {
+             break
+         }
+
+         handler := app.msgServiceRouter.Handler(msg)
+         if handler == nil {
+             return nil, errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "no message handler found for %T", msg)
+         }
+
+         // ADR 031 request type routing
+         msgResult, err := handler(ctx, msg)
+         if err != nil {
+             return nil, errorsmod.Wrapf(err, "failed to execute message; message index: %d", i)
+         }
+
+         // create message events
+         msgEvents, err := createEvents(app.cdc, msgResult.GetEvents(), msg, msgsV2[i])
+         if err != nil {
+             return nil, errorsmod.Wrapf(err, "failed to create message events; message index: %d", i)
+         }
+
+         // append message events and data
+         //
+         // Note: Each message result's data must be length-prefixed in order to
+         // separate each result.
+         for j, event := range msgEvents {
+             // append message index to all events
+             msgEvents[j] = event.AppendAttributes(sdk.NewAttribute("msg_index", strconv.Itoa(i)))
+         }
+
+         events = events.AppendEvents(msgEvents)
+
+         // Each individual sdk.Result that went through the MsgServiceRouter
+         // (which should represent 99% of the Msgs now, since everyone should
+         // be using protobuf Msgs) has exactly one Msg response, set inside
+         // `WrapServiceResult`. We take that Msg response, and aggregate it
+         // into an array.
+         if len(msgResult.MsgResponses) > 0 {
+             msgResponse := msgResult.MsgResponses[0]
+             if msgResponse == nil {
+                 return nil, sdkerrors.ErrLogic.Wrapf("got nil Msg response at index %d for msg %s", i, sdk.MsgTypeURL(msg))
+             }
+
+             msgResponses = append(msgResponses, msgResponse)
+         }
+     }
+
+     data, err := makeABCIData(msgResponses)
+     if err != nil {
+         return nil, errorsmod.Wrap(err, "failed to marshal tx data")
+     }
+
+     return &sdk.Result{
+         Data:         data,
+         Events:       events.ToABCIEvents(),
+         MsgResponses: msgResponses,
+     }, nil
+ }
+
+ // makeABCIData generates the Data field to be sent to ABCI Check/DeliverTx.
+ func makeABCIData(msgResponses []*codectypes.Any) ([]byte, error) {
+     return proto.Marshal(&sdk.TxMsgData{MsgResponses: msgResponses})
+ }
+
+ // createEvents builds the standard "message" event (action, sender, and module
+ // attributes) for a message and prepends it to the events the handler emitted.
+ func createEvents(cdc codec.Codec, events sdk.Events, msg sdk.Msg, msgV2 protov2.Message) (sdk.Events, error) {
+     eventMsgName := sdk.MsgTypeURL(msg)
+     msgEvent := sdk.NewEvent(sdk.EventTypeMessage, sdk.NewAttribute(sdk.AttributeKeyAction, eventMsgName))
+
+     // we set the signer attribute as the sender
+     signers, err := cdc.GetMsgV2Signers(msgV2)
+     if err != nil {
+         return nil, err
+     }
+
+     if len(signers) > 0 && signers[0] != nil {
+         addrStr, err := cdc.InterfaceRegistry().SigningContext().AddressCodec().BytesToString(signers[0])
+         if err != nil {
+             return nil, err
+         }
+
+         msgEvent = msgEvent.AppendAttributes(sdk.NewAttribute(sdk.AttributeKeySender, addrStr))
+     }
+
+     // verify that events have no module attribute set
+     if _, found := events.GetAttributes(sdk.AttributeKeyModule); !found {
+         if moduleName := sdk.GetModuleNameFromTypeURL(eventMsgName); moduleName != "" {
+             msgEvent = msgEvent.AppendAttributes(sdk.NewAttribute(sdk.AttributeKeyModule, moduleName))
+         }
+     }
+
+     return sdk.Events{msgEvent}.AppendEvents(events), nil
+ }
+
+ // PrepareProposalVerifyTx performs transaction verification when a proposer is
+ // creating a block proposal during PrepareProposal. Any state committed to the
+ // PrepareProposal state internally will be discarded. <nil, err> will be
+ // returned if the transaction cannot be encoded. <bz, nil> will be returned if
+ // the transaction is valid, otherwise <bz, err> will be returned.
+ func (app *BaseApp) PrepareProposalVerifyTx(tx sdk.Tx) ([]byte, error) {
+     bz, err := app.txEncoder(tx)
+     if err != nil {
+         return nil, err
+     }
+
+     _, _, _, err = app.runTx(execModePrepareProposal, bz, tx)
+     if err != nil {
+         return nil, err
+     }
+
+     return bz, nil
+ }
+
+ // ProcessProposalVerifyTx performs transaction verification when receiving a
+ // block proposal during ProcessProposal. Any state committed to the
+ // ProcessProposal state internally will be discarded. <nil, err> will be
+ // returned if the transaction cannot be decoded. <Tx, nil> will be returned if
+ // the transaction is valid, otherwise <Tx, err> will be returned.
+ func (app *BaseApp) ProcessProposalVerifyTx(txBz []byte) (sdk.Tx, error) {
+     tx, err := app.txDecoder(txBz)
+     if err != nil {
+         return nil, err
+     }
+
+     _, _, _, err = app.runTx(execModeProcessProposal, txBz, tx)
+     if err != nil {
+         return nil, err
+     }
+
+     return tx, nil
+ }
+
+ // TxDecode decodes raw transaction bytes using the app's TxDecoder.
+ func (app *BaseApp) TxDecode(txBytes []byte) (sdk.Tx, error) {
+     return app.txDecoder(txBytes)
+ }
+
+ // TxEncode encodes a transaction using the app's TxEncoder.
+ func (app *BaseApp) TxEncode(tx sdk.Tx) ([]byte, error) {
+     return app.txEncoder(tx)
+ }
+
+ // StreamingManager returns the app's StreamingManager, which manages
+ // ABCIListener streaming services.
+ func (app *BaseApp) StreamingManager() storetypes.StreamingManager {
+     return app.streamingManager
+ }
+
+ // Close is called in start cmd to gracefully cleanup resources.
+ func (app *BaseApp) Close() error {
+     var errs []error
+
+     // Close app.db (opened by cosmos-sdk/server/start.go call to openDB)
+     if app.db != nil {
+         app.logger.Info("Closing application.db")
+         if err := app.db.Close(); err != nil {
+             errs = append(errs, err)
+         }
+     }
+
+     // Close app.snapshotManager
+     // - opened when app chains use cosmos-sdk/server/util.go/DefaultBaseappOptions (boilerplate)
+     // - which calls cosmos-sdk/server/util.go/GetSnapshotStore
+     // - which is passed to baseapp/options.go/SetSnapshot
+     // - to set app.snapshotManager = snapshots.NewManager
+     if app.snapshotManager != nil {
+         app.logger.Info("Closing snapshots/metadata.db")
+         if err := app.snapshotManager.Close(); err != nil {
+             errs = append(errs, err)
+         }
+     }
+
+     return errors.Join(errs...)
+ }
+
+ // GetBaseApp returns the pointer to itself.
+ func (app *BaseApp) GetBaseApp() *BaseApp {
+     return app
+ }
+ ```
+
+ This function also resets the [main gas meter](/sdk/v0.53/learn/beginner/gas-fees#main-gas-meter).
+
+* Initialize the [block gas meter](/sdk/v0.53/learn/beginner/gas-fees#block-gas-meter) with the `maxGas` limit. The `gas` consumed within the block cannot go above `maxGas`. This parameter is defined in the application's consensus parameters.
+
+* Run the application's [`beginBlocker()`](/sdk/v0.53/learn/beginner/app-anatomy#beginblocker-and-endblocker), which mainly runs the [`BeginBlocker()`](/sdk/v0.53/build/building-modules/beginblock-endblock#beginblock) method of each of the modules.
+
+* Set the [`VoteInfos`](https://github.com/cometbft/cometbft/blob/v0.37.x/spec/abci/abci++_methods.md#voteinfo) of the application, i.e. the list of validators whose *precommit* for the previous block was included by the proposer of the current block. This information is carried into the [`Context`](/sdk/v0.53/learn/advanced/context) so that it can be used during transaction execution and EndBlock.
+
+#### Transaction Execution
+
+When the underlying consensus engine receives a block proposal, each transaction in the block needs to be processed by the application. To that end, the underlying consensus engine delivers the block's transactions to the application in the FinalizeBlock message, and the application processes them in sequential order.
+
+Before the first transaction of a given block is processed, a [volatile state](#state-updates) called `finalizeBlockState` is initialized during FinalizeBlock. This state is updated each time a transaction is processed via `FinalizeBlock`, and committed to the [main state](#main-state) when the block is [committed](#commit), after which it is set to `nil`.
+
+```go expandable
+package baseapp
+
+import (
+
+ "context"
+ "fmt"
+ "maps"
+ "math"
+ "slices"
+ "strconv"
+ "sync"
+ "github.com/cockroachdb/errors"
+ abci "github.com/cometbft/cometbft/abci/types"
+ "github.com/cometbft/cometbft/crypto/tmhash"
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ dbm "github.com/cosmos/cosmos-db"
+ "github.com/cosmos/gogoproto/proto"
+ protov2 "google.golang.org/protobuf/proto"
+ "cosmossdk.io/core/header"
+ errorsmod "cosmossdk.io/errors"
+ "cosmossdk.io/log"
+ "cosmossdk.io/store"
+ storemetrics "cosmossdk.io/store/metrics"
+ "cosmossdk.io/store/snapshots"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/baseapp/oe"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ servertypes "github.com/cosmos/cosmos-sdk/server/types"
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/types/mempool"
+ "github.com/cosmos/cosmos-sdk/types/msgservice"
+)
+
+type (
+	execMode uint8
+
+	// StoreLoader defines a customizable function to control how we load the
+	// CommitMultiStore from disk. This is useful for state migration, when
+	// loading a datastore written with an older version of the software. In
+	// particular, if a module changed the substore key name (or removed a substore)
+	// between two versions of the software.
+	StoreLoader func(ms storetypes.CommitMultiStore) error
+)
+
+const (
+	execModeCheck               execMode = iota // Check a transaction
+	execModeReCheck                             // Recheck a (pending) transaction after a commit
+	execModeSimulate                            // Simulate a transaction
+	execModePrepareProposal                     // Prepare a block proposal
+	execModeProcessProposal                     // Process a block proposal
+	execModeVoteExtension                       // Extend or verify a pre-commit vote
+	execModeVerifyVoteExtension                 // Verify a vote extension
+	execModeFinalize                            // Finalize a block proposal
+)
+
+// Compile-time assertion that BaseApp implements the server ABCI interface.
+var _ servertypes.ABCI = (*BaseApp)(nil)
+
+// BaseApp reflects the ABCI application implementation.
+type BaseApp struct {
+ // initialized on creation
+ mu sync.Mutex // mu protects the fields below.
+ logger log.Logger
+ name string // application name from abci.BlockInfo
+ db dbm.DB // common DB backend
+ cms storetypes.CommitMultiStore // Main (uncached)
+
+state
+ qms storetypes.MultiStore // Optional alternative multistore for querying only.
+ storeLoader StoreLoader // function to handle store loading, may be overridden with SetStoreLoader()
+
+grpcQueryRouter *GRPCQueryRouter // router for redirecting gRPC query calls
+ msgServiceRouter *MsgServiceRouter // router for redirecting Msg service messages
+ interfaceRegistry codectypes.InterfaceRegistry
+ txDecoder sdk.TxDecoder // unmarshal []byte into sdk.Tx
+ txEncoder sdk.TxEncoder // marshal sdk.Tx into []byte
+
+ mempool mempool.Mempool // application side mempool
+ anteHandler sdk.AnteHandler // ante handler for fee and auth
+ postHandler sdk.PostHandler // post handler, optional
+
+ checkTxHandler sdk.CheckTxHandler // ABCI CheckTx handler
+ initChainer sdk.InitChainer // ABCI InitChain handler
+ preBlocker sdk.PreBlocker // logic to run before BeginBlocker
+ beginBlocker sdk.BeginBlocker // (legacy ABCI)
+
+BeginBlock handler
+ endBlocker sdk.EndBlocker // (legacy ABCI)
+
+EndBlock handler
+ processProposal sdk.ProcessProposalHandler // ABCI ProcessProposal handler
+ prepareProposal sdk.PrepareProposalHandler // ABCI PrepareProposal
+ extendVote sdk.ExtendVoteHandler // ABCI ExtendVote handler
+ verifyVoteExt sdk.VerifyVoteExtensionHandler // ABCI VerifyVoteExtension handler
+ prepareCheckStater sdk.PrepareCheckStater // logic to run during commit using the checkState
+ precommiter sdk.Precommiter // logic to run during commit using the deliverState
+
+ addrPeerFilter sdk.PeerFilter // filter peers by address and port
+ idPeerFilter sdk.PeerFilter // filter peers by node ID
+ fauxMerkleMode bool // if true, IAVL MountStores uses MountStoresDB for simulation speed.
+ sigverifyTx bool // in the simulation test, since the account does not have a private key, we have to ignore the tx sigverify.
+
+ // manages snapshots, i.e. dumps of app state at certain intervals
+ snapshotManager *snapshots.Manager
+
+ // volatile states:
+ //
+ // - checkState is set on InitChain and reset on Commit
+ // - finalizeBlockState is set on InitChain and FinalizeBlock and set to nil
+ // on Commit.
+ //
+ // - checkState: Used for CheckTx, which is set based on the previous block's
+ // state. This state is never committed.
+ //
+ // - prepareProposalState: Used for PrepareProposal, which is set based on the
+ // previous block's state. This state is never committed. In case of multiple
+ // consensus rounds, the state is always reset to the previous block's state.
+ //
+ // - processProposalState: Used for ProcessProposal, which is set based on the
+ // the previous block's state. This state is never committed. In case of
+ // multiple rounds, the state is always reset to the previous block's state.
+ //
+ // - finalizeBlockState: Used for FinalizeBlock, which is set based on the
+ // previous block's state. This state is committed.
+ checkState *state
+ prepareProposalState *state
+ processProposalState *state
+ finalizeBlockState *state
+
+ // An inter-block write-through cache provided to the context during the ABCI
+ // FinalizeBlock call.
+ interBlockCache storetypes.MultiStorePersistentCache
+
+ // paramStore is used to query for ABCI consensus parameters from an
+ // application parameter store.
+ paramStore ParamStore
+
+ // queryGasLimit defines the maximum gas for queries; unbounded if 0.
+ queryGasLimit uint64
+
+ // The minimum gas prices a validator is willing to accept for processing a
+ // transaction. This is mainly used for DoS and spam prevention.
+ minGasPrices sdk.DecCoins
+
+ // initialHeight is the initial height at which we start the BaseApp
+ initialHeight int64
+
+ // flag for sealing options and parameters to a BaseApp
+ sealed bool
+
+ // block height at which to halt the chain and gracefully shutdown
+ haltHeight uint64
+
+	// minimum block time (in Unix seconds) at which to halt the chain and gracefully shutdown
+ haltTime uint64
+
+ // minRetainBlocks defines the minimum block height offset from the current
+ // block being committed, such that all blocks past this offset are pruned
+ // from CometBFT. It is used as part of the process of determining the
+ // ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates
+ // that no blocks should be pruned.
+ //
+	// Note: CometBFT block pruning is dependent on this parameter in conjunction
+	// with the unbonding (safety threshold) period, state pruning, and state sync
+	// snapshot parameters to determine the correct minimum value of
+	// ResponseCommit.RetainHeight.
+ minRetainBlocks uint64
+
+ // application's version string
+ version string
+
+ // application's protocol version that increments on every upgrade
+ // if BaseApp is passed to the upgrade keeper's NewKeeper method.
+ appVersion uint64
+
+ // recovery handler for app.runTx method
+ runTxRecoveryMiddleware recoveryMiddleware
+
+ // trace set will return full stack traces for errors in ABCI Log field
+ trace bool
+
+ // indexEvents defines the set of events in the form {
+ eventType
+}.{
+ attributeKey
+},
+ // which informs CometBFT what to index. If empty, all events will be indexed.
+ indexEvents map[string]struct{
+}
+
+ // streamingManager for managing instances and configuration of ABCIListener services
+ streamingManager storetypes.StreamingManager
+
+ chainID string
+
+ cdc codec.Codec
+
+ // optimisticExec contains the context required for Optimistic Execution,
+ // including the goroutine handling.This is experimental and must be enabled
+ // by developers.
+ optimisticExec *oe.OptimisticExecution
+
+ // disableBlockGasMeter will disable the block gas meter if true, block gas meter is tricky to support
+ // when executing transactions in parallel.
+ // when disabled, the block gas meter in context is a noop one.
+ //
+ // SAFETY: it's safe to do if validators validate the total gas wanted in the `ProcessProposal`, which is the case in the default handler.
+ disableBlockGasMeter bool
+}
+
+// NewBaseApp returns a reference to an initialized BaseApp. It accepts a
+// variadic number of option functions, which act on the BaseApp to set
+// configuration choices.
+func NewBaseApp(
+ name string, logger log.Logger, db dbm.DB, txDecoder sdk.TxDecoder, options ...func(*BaseApp),
+) *BaseApp {
+ app := &BaseApp{
+ logger: logger.With(log.ModuleKey, "baseapp"),
+ name: name,
+ db: db,
+ cms: store.NewCommitMultiStore(db, logger, storemetrics.NewNoOpMetrics()), // by default we use a no-op metric gather in store
+ storeLoader: DefaultStoreLoader,
+ grpcQueryRouter: NewGRPCQueryRouter(),
+ msgServiceRouter: NewMsgServiceRouter(),
+ txDecoder: txDecoder,
+ fauxMerkleMode: false,
+ sigverifyTx: true,
+ queryGasLimit: math.MaxUint64,
+}
+ for _, option := range options {
+ option(app)
+}
+ if app.mempool == nil {
+ app.SetMempool(mempool.NoOpMempool{
+})
+}
+ abciProposalHandler := NewDefaultProposalHandler(app.mempool, app)
+ if app.prepareProposal == nil {
+ app.SetPrepareProposal(abciProposalHandler.PrepareProposalHandler())
+}
+ if app.processProposal == nil {
+ app.SetProcessProposal(abciProposalHandler.ProcessProposalHandler())
+}
+ if app.extendVote == nil {
+ app.SetExtendVoteHandler(NoOpExtendVote())
+}
+ if app.verifyVoteExt == nil {
+ app.SetVerifyVoteExtensionHandler(NoOpVerifyVoteExtensionHandler())
+}
+ if app.interBlockCache != nil {
+ app.cms.SetInterBlockCache(app.interBlockCache)
+}
+
+app.runTxRecoveryMiddleware = newDefaultRecoveryMiddleware()
+
+ // Initialize with an empty interface registry to avoid nil pointer dereference.
+ // Unless SetInterfaceRegistry is called with an interface registry with proper address codecs baseapp will panic.
+ app.cdc = codec.NewProtoCodec(codectypes.NewInterfaceRegistry())
+
+protoFiles, err := proto.MergedRegistry()
+ if err != nil {
+ logger.Warn("error creating merged proto registry", "error", err)
+}
+
+else {
+ err = msgservice.ValidateProtoAnnotations(protoFiles)
+ if err != nil {
+ // Once we switch to using protoreflect-based antehandlers, we might
+ // want to panic here instead of logging a warning.
+ logger.Warn("error validating merged proto registry annotations", "error", err)
+}
+
+}
+
+return app
+}
+
+// Name returns the name of the BaseApp.
+func (app *BaseApp)
+
+Name()
+
+string {
+ return app.name
+}
+
+// AppVersion returns the application's protocol version.
+func (app *BaseApp)
+
+AppVersion()
+
+uint64 {
+ return app.appVersion
+}
+
+// Version returns the application's version string.
+func (app *BaseApp)
+
+Version()
+
+string {
+ return app.version
+}
+
+// Logger returns the logger of the BaseApp.
+func (app *BaseApp)
+
+Logger()
+
+log.Logger {
+ return app.logger
+}
+
+// Trace returns the boolean value for logging error stack traces.
+func (app *BaseApp)
+
+Trace()
+
+bool {
+ return app.trace
+}
+
+// MsgServiceRouter returns the MsgServiceRouter of a BaseApp.
+func (app *BaseApp)
+
+MsgServiceRouter() *MsgServiceRouter {
+ return app.msgServiceRouter
+}
+
+// GRPCQueryRouter returns the GRPCQueryRouter of a BaseApp.
+func (app *BaseApp)
+
+GRPCQueryRouter() *GRPCQueryRouter {
+ return app.grpcQueryRouter
+}
+
+// MountStores mounts all IAVL or DB stores to the provided keys in the BaseApp
+// multistore.
+func (app *BaseApp)
+
+MountStores(keys ...storetypes.StoreKey) {
+ for _, key := range keys {
+ switch key.(type) {
+ case *storetypes.KVStoreKey:
+ if !app.fauxMerkleMode {
+ app.MountStore(key, storetypes.StoreTypeIAVL)
+}
+
+else {
+ // StoreTypeDB doesn't do anything upon commit, and it doesn't
+ // retain history, but it's useful for faster simulation.
+ app.MountStore(key, storetypes.StoreTypeDB)
+}
+ case *storetypes.TransientStoreKey:
+ app.MountStore(key, storetypes.StoreTypeTransient)
+ case *storetypes.MemoryStoreKey:
+ app.MountStore(key, storetypes.StoreTypeMemory)
+
+default:
+ panic(fmt.Sprintf("Unrecognized store key type :%T", key))
+}
+
+}
+}
+
+// MountKVStores mounts all IAVL or DB stores to the provided keys in the
+// BaseApp multistore.
+func (app *BaseApp)
+
+MountKVStores(keys map[string]*storetypes.KVStoreKey) {
+ for _, key := range keys {
+ if !app.fauxMerkleMode {
+ app.MountStore(key, storetypes.StoreTypeIAVL)
+}
+
+else {
+ // StoreTypeDB doesn't do anything upon commit, and it doesn't
+ // retain history, but it's useful for faster simulation.
+ app.MountStore(key, storetypes.StoreTypeDB)
+}
+
+}
+}
+
+// MountTransientStores mounts all transient stores to the provided keys in
+// the BaseApp multistore.
+func (app *BaseApp)
+
+MountTransientStores(keys map[string]*storetypes.TransientStoreKey) {
+ for _, key := range keys {
+ app.MountStore(key, storetypes.StoreTypeTransient)
+}
+}
+
+// MountMemoryStores mounts all in-memory KVStores with the BaseApp's internal
+// commit multi-store.
+func (app *BaseApp)
+
+MountMemoryStores(keys map[string]*storetypes.MemoryStoreKey) {
+ skeys := slices.Sorted(maps.Keys(keys))
+ for _, key := range skeys {
+ memKey := keys[key]
+ app.MountStore(memKey, storetypes.StoreTypeMemory)
+}
+}
+
+// MountStore mounts a store to the provided key in the BaseApp multistore,
+// using the default DB.
+func (app *BaseApp)
+
+MountStore(key storetypes.StoreKey, typ storetypes.StoreType) {
+ app.cms.MountStoreWithDB(key, typ, nil)
+}
+
+// LoadLatestVersion loads the latest application version. It will panic if
+// called more than once on a running BaseApp.
+func (app *BaseApp)
+
+LoadLatestVersion()
+
+error {
+ err := app.storeLoader(app.cms)
+ if err != nil {
+ return fmt.Errorf("failed to load latest version: %w", err)
+}
+
+return app.Init()
+}
+
+// DefaultStoreLoader will be used by default and loads the latest version
+func DefaultStoreLoader(ms storetypes.CommitMultiStore)
+
+error {
+ return ms.LoadLatestVersion()
+}
+
+// CommitMultiStore returns the root multi-store.
+// App constructor can use this to access the `cms`.
+// UNSAFE: must not be used during the abci life cycle.
+func (app *BaseApp)
+
+CommitMultiStore()
+
+storetypes.CommitMultiStore {
+ return app.cms
+}
+
+// SnapshotManager returns the snapshot manager.
+// application use this to register extra extension snapshotters.
+func (app *BaseApp)
+
+SnapshotManager() *snapshots.Manager {
+ return app.snapshotManager
+}
+
+// LoadVersion loads the BaseApp application version. It will panic if called
+// more than once on a running baseapp.
+func (app *BaseApp)
+
+LoadVersion(version int64)
+
+error {
+ app.logger.Info("NOTICE: this could take a long time to migrate IAVL store to fastnode if you enable Fast Node.\n")
+ err := app.cms.LoadVersion(version)
+ if err != nil {
+ return fmt.Errorf("failed to load version %d: %w", version, err)
+}
+
+return app.Init()
+}
+
+// LastCommitID returns the last CommitID of the multistore.
+func (app *BaseApp)
+
+LastCommitID()
+
+storetypes.CommitID {
+ return app.cms.LastCommitID()
+}
+
+// LastBlockHeight returns the last committed block height.
+func (app *BaseApp)
+
+LastBlockHeight()
+
+int64 {
+ return app.cms.LastCommitID().Version
+}
+
+// ChainID returns the chainID of the app.
+func (app *BaseApp)
+
+ChainID()
+
+string {
+ return app.chainID
+}
+
+// AnteHandler returns the AnteHandler of the app.
+func (app *BaseApp)
+
+AnteHandler()
+
+sdk.AnteHandler {
+ return app.anteHandler
+}
+
+// Mempool returns the Mempool of the app.
+func (app *BaseApp)
+
+Mempool()
+
+mempool.Mempool {
+ return app.mempool
+}
+
+// Init initializes the app. It seals the app, preventing any
+// further modifications. In addition, it validates the app against
+// the earlier provided settings. Returns an error if validation fails.
+// nil otherwise. Panics if the app is already sealed.
+func (app *BaseApp)
+
+Init()
+
+error {
+ if app.sealed {
+ panic("cannot call initFromMainStore: baseapp already sealed")
+}
+ if app.cms == nil {
+ return errors.New("commit multi-store must not be nil")
+}
+ emptyHeader := cmtproto.Header{
+ ChainID: app.chainID
+}
+
+ // needed for the export command which inits from store but never calls initchain
+ app.setState(execModeCheck, emptyHeader)
+
+app.Seal()
+
+return app.cms.GetPruning().Validate()
+}
+
+func (app *BaseApp)
+
+setMinGasPrices(gasPrices sdk.DecCoins) {
+ app.minGasPrices = gasPrices
+}
+
+func (app *BaseApp)
+
+setHaltHeight(haltHeight uint64) {
+ app.haltHeight = haltHeight
+}
+
+func (app *BaseApp)
+
+setHaltTime(haltTime uint64) {
+ app.haltTime = haltTime
+}
+
+func (app *BaseApp)
+
+setMinRetainBlocks(minRetainBlocks uint64) {
+ app.minRetainBlocks = minRetainBlocks
+}
+
+func (app *BaseApp)
+
+setInterBlockCache(cache storetypes.MultiStorePersistentCache) {
+ app.interBlockCache = cache
+}
+
+func (app *BaseApp)
+
+setTrace(trace bool) {
+ app.trace = trace
+}
+
+func (app *BaseApp)
+
+setIndexEvents(ie []string) {
+ app.indexEvents = make(map[string]struct{
+})
+ for _, e := range ie {
+ app.indexEvents[e] = struct{
+}{
+}
+
+}
+}
+
+// Seal seals a BaseApp. It prohibits any further modifications to a BaseApp.
+func (app *BaseApp)
+
+Seal() {
+ app.sealed = true
+}
+
+// IsSealed returns true if the BaseApp is sealed and false otherwise.
+func (app *BaseApp)
+
+IsSealed()
+
+bool {
+ return app.sealed
+}
+
+// setState sets the BaseApp's state for the corresponding mode with a branched
+// multi-store (i.e. a CacheMultiStore)
+
+and a new Context with the same
+// multi-store branch, and provided header.
+func (app *BaseApp)
+
+setState(mode execMode, h cmtproto.Header) {
+ ms := app.cms.CacheMultiStore()
+ headerInfo := header.Info{
+ Height: h.Height,
+ Time: h.Time,
+ ChainID: h.ChainID,
+ AppHash: h.AppHash,
+}
+ baseState := &state{
+ ms: ms,
+ ctx: sdk.NewContext(ms, h, false, app.logger).
+ WithStreamingManager(app.streamingManager).
+ WithHeaderInfo(headerInfo),
+}
+ switch mode {
+ case execModeCheck:
+ baseState.SetContext(baseState.Context().WithIsCheckTx(true).WithMinGasPrices(app.minGasPrices))
+
+app.checkState = baseState
+ case execModePrepareProposal:
+ app.prepareProposalState = baseState
+ case execModeProcessProposal:
+ app.processProposalState = baseState
+ case execModeFinalize:
+ app.finalizeBlockState = baseState
+
+ default:
+ panic(fmt.Sprintf("invalid runTxMode for setState: %d", mode))
+}
+}
+
+// SetCircuitBreaker sets the circuit breaker for the BaseApp.
+// The circuit breaker is checked on every message execution to verify if a transaction should be executed or not.
+func (app *BaseApp)
+
+SetCircuitBreaker(cb CircuitBreaker) {
+ if app.msgServiceRouter == nil {
+ panic("cannot set circuit breaker with no msg service router set")
+}
+
+app.msgServiceRouter.SetCircuit(cb)
+}
+
+// GetConsensusParams returns the current consensus parameters from the BaseApp's
+// ParamStore. If the BaseApp has no ParamStore defined, nil is returned.
+func (app *BaseApp)
+
+GetConsensusParams(ctx sdk.Context)
+
+cmtproto.ConsensusParams {
+ if app.paramStore == nil {
+ return cmtproto.ConsensusParams{
+}
+
+}
+
+cp, err := app.paramStore.Get(ctx)
+ if err != nil {
+ // This could happen while migrating from v0.45/v0.46 to v0.50, we should
+ // allow it to happen so during preblock the upgrade plan can be executed
+ // and the consensus params set for the first time in the new format.
+ app.logger.Error("failed to get consensus params", "err", err)
+
+return cmtproto.ConsensusParams{
+}
+
+}
+
+return cp
+}
+
+// StoreConsensusParams sets the consensus parameters to the BaseApp's param
+// store.
+//
+// NOTE: We're explicitly not storing the CometBFT app_version in the param store.
+// It's stored instead in the x/upgrade store, with its own bump logic.
+func (app *BaseApp)
+
+StoreConsensusParams(ctx sdk.Context, cp cmtproto.ConsensusParams)
+
+error {
+ if app.paramStore == nil {
+ return errors.New("cannot store consensus params with no params store set")
+}
+
+return app.paramStore.Set(ctx, cp)
+}
+
+// AddRunTxRecoveryHandler adds custom app.runTx method panic handlers.
+func (app *BaseApp)
+
+AddRunTxRecoveryHandler(handlers ...RecoveryHandler) {
+ for _, h := range handlers {
+ app.runTxRecoveryMiddleware = newRecoveryMiddleware(h, app.runTxRecoveryMiddleware)
+}
+}
+
+// GetMaximumBlockGas gets the maximum gas from the consensus params. It panics
+// if maximum block gas is less than negative one and returns zero if negative
+// one.
+func (app *BaseApp)
+
+GetMaximumBlockGas(ctx sdk.Context)
+
+uint64 {
+ cp := app.GetConsensusParams(ctx)
+ if cp.Block == nil {
+ return 0
+}
+ maxGas := cp.Block.MaxGas
+ switch {
+ case maxGas < -1:
+ panic(fmt.Sprintf("invalid maximum block gas: %d", maxGas))
+ case maxGas == -1:
+ return 0
+
+ default:
+ return uint64(maxGas)
+}
+}
+
+func (app *BaseApp)
+
+validateFinalizeBlockHeight(req *abci.RequestFinalizeBlock)
+
+error {
+ if req.Height < 1 {
+ return fmt.Errorf("invalid height: %d", req.Height)
+}
+ lastBlockHeight := app.LastBlockHeight()
+
+ // expectedHeight holds the expected height to validate
+ var expectedHeight int64
+ if lastBlockHeight == 0 && app.initialHeight > 1 {
+ // In this case, we're validating the first block of the chain, i.e no
+ // previous commit. The height we're expecting is the initial height.
+ expectedHeight = app.initialHeight
+}
+
+else {
+ // This case can mean two things:
+ //
+ // - Either there was already a previous commit in the store, in which
+ // case we increment the version from there.
+ // - Or there was no previous commit, in which case we start at version 1.
+ expectedHeight = lastBlockHeight + 1
+}
+ if req.Height != expectedHeight {
+ return fmt.Errorf("invalid height: %d; expected: %d", req.Height, expectedHeight)
+}
+
+return nil
+}
+
+// validateBasicTxMsgs executes basic validator calls for messages.
+func validateBasicTxMsgs(msgs []sdk.Msg)
+
+error {
+ if len(msgs) == 0 {
+ return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "must contain at least one message")
+}
+ for _, msg := range msgs {
+ m, ok := msg.(sdk.HasValidateBasic)
+ if !ok {
+ continue
+}
+ if err := m.ValidateBasic(); err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+func (app *BaseApp)
+
+getState(mode execMode) *state {
+ switch mode {
+ case execModeFinalize:
+ return app.finalizeBlockState
+ case execModePrepareProposal:
+ return app.prepareProposalState
+ case execModeProcessProposal:
+ return app.processProposalState
+
+ default:
+ return app.checkState
+}
+}
+
+func (app *BaseApp)
+
+getBlockGasMeter(ctx sdk.Context)
+
+storetypes.GasMeter {
+ if app.disableBlockGasMeter {
+ return noopGasMeter{
+}
+
+}
+ if maxGas := app.GetMaximumBlockGas(ctx); maxGas > 0 {
+ return storetypes.NewGasMeter(maxGas)
+}
+
+return storetypes.NewInfiniteGasMeter()
+}
+
+// retrieve the context for the tx w/ txBytes and other memoized values.
+func (app *BaseApp)
+
+getContextForTx(mode execMode, txBytes []byte)
+
+sdk.Context {
+ app.mu.Lock()
+
+defer app.mu.Unlock()
+ modeState := app.getState(mode)
+ if modeState == nil {
+ panic(fmt.Sprintf("state is nil for mode %v", mode))
+}
+ ctx := modeState.Context().
+ WithTxBytes(txBytes).
+ WithGasMeter(storetypes.NewInfiniteGasMeter())
+ // WithVoteInfos(app.voteInfos) // TODO: identify if this is needed
+
+ ctx = ctx.WithIsSigverifyTx(app.sigverifyTx)
+
+ctx = ctx.WithConsensusParams(app.GetConsensusParams(ctx))
+ if mode == execModeReCheck {
+ ctx = ctx.WithIsReCheckTx(true)
+}
+ if mode == execModeSimulate {
+ ctx, _ = ctx.CacheContext()
+
+ctx = ctx.WithExecMode(sdk.ExecMode(execModeSimulate))
+}
+
+return ctx
+}
+
+// cacheTxContext returns a new context based off of the provided context with
+// a branched multi-store.
+func (app *BaseApp)
+
+cacheTxContext(ctx sdk.Context, txBytes []byte) (sdk.Context, storetypes.CacheMultiStore) {
+ ms := ctx.MultiStore()
+ msCache := ms.CacheMultiStore()
+ if msCache.TracingEnabled() {
+ msCache = msCache.SetTracingContext(
+ storetypes.TraceContext(
+ map[string]any{
+ "txHash": fmt.Sprintf("%X", tmhash.Sum(txBytes)),
+},
+ ),
+ ).(storetypes.CacheMultiStore)
+}
+
+return ctx.WithMultiStore(msCache), msCache
+}
+
+func (app *BaseApp)
+
+preBlock(req *abci.RequestFinalizeBlock) ([]abci.Event, error) {
+ var events []abci.Event
+ if app.preBlocker != nil {
+ ctx := app.finalizeBlockState.Context().WithEventManager(sdk.NewEventManager())
+
+rsp, err := app.preBlocker(ctx, req)
+ if err != nil {
+ return nil, err
+}
+ // rsp.ConsensusParamsChanged is true from preBlocker means ConsensusParams in store get changed
+ // write the consensus parameters in store to context
+ if rsp.ConsensusParamsChanged {
+ ctx = ctx.WithConsensusParams(app.GetConsensusParams(ctx))
+ // GasMeter must be set after we get a context with updated consensus params.
+ gasMeter := app.getBlockGasMeter(ctx)
+
+ctx = ctx.WithBlockGasMeter(gasMeter)
+
+app.finalizeBlockState.SetContext(ctx)
+}
+
+events = ctx.EventManager().ABCIEvents()
+}
+
+return events, nil
+}
+
+func (app *BaseApp)
+
+beginBlock(_ *abci.RequestFinalizeBlock) (sdk.BeginBlock, error) {
+ var (
+ resp sdk.BeginBlock
+ err error
+ )
+ if app.beginBlocker != nil {
+ resp, err = app.beginBlocker(app.finalizeBlockState.Context())
+ if err != nil {
+ return resp, err
+}
+
+ // append BeginBlock attributes to all events in the EndBlock response
+ for i, event := range resp.Events {
+ resp.Events[i].Attributes = append(
+ event.Attributes,
+ abci.EventAttribute{
+ Key: "mode",
+ Value: "BeginBlock"
+},
+ )
+}
+
+resp.Events = sdk.MarkEventsToIndex(resp.Events, app.indexEvents)
+}
+
+return resp, nil
+}
+
+func (app *BaseApp)
+
+deliverTx(tx []byte) *abci.ExecTxResult {
+ gInfo := sdk.GasInfo{
+}
+ resultStr := "successful"
+
+ var resp *abci.ExecTxResult
+
+ defer func() {
+ telemetry.IncrCounter(1, "tx", "count")
+
+telemetry.IncrCounter(1, "tx", resultStr)
+
+telemetry.SetGauge(float32(gInfo.GasUsed), "tx", "gas", "used")
+
+telemetry.SetGauge(float32(gInfo.GasWanted), "tx", "gas", "wanted")
+}()
+
+gInfo, result, anteEvents, err := app.runTx(execModeFinalize, tx, nil)
+ if err != nil {
+ resultStr = "failed"
+ resp = sdkerrors.ResponseExecTxResultWithEvents(
+ err,
+ gInfo.GasWanted,
+ gInfo.GasUsed,
+ sdk.MarkEventsToIndex(anteEvents, app.indexEvents),
+ app.trace,
+ )
+
+return resp
+}
+
+resp = &abci.ExecTxResult{
+ GasWanted: int64(gInfo.GasWanted),
+ GasUsed: int64(gInfo.GasUsed),
+ Log: result.Log,
+ Data: result.Data,
+ Events: sdk.MarkEventsToIndex(result.Events, app.indexEvents),
+}
+
+return resp
+}
+
+// endBlock is an application-defined function that is called after transactions
+// have been processed in FinalizeBlock.
+func (app *BaseApp)
+
+endBlock(_ context.Context) (sdk.EndBlock, error) {
+ var endblock sdk.EndBlock
+ if app.endBlocker != nil {
+ eb, err := app.endBlocker(app.finalizeBlockState.Context())
+ if err != nil {
+ return endblock, err
+}
+
+ // append EndBlock attributes to all events in the EndBlock response
+ for i, event := range eb.Events {
+ eb.Events[i].Attributes = append(
+ event.Attributes,
+ abci.EventAttribute{
+ Key: "mode",
+ Value: "EndBlock"
+},
+ )
+}
+
+eb.Events = sdk.MarkEventsToIndex(eb.Events, app.indexEvents)
+
+endblock = eb
+}
+
+return endblock, nil
+}
+
+// runTx processes a transaction within a given execution mode, encoded transaction
+// bytes, and the decoded transaction itself. All state transitions occur through
+// a cached Context depending on the mode provided. State only gets persisted
+// if all messages get executed successfully and the execution mode is DeliverTx.
+// Note, gas execution info is always returned. A reference to a Result is
+// returned if the tx does not run out of gas and if all the messages are valid
+// and execute successfully. An error is returned otherwise.
+// both txbytes and the decoded tx are passed to runTx to avoid the state machine encoding the tx and decoding the transaction twice
+// passing the decoded tx to runTX is optional, it will be decoded if the tx is nil
+func (app *BaseApp)
+
+runTx(mode execMode, txBytes []byte, tx sdk.Tx) (gInfo sdk.GasInfo, result *sdk.Result, anteEvents []abci.Event, err error) {
+ // NOTE: GasWanted should be returned by the AnteHandler. GasUsed is
+ // determined by the GasMeter. We need access to the context to get the gas
+ // meter, so we initialize upfront.
+ var gasWanted uint64
+ ctx := app.getContextForTx(mode, txBytes)
+ ms := ctx.MultiStore()
+
+ // only run the tx if there is block gas remaining
+ if mode == execModeFinalize && ctx.BlockGasMeter().IsOutOfGas() {
+ return gInfo, nil, nil, errorsmod.Wrap(sdkerrors.ErrOutOfGas, "no block gas left to run tx")
+}
+
+defer func() {
+ if r := recover(); r != nil {
+ recoveryMW := newOutOfGasRecoveryMiddleware(gasWanted, ctx, app.runTxRecoveryMiddleware)
+
+err, result = processRecovery(r, recoveryMW), nil
+ ctx.Logger().Error("panic recovered in runTx", "err", err)
+}
+
+gInfo = sdk.GasInfo{
+ GasWanted: gasWanted,
+ GasUsed: ctx.GasMeter().GasConsumed()
+}
+
+}()
+ blockGasConsumed := false
+
+ // consumeBlockGas makes sure block gas is consumed at most once. It must
+ // happen after tx processing, and must be executed even if tx processing
+ // fails. Hence, it's execution is deferred.
+ consumeBlockGas := func() {
+ if !blockGasConsumed {
+ blockGasConsumed = true
+ ctx.BlockGasMeter().ConsumeGas(
+ ctx.GasMeter().GasConsumedToLimit(), "block gas meter",
+ )
+}
+
+}
+
+ // If BlockGasMeter()
+
+panics it will be caught by the above recover and will
+ // return an error - in any case BlockGasMeter will consume gas past the limit.
+ //
+ // NOTE: consumeBlockGas must exist in a separate defer function from the
+ // general deferred recovery function to recover from consumeBlockGas as it'll
+ // be executed first (deferred statements are executed as stack).
+ if mode == execModeFinalize {
+ defer consumeBlockGas()
+}
+
+ // if the transaction is not decoded, decode it here
+ if tx == nil {
+ tx, err = app.txDecoder(txBytes)
+ if err != nil {
+ return sdk.GasInfo{
+ GasUsed: 0,
+ GasWanted: 0
+}, nil, nil, sdkerrors.ErrTxDecode.Wrap(err.Error())
+}
+
+}
+ msgs := tx.GetMsgs()
+ if err := validateBasicTxMsgs(msgs); err != nil {
+ return sdk.GasInfo{
+}, nil, nil, err
+}
+ for _, msg := range msgs {
+ handler := app.msgServiceRouter.Handler(msg)
+ if handler == nil {
+ return sdk.GasInfo{
+}, nil, nil, errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "no message handler found for %T", msg)
+}
+
+}
+ if app.anteHandler != nil {
+ var (
+ anteCtx sdk.Context
+ msCache storetypes.CacheMultiStore
+ )
+
+ // Branch context before AnteHandler call in case it aborts.
+ // This is required for both CheckTx and DeliverTx.
+ // Ref: https://github.com/cosmos/cosmos-sdk/issues/2772
+ //
+ // NOTE: Alternatively, we could require that AnteHandler ensures that
+ // writes do not happen if aborted/failed. This may have some
+ // performance benefits, but it'll be more difficult to get right.
+ anteCtx, msCache = app.cacheTxContext(ctx, txBytes)
+
+anteCtx = anteCtx.WithEventManager(sdk.NewEventManager())
+
+newCtx, err := app.anteHandler(anteCtx, tx, mode == execModeSimulate)
+ if !newCtx.IsZero() {
+ // At this point, newCtx.MultiStore()
+
+is a store branch, or something else
+ // replaced by the AnteHandler. We want the original multistore.
+ //
+ // Also, in the case of the tx aborting, we need to track gas consumed via
+ // the instantiated gas meter in the AnteHandler, so we update the context
+ // prior to returning.
+ ctx = newCtx.WithMultiStore(ms)
+}
+ events := ctx.EventManager().Events()
+
+ // GasMeter expected to be set in AnteHandler
+ gasWanted = ctx.GasMeter().Limit()
+ if err != nil {
+ if mode == execModeReCheck {
+ // if the ante handler fails on recheck, we want to remove the tx from the mempool
+ if mempoolErr := app.mempool.Remove(tx); mempoolErr != nil {
+ return gInfo, nil, anteEvents, errors.Join(err, mempoolErr)
+}
+
+}
+
+return gInfo, nil, nil, err
+}
+
+msCache.Write()
+
+anteEvents = events.ToABCIEvents()
+}
+ switch mode {
+ case execModeCheck:
+ err = app.mempool.Insert(ctx, tx)
+ if err != nil {
+ return gInfo, nil, anteEvents, err
+}
+ case execModeFinalize:
+ err = app.mempool.Remove(tx)
+ if err != nil && !errors.Is(err, mempool.ErrTxNotFound) {
+ return gInfo, nil, anteEvents,
+ fmt.Errorf("failed to remove tx from mempool: %w", err)
+}
+
+}
+
+ // Create a new Context based off of the existing Context with a MultiStore branch
+ // in case message processing fails. At this point, the MultiStore
+ // is a branch of a branch.
+ runMsgCtx, msCache := app.cacheTxContext(ctx, txBytes)
+
+ // Attempt to execute all messages and only update state if all messages pass
+ // and we're in DeliverTx. Note, runMsgs will never return a reference to a
+ // Result if any single message fails or does not have a registered Handler.
+ msgsV2, err := tx.GetMsgsV2()
+ if err == nil {
+ result, err = app.runMsgs(runMsgCtx, msgs, msgsV2, mode)
+}
+
+ // Run optional postHandlers (should run regardless of the execution result).
+ //
+ // Note: If the postHandler fails, we also revert the runMsgs state.
+ if app.postHandler != nil {
+ // The runMsgCtx context currently contains events emitted by the ante handler.
+ // We clear this to correctly order events without duplicates.
+ // Note that the state is still preserved.
+ postCtx := runMsgCtx.WithEventManager(sdk.NewEventManager())
+
+newCtx, errPostHandler := app.postHandler(postCtx, tx, mode == execModeSimulate, err == nil)
+ if errPostHandler != nil {
+ if err == nil {
+ // when the msg was handled successfully, return the post handler error only
+ return gInfo, nil, anteEvents, errPostHandler
+}
+ // otherwise append to the msg error so that we keep the original error code for better user experience
+ return gInfo, nil, anteEvents, errorsmod.Wrapf(err, "postHandler: %s", errPostHandler)
+}
+
+ // we don't want runTx to panic if runMsgs has failed earlier
+ if result == nil {
+ result = &sdk.Result{
+}
+
+}
+
+result.Events = append(result.Events, newCtx.EventManager().ABCIEvents()...)
+}
+ if err == nil {
+ if mode == execModeFinalize {
+ // When block gas exceeds, it'll panic and won't commit the cached store.
+ consumeBlockGas()
+
+msCache.Write()
+}
+ if len(anteEvents) > 0 && (mode == execModeFinalize || mode == execModeSimulate) {
+ // append the events in the order of occurrence
+ result.Events = append(anteEvents, result.Events...)
+}
+
+}
+
+return gInfo, result, anteEvents, err
+}
+
+// runMsgs iterates through a list of messages and executes them with the provided
+// Context and execution mode. Messages will only be executed during simulation
+// and DeliverTx. An error is returned if any single message fails or if a
+// Handler does not exist for a given message route. Otherwise, a reference to a
+// Result is returned. The caller must not commit state if an error is returned.
+func (app *BaseApp)
+
+runMsgs(ctx sdk.Context, msgs []sdk.Msg, msgsV2 []protov2.Message, mode execMode) (*sdk.Result, error) {
+ events := sdk.EmptyEvents()
+
+var msgResponses []*codectypes.Any
+
+ // NOTE: GasWanted is determined by the AnteHandler and GasUsed by the GasMeter.
+ for i, msg := range msgs {
+ if mode != execModeFinalize && mode != execModeSimulate {
+ break
+}
+ handler := app.msgServiceRouter.Handler(msg)
+ if handler == nil {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "no message handler found for %T", msg)
+}
+
+ // ADR 031 request type routing
+ msgResult, err := handler(ctx, msg)
+ if err != nil {
+ return nil, errorsmod.Wrapf(err, "failed to execute message; message index: %d", i)
+}
+
+ // create message events
+ msgEvents, err := createEvents(app.cdc, msgResult.GetEvents(), msg, msgsV2[i])
+ if err != nil {
+ return nil, errorsmod.Wrapf(err, "failed to create message events; message index: %d", i)
+}
+
+ // append message events and data
+ //
+ // Note: Each message result's data must be length-prefixed in order to
+ // separate each result.
+ for j, event := range msgEvents {
+ // append message index to all events
+ msgEvents[j] = event.AppendAttributes(sdk.NewAttribute("msg_index", strconv.Itoa(i)))
+}
+
+events = events.AppendEvents(msgEvents)
+
+ // Each individual sdk.Result that went through the MsgServiceRouter
+ // (which should represent 99% of the Msgs now, since everyone should
+ // be using protobuf Msgs)
+
+has exactly one Msg response, set inside
+ // `WrapServiceResult`. We take that Msg response, and aggregate it
+ // into an array.
+ if len(msgResult.MsgResponses) > 0 {
+ msgResponse := msgResult.MsgResponses[0]
+ if msgResponse == nil {
+ return nil, sdkerrors.ErrLogic.Wrapf("got nil Msg response at index %d for msg %s", i, sdk.MsgTypeURL(msg))
+}
+
+msgResponses = append(msgResponses, msgResponse)
+}
+
+
+}
+
+data, err := makeABCIData(msgResponses)
+ if err != nil {
+ return nil, errorsmod.Wrap(err, "failed to marshal tx data")
+}
+
+return &sdk.Result{
+ Data: data,
+ Events: events.ToABCIEvents(),
+ MsgResponses: msgResponses,
+}, nil
+}
+
+// makeABCIData generates the Data field to be sent to ABCI Check/DeliverTx.
+func makeABCIData(msgResponses []*codectypes.Any) ([]byte, error) {
+ return proto.Marshal(&sdk.TxMsgData{
+ MsgResponses: msgResponses
+})
+}
+
+// createEvents returns the standard message event (action, sender and module
+// attributes) prepended to the events emitted by the message handler.
+func createEvents(cdc codec.Codec, events sdk.Events, msg sdk.Msg, msgV2 protov2.Message) (sdk.Events, error) {
+    eventMsgName := sdk.MsgTypeURL(msg)
+    msgEvent := sdk.NewEvent(sdk.EventTypeMessage, sdk.NewAttribute(sdk.AttributeKeyAction, eventMsgName))
+
+    // we set the signer attribute as the sender
+    signers, err := cdc.GetMsgV2Signers(msgV2)
+    if err != nil {
+        return nil, err
+    }
+    if len(signers) > 0 && signers[0] != nil {
+        addrStr, err := cdc.InterfaceRegistry().SigningContext().AddressCodec().BytesToString(signers[0])
+        if err != nil {
+            return nil, err
+        }
+
+        msgEvent = msgEvent.AppendAttributes(sdk.NewAttribute(sdk.AttributeKeySender, addrStr))
+    }
+
+    // verify that events have no module attribute set
+    if _, found := events.GetAttributes(sdk.AttributeKeyModule); !found {
+        if moduleName := sdk.GetModuleNameFromTypeURL(eventMsgName); moduleName != "" {
+            msgEvent = msgEvent.AppendAttributes(sdk.NewAttribute(sdk.AttributeKeyModule, moduleName))
+        }
+    }
+
+    return sdk.Events{msgEvent}.AppendEvents(events), nil
+}
+
+// PrepareProposalVerifyTx performs transaction verification when a proposer is
+// creating a block proposal during PrepareProposal. Any state committed to the
+// PrepareProposal state internally will be discarded. An error is returned if
+// the transaction cannot be encoded or fails verification; otherwise the raw
+// transaction bytes are returned.
+func (app *BaseApp) PrepareProposalVerifyTx(tx sdk.Tx) ([]byte, error) {
+    bz, err := app.txEncoder(tx)
+    if err != nil {
+        return nil, err
+    }
+
+    _, _, _, err = app.runTx(execModePrepareProposal, bz, tx)
+    if err != nil {
+        return nil, err
+    }
+
+    return bz, nil
+}
+
+// ProcessProposalVerifyTx performs transaction verification when receiving a
+// block proposal during ProcessProposal. Any state committed to the
+// ProcessProposal state internally will be discarded. An error is returned if
+// the transaction cannot be decoded or fails verification; otherwise the
+// decoded transaction is returned.
+func (app *BaseApp) ProcessProposalVerifyTx(txBz []byte) (sdk.Tx, error) {
+    tx, err := app.txDecoder(txBz)
+    if err != nil {
+        return nil, err
+    }
+
+    _, _, _, err = app.runTx(execModeProcessProposal, txBz, tx)
+    if err != nil {
+        return nil, err
+    }
+
+    return tx, nil
+}
+
+// TxDecode decodes raw transaction bytes into an sdk.Tx using the app's
+// configured TxDecoder.
+func (app *BaseApp) TxDecode(txBytes []byte) (sdk.Tx, error) {
+    return app.txDecoder(txBytes)
+}
+
+// TxEncode encodes an sdk.Tx into raw bytes using the app's configured
+// TxEncoder.
+func (app *BaseApp) TxEncode(tx sdk.Tx) ([]byte, error) {
+    return app.txEncoder(tx)
+}
+
+// StreamingManager returns the BaseApp's StreamingManager.
+func (app *BaseApp) StreamingManager() storetypes.StreamingManager {
+    return app.streamingManager
+}
+
+// Close is called in start cmd to gracefully cleanup resources.
+func (app *BaseApp) Close() error {
+    var errs []error
+
+    // Close app.db (opened by cosmos-sdk/server/start.go call to openDB)
+    if app.db != nil {
+        app.logger.Info("Closing application.db")
+        if err := app.db.Close(); err != nil {
+            errs = append(errs, err)
+        }
+    }
+
+    // Close app.snapshotManager
+    // - opened when app chains use cosmos-sdk/server/util.go/DefaultBaseappOptions (boilerplate)
+    // - which calls cosmos-sdk/server/util.go/GetSnapshotStore
+    // - which is passed to baseapp/options.go/SetSnapshot
+    // - to set app.snapshotManager = snapshots.NewManager
+    if app.snapshotManager != nil {
+        app.logger.Info("Closing snapshots/metadata.db")
+        if err := app.snapshotManager.Close(); err != nil {
+            errs = append(errs, err)
+        }
+    }
+
+    return errors.Join(errs...)
+}
+
+// GetBaseApp returns the pointer to itself.
+func (app *BaseApp) GetBaseApp() *BaseApp {
+    return app
+}
+```
+
+Transaction execution within `FinalizeBlock` performs the exact same steps as `CheckTx`, with a little caveat at step 3 and the addition of a fifth step:
+
+1. The `AnteHandler` does not check that the transaction's `gas-prices` is sufficient. That is because the `min-gas-prices` value `gas-prices` is checked against is local to the node, and therefore what is enough for one full-node might not be for another. This means that the proposer can potentially include transactions for free, although they are not incentivised to do so, as they earn a bonus on the total fee of the block they propose.
+2. For each `sdk.Msg` in the transaction, route to the appropriate module's Protobuf [`Msg` service](/sdk/v0.53/build/building-modules/msg-services). Additional stateful checks are performed, and the branched multistore held in `finalizeBlockState`'s `context` is updated by the module's `keeper`. If the `Msg` service returns successfully, the branched multistore held in `context` is written to `finalizeBlockState` `CacheMultiStore`.
+
+During the additional fifth step outlined in (2), each read/write to the store increases the value of `GasConsumed`. You can find the default cost of each operation:
+
+```go expandable
+package types
+
+import (
+
+ "fmt"
+ "math"
+)
+
+// Gas consumption descriptors.
+const (
+    GasIterNextCostFlatDesc = "IterNextFlat"
+    GasValuePerByteDesc     = "ValuePerByte"
+    GasWritePerByteDesc     = "WritePerByte"
+    GasReadPerByteDesc      = "ReadPerByte"
+    GasWriteCostFlatDesc    = "WriteFlat"
+    GasReadCostFlatDesc     = "ReadFlat"
+    GasHasDesc              = "Has"
+    GasDeleteDesc           = "Delete"
+)
+
+// Gas measured by the SDK
+type Gas = uint64
+
+// ErrorNegativeGasConsumed defines an error thrown when the amount of gas refunded results in a
+// negative gas consumed amount.
+type ErrorNegativeGasConsumed struct {
+    Descriptor string
+}
+
+// ErrorOutOfGas defines an error thrown when an action results in out of gas.
+type ErrorOutOfGas struct {
+    Descriptor string
+}
+
+// ErrorGasOverflow defines an error thrown when an action results gas consumption
+// unsigned integer overflow.
+type ErrorGasOverflow struct {
+    Descriptor string
+}
+
+// GasMeter interface to track gas consumption
+type GasMeter interface {
+    GasConsumed() Gas
+    GasConsumedToLimit() Gas
+    GasRemaining() Gas
+    Limit() Gas
+    ConsumeGas(amount Gas, descriptor string)
+    RefundGas(amount Gas, descriptor string)
+    IsPastLimit() bool
+    IsOutOfGas() bool
+    String() string
+}
+
+// basicGasMeter is a finite GasMeter backed by a fixed limit.
+type basicGasMeter struct {
+    limit    Gas
+    consumed Gas
+}
+
+// NewGasMeter returns a reference to a new basicGasMeter.
+func NewGasMeter(limit Gas) GasMeter {
+    return &basicGasMeter{
+        limit:    limit,
+        consumed: 0,
+    }
+}
+
+// GasConsumed returns the gas consumed from the GasMeter.
+func (g *basicGasMeter) GasConsumed() Gas {
+    return g.consumed
+}
+
+// GasRemaining returns the gas left in the GasMeter.
+func (g *basicGasMeter) GasRemaining() Gas {
+    if g.IsPastLimit() {
+        return 0
+    }
+
+    return g.limit - g.consumed
+}
+
+// Limit returns the gas limit of the GasMeter.
+func (g *basicGasMeter) Limit() Gas {
+    return g.limit
+}
+
+// GasConsumedToLimit returns the gas limit if gas consumed is past the limit,
+// otherwise it returns the consumed gas.
+//
+// NOTE: This behavior is only called when recovering from panic when
+// BlockGasMeter consumes gas past the limit.
+func (g *basicGasMeter) GasConsumedToLimit() Gas {
+    if g.IsPastLimit() {
+        return g.limit
+    }
+
+    return g.consumed
+}
+
+// addUint64Overflow performs the addition operation on two uint64 integers and
+// returns a boolean on whether or not the result overflows.
+func addUint64Overflow(a, b uint64) (uint64, bool) {
+    if math.MaxUint64-a < b {
+        return 0, true
+    }
+
+    return a + b, false
+}
+
+// ConsumeGas adds the given amount of gas to the gas consumed and panics if it overflows the limit or out of gas.
+func (g *basicGasMeter) ConsumeGas(amount Gas, descriptor string) {
+    var overflow bool
+    g.consumed, overflow = addUint64Overflow(g.consumed, amount)
+    if overflow {
+        g.consumed = math.MaxUint64
+        panic(ErrorGasOverflow{descriptor})
+    }
+
+    if g.consumed > g.limit {
+        panic(ErrorOutOfGas{descriptor})
+    }
+}
+
+// RefundGas will deduct the given amount from the gas consumed. If the amount is greater than the
+// gas consumed, the function will panic.
+//
+// Use case: This functionality enables refunding gas to the transaction or block gas pools so that
+// EVM-compatible chains can fully support the go-ethereum StateDb interface.
+// See https://github.com/cosmos/cosmos-sdk/pull/9403 for reference.
+func (g *basicGasMeter) RefundGas(amount Gas, descriptor string) {
+    if g.consumed < amount {
+        panic(ErrorNegativeGasConsumed{Descriptor: descriptor})
+    }
+
+    g.consumed -= amount
+}
+
+// IsPastLimit returns true if gas consumed is past limit, otherwise it returns false.
+func (g *basicGasMeter) IsPastLimit() bool {
+    return g.consumed > g.limit
+}
+
+// IsOutOfGas returns true if gas consumed is greater than or equal to gas limit, otherwise it returns false.
+func (g *basicGasMeter) IsOutOfGas() bool {
+    return g.consumed >= g.limit
+}
+
+// String returns the BasicGasMeter's gas limit and gas consumed.
+func (g *basicGasMeter) String() string {
+    return fmt.Sprintf("BasicGasMeter:\n limit: %d\n consumed: %d", g.limit, g.consumed)
+}
+
+// infiniteGasMeter is a GasMeter with no limit; it only tracks consumption.
+type infiniteGasMeter struct {
+    consumed Gas
+}
+
+// NewInfiniteGasMeter returns a new gas meter without a limit.
+func NewInfiniteGasMeter() GasMeter {
+    return &infiniteGasMeter{
+        consumed: 0,
+    }
+}
+
+// GasConsumed returns the gas consumed from the GasMeter.
+func (g *infiniteGasMeter) GasConsumed() Gas {
+    return g.consumed
+}
+
+// GasConsumedToLimit returns the gas consumed from the GasMeter since the gas is not confined to a limit.
+// NOTE: This behavior is only called when recovering from panic when BlockGasMeter consumes gas past the limit.
+func (g *infiniteGasMeter) GasConsumedToLimit() Gas {
+    return g.consumed
+}
+
+// GasRemaining returns MaxUint64 since limit is not confined in infiniteGasMeter.
+func (g *infiniteGasMeter) GasRemaining() Gas {
+    return math.MaxUint64
+}
+
+// Limit returns MaxUint64 since limit is not confined in infiniteGasMeter.
+func (g *infiniteGasMeter) Limit() Gas {
+    return math.MaxUint64
+}
+
+// ConsumeGas adds the given amount of gas to the gas consumed and panics if it overflows the limit.
+func (g *infiniteGasMeter) ConsumeGas(amount Gas, descriptor string) {
+    var overflow bool
+    // TODO: Should we set the consumed field after overflow checking?
+    g.consumed, overflow = addUint64Overflow(g.consumed, amount)
+    if overflow {
+        panic(ErrorGasOverflow{descriptor})
+    }
+}
+
+// RefundGas will deduct the given amount from the gas consumed. If the amount is greater than the
+// gas consumed, the function will panic.
+//
+// Use case: This functionality enables refunding gas to the transaction or block gas pools so that
+// EVM-compatible chains can fully support the go-ethereum StateDb interface.
+// See https://github.com/cosmos/cosmos-sdk/pull/9403 for reference.
+func (g *infiniteGasMeter) RefundGas(amount Gas, descriptor string) {
+    if g.consumed < amount {
+        panic(ErrorNegativeGasConsumed{Descriptor: descriptor})
+    }
+
+    g.consumed -= amount
+}
+
+// IsPastLimit returns false since the gas limit is not confined.
+func (g *infiniteGasMeter) IsPastLimit() bool {
+    return false
+}
+
+// IsOutOfGas returns false since the gas limit is not confined.
+func (g *infiniteGasMeter) IsOutOfGas() bool {
+    return false
+}
+
+// String returns the InfiniteGasMeter's gas consumed.
+func (g *infiniteGasMeter) String() string {
+    return fmt.Sprintf("InfiniteGasMeter:\n consumed: %d", g.consumed)
+}
+
+// GasConfig defines gas cost for each operation on KVStores
+type GasConfig struct {
+    HasCost          Gas
+    DeleteCost       Gas
+    ReadCostFlat     Gas
+    ReadCostPerByte  Gas
+    WriteCostFlat    Gas
+    WriteCostPerByte Gas
+    IterNextCostFlat Gas
+}
+
+// KVGasConfig returns a default gas config for KVStores.
+func KVGasConfig() GasConfig {
+    return GasConfig{
+        HasCost:          1000,
+        DeleteCost:       1000,
+        ReadCostFlat:     1000,
+        ReadCostPerByte:  3,
+        WriteCostFlat:    2000,
+        WriteCostPerByte: 30,
+        IterNextCostFlat: 30,
+    }
+}
+
+// TransientGasConfig returns a default gas config for TransientStores.
+func TransientGasConfig() GasConfig {
+    return GasConfig{
+        HasCost:          100,
+        DeleteCost:       100,
+        ReadCostFlat:     100,
+        ReadCostPerByte:  0,
+        WriteCostFlat:    200,
+        WriteCostPerByte: 3,
+        IterNextCostFlat: 3,
+    }
+}
+```
+
+At any point, if `GasConsumed > GasWanted`, the function returns with `Code != 0` and the execution fails.
+
+Each transaction returns a response to the underlying consensus engine of type [`abci.ExecTxResult`](https://github.com/cometbft/cometbft/blob/v0.38.0-rc1/spec/abci/abci%2B%2B_methods.md#exectxresult). The response contains:
+
+* `Code (uint32)`: Response Code. `0` if successful.
+* `Data ([]byte)`: Result bytes, if any.
+* `Log (string)`: The output of the application's logger. May be non-deterministic.
+* `Info (string)`: Additional information. May be non-deterministic.
+* `GasWanted (int64)`: Amount of gas requested for transaction. It is provided by users when they generate the transaction.
+* `GasUsed (int64)`: Amount of gas consumed by transaction. During transaction execution, this value is computed by multiplying the standard cost of a transaction byte by the size of the raw transaction, and by adding gas each time a read/write to the store occurs.
+* `Events ([]cmn.KVPair)`: Key-Value tags for filtering and indexing transactions (eg. by account). See [`event`s](/sdk/v0.53/learn/advanced/events) for more.
+* `Codespace (string)`: Namespace for the Code.
+
+#### EndBlock
+
+EndBlock is run after transaction execution completes. It allows developers to have logic be executed at the end of each block. In the Cosmos SDK, the bulk of the EndBlock() method is to run the application's EndBlocker(), which mainly runs the EndBlocker() method of each of the application's modules.
+
+```go expandable
+package baseapp
+
+import (
+
+ "context"
+ "fmt"
+ "maps"
+ "math"
+ "slices"
+ "strconv"
+ "sync"
+ "github.com/cockroachdb/errors"
+ abci "github.com/cometbft/cometbft/abci/types"
+ "github.com/cometbft/cometbft/crypto/tmhash"
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ dbm "github.com/cosmos/cosmos-db"
+ "github.com/cosmos/gogoproto/proto"
+ protov2 "google.golang.org/protobuf/proto"
+ "cosmossdk.io/core/header"
+ errorsmod "cosmossdk.io/errors"
+ "cosmossdk.io/log"
+ "cosmossdk.io/store"
+ storemetrics "cosmossdk.io/store/metrics"
+ "cosmossdk.io/store/snapshots"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/baseapp/oe"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ servertypes "github.com/cosmos/cosmos-sdk/server/types"
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/types/mempool"
+ "github.com/cosmos/cosmos-sdk/types/msgservice"
+)
+
+type (
+    execMode uint8
+
+    // StoreLoader defines a customizable function to control how we load the
+    // CommitMultiStore from disk. This is useful for state migration, when
+    // loading a datastore written with an older version of the software. In
+    // particular, if a module changed the substore key name (or removed a substore)
+    // between two versions of the software.
+    StoreLoader func(ms storetypes.CommitMultiStore) error
+)
+
+const (
+    execModeCheck               execMode = iota // Check a transaction
+    execModeReCheck                             // Recheck a (pending) transaction after a commit
+    execModeSimulate                            // Simulate a transaction
+    execModePrepareProposal                     // Prepare a block proposal
+    execModeProcessProposal                     // Process a block proposal
+    execModeVoteExtension                       // Extend or verify a pre-commit vote
+    execModeVerifyVoteExtension                 // Verify a vote extension
+    execModeFinalize                            // Finalize a block proposal
+)
+
+var _ servertypes.ABCI = (*BaseApp)(nil)
+
+// BaseApp reflects the ABCI application implementation.
+type BaseApp struct {
+    // initialized on creation
+    mu          sync.Mutex                  // mu protects the fields below.
+    logger      log.Logger
+    name        string                      // application name from abci.BlockInfo
+    db          dbm.DB                      // common DB backend
+    cms         storetypes.CommitMultiStore // Main (uncached) state
+    qms         storetypes.MultiStore       // Optional alternative multistore for querying only.
+    storeLoader StoreLoader                 // function to handle store loading, may be overridden with SetStoreLoader()
+
+    grpcQueryRouter   *GRPCQueryRouter  // router for redirecting gRPC query calls
+    msgServiceRouter  *MsgServiceRouter // router for redirecting Msg service messages
+    interfaceRegistry codectypes.InterfaceRegistry
+    txDecoder         sdk.TxDecoder // unmarshal []byte into sdk.Tx
+    txEncoder         sdk.TxEncoder // marshal sdk.Tx into []byte
+
+    mempool     mempool.Mempool // application side mempool
+    anteHandler sdk.AnteHandler // ante handler for fee and auth
+    postHandler sdk.PostHandler // post handler, optional
+
+    checkTxHandler     sdk.CheckTxHandler             // ABCI CheckTx handler
+    initChainer        sdk.InitChainer                // ABCI InitChain handler
+    preBlocker         sdk.PreBlocker                 // logic to run before BeginBlocker
+    beginBlocker       sdk.BeginBlocker               // (legacy ABCI) BeginBlock handler
+    endBlocker         sdk.EndBlocker                 // (legacy ABCI) EndBlock handler
+    processProposal    sdk.ProcessProposalHandler     // ABCI ProcessProposal handler
+    prepareProposal    sdk.PrepareProposalHandler     // ABCI PrepareProposal
+    extendVote         sdk.ExtendVoteHandler          // ABCI ExtendVote handler
+    verifyVoteExt      sdk.VerifyVoteExtensionHandler // ABCI VerifyVoteExtension handler
+    prepareCheckStater sdk.PrepareCheckStater         // logic to run during commit using the checkState
+    precommiter        sdk.Precommiter                // logic to run during commit using the deliverState
+
+    addrPeerFilter sdk.PeerFilter // filter peers by address and port
+    idPeerFilter   sdk.PeerFilter // filter peers by node ID
+    fauxMerkleMode bool           // if true, IAVL MountStores uses MountStoresDB for simulation speed.
+    sigverifyTx    bool           // in the simulation test, since the account does not have a private key, we have to ignore the tx sigverify.
+
+    // manages snapshots, i.e. dumps of app state at certain intervals
+    snapshotManager *snapshots.Manager
+
+    // volatile states:
+    //
+    // - checkState is set on InitChain and reset on Commit
+    // - finalizeBlockState is set on InitChain and FinalizeBlock and set to nil
+    //   on Commit.
+    //
+    // - checkState: Used for CheckTx, which is set based on the previous block's
+    //   state. This state is never committed.
+    //
+    // - prepareProposalState: Used for PrepareProposal, which is set based on the
+    //   previous block's state. This state is never committed. In case of multiple
+    //   consensus rounds, the state is always reset to the previous block's state.
+    //
+    // - processProposalState: Used for ProcessProposal, which is set based on the
+    //   the previous block's state. This state is never committed. In case of
+    //   multiple rounds, the state is always reset to the previous block's state.
+    //
+    // - finalizeBlockState: Used for FinalizeBlock, which is set based on the
+    //   previous block's state. This state is committed.
+    checkState           *state
+    prepareProposalState *state
+    processProposalState *state
+    finalizeBlockState   *state
+
+    // An inter-block write-through cache provided to the context during the ABCI
+    // FinalizeBlock call.
+    interBlockCache storetypes.MultiStorePersistentCache
+
+    // paramStore is used to query for ABCI consensus parameters from an
+    // application parameter store.
+    paramStore ParamStore
+
+    // queryGasLimit defines the maximum gas for queries; unbounded if 0.
+    queryGasLimit uint64
+
+    // The minimum gas prices a validator is willing to accept for processing a
+    // transaction. This is mainly used for DoS and spam prevention.
+    minGasPrices sdk.DecCoins
+
+    // initialHeight is the initial height at which we start the BaseApp
+    initialHeight int64
+
+    // flag for sealing options and parameters to a BaseApp
+    sealed bool
+
+    // block height at which to halt the chain and gracefully shutdown
+    haltHeight uint64
+
+    // minimum block time (in Unix seconds) at which to halt the chain and
+    // gracefully shutdown
+    haltTime uint64
+
+    // minRetainBlocks defines the minimum block height offset from the current
+    // block being committed, such that all blocks past this offset are pruned
+    // from CometBFT. It is used as part of the process of determining the
+    // ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates
+    // that no blocks should be pruned.
+    //
+    // Note: CometBFT block pruning is dependant on this parameter in conjunction
+    // with the unbonding (safety threshold) period, state pruning and state sync
+    // snapshot parameters to determine the correct minimum value of
+    // ResponseCommit.RetainHeight.
+    minRetainBlocks uint64
+
+    // application's version string
+    version string
+
+    // application's protocol version that increments on every upgrade
+    // if BaseApp is passed to the upgrade keeper's NewKeeper method.
+    appVersion uint64
+
+    // recovery handler for app.runTx method
+    runTxRecoveryMiddleware recoveryMiddleware
+
+    // trace set will return full stack traces for errors in ABCI Log field
+    trace bool
+
+    // indexEvents defines the set of events in the form {eventType}.{attributeKey},
+    // which informs CometBFT what to index. If empty, all events will be indexed.
+    indexEvents map[string]struct{}
+
+    // streamingManager for managing instances and configuration of ABCIListener services
+    streamingManager storetypes.StreamingManager
+
+    chainID string
+
+    cdc codec.Codec
+
+    // optimisticExec contains the context required for Optimistic Execution,
+    // including the goroutine handling. This is experimental and must be enabled
+    // by developers.
+    optimisticExec *oe.OptimisticExecution
+
+    // disableBlockGasMeter will disable the block gas meter if true, block gas meter is tricky to support
+    // when executing transactions in parallel.
+    // when disabled, the block gas meter in context is a noop one.
+    //
+    // SAFETY: it's safe to do if validators validate the total gas wanted in the `ProcessProposal`, which is the case in the default handler.
+    disableBlockGasMeter bool
+}
+
+// NewBaseApp returns a reference to an initialized BaseApp. It accepts a
+// variadic number of option functions, which act on the BaseApp to set
+// configuration choices.
+func NewBaseApp(
+    name string, logger log.Logger, db dbm.DB, txDecoder sdk.TxDecoder, options ...func(*BaseApp),
+) *BaseApp {
+    app := &BaseApp{
+        logger:           logger.With(log.ModuleKey, "baseapp"),
+        name:             name,
+        db:               db,
+        cms:              store.NewCommitMultiStore(db, logger, storemetrics.NewNoOpMetrics()), // by default we use a no-op metric gather in store
+        storeLoader:      DefaultStoreLoader,
+        grpcQueryRouter:  NewGRPCQueryRouter(),
+        msgServiceRouter: NewMsgServiceRouter(),
+        txDecoder:        txDecoder,
+        fauxMerkleMode:   false,
+        sigverifyTx:      true,
+        queryGasLimit:    math.MaxUint64,
+    }
+
+    for _, option := range options {
+        option(app)
+    }
+
+    if app.mempool == nil {
+        app.SetMempool(mempool.NoOpMempool{})
+    }
+
+    abciProposalHandler := NewDefaultProposalHandler(app.mempool, app)
+    if app.prepareProposal == nil {
+        app.SetPrepareProposal(abciProposalHandler.PrepareProposalHandler())
+    }
+    if app.processProposal == nil {
+        app.SetProcessProposal(abciProposalHandler.ProcessProposalHandler())
+    }
+    if app.extendVote == nil {
+        app.SetExtendVoteHandler(NoOpExtendVote())
+    }
+    if app.verifyVoteExt == nil {
+        app.SetVerifyVoteExtensionHandler(NoOpVerifyVoteExtensionHandler())
+    }
+    if app.interBlockCache != nil {
+        app.cms.SetInterBlockCache(app.interBlockCache)
+    }
+
+    app.runTxRecoveryMiddleware = newDefaultRecoveryMiddleware()
+
+    // Initialize with an empty interface registry to avoid nil pointer dereference.
+    // Unless SetInterfaceRegistry is called with an interface registry with proper address codecs baseapp will panic.
+    app.cdc = codec.NewProtoCodec(codectypes.NewInterfaceRegistry())
+
+    protoFiles, err := proto.MergedRegistry()
+    if err != nil {
+        logger.Warn("error creating merged proto registry", "error", err)
+    } else {
+        err = msgservice.ValidateProtoAnnotations(protoFiles)
+        if err != nil {
+            // Once we switch to using protoreflect-based antehandlers, we might
+            // want to panic here instead of logging a warning.
+            logger.Warn("error validating merged proto registry annotations", "error", err)
+        }
+    }
+
+    return app
+}
+
+// Name returns the name of the BaseApp.
+func (app *BaseApp) Name() string {
+    return app.name
+}
+
+// AppVersion returns the application's protocol version.
+func (app *BaseApp) AppVersion() uint64 {
+    return app.appVersion
+}
+
+// Version returns the application's version string.
+func (app *BaseApp) Version() string {
+    return app.version
+}
+
+// Logger returns the logger of the BaseApp.
+func (app *BaseApp) Logger() log.Logger {
+    return app.logger
+}
+
+// Trace returns the boolean value for logging error stack traces.
+func (app *BaseApp) Trace() bool {
+    return app.trace
+}
+
+// MsgServiceRouter returns the MsgServiceRouter of a BaseApp.
+func (app *BaseApp) MsgServiceRouter() *MsgServiceRouter {
+    return app.msgServiceRouter
+}
+
+// GRPCQueryRouter returns the GRPCQueryRouter of a BaseApp.
+func (app *BaseApp) GRPCQueryRouter() *GRPCQueryRouter {
+    return app.grpcQueryRouter
+}
+
+// MountStores mounts all IAVL or DB stores to the provided keys in the BaseApp
+// multistore.
+func (app *BaseApp) MountStores(keys ...storetypes.StoreKey) {
+    for _, key := range keys {
+        switch key.(type) {
+        case *storetypes.KVStoreKey:
+            if !app.fauxMerkleMode {
+                app.MountStore(key, storetypes.StoreTypeIAVL)
+            } else {
+                // StoreTypeDB doesn't do anything upon commit, and it doesn't
+                // retain history, but it's useful for faster simulation.
+                app.MountStore(key, storetypes.StoreTypeDB)
+            }
+
+        case *storetypes.TransientStoreKey:
+            app.MountStore(key, storetypes.StoreTypeTransient)
+
+        case *storetypes.MemoryStoreKey:
+            app.MountStore(key, storetypes.StoreTypeMemory)
+
+        default:
+            panic(fmt.Sprintf("Unrecognized store key type :%T", key))
+        }
+    }
+}
+
+// MountKVStores mounts all IAVL or DB stores to the provided keys in the
+// BaseApp multistore.
+func (app *BaseApp) MountKVStores(keys map[string]*storetypes.KVStoreKey) {
+    for _, key := range keys {
+        if !app.fauxMerkleMode {
+            app.MountStore(key, storetypes.StoreTypeIAVL)
+        } else {
+            // StoreTypeDB doesn't do anything upon commit, and it doesn't
+            // retain history, but it's useful for faster simulation.
+            app.MountStore(key, storetypes.StoreTypeDB)
+        }
+    }
+}
+
+// MountTransientStores mounts all transient stores to the provided keys in
+// the BaseApp multistore.
+func (app *BaseApp) MountTransientStores(keys map[string]*storetypes.TransientStoreKey) {
+    for _, key := range keys {
+        app.MountStore(key, storetypes.StoreTypeTransient)
+    }
+}
+
+// MountMemoryStores mounts all in-memory KVStores with the BaseApp's internal
+// commit multi-store.
+func (app *BaseApp) MountMemoryStores(keys map[string]*storetypes.MemoryStoreKey) {
+    skeys := slices.Sorted(maps.Keys(keys))
+    for _, key := range skeys {
+        memKey := keys[key]
+        app.MountStore(memKey, storetypes.StoreTypeMemory)
+    }
+}
+
+// MountStore mounts a store to the provided key in the BaseApp multistore,
+// using the default DB.
+func (app *BaseApp) MountStore(key storetypes.StoreKey, typ storetypes.StoreType) {
+    app.cms.MountStoreWithDB(key, typ, nil)
+}
+
+// LoadLatestVersion loads the latest application version. It will panic if
+// called more than once on a running BaseApp.
+func (app *BaseApp) LoadLatestVersion() error {
+    err := app.storeLoader(app.cms)
+    if err != nil {
+        return fmt.Errorf("failed to load latest version: %w", err)
+    }
+
+    return app.Init()
+}
+
+// DefaultStoreLoader will be used by default and loads the latest version
+func DefaultStoreLoader(ms storetypes.CommitMultiStore) error {
+    return ms.LoadLatestVersion()
+}
+
+// CommitMultiStore returns the root multi-store.
+// App constructor can use this to access the `cms`.
+// UNSAFE: must not be used during the abci life cycle.
+func (app *BaseApp) CommitMultiStore() storetypes.CommitMultiStore {
+    return app.cms
+}
+
+// SnapshotManager returns the snapshot manager.
+// application use this to register extra extension snapshotters.
+func (app *BaseApp) SnapshotManager() *snapshots.Manager {
+    return app.snapshotManager
+}
+
+// LoadVersion loads the BaseApp application version. It will panic if called
+// more than once on a running baseapp.
+func (app *BaseApp) LoadVersion(version int64) error {
+    app.logger.Info("NOTICE: this could take a long time to migrate IAVL store to fastnode if you enable Fast Node.\n")
+    err := app.cms.LoadVersion(version)
+    if err != nil {
+        return fmt.Errorf("failed to load version %d: %w", version, err)
+    }
+
+    return app.Init()
+}
+
+// LastCommitID returns the last CommitID of the multistore.
+func (app *BaseApp) LastCommitID() storetypes.CommitID {
+    return app.cms.LastCommitID()
+}
+
+// LastBlockHeight returns the last committed block height.
+func (app *BaseApp) LastBlockHeight() int64 {
+    return app.cms.LastCommitID().Version
+}
+
+// ChainID returns the chainID of the app.
+func (app *BaseApp) ChainID() string {
+    return app.chainID
+}
+
+// AnteHandler returns the AnteHandler of the app.
+func (app *BaseApp) AnteHandler() sdk.AnteHandler {
+    return app.anteHandler
+}
+
+// Mempool returns the Mempool of the app.
+func (app *BaseApp) Mempool() mempool.Mempool {
+    return app.mempool
+}
+
+// Init initializes the app. It seals the app, preventing any
+// further modifications. In addition, it validates the app against
+// the earlier provided settings. Returns an error if validation fails.
+// nil otherwise. Panics if the app is already sealed.
+func (app *BaseApp) Init() error {
+    if app.sealed {
+        panic("cannot call initFromMainStore: baseapp already sealed")
+    }
+
+    if app.cms == nil {
+        return errors.New("commit multi-store must not be nil")
+    }
+
+    emptyHeader := cmtproto.Header{ChainID: app.chainID}
+
+    // needed for the export command which inits from store but never calls initchain
+    app.setState(execModeCheck, emptyHeader)
+    app.Seal()
+
+    return app.cms.GetPruning().Validate()
+}
+
+func (app *BaseApp) setMinGasPrices(gasPrices sdk.DecCoins) {
+    app.minGasPrices = gasPrices
+}
+
+func (app *BaseApp) setHaltHeight(haltHeight uint64) {
+    app.haltHeight = haltHeight
+}
+
+func (app *BaseApp) setHaltTime(haltTime uint64) {
+    app.haltTime = haltTime
+}
+
+func (app *BaseApp) setMinRetainBlocks(minRetainBlocks uint64) {
+    app.minRetainBlocks = minRetainBlocks
+}
+
+func (app *BaseApp) setInterBlockCache(cache storetypes.MultiStorePersistentCache) {
+    app.interBlockCache = cache
+}
+
+func (app *BaseApp) setTrace(trace bool) {
+    app.trace = trace
+}
+
+func (app *BaseApp) setIndexEvents(ie []string) {
+    app.indexEvents = make(map[string]struct{})
+    for _, e := range ie {
+        app.indexEvents[e] = struct{}{}
+    }
+}
+
+// Seal seals a BaseApp. It prohibits any further modifications to a BaseApp.
+func (app *BaseApp) Seal() {
+    app.sealed = true
+}
+
+// IsSealed returns true if the BaseApp is sealed and false otherwise.
+func (app *BaseApp) IsSealed() bool {
+    return app.sealed
+}
+
+// setState sets the BaseApp's state for the corresponding mode with a branched
+// multi-store (i.e. a CacheMultiStore) and a new Context with the same
+// multi-store branch, and provided header.
+func (app *BaseApp) setState(mode execMode, h cmtproto.Header) {
+    ms := app.cms.CacheMultiStore()
+    headerInfo := header.Info{
+        Height:  h.Height,
+        Time:    h.Time,
+        ChainID: h.ChainID,
+        AppHash: h.AppHash,
+    }
+    baseState := &state{
+        ms: ms,
+        ctx: sdk.NewContext(ms, h, false, app.logger).
+            WithStreamingManager(app.streamingManager).
+            WithHeaderInfo(headerInfo),
+    }
+
+    switch mode {
+    case execModeCheck:
+        baseState.SetContext(baseState.Context().WithIsCheckTx(true).WithMinGasPrices(app.minGasPrices))
+        app.checkState = baseState
+
+    case execModePrepareProposal:
+        app.prepareProposalState = baseState
+
+    case execModeProcessProposal:
+        app.processProposalState = baseState
+
+    case execModeFinalize:
+        app.finalizeBlockState = baseState
+
+    default:
+        panic(fmt.Sprintf("invalid runTxMode for setState: %d", mode))
+    }
+}
+
+// SetCircuitBreaker sets the circuit breaker for the BaseApp.
+// The circuit breaker is checked on every message execution to verify if a transaction should be executed or not.
+func (app *BaseApp) SetCircuitBreaker(cb CircuitBreaker) {
+	if app.msgServiceRouter == nil {
+		panic("cannot set circuit breaker with no msg service router set")
+	}
+	app.msgServiceRouter.SetCircuit(cb)
+}
+
+// GetConsensusParams returns the current consensus parameters from the BaseApp's
+// ParamStore. If the BaseApp has no ParamStore defined, an empty value is returned.
+func (app *BaseApp) GetConsensusParams(ctx sdk.Context) cmtproto.ConsensusParams {
+	if app.paramStore == nil {
+		return cmtproto.ConsensusParams{}
+	}
+
+	cp, err := app.paramStore.Get(ctx)
+	if err != nil {
+		// This could happen while migrating from v0.45/v0.46 to v0.50, we should
+		// allow it to happen so during preblock the upgrade plan can be executed
+		// and the consensus params set for the first time in the new format.
+		app.logger.Error("failed to get consensus params", "err", err)
+		return cmtproto.ConsensusParams{}
+	}
+
+	return cp
+}
+
+// StoreConsensusParams sets the consensus parameters to the BaseApp's param
+// store.
+//
+// NOTE: We're explicitly not storing the CometBFT app_version in the param store.
+// It's stored instead in the x/upgrade store, with its own bump logic.
+func (app *BaseApp) StoreConsensusParams(ctx sdk.Context, cp cmtproto.ConsensusParams) error {
+	if app.paramStore == nil {
+		return errors.New("cannot store consensus params with no params store set")
+	}
+	return app.paramStore.Set(ctx, cp)
+}
+
+// AddRunTxRecoveryHandler adds custom app.runTx method panic handlers.
+// Handlers are chained; the most recently added handler runs first.
+func (app *BaseApp) AddRunTxRecoveryHandler(handlers ...RecoveryHandler) {
+	for _, h := range handlers {
+		app.runTxRecoveryMiddleware = newRecoveryMiddleware(h, app.runTxRecoveryMiddleware)
+	}
+}
+
+// GetMaximumBlockGas gets the maximum gas from the consensus params. It panics
+// if maximum block gas is less than negative one and returns zero if negative
+// one (which denotes "no limit" in CometBFT consensus params).
+func (app *BaseApp) GetMaximumBlockGas(ctx sdk.Context) uint64 {
+	cp := app.GetConsensusParams(ctx)
+	if cp.Block == nil {
+		return 0
+	}
+
+	maxGas := cp.Block.MaxGas
+
+	switch {
+	case maxGas < -1:
+		panic(fmt.Sprintf("invalid maximum block gas: %d", maxGas))
+	case maxGas == -1:
+		return 0
+	default:
+		return uint64(maxGas)
+	}
+}
+
+// validateFinalizeBlockHeight checks that the height in a RequestFinalizeBlock
+// matches the next expected height: the configured initial height for the
+// chain's first block, or last committed height + 1 otherwise.
+func (app *BaseApp) validateFinalizeBlockHeight(req *abci.RequestFinalizeBlock) error {
+	if req.Height < 1 {
+		return fmt.Errorf("invalid height: %d", req.Height)
+	}
+
+	lastBlockHeight := app.LastBlockHeight()
+
+	// expectedHeight holds the expected height to validate
+	var expectedHeight int64
+	if lastBlockHeight == 0 && app.initialHeight > 1 {
+		// In this case, we're validating the first block of the chain, i.e no
+		// previous commit. The height we're expecting is the initial height.
+		expectedHeight = app.initialHeight
+	} else {
+		// This case can mean two things:
+		//
+		// - Either there was already a previous commit in the store, in which
+		//   case we increment the version from there.
+		// - Or there was no previous commit, in which case we start at version 1.
+		expectedHeight = lastBlockHeight + 1
+	}
+
+	if req.Height != expectedHeight {
+		return fmt.Errorf("invalid height: %d; expected: %d", req.Height, expectedHeight)
+	}
+
+	return nil
+}
+
+// validateBasicTxMsgs executes basic validator calls for messages. Messages
+// that do not implement sdk.HasValidateBasic are skipped.
+func validateBasicTxMsgs(msgs []sdk.Msg) error {
+	if len(msgs) == 0 {
+		return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "must contain at least one message")
+	}
+
+	for _, msg := range msgs {
+		m, ok := msg.(sdk.HasValidateBasic)
+		if !ok {
+			continue
+		}
+		if err := m.ValidateBasic(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// getState returns the cached state for the given execution mode, defaulting
+// to checkState for any mode without a dedicated state.
+func (app *BaseApp) getState(mode execMode) *state {
+	switch mode {
+	case execModeFinalize:
+		return app.finalizeBlockState
+	case execModePrepareProposal:
+		return app.prepareProposalState
+	case execModeProcessProposal:
+		return app.processProposalState
+	default:
+		return app.checkState
+	}
+}
+
+// getBlockGasMeter returns the gas meter used to cap total gas in a block:
+// a no-op meter when block gas metering is disabled, a bounded meter when the
+// consensus params define a positive max gas, and an infinite meter otherwise.
+func (app *BaseApp) getBlockGasMeter(ctx sdk.Context) storetypes.GasMeter {
+	if app.disableBlockGasMeter {
+		return noopGasMeter{}
+	}
+	if maxGas := app.GetMaximumBlockGas(ctx); maxGas > 0 {
+		return storetypes.NewGasMeter(maxGas)
+	}
+	return storetypes.NewInfiniteGasMeter()
+}
+
+// getContextForTx retrieves the context for the tx w/ txBytes and other memoized values.
+func (app *BaseApp) getContextForTx(mode execMode, txBytes []byte) sdk.Context {
+	app.mu.Lock()
+	defer app.mu.Unlock()
+
+	modeState := app.getState(mode)
+	if modeState == nil {
+		panic(fmt.Sprintf("state is nil for mode %v", mode))
+	}
+
+	ctx := modeState.Context().
+		WithTxBytes(txBytes).
+		WithGasMeter(storetypes.NewInfiniteGasMeter())
+	// WithVoteInfos(app.voteInfos) // TODO: identify if this is needed
+
+	ctx = ctx.WithIsSigverifyTx(app.sigverifyTx)
+	ctx = ctx.WithConsensusParams(app.GetConsensusParams(ctx))
+
+	if mode == execModeReCheck {
+		ctx = ctx.WithIsReCheckTx(true)
+	}
+
+	if mode == execModeSimulate {
+		// Simulation runs against a throwaway branch so it cannot mutate state.
+		ctx, _ = ctx.CacheContext()
+		ctx = ctx.WithExecMode(sdk.ExecMode(execModeSimulate))
+	}
+
+	return ctx
+}
+
+// cacheTxContext returns a new context based off of the provided context with
+// a branched multi-store. When store tracing is enabled, the tx hash is
+// recorded in the trace context.
+func (app *BaseApp) cacheTxContext(ctx sdk.Context, txBytes []byte) (sdk.Context, storetypes.CacheMultiStore) {
+	ms := ctx.MultiStore()
+	msCache := ms.CacheMultiStore()
+	if msCache.TracingEnabled() {
+		msCache = msCache.SetTracingContext(
+			storetypes.TraceContext(
+				map[string]any{
+					"txHash": fmt.Sprintf("%X", tmhash.Sum(txBytes)),
+				},
+			),
+		).(storetypes.CacheMultiStore)
+	}
+
+	return ctx.WithMultiStore(msCache), msCache
+}
+
+// preBlock runs the app's preBlocker (if set) and, when the preBlocker reports
+// a consensus params change, refreshes the consensus params and block gas
+// meter on the finalize-block state. It returns the ABCI events emitted.
+func (app *BaseApp) preBlock(req *abci.RequestFinalizeBlock) ([]abci.Event, error) {
+	var events []abci.Event
+	if app.preBlocker != nil {
+		ctx := app.finalizeBlockState.Context().WithEventManager(sdk.NewEventManager())
+		rsp, err := app.preBlocker(ctx, req)
+		if err != nil {
+			return nil, err
+		}
+		// rsp.ConsensusParamsChanged is true from preBlocker means ConsensusParams in store get changed
+		// write the consensus parameters in store to context
+		if rsp.ConsensusParamsChanged {
+			ctx = ctx.WithConsensusParams(app.GetConsensusParams(ctx))
+			// GasMeter must be set after we get a context with updated consensus params.
+			gasMeter := app.getBlockGasMeter(ctx)
+			ctx = ctx.WithBlockGasMeter(gasMeter)
+			app.finalizeBlockState.SetContext(ctx)
+		}
+		events = ctx.EventManager().ABCIEvents()
+	}
+	return events, nil
+}
+
+// beginBlock runs the app's registered beginBlocker (if any), tags every
+// emitted event with a mode=BeginBlock attribute, and applies the
+// index-events filter to the result.
+func (app *BaseApp) beginBlock(_ *abci.RequestFinalizeBlock) (sdk.BeginBlock, error) {
+	var (
+		resp sdk.BeginBlock
+		err  error
+	)
+
+	if app.beginBlocker != nil {
+		resp, err = app.beginBlocker(app.finalizeBlockState.Context())
+		if err != nil {
+			return resp, err
+		}
+
+		// append BeginBlock attributes to all events in the BeginBlock response
+		for i, event := range resp.Events {
+			resp.Events[i].Attributes = append(
+				event.Attributes,
+				abci.EventAttribute{Key: "mode", Value: "BeginBlock"},
+			)
+		}
+
+		resp.Events = sdk.MarkEventsToIndex(resp.Events, app.indexEvents)
+	}
+
+	return resp, nil
+}
+
+// deliverTx executes raw transaction bytes in execModeFinalize and translates
+// the result (or error) into an ABCI ExecTxResult. Tx count/gas telemetry is
+// emitted on every path via the deferred closure.
+func (app *BaseApp) deliverTx(tx []byte) *abci.ExecTxResult {
+	gInfo := sdk.GasInfo{}
+	resultStr := "successful"
+
+	var resp *abci.ExecTxResult
+
+	defer func() {
+		telemetry.IncrCounter(1, "tx", "count")
+		telemetry.IncrCounter(1, "tx", resultStr)
+		telemetry.SetGauge(float32(gInfo.GasUsed), "tx", "gas", "used")
+		telemetry.SetGauge(float32(gInfo.GasWanted), "tx", "gas", "wanted")
+	}()
+
+	gInfo, result, anteEvents, err := app.runTx(execModeFinalize, tx, nil)
+	if err != nil {
+		resultStr = "failed"
+		resp = sdkerrors.ResponseExecTxResultWithEvents(
+			err,
+			gInfo.GasWanted,
+			gInfo.GasUsed,
+			sdk.MarkEventsToIndex(anteEvents, app.indexEvents),
+			app.trace,
+		)
+		return resp
+	}
+
+	resp = &abci.ExecTxResult{
+		GasWanted: int64(gInfo.GasWanted),
+		GasUsed:   int64(gInfo.GasUsed),
+		Log:       result.Log,
+		Data:      result.Data,
+		Events:    sdk.MarkEventsToIndex(result.Events, app.indexEvents),
+	}
+
+	return resp
+}
+
+// endBlock is an application-defined function that is called after transactions
+// have been processed in FinalizeBlock. Emitted events are tagged with a
+// mode=EndBlock attribute and filtered through the index-events set.
+func (app *BaseApp) endBlock(_ context.Context) (sdk.EndBlock, error) {
+	var endblock sdk.EndBlock
+
+	if app.endBlocker != nil {
+		eb, err := app.endBlocker(app.finalizeBlockState.Context())
+		if err != nil {
+			return endblock, err
+		}
+
+		// append EndBlock attributes to all events in the EndBlock response
+		for i, event := range eb.Events {
+			eb.Events[i].Attributes = append(
+				event.Attributes,
+				abci.EventAttribute{Key: "mode", Value: "EndBlock"},
+			)
+		}
+
+		eb.Events = sdk.MarkEventsToIndex(eb.Events, app.indexEvents)
+		endblock = eb
+	}
+
+	return endblock, nil
+}
+
+// runTx processes a transaction within a given execution mode, encoded transaction
+// bytes, and the decoded transaction itself. All state transitions occur through
+// a cached Context depending on the mode provided. State only gets persisted
+// if all messages get executed successfully and the execution mode is DeliverTx.
+// Note, gas execution info is always returned. A reference to a Result is
+// returned if the tx does not run out of gas and if all the messages are valid
+// and execute successfully. An error is returned otherwise.
+// Both txBytes and the decoded tx are passed to runTx to avoid the state machine
+// encoding the tx and decoding the transaction twice.
+// Passing the decoded tx to runTx is optional; it will be decoded if tx is nil.
+func (app *BaseApp) runTx(mode execMode, txBytes []byte, tx sdk.Tx) (gInfo sdk.GasInfo, result *sdk.Result, anteEvents []abci.Event, err error) {
+	// NOTE: GasWanted should be returned by the AnteHandler. GasUsed is
+	// determined by the GasMeter. We need access to the context to get the gas
+	// meter, so we initialize upfront.
+	var gasWanted uint64
+
+	ctx := app.getContextForTx(mode, txBytes)
+	ms := ctx.MultiStore()
+
+	// only run the tx if there is block gas remaining
+	if mode == execModeFinalize && ctx.BlockGasMeter().IsOutOfGas() {
+		return gInfo, nil, nil, errorsmod.Wrap(sdkerrors.ErrOutOfGas, "no block gas left to run tx")
+	}
+
+	defer func() {
+		if r := recover(); r != nil {
+			recoveryMW := newOutOfGasRecoveryMiddleware(gasWanted, ctx, app.runTxRecoveryMiddleware)
+			err, result = processRecovery(r, recoveryMW), nil
+			ctx.Logger().Error("panic recovered in runTx", "err", err)
+		}
+
+		gInfo = sdk.GasInfo{
+			GasWanted: gasWanted,
+			GasUsed:   ctx.GasMeter().GasConsumed(),
+		}
+	}()
+
+	blockGasConsumed := false
+
+	// consumeBlockGas makes sure block gas is consumed at most once. It must
+	// happen after tx processing, and must be executed even if tx processing
+	// fails. Hence, it's execution is deferred.
+	consumeBlockGas := func() {
+		if !blockGasConsumed {
+			blockGasConsumed = true
+			ctx.BlockGasMeter().ConsumeGas(
+				ctx.GasMeter().GasConsumedToLimit(), "block gas meter",
+			)
+		}
+	}
+
+	// If BlockGasMeter() panics it will be caught by the above recover and will
+	// return an error - in any case BlockGasMeter will consume gas past the limit.
+	//
+	// NOTE: consumeBlockGas must exist in a separate defer function from the
+	// general deferred recovery function to recover from consumeBlockGas as it'll
+	// be executed first (deferred statements are executed as stack).
+	if mode == execModeFinalize {
+		defer consumeBlockGas()
+	}
+
+	// if the transaction is not decoded, decode it here
+	if tx == nil {
+		tx, err = app.txDecoder(txBytes)
+		if err != nil {
+			return sdk.GasInfo{GasUsed: 0, GasWanted: 0}, nil, nil, sdkerrors.ErrTxDecode.Wrap(err.Error())
+		}
+	}
+
+	msgs := tx.GetMsgs()
+	if err := validateBasicTxMsgs(msgs); err != nil {
+		return sdk.GasInfo{}, nil, nil, err
+	}
+
+	for _, msg := range msgs {
+		handler := app.msgServiceRouter.Handler(msg)
+		if handler == nil {
+			return sdk.GasInfo{}, nil, nil, errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "no message handler found for %T", msg)
+		}
+	}
+
+	if app.anteHandler != nil {
+		var (
+			anteCtx sdk.Context
+			msCache storetypes.CacheMultiStore
+		)
+
+		// Branch context before AnteHandler call in case it aborts.
+		// This is required for both CheckTx and DeliverTx.
+		// Ref: https://github.com/cosmos/cosmos-sdk/issues/2772
+		//
+		// NOTE: Alternatively, we could require that AnteHandler ensures that
+		// writes do not happen if aborted/failed. This may have some
+		// performance benefits, but it'll be more difficult to get right.
+		anteCtx, msCache = app.cacheTxContext(ctx, txBytes)
+		anteCtx = anteCtx.WithEventManager(sdk.NewEventManager())
+
+		newCtx, err := app.anteHandler(anteCtx, tx, mode == execModeSimulate)
+		if !newCtx.IsZero() {
+			// At this point, newCtx.MultiStore() is a store branch, or something else
+			// replaced by the AnteHandler. We want the original multistore.
+			//
+			// Also, in the case of the tx aborting, we need to track gas consumed via
+			// the instantiated gas meter in the AnteHandler, so we update the context
+			// prior to returning.
+			ctx = newCtx.WithMultiStore(ms)
+		}
+
+		events := ctx.EventManager().Events()
+
+		// GasMeter expected to be set in AnteHandler
+		gasWanted = ctx.GasMeter().Limit()
+
+		if err != nil {
+			if mode == execModeReCheck {
+				// if the ante handler fails on recheck, we want to remove the tx from the mempool
+				if mempoolErr := app.mempool.Remove(tx); mempoolErr != nil {
+					return gInfo, nil, anteEvents, errors.Join(err, mempoolErr)
+				}
+			}
+			return gInfo, nil, nil, err
+		}
+
+		msCache.Write()
+		anteEvents = events.ToABCIEvents()
+	}
+
+	switch mode {
+	case execModeCheck:
+		err = app.mempool.Insert(ctx, tx)
+		if err != nil {
+			return gInfo, nil, anteEvents, err
+		}
+	case execModeFinalize:
+		err = app.mempool.Remove(tx)
+		if err != nil && !errors.Is(err, mempool.ErrTxNotFound) {
+			return gInfo, nil, anteEvents, fmt.Errorf("failed to remove tx from mempool: %w", err)
+		}
+	}
+
+	// Create a new Context based off of the existing Context with a MultiStore branch
+	// in case message processing fails. At this point, the MultiStore
+	// is a branch of a branch.
+	runMsgCtx, msCache := app.cacheTxContext(ctx, txBytes)
+
+	// Attempt to execute all messages and only update state if all messages pass
+	// and we're in DeliverTx. Note, runMsgs will never return a reference to a
+	// Result if any single message fails or does not have a registered Handler.
+	msgsV2, err := tx.GetMsgsV2()
+	if err == nil {
+		result, err = app.runMsgs(runMsgCtx, msgs, msgsV2, mode)
+	}
+
+	// Run optional postHandlers (should run regardless of the execution result).
+	//
+	// Note: If the postHandler fails, we also revert the runMsgs state.
+	if app.postHandler != nil {
+		// The runMsgCtx context currently contains events emitted by the ante handler.
+		// We clear this to correctly order events without duplicates.
+		// Note that the state is still preserved.
+		postCtx := runMsgCtx.WithEventManager(sdk.NewEventManager())
+
+		newCtx, errPostHandler := app.postHandler(postCtx, tx, mode == execModeSimulate, err == nil)
+		if errPostHandler != nil {
+			if err == nil {
+				// when the msg was handled successfully, return the post handler error only
+				return gInfo, nil, anteEvents, errPostHandler
+			}
+			// otherwise append to the msg error so that we keep the original error code for better user experience
+			return gInfo, nil, anteEvents, errorsmod.Wrapf(err, "postHandler: %s", errPostHandler)
+		}
+
+		// we don't want runTx to panic if runMsgs has failed earlier
+		if result == nil {
+			result = &sdk.Result{}
+		}
+		result.Events = append(result.Events, newCtx.EventManager().ABCIEvents()...)
+	}
+
+	if err == nil {
+		if mode == execModeFinalize {
+			// When block gas exceeds, it'll panic and won't commit the cached store.
+			consumeBlockGas()
+			msCache.Write()
+		}
+		if len(anteEvents) > 0 && (mode == execModeFinalize || mode == execModeSimulate) {
+			// append the events in the order of occurrence
+			result.Events = append(anteEvents, result.Events...)
+		}
+	}
+
+	return gInfo, result, anteEvents, err
+}
+
+// runMsgs iterates through a list of messages and executes them with the provided
+// Context and execution mode. Messages will only be executed during simulation
+// and DeliverTx. An error is returned if any single message fails or if a
+// Handler does not exist for a given message route. Otherwise, a reference to a
+// Result is returned. The caller must not commit state if an error is returned.
+func (app *BaseApp) runMsgs(ctx sdk.Context, msgs []sdk.Msg, msgsV2 []protov2.Message, mode execMode) (*sdk.Result, error) {
+	events := sdk.EmptyEvents()
+
+	var msgResponses []*codectypes.Any
+
+	// NOTE: GasWanted is determined by the AnteHandler and GasUsed by the GasMeter.
+	for i, msg := range msgs {
+		if mode != execModeFinalize && mode != execModeSimulate {
+			break
+		}
+
+		handler := app.msgServiceRouter.Handler(msg)
+		if handler == nil {
+			return nil, errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "no message handler found for %T", msg)
+		}
+
+		// ADR 031 request type routing
+		msgResult, err := handler(ctx, msg)
+		if err != nil {
+			return nil, errorsmod.Wrapf(err, "failed to execute message; message index: %d", i)
+		}
+
+		// create message events
+		msgEvents, err := createEvents(app.cdc, msgResult.GetEvents(), msg, msgsV2[i])
+		if err != nil {
+			return nil, errorsmod.Wrapf(err, "failed to create message events; message index: %d", i)
+		}
+
+		// append message events and data
+		//
+		// Note: Each message result's data must be length-prefixed in order to
+		// separate each result.
+		for j, event := range msgEvents {
+			// append message index to all events
+			msgEvents[j] = event.AppendAttributes(sdk.NewAttribute("msg_index", strconv.Itoa(i)))
+		}
+
+		events = events.AppendEvents(msgEvents)
+
+		// Each individual sdk.Result that went through the MsgServiceRouter
+		// (which should represent 99% of the Msgs now, since everyone should
+		// be using protobuf Msgs) has exactly one Msg response, set inside
+		// `WrapServiceResult`. We take that Msg response, and aggregate it
+		// into an array.
+		if len(msgResult.MsgResponses) > 0 {
+			msgResponse := msgResult.MsgResponses[0]
+			if msgResponse == nil {
+				return nil, sdkerrors.ErrLogic.Wrapf("got nil Msg response at index %d for msg %s", i, sdk.MsgTypeURL(msg))
+			}
+			msgResponses = append(msgResponses, msgResponse)
+		}
+	}
+
+	data, err := makeABCIData(msgResponses)
+	if err != nil {
+		return nil, errorsmod.Wrap(err, "failed to marshal tx data")
+	}
+
+	return &sdk.Result{
+		Data:         data,
+		Events:       events.ToABCIEvents(),
+		MsgResponses: msgResponses,
+	}, nil
+}
+
+// makeABCIData generates the Data field to be sent to ABCI Check/DeliverTx.
+func makeABCIData(msgResponses []*codectypes.Any) ([]byte, error) {
+	return proto.Marshal(&sdk.TxMsgData{MsgResponses: msgResponses})
+}
+
+// createEvents builds the standard "message" event (action, sender and module
+// attributes) for a msg and prepends it to the msg's own emitted events.
+func createEvents(cdc codec.Codec, events sdk.Events, msg sdk.Msg, msgV2 protov2.Message) (sdk.Events, error) {
+	eventMsgName := sdk.MsgTypeURL(msg)
+	msgEvent := sdk.NewEvent(sdk.EventTypeMessage, sdk.NewAttribute(sdk.AttributeKeyAction, eventMsgName))
+
+	// we set the signer attribute as the sender
+	signers, err := cdc.GetMsgV2Signers(msgV2)
+	if err != nil {
+		return nil, err
+	}
+	if len(signers) > 0 && signers[0] != nil {
+		addrStr, err := cdc.InterfaceRegistry().SigningContext().AddressCodec().BytesToString(signers[0])
+		if err != nil {
+			return nil, err
+		}
+		msgEvent = msgEvent.AppendAttributes(sdk.NewAttribute(sdk.AttributeKeySender, addrStr))
+	}
+
+	// verify that events have no module attribute set
+	if _, found := events.GetAttributes(sdk.AttributeKeyModule); !found {
+		if moduleName := sdk.GetModuleNameFromTypeURL(eventMsgName); moduleName != "" {
+			msgEvent = msgEvent.AppendAttributes(sdk.NewAttribute(sdk.AttributeKeyModule, moduleName))
+		}
+	}
+
+	return sdk.Events{msgEvent}.AppendEvents(events), nil
+}
+
+// PrepareProposalVerifyTx performs transaction verification when a proposer is
+// creating a block proposal during PrepareProposal. Any state committed to the
+// PrepareProposal state internally will be discarded. An error is returned if
+// the transaction cannot be encoded or fails verification; otherwise the
+// encoded transaction bytes are returned.
+func (app *BaseApp) PrepareProposalVerifyTx(tx sdk.Tx) ([]byte, error) {
+	bz, err := app.txEncoder(tx)
+	if err != nil {
+		return nil, err
+	}
+
+	_, _, _, err = app.runTx(execModePrepareProposal, bz, tx)
+	if err != nil {
+		return nil, err
+	}
+
+	return bz, nil
+}
+
+// ProcessProposalVerifyTx performs transaction verification when receiving a
+// block proposal during ProcessProposal. Any state committed to the
+// ProcessProposal state internally will be discarded. An error is returned if
+// the transaction cannot be decoded or fails verification; otherwise the
+// decoded transaction is returned.
+func (app *BaseApp) ProcessProposalVerifyTx(txBz []byte) (sdk.Tx, error) {
+	tx, err := app.txDecoder(txBz)
+	if err != nil {
+		return nil, err
+	}
+
+	_, _, _, err = app.runTx(execModeProcessProposal, txBz, tx)
+	if err != nil {
+		return nil, err
+	}
+
+	return tx, nil
+}
+
+// TxDecode decodes raw transaction bytes using the app's configured TxDecoder.
+func (app *BaseApp) TxDecode(txBytes []byte) (sdk.Tx, error) {
+	return app.txDecoder(txBytes)
+}
+
+// TxEncode encodes a transaction using the app's configured TxEncoder.
+func (app *BaseApp) TxEncode(tx sdk.Tx) ([]byte, error) {
+	return app.txEncoder(tx)
+}
+
+// StreamingManager returns the app's configured streaming manager.
+func (app *BaseApp) StreamingManager() storetypes.StreamingManager {
+	return app.streamingManager
+}
+
+// Close is called in start cmd to gracefully cleanup resources.
+// All close errors are collected and joined into a single error.
+func (app *BaseApp) Close() error {
+	var errs []error
+
+	// Close app.db (opened by cosmos-sdk/server/start.go call to openDB)
+	if app.db != nil {
+		app.logger.Info("Closing application.db")
+		if err := app.db.Close(); err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	// Close app.snapshotManager
+	// - opened when app chains use cosmos-sdk/server/util.go/DefaultBaseappOptions (boilerplate)
+	// - which calls cosmos-sdk/server/util.go/GetSnapshotStore
+	// - which is passed to baseapp/options.go/SetSnapshot
+	// - to set app.snapshotManager = snapshots.NewManager
+	if app.snapshotManager != nil {
+		app.logger.Info("Closing snapshots/metadata.db")
+		if err := app.snapshotManager.Close(); err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	return errors.Join(errs...)
+}
+
+// GetBaseApp returns the pointer to itself.
+func (app *BaseApp) GetBaseApp() *BaseApp {
+	return app
+}
+```
+
+### Commit
+
+The [`Commit` ABCI message](https://github.com/cometbft/cometbft/blob/v0.37.x/spec/abci/abci++_basic_concepts.md#method-overview) is sent from the underlying CometBFT engine after the full-node has received *precommits* from 2/3+ of validators (weighted by voting power). On the `BaseApp` end, the `Commit(res abci.ResponseCommit)` function is implemented to commit all the valid state transitions that occurred during `FinalizeBlock` and to reset state for the next block.
+
+To commit state-transitions, the `Commit` function calls the `Write()` function on `finalizeBlockState.ms`, where `finalizeBlockState.ms` is a branched multistore of the main store `app.cms`. Then, the `Commit` function sets `checkState` to the latest header (obtained from `finalizeBlockState.ctx.BlockHeader`) and `finalizeBlockState` to `nil`.
+
+Finally, `Commit` returns the hash of the commitment of `app.cms` back to the underlying consensus engine. This hash is used as a reference in the header of the next block.
+
+### Info
+
+The [`Info` ABCI message](https://github.com/cometbft/cometbft/blob/v0.37.x/spec/abci/abci++_basic_concepts.md#info-methods) is a simple query from the underlying consensus engine, notably used to sync the latter with the application during a handshake that happens on startup. When called, the `Info(res abci.ResponseInfo)` function from `BaseApp` will return the application's name, version and the hash of the last commit of `app.cms`.
+
+### Query
+
+The [`Query` ABCI message](https://github.com/cometbft/cometbft/blob/v0.37.x/spec/abci/abci++_basic_concepts.md#info-methods) is used to serve queries received from the underlying consensus engine, including queries received via RPC like CometBFT RPC. It used to be the main entrypoint to build interfaces with the application, but with the introduction of [gRPC queries](/sdk/v0.53/build/building-modules/query-services) in Cosmos SDK v0.40, its usage is more limited. The application must respect a few rules when implementing the `Query` method, which are outlined [here](https://github.com/cometbft/cometbft/blob/v0.37.x/spec/abci/abci++_app_requirements.md#query).
+
+Each CometBFT `query` comes with a `path`, which is a `string` which denotes what to query. If the `path` matches a gRPC fully-qualified service method, then `BaseApp` will defer the query to the `grpcQueryRouter` and let it handle it like explained [above](#grpc-query-router). Otherwise, the `path` represents a query that is not (yet) handled by the gRPC router. `BaseApp` splits the `path` string with the `/` delimiter. By convention, the first element of the split string (`split[0]`) contains the category of `query` (`app`, `p2p`, `store` or `custom` ). The `BaseApp` implementation of the `Query(req abci.RequestQuery)` method is a simple dispatcher serving these 4 main categories of queries:
+
+* Application-related queries like querying the application's version, which are served via the `handleQueryApp` method.
+* Direct queries to the multistore, which are served by the `handleQueryStore` method. These direct queries are different from custom queries which go through `app.queryRouter`, and are mainly used by third-party service providers like block explorers.
+* P2P queries, which are served via the `handleQueryP2P` method. These queries return either `app.addrPeerFilter` or `app.ipPeerFilter` that contain the list of peers filtered by address or IP respectively. These lists are first initialized via `options` in `BaseApp`'s [constructor](#constructor).
+* Custom queries, which are served via the `handleQueryCustom` method. `handleQueryCustom` branches the multistore before using the query router to map the query to the appropriate module.
+
+### ExtendVote
+
+`ExtendVote` allows an application to extend a pre-commit vote with arbitrary data. This process does NOT have to be deterministic and the data returned can be unique to the validator process.
+
+In the Cosmos-SDK this is implemented as a NoOp:
+
+```go expandable
+package baseapp
+
+import (
+
+ "bytes"
+ "context"
+ "fmt"
+ "slices"
+ "github.com/cockroachdb/errors"
+ abci "github.com/cometbft/cometbft/abci/types"
+ cryptoenc "github.com/cometbft/cometbft/crypto/encoding"
+ cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto"
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ cmttypes "github.com/cometbft/cometbft/types"
+ protoio "github.com/cosmos/gogoproto/io"
+ "github.com/cosmos/gogoproto/proto"
+ "cosmossdk.io/core/comet"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/mempool"
+)
+
+type (
+	// ValidatorStore defines the interface contract required for verifying vote
+	// extension signatures. Typically, this will be implemented by the x/staking
+	// module, which has knowledge of the CometBFT public key.
+	ValidatorStore interface {
+		GetPubKeyByConsAddr(context.Context, sdk.ConsAddress) (cmtprotocrypto.PublicKey, error)
+	}
+
+	// GasTx defines the contract that a transaction with a gas limit must implement.
+	GasTx interface {
+		GetGas() uint64
+	}
+)
+
+// ValidateVoteExtensions defines a helper function for verifying vote extension
+// signatures that may be passed or manually injected into a block proposal from
+// a proposer in PrepareProposal. It returns an error if any signature is invalid
+// or if unexpected vote extensions and/or signatures are found or less than 2/3
+// power is received.
+// NOTE: From v0.50.5 `currentHeight` and `chainID` arguments are ignored for fixing an issue.
+// They will be removed from the function in v0.51+.
+func ValidateVoteExtensions(
+	ctx sdk.Context,
+	valStore ValidatorStore,
+	_ int64,
+	_ string,
+	extCommit abci.ExtendedCommitInfo,
+) error {
+	// Get values from context
+	cp := ctx.ConsensusParams()
+	currentHeight := ctx.HeaderInfo().Height
+	chainID := ctx.HeaderInfo().ChainID
+	commitInfo := ctx.CometInfo().GetLastCommit()
+
+	// Check that both extCommit + commit are ordered in accordance with vp/address.
+	if err := validateExtendedCommitAgainstLastCommit(extCommit, commitInfo); err != nil {
+		return err
+	}
+
+	// Start checking vote extensions only **after** the vote extensions enable
+	// height, because when `currentHeight == VoteExtensionsEnableHeight`
+	// PrepareProposal doesn't get any vote extensions in its request.
+	extsEnabled := cp.Abci != nil && currentHeight > cp.Abci.VoteExtensionsEnableHeight && cp.Abci.VoteExtensionsEnableHeight != 0
+
+	marshalDelimitedFn := func(msg proto.Message) ([]byte, error) {
+		var buf bytes.Buffer
+		if err := protoio.NewDelimitedWriter(&buf).WriteMsg(msg); err != nil {
+			return nil, err
+		}
+		return buf.Bytes(), nil
+	}
+
+	var (
+		// Total voting power of all vote extensions.
+		totalVP int64
+		// Total voting power of all validators that submitted valid vote extensions.
+		sumVP int64
+	)
+
+	for _, vote := range extCommit.Votes {
+		totalVP += vote.Validator.Power
+
+		// Only check + include power if the vote is a commit vote. There must be super-majority, otherwise the
+		// previous block (the block the vote is for) could not have been committed.
+		if vote.BlockIdFlag != cmtproto.BlockIDFlagCommit {
+			continue
+		}
+
+		if !extsEnabled {
+			if len(vote.VoteExtension) > 0 {
+				return fmt.Errorf("vote extensions disabled; received non-empty vote extension at height %d", currentHeight)
+			}
+			if len(vote.ExtensionSignature) > 0 {
+				return fmt.Errorf("vote extensions disabled; received non-empty vote extension signature at height %d", currentHeight)
+			}
+			continue
+		}
+
+		if len(vote.ExtensionSignature) == 0 {
+			return fmt.Errorf("vote extensions enabled; received empty vote extension signature at height %d", currentHeight)
+		}
+
+		valConsAddr := sdk.ConsAddress(vote.Validator.Address)
+
+		pubKeyProto, err := valStore.GetPubKeyByConsAddr(ctx, valConsAddr)
+		if err != nil {
+			return fmt.Errorf("failed to get validator %X public key: %w", valConsAddr, err)
+		}
+
+		cmtPubKey, err := cryptoenc.PubKeyFromProto(pubKeyProto)
+		if err != nil {
+			return fmt.Errorf("failed to convert validator %X public key: %w", valConsAddr, err)
+		}
+
+		cve := cmtproto.CanonicalVoteExtension{
+			Extension: vote.VoteExtension,
+			Height:    currentHeight - 1, // the vote extension was signed in the previous height
+			Round:     int64(extCommit.Round),
+			ChainId:   chainID,
+		}
+
+		extSignBytes, err := marshalDelimitedFn(&cve)
+		if err != nil {
+			return fmt.Errorf("failed to encode CanonicalVoteExtension: %w", err)
+		}
+
+		if !cmtPubKey.VerifySignature(extSignBytes, vote.ExtensionSignature) {
+			return fmt.Errorf("failed to verify validator %X vote extension signature", valConsAddr)
+		}
+
+		sumVP += vote.Validator.Power
+	}
+
+	// This check is probably unnecessary, but better safe than sorry.
+	if totalVP <= 0 {
+		return fmt.Errorf("total voting power must be positive, got: %d", totalVP)
+	}
+
+	// If the sum of the voting power has not reached (2/3 + 1) we need to error.
+	if requiredVP := ((totalVP * 2) / 3) + 1; sumVP < requiredVP {
+		return fmt.Errorf(
+			"insufficient cumulative voting power received to verify vote extensions; got: %d, expected: >=%d",
+			sumVP, requiredVP,
+		)
+	}
+
+	return nil
+}
+
+// validateExtendedCommitAgainstLastCommit validates an ExtendedCommitInfo against a LastCommit. Specifically,
+// it checks that the ExtendedCommit + LastCommit (for the same height), are consistent with each other + that
+// they are ordered correctly (by voting power)
+
+in accordance with
+// [comet](https://github.com/cometbft/cometbft/blob/4ce0277b35f31985bbf2c25d3806a184a4510010/types/validator_set.go#L784).
+func validateExtendedCommitAgainstLastCommit(ec abci.ExtendedCommitInfo, lc comet.CommitInfo)
+
+error {
+ // check that the rounds are the same
+ if ec.Round != lc.Round() {
+ return fmt.Errorf("extended commit round %d does not match last commit round %d", ec.Round, lc.Round())
+}
+
+ // check that the # of votes are the same
+ if len(ec.Votes) != lc.Votes().Len() {
+ return fmt.Errorf("extended commit votes length %d does not match last commit votes length %d", len(ec.Votes), lc.Votes().Len())
+}
+
+ // check sort order of extended commit votes
+ if !slices.IsSortedFunc(ec.Votes, func(vote1, vote2 abci.ExtendedVoteInfo) int {
+ if vote1.Validator.Power == vote2.Validator.Power {
+ return bytes.Compare(vote1.Validator.Address, vote2.Validator.Address) // addresses sorted in ascending order (used to break vp conflicts)
+}
+
+return -int(vote1.Validator.Power - vote2.Validator.Power) // vp sorted in descending order
+}) {
+ return fmt.Errorf("extended commit votes are not sorted by voting power")
+}
+ addressCache := make(map[string]struct{
+}, len(ec.Votes))
+ // check that consistency between LastCommit and ExtendedCommit
+ for i, vote := range ec.Votes {
+ // cache addresses to check for duplicates
+ if _, ok := addressCache[string(vote.Validator.Address)]; ok {
+ return fmt.Errorf("extended commit vote address %X is duplicated", vote.Validator.Address)
+}
+
+addressCache[string(vote.Validator.Address)] = struct{
+}{
+}
+ if !bytes.Equal(vote.Validator.Address, lc.Votes().Get(i).Validator().Address()) {
+ return fmt.Errorf("extended commit vote address %X does not match last commit vote address %X", vote.Validator.Address, lc.Votes().Get(i).Validator().Address())
+}
+ if vote.Validator.Power != lc.Votes().Get(i).Validator().Power() {
+ return fmt.Errorf("extended commit vote power %d does not match last commit vote power %d", vote.Validator.Power, lc.Votes().Get(i).Validator().Power())
+}
+
+}
+
+return nil
+}
+
+type (
+ // ProposalTxVerifier defines the interface that is implemented by BaseApp,
+ // that any custom ABCI PrepareProposal and ProcessProposal handler can use
+ // to verify a transaction.
+ ProposalTxVerifier interface {
+ PrepareProposalVerifyTx(tx sdk.Tx) ([]byte, error)
+
+ProcessProposalVerifyTx(txBz []byte) (sdk.Tx, error)
+
+TxDecode(txBz []byte) (sdk.Tx, error)
+
+TxEncode(tx sdk.Tx) ([]byte, error)
+}
+
+ // DefaultProposalHandler defines the default ABCI PrepareProposal and
+ // ProcessProposal handlers.
+ DefaultProposalHandler struct {
+ mempool mempool.Mempool
+ txVerifier ProposalTxVerifier
+ txSelector TxSelector
+ signerExtAdapter mempool.SignerExtractionAdapter
+}
+)
+
+func NewDefaultProposalHandler(mp mempool.Mempool, txVerifier ProposalTxVerifier) *DefaultProposalHandler {
+ return &DefaultProposalHandler{
+ mempool: mp,
+ txVerifier: txVerifier,
+ txSelector: NewDefaultTxSelector(),
+ signerExtAdapter: mempool.NewDefaultSignerExtractionAdapter(),
+}
+}
+
+// SetTxSelector sets the TxSelector function on the DefaultProposalHandler.
+func (h *DefaultProposalHandler) SetTxSelector(ts TxSelector) {
+ h.txSelector = ts
+}
+
+// PrepareProposalHandler returns the default implementation for processing an
+// ABCI proposal. The application's mempool is enumerated and all valid
+// transactions are added to the proposal. Transactions are valid if they:
+//
+// 1) Successfully encode to bytes.
+// 2) Are valid (i.e. pass runTx, AnteHandler only).
+//
+// Enumeration is halted once RequestPrepareProposal.MaxBytes of transactions is
+// reached or the mempool is exhausted.
+//
+// Note:
+//
+// - Step (2) is identical to the validation step performed in
+// DefaultProcessProposal. It is very important that the same validation logic
+// is used in both steps, and applications must ensure that this is the case in
+// non-default handlers.
+//
+// - If no mempool is set or if the mempool is a no-op mempool, the transactions
+// requested from CometBFT will simply be returned, which, by default, are in
+// FIFO order.
+func (h *DefaultProposalHandler) PrepareProposalHandler() sdk.PrepareProposalHandler {
+ return func(ctx sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) {
+ var maxBlockGas uint64
+ if b := ctx.ConsensusParams().Block; b != nil {
+ maxBlockGas = uint64(b.MaxGas)
+}
+
+defer h.txSelector.Clear()
+
+ // If the mempool is nil or NoOp we simply return the transactions
+ // requested from CometBFT, which, by default, should be in FIFO order.
+ //
+ // Note, we still need to ensure the transactions returned respect req.MaxTxBytes.
+ _, isNoOp := h.mempool.(mempool.NoOpMempool)
+ if h.mempool == nil || isNoOp {
+ for _, txBz := range req.Txs {
+ tx, err := h.txVerifier.TxDecode(txBz)
+ if err != nil {
+ return nil, err
+}
+ stop := h.txSelector.SelectTxForProposal(ctx, uint64(req.MaxTxBytes), maxBlockGas, tx, txBz)
+ if stop {
+ break
+}
+
+}
+
+return &abci.ResponsePrepareProposal{
+ Txs: h.txSelector.SelectedTxs(ctx)
+}, nil
+}
+ selectedTxsSignersSeqs := make(map[string]uint64)
+
+var (
+ resError error
+ selectedTxsNums int
+ invalidTxs []sdk.Tx // invalid txs to be removed out of the loop to avoid dead lock
+ )
+
+mempool.SelectBy(ctx, h.mempool, req.Txs, func(memTx sdk.Tx) bool {
+ unorderedTx, ok := memTx.(sdk.TxWithUnordered)
+ isUnordered := ok && unorderedTx.GetUnordered()
+ txSignersSeqs := make(map[string]uint64)
+
+ // if the tx is unordered, we don't need to check the sequence, we just add it
+ if !isUnordered {
+ signerData, err := h.signerExtAdapter.GetSigners(memTx)
+ if err != nil {
+ // propagate the error to the caller
+ resError = err
+ return false
+}
+
+ // If the signers aren't in selectedTxsSignersSeqs then we haven't seen them before
+ // so we add them and continue given that we don't need to check the sequence.
+ shouldAdd := true
+ for _, signer := range signerData {
+ seq, ok := selectedTxsSignersSeqs[signer.Signer.String()]
+ if !ok {
+ txSignersSeqs[signer.Signer.String()] = signer.Sequence
+ continue
+}
+
+ // If we have seen this signer before in this block, we must make
+ // sure that the current sequence is seq+1; otherwise is invalid
+ // and we skip it.
+ if seq+1 != signer.Sequence {
+ shouldAdd = false
+ break
+}
+
+txSignersSeqs[signer.Signer.String()] = signer.Sequence
+}
+ if !shouldAdd {
+ return true
+}
+
+}
+
+ // NOTE: Since transaction verification was already executed in CheckTx,
+ // which calls mempool.Insert, in theory everything in the pool should be
+ // valid. But some mempool implementations may insert invalid txs, so we
+ // check again.
+ txBz, err := h.txVerifier.PrepareProposalVerifyTx(memTx)
+ if err != nil {
+ invalidTxs = append(invalidTxs, memTx)
+} else {
+ stop := h.txSelector.SelectTxForProposal(ctx, uint64(req.MaxTxBytes), maxBlockGas, memTx, txBz)
+ if stop {
+ return false
+}
+ txsLen := len(h.txSelector.SelectedTxs(ctx))
+ // If the tx is unordered, we don't need to update the sender sequence.
+ if !isUnordered {
+ for sender, seq := range txSignersSeqs {
+ // If txsLen != selectedTxsNums is true, it means that we've
+ // added a new tx to the selected txs, so we need to update
+ // the sequence of the sender.
+ if txsLen != selectedTxsNums {
+ selectedTxsSignersSeqs[sender] = seq
+} else if _, ok := selectedTxsSignersSeqs[sender]; !ok {
+ // The transaction hasn't been added but it passed the
+ // verification, so we know that the sequence is correct.
+ // So we set this sender's sequence to seq-1, in order
+ // to avoid unnecessary calls to PrepareProposalVerifyTx.
+ selectedTxsSignersSeqs[sender] = seq - 1
+}
+
+}
+
+}
+
+selectedTxsNums = txsLen
+}
+
+return true
+})
+ if resError != nil {
+ return nil, resError
+}
+ for _, tx := range invalidTxs {
+ err := h.mempool.Remove(tx)
+ if err != nil && !errors.Is(err, mempool.ErrTxNotFound) {
+ return nil, err
+}
+
+}
+
+return &abci.ResponsePrepareProposal{
+ Txs: h.txSelector.SelectedTxs(ctx)
+}, nil
+}
+}
+
+// ProcessProposalHandler returns the default implementation for processing an
+// ABCI proposal. Every transaction in the proposal must pass 2 conditions:
+//
+// 1. The transaction bytes must decode to a valid transaction.
+// 2. The transaction must be valid (i.e. pass runTx, AnteHandler only)
+//
+// If any transaction fails to pass either condition, the proposal is rejected.
+// Note that step (2) is identical to the validation step performed in
+// DefaultPrepareProposal. It is very important that the same validation logic
+// is used in both steps, and applications must ensure that this is the case in
+// non-default handlers.
+func (h *DefaultProposalHandler) ProcessProposalHandler() sdk.ProcessProposalHandler {
+ // If the mempool is nil or NoOp we simply return ACCEPT,
+ // because PrepareProposal may have included txs that could fail verification.
+ _, isNoOp := h.mempool.(mempool.NoOpMempool)
+ if h.mempool == nil || isNoOp {
+ return NoOpProcessProposal()
+}
+
+return func(ctx sdk.Context, req *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) {
+ var totalTxGas uint64
+
+ var maxBlockGas int64
+ if b := ctx.ConsensusParams().Block; b != nil {
+ maxBlockGas = b.MaxGas
+}
+ for _, txBytes := range req.Txs {
+ tx, err := h.txVerifier.ProcessProposalVerifyTx(txBytes)
+ if err != nil {
+ return &abci.ResponseProcessProposal{
+ Status: abci.ResponseProcessProposal_REJECT
+}, nil
+}
+ if maxBlockGas > 0 {
+ gasTx, ok := tx.(GasTx)
+ if ok {
+ totalTxGas += gasTx.GetGas()
+}
+ if totalTxGas > uint64(maxBlockGas) {
+ return &abci.ResponseProcessProposal{
+ Status: abci.ResponseProcessProposal_REJECT
+}, nil
+}
+
+}
+
+}
+
+return &abci.ResponseProcessProposal{
+ Status: abci.ResponseProcessProposal_ACCEPT
+}, nil
+}
+}
+
+// NoOpPrepareProposal defines a no-op PrepareProposal handler. It will always
+// return the transactions sent by the client's request.
+func NoOpPrepareProposal() sdk.PrepareProposalHandler {
+ return func(_ sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) {
+ return &abci.ResponsePrepareProposal{
+ Txs: req.Txs
+}, nil
+}
+}
+
+// NoOpProcessProposal defines a no-op ProcessProposal Handler. It will always
+// return ACCEPT.
+func NoOpProcessProposal() sdk.ProcessProposalHandler {
+ return func(_ sdk.Context, _ *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) {
+ return &abci.ResponseProcessProposal{
+ Status: abci.ResponseProcessProposal_ACCEPT
+}, nil
+}
+}
+
+// NoOpExtendVote defines a no-op ExtendVote handler. It will always return an
+// empty byte slice as the vote extension.
+func NoOpExtendVote() sdk.ExtendVoteHandler {
+ return func(_ sdk.Context, _ *abci.RequestExtendVote) (*abci.ResponseExtendVote, error) {
+ return &abci.ResponseExtendVote{
+ VoteExtension: []byte{
+}}, nil
+}
+}
+
+// NoOpVerifyVoteExtensionHandler defines a no-op VerifyVoteExtension handler. It
+// will always return an ACCEPT status with no error.
+func NoOpVerifyVoteExtensionHandler() sdk.VerifyVoteExtensionHandler {
+ return func(_ sdk.Context, _ *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error) {
+ return &abci.ResponseVerifyVoteExtension{
+ Status: abci.ResponseVerifyVoteExtension_ACCEPT
+}, nil
+}
+}
+
+// TxSelector defines a helper type that assists in selecting transactions during
+// mempool transaction selection in PrepareProposal. It keeps track of the total
+// number of bytes and total gas of the selected transactions. It also keeps
+// track of the selected transactions themselves.
+type TxSelector interface {
+ // SelectedTxs should return a copy of the selected transactions.
+ SelectedTxs(ctx context.Context) [][]byte
+
+ // Clear should clear the TxSelector, nulling out all relevant fields.
+ Clear()
+
+ // SelectTxForProposal should attempt to select a transaction for inclusion in
+ // a proposal based on inclusion criteria defined by the TxSelector. It must
+ // return if the caller should halt the transaction selection loop
+ // (typically over a mempool) or otherwise.
+ SelectTxForProposal(ctx context.Context, maxTxBytes, maxBlockGas uint64, memTx sdk.Tx, txBz []byte) bool
+}
+
+type defaultTxSelector struct {
+ totalTxBytes uint64
+ totalTxGas uint64
+ selectedTxs [][]byte
+}
+
+func NewDefaultTxSelector() TxSelector {
+ return &defaultTxSelector{
+}
+}
+
+func (ts *defaultTxSelector) SelectedTxs(_ context.Context) [][]byte {
+ txs := make([][]byte, len(ts.selectedTxs))
+
+copy(txs, ts.selectedTxs)
+
+return txs
+}
+
+func (ts *defaultTxSelector) Clear() {
+ ts.totalTxBytes = 0
+ ts.totalTxGas = 0
+ ts.selectedTxs = nil
+}
+
+func (ts *defaultTxSelector) SelectTxForProposal(_ context.Context, maxTxBytes, maxBlockGas uint64, memTx sdk.Tx, txBz []byte) bool {
+ txSize := uint64(cmttypes.ComputeProtoSizeForTxs([]cmttypes.Tx{
+ txBz
+}))
+
+var txGasLimit uint64
+ if memTx != nil {
+ if gasTx, ok := memTx.(GasTx); ok {
+ txGasLimit = gasTx.GetGas()
+}
+
+}
+
+ // only add the transaction to the proposal if we have enough capacity
+ if (txSize + ts.totalTxBytes) <= maxTxBytes {
+ // If there is a max block gas limit, add the tx only if the limit has
+ // not been met.
+ if maxBlockGas > 0 {
+ if (txGasLimit + ts.totalTxGas) <= maxBlockGas {
+ ts.totalTxGas += txGasLimit
+ ts.totalTxBytes += txSize
+ ts.selectedTxs = append(ts.selectedTxs, txBz)
+}
+
+} else {
+ ts.totalTxBytes += txSize
+ ts.selectedTxs = append(ts.selectedTxs, txBz)
+}
+
+}
+
+ // check if we've reached capacity; if so, we cannot select any more transactions
+ return ts.totalTxBytes >= maxTxBytes || (maxBlockGas > 0 && (ts.totalTxGas >= maxBlockGas))
+}
+```
+
+### VerifyVoteExtension
+
+`VerifyVoteExtension` allows an application to verify that the data returned by `ExtendVote` is valid. This process MUST be deterministic. Moreover, the value of ResponseVerifyVoteExtension.status MUST exclusively depend on the parameters passed in the call to RequestVerifyVoteExtension, and the last committed Application state.
+
+In the Cosmos-SDK this is implemented as a NoOp:
+
+```go expandable
+package baseapp
+
+import (
+
+ "bytes"
+ "context"
+ "fmt"
+ "slices"
+ "github.com/cockroachdb/errors"
+ abci "github.com/cometbft/cometbft/abci/types"
+ cryptoenc "github.com/cometbft/cometbft/crypto/encoding"
+ cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto"
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ cmttypes "github.com/cometbft/cometbft/types"
+ protoio "github.com/cosmos/gogoproto/io"
+ "github.com/cosmos/gogoproto/proto"
+ "cosmossdk.io/core/comet"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/mempool"
+)
+
+type (
+ // ValidatorStore defines the interface contract required for verifying vote
+ // extension signatures. Typically, this will be implemented by the x/staking
+ // module, which has knowledge of the CometBFT public key.
+ ValidatorStore interface {
+ GetPubKeyByConsAddr(context.Context, sdk.ConsAddress) (cmtprotocrypto.PublicKey, error)
+}
+
+ // GasTx defines the contract that a transaction with a gas limit must implement.
+ GasTx interface {
+ GetGas() uint64
+}
+)
+
+// ValidateVoteExtensions defines a helper function for verifying vote extension
+// signatures that may be passed or manually injected into a block proposal from
+// a proposer in PrepareProposal. It returns an error if any signature is invalid
+// or if unexpected vote extensions and/or signatures are found or less than 2/3
+// power is received.
+// NOTE: From v0.50.5 `currentHeight` and `chainID` arguments are ignored for fixing an issue.
+// They will be removed from the function in v0.51+.
+func ValidateVoteExtensions(
+ ctx sdk.Context,
+ valStore ValidatorStore,
+ _ int64,
+ _ string,
+ extCommit abci.ExtendedCommitInfo,
+) error {
+ // Get values from context
+ cp := ctx.ConsensusParams()
+ currentHeight := ctx.HeaderInfo().Height
+ chainID := ctx.HeaderInfo().ChainID
+ commitInfo := ctx.CometInfo().GetLastCommit()
+
+ // Check that both extCommit + commit are ordered in accordance with vp/address.
+ if err := validateExtendedCommitAgainstLastCommit(extCommit, commitInfo); err != nil {
+ return err
+}
+
+ // Start checking vote extensions only **after** the vote extensions enable
+ // height, because when `currentHeight == VoteExtensionsEnableHeight`
+ // PrepareProposal doesn't get any vote extensions in its request.
+ extsEnabled := cp.Abci != nil && currentHeight > cp.Abci.VoteExtensionsEnableHeight && cp.Abci.VoteExtensionsEnableHeight != 0
+ marshalDelimitedFn := func(msg proto.Message) ([]byte, error) {
+ var buf bytes.Buffer
+ if err := protoio.NewDelimitedWriter(&buf).WriteMsg(msg); err != nil {
+ return nil, err
+}
+
+return buf.Bytes(), nil
+}
+
+var (
+ // Total voting power of all vote extensions.
+ totalVP int64
+ // Total voting power of all validators that submitted valid vote extensions.
+ sumVP int64
+ )
+ for _, vote := range extCommit.Votes {
+ totalVP += vote.Validator.Power
+
+ // Only check + include power if the vote is a commit vote. There must be super-majority, otherwise the
+ // previous block (the block the vote is for) could not have been committed.
+ if vote.BlockIdFlag != cmtproto.BlockIDFlagCommit {
+ continue
+}
+ if !extsEnabled {
+ if len(vote.VoteExtension) > 0 {
+ return fmt.Errorf("vote extensions disabled; received non-empty vote extension at height %d", currentHeight)
+}
+ if len(vote.ExtensionSignature) > 0 {
+ return fmt.Errorf("vote extensions disabled; received non-empty vote extension signature at height %d", currentHeight)
+}
+
+continue
+}
+ if len(vote.ExtensionSignature) == 0 {
+ return fmt.Errorf("vote extensions enabled; received empty vote extension signature at height %d", currentHeight)
+}
+ valConsAddr := sdk.ConsAddress(vote.Validator.Address)
+
+pubKeyProto, err := valStore.GetPubKeyByConsAddr(ctx, valConsAddr)
+ if err != nil {
+ return fmt.Errorf("failed to get validator %X public key: %w", valConsAddr, err)
+}
+
+cmtPubKey, err := cryptoenc.PubKeyFromProto(pubKeyProto)
+ if err != nil {
+ return fmt.Errorf("failed to convert validator %X public key: %w", valConsAddr, err)
+}
+ cve := cmtproto.CanonicalVoteExtension{
+ Extension: vote.VoteExtension,
+ Height: currentHeight - 1, // the vote extension was signed in the previous height
+ Round: int64(extCommit.Round),
+ ChainId: chainID,
+}
+
+extSignBytes, err := marshalDelimitedFn(&cve)
+ if err != nil {
+ return fmt.Errorf("failed to encode CanonicalVoteExtension: %w", err)
+}
+ if !cmtPubKey.VerifySignature(extSignBytes, vote.ExtensionSignature) {
+ return fmt.Errorf("failed to verify validator %X vote extension signature", valConsAddr)
+}
+
+sumVP += vote.Validator.Power
+}
+
+ // This check is probably unnecessary, but better safe than sorry.
+ if totalVP <= 0 {
+ return fmt.Errorf("total voting power must be positive, got: %d", totalVP)
+}
+
+ // If the sum of the voting power has not reached (2/3 + 1) we need to error.
+ if requiredVP := ((totalVP * 2) / 3) + 1; sumVP < requiredVP {
+ return fmt.Errorf(
+ "insufficient cumulative voting power received to verify vote extensions; got: %d, expected: >=%d",
+ sumVP, requiredVP,
+ )
+}
+
+return nil
+}
+
+// validateExtendedCommitAgainstLastCommit validates an ExtendedCommitInfo against a LastCommit. Specifically,
+// it checks that the ExtendedCommit + LastCommit (for the same height), are consistent with each other + that
+// they are ordered correctly (by voting power) in accordance with
+// [comet](https://github.com/cometbft/cometbft/blob/4ce0277b35f31985bbf2c25d3806a184a4510010/types/validator_set.go#L784).
+func validateExtendedCommitAgainstLastCommit(ec abci.ExtendedCommitInfo, lc comet.CommitInfo) error {
+ // check that the rounds are the same
+ if ec.Round != lc.Round() {
+ return fmt.Errorf("extended commit round %d does not match last commit round %d", ec.Round, lc.Round())
+}
+
+ // check that the # of votes are the same
+ if len(ec.Votes) != lc.Votes().Len() {
+ return fmt.Errorf("extended commit votes length %d does not match last commit votes length %d", len(ec.Votes), lc.Votes().Len())
+}
+
+ // check sort order of extended commit votes
+ if !slices.IsSortedFunc(ec.Votes, func(vote1, vote2 abci.ExtendedVoteInfo) int {
+ if vote1.Validator.Power == vote2.Validator.Power {
+ return bytes.Compare(vote1.Validator.Address, vote2.Validator.Address) // addresses sorted in ascending order (used to break vp conflicts)
+}
+
+return -int(vote1.Validator.Power - vote2.Validator.Power) // vp sorted in descending order
+}) {
+ return fmt.Errorf("extended commit votes are not sorted by voting power")
+}
+ addressCache := make(map[string]struct{
+}, len(ec.Votes))
+ // check that consistency between LastCommit and ExtendedCommit
+ for i, vote := range ec.Votes {
+ // cache addresses to check for duplicates
+ if _, ok := addressCache[string(vote.Validator.Address)]; ok {
+ return fmt.Errorf("extended commit vote address %X is duplicated", vote.Validator.Address)
+}
+
+addressCache[string(vote.Validator.Address)] = struct{
+}{
+}
+ if !bytes.Equal(vote.Validator.Address, lc.Votes().Get(i).Validator().Address()) {
+ return fmt.Errorf("extended commit vote address %X does not match last commit vote address %X", vote.Validator.Address, lc.Votes().Get(i).Validator().Address())
+}
+ if vote.Validator.Power != lc.Votes().Get(i).Validator().Power() {
+ return fmt.Errorf("extended commit vote power %d does not match last commit vote power %d", vote.Validator.Power, lc.Votes().Get(i).Validator().Power())
+}
+
+}
+
+return nil
+}
+
+type (
+ // ProposalTxVerifier defines the interface that is implemented by BaseApp,
+ // that any custom ABCI PrepareProposal and ProcessProposal handler can use
+ // to verify a transaction.
+ ProposalTxVerifier interface {
+ PrepareProposalVerifyTx(tx sdk.Tx) ([]byte, error)
+
+ProcessProposalVerifyTx(txBz []byte) (sdk.Tx, error)
+
+TxDecode(txBz []byte) (sdk.Tx, error)
+
+TxEncode(tx sdk.Tx) ([]byte, error)
+}
+
+ // DefaultProposalHandler defines the default ABCI PrepareProposal and
+ // ProcessProposal handlers.
+ DefaultProposalHandler struct {
+ mempool mempool.Mempool
+ txVerifier ProposalTxVerifier
+ txSelector TxSelector
+ signerExtAdapter mempool.SignerExtractionAdapter
+}
+)
+
+func NewDefaultProposalHandler(mp mempool.Mempool, txVerifier ProposalTxVerifier) *DefaultProposalHandler {
+ return &DefaultProposalHandler{
+ mempool: mp,
+ txVerifier: txVerifier,
+ txSelector: NewDefaultTxSelector(),
+ signerExtAdapter: mempool.NewDefaultSignerExtractionAdapter(),
+}
+}
+
+// SetTxSelector sets the TxSelector function on the DefaultProposalHandler.
+func (h *DefaultProposalHandler) SetTxSelector(ts TxSelector) {
+ h.txSelector = ts
+}
+
+// PrepareProposalHandler returns the default implementation for processing an
+// ABCI proposal. The application's mempool is enumerated and all valid
+// transactions are added to the proposal. Transactions are valid if they:
+//
+// 1) Successfully encode to bytes.
+// 2) Are valid (i.e. pass runTx, AnteHandler only).
+//
+// Enumeration is halted once RequestPrepareProposal.MaxBytes of transactions is
+// reached or the mempool is exhausted.
+//
+// Note:
+//
+// - Step (2) is identical to the validation step performed in
+// DefaultProcessProposal. It is very important that the same validation logic
+// is used in both steps, and applications must ensure that this is the case in
+// non-default handlers.
+//
+// - If no mempool is set or if the mempool is a no-op mempool, the transactions
+// requested from CometBFT will simply be returned, which, by default, are in
+// FIFO order.
+func (h *DefaultProposalHandler) PrepareProposalHandler() sdk.PrepareProposalHandler {
+ return func(ctx sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) {
+ var maxBlockGas uint64
+ if b := ctx.ConsensusParams().Block; b != nil {
+ maxBlockGas = uint64(b.MaxGas)
+}
+
+defer h.txSelector.Clear()
+
+ // If the mempool is nil or NoOp we simply return the transactions
+ // requested from CometBFT, which, by default, should be in FIFO order.
+ //
+ // Note, we still need to ensure the transactions returned respect req.MaxTxBytes.
+ _, isNoOp := h.mempool.(mempool.NoOpMempool)
+ if h.mempool == nil || isNoOp {
+ for _, txBz := range req.Txs {
+ tx, err := h.txVerifier.TxDecode(txBz)
+ if err != nil {
+ return nil, err
+}
+ stop := h.txSelector.SelectTxForProposal(ctx, uint64(req.MaxTxBytes), maxBlockGas, tx, txBz)
+ if stop {
+ break
+}
+
+}
+
+return &abci.ResponsePrepareProposal{
+ Txs: h.txSelector.SelectedTxs(ctx)
+}, nil
+}
+ selectedTxsSignersSeqs := make(map[string]uint64)
+
+var (
+ resError error
+ selectedTxsNums int
+ invalidTxs []sdk.Tx // invalid txs to be removed out of the loop to avoid dead lock
+ )
+
+mempool.SelectBy(ctx, h.mempool, req.Txs, func(memTx sdk.Tx) bool {
+ unorderedTx, ok := memTx.(sdk.TxWithUnordered)
+ isUnordered := ok && unorderedTx.GetUnordered()
+ txSignersSeqs := make(map[string]uint64)
+
+ // if the tx is unordered, we don't need to check the sequence, we just add it
+ if !isUnordered {
+ signerData, err := h.signerExtAdapter.GetSigners(memTx)
+ if err != nil {
+ // propagate the error to the caller
+ resError = err
+ return false
+}
+
+ // If the signers aren't in selectedTxsSignersSeqs then we haven't seen them before
+ // so we add them and continue given that we don't need to check the sequence.
+ shouldAdd := true
+ for _, signer := range signerData {
+ seq, ok := selectedTxsSignersSeqs[signer.Signer.String()]
+ if !ok {
+ txSignersSeqs[signer.Signer.String()] = signer.Sequence
+ continue
+}
+
+ // If we have seen this signer before in this block, we must make
+ // sure that the current sequence is seq+1; otherwise is invalid
+ // and we skip it.
+ if seq+1 != signer.Sequence {
+ shouldAdd = false
+ break
+}
+
+txSignersSeqs[signer.Signer.String()] = signer.Sequence
+}
+ if !shouldAdd {
+ return true
+}
+
+}
+
+ // NOTE: Since transaction verification was already executed in CheckTx,
+ // which calls mempool.Insert, in theory everything in the pool should be
+ // valid. But some mempool implementations may insert invalid txs, so we
+ // check again.
+ txBz, err := h.txVerifier.PrepareProposalVerifyTx(memTx)
+ if err != nil {
+ invalidTxs = append(invalidTxs, memTx)
+} else {
+ stop := h.txSelector.SelectTxForProposal(ctx, uint64(req.MaxTxBytes), maxBlockGas, memTx, txBz)
+ if stop {
+ return false
+}
+ txsLen := len(h.txSelector.SelectedTxs(ctx))
+ // If the tx is unordered, we don't need to update the sender sequence.
+ if !isUnordered {
+ for sender, seq := range txSignersSeqs {
+ // If txsLen != selectedTxsNums is true, it means that we've
+ // added a new tx to the selected txs, so we need to update
+ // the sequence of the sender.
+ if txsLen != selectedTxsNums {
+ selectedTxsSignersSeqs[sender] = seq
+} else if _, ok := selectedTxsSignersSeqs[sender]; !ok {
+ // The transaction hasn't been added but it passed the
+ // verification, so we know that the sequence is correct.
+ // So we set this sender's sequence to seq-1, in order
+ // to avoid unnecessary calls to PrepareProposalVerifyTx.
+ selectedTxsSignersSeqs[sender] = seq - 1
+}
+
+}
+
+}
+
+selectedTxsNums = txsLen
+}
+
+return true
+})
+ if resError != nil {
+ return nil, resError
+}
+ for _, tx := range invalidTxs {
+ err := h.mempool.Remove(tx)
+ if err != nil && !errors.Is(err, mempool.ErrTxNotFound) {
+ return nil, err
+}
+
+}
+
+return &abci.ResponsePrepareProposal{
+ Txs: h.txSelector.SelectedTxs(ctx)
+}, nil
+}
+}
+
+// ProcessProposalHandler returns the default implementation for processing an
+// ABCI proposal. Every transaction in the proposal must pass 2 conditions:
+//
+// 1. The transaction bytes must decode to a valid transaction.
+// 2. The transaction must be valid (i.e. pass runTx, AnteHandler only)
+//
+// If any transaction fails to pass either condition, the proposal is rejected.
+// Note that step (2) is identical to the validation step performed in
+// DefaultPrepareProposal. It is very important that the same validation logic
+// is used in both steps, and applications must ensure that this is the case in
+// non-default handlers.
+func (h *DefaultProposalHandler) ProcessProposalHandler() sdk.ProcessProposalHandler {
+ // If the mempool is nil or NoOp we simply return ACCEPT,
+ // because PrepareProposal may have included txs that could fail verification.
+ _, isNoOp := h.mempool.(mempool.NoOpMempool)
+ if h.mempool == nil || isNoOp {
+ return NoOpProcessProposal()
+}
+
+return func(ctx sdk.Context, req *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) {
+ var totalTxGas uint64
+
+ var maxBlockGas int64
+ if b := ctx.ConsensusParams().Block; b != nil {
+ maxBlockGas = b.MaxGas
+}
+ for _, txBytes := range req.Txs {
+ tx, err := h.txVerifier.ProcessProposalVerifyTx(txBytes)
+ if err != nil {
+ return &abci.ResponseProcessProposal{
+ Status: abci.ResponseProcessProposal_REJECT
+}, nil
+}
+ if maxBlockGas > 0 {
+ gasTx, ok := tx.(GasTx)
+ if ok {
+ totalTxGas += gasTx.GetGas()
+}
+ if totalTxGas > uint64(maxBlockGas) {
+ return &abci.ResponseProcessProposal{
+ Status: abci.ResponseProcessProposal_REJECT
+}, nil
+}
+
+}
+
+}
+
+return &abci.ResponseProcessProposal{
+ Status: abci.ResponseProcessProposal_ACCEPT
+}, nil
+}
+}
+
+// NoOpPrepareProposal defines a no-op PrepareProposal handler. It will always
+// return the transactions sent by the client's request.
+func NoOpPrepareProposal() sdk.PrepareProposalHandler {
+ return func(_ sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) {
+ return &abci.ResponsePrepareProposal{
+ Txs: req.Txs
+}, nil
+}
+}
+
+// NoOpProcessProposal defines a no-op ProcessProposal Handler. It will always
+// return ACCEPT.
+func NoOpProcessProposal() sdk.ProcessProposalHandler {
+ return func(_ sdk.Context, _ *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) {
+ return &abci.ResponseProcessProposal{
+ Status: abci.ResponseProcessProposal_ACCEPT
+}, nil
+}
+}
+
+// NoOpExtendVote defines a no-op ExtendVote handler. It will always return an
+// empty byte slice as the vote extension.
+func NoOpExtendVote() sdk.ExtendVoteHandler {
+ return func(_ sdk.Context, _ *abci.RequestExtendVote) (*abci.ResponseExtendVote, error) {
+ return &abci.ResponseExtendVote{
+ VoteExtension: []byte{
+}}, nil
+}
+}
+
+// NoOpVerifyVoteExtensionHandler defines a no-op VerifyVoteExtension handler. It
+// will always return an ACCEPT status with no error.
+func NoOpVerifyVoteExtensionHandler() sdk.VerifyVoteExtensionHandler {
+ return func(_ sdk.Context, _ *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error) {
+ return &abci.ResponseVerifyVoteExtension{
+ Status: abci.ResponseVerifyVoteExtension_ACCEPT
+}, nil
+}
+}
+
+// TxSelector defines a helper type that assists in selecting transactions during
+// mempool transaction selection in PrepareProposal. It keeps track of the total
+// number of bytes and total gas of the selected transactions. It also keeps
+// track of the selected transactions themselves.
+type TxSelector interface {
+ // SelectedTxs should return a copy of the selected transactions.
+ SelectedTxs(ctx context.Context) [][]byte
+
+ // Clear should clear the TxSelector, nulling out all relevant fields.
+ Clear()
+
+ // SelectTxForProposal should attempt to select a transaction for inclusion in
+ // a proposal based on inclusion criteria defined by the TxSelector. It must
+ // return if the caller should halt the transaction selection loop
+ // (typically over a mempool) or otherwise.
+ SelectTxForProposal(ctx context.Context, maxTxBytes, maxBlockGas uint64, memTx sdk.Tx, txBz []byte) bool
+}
+
+type defaultTxSelector struct {
+ totalTxBytes uint64
+ totalTxGas uint64
+ selectedTxs [][]byte
+}
+
+func NewDefaultTxSelector() TxSelector {
+ return &defaultTxSelector{
+}
+}
+
+func (ts *defaultTxSelector) SelectedTxs(_ context.Context) [][]byte {
+ txs := make([][]byte, len(ts.selectedTxs))
+
+copy(txs, ts.selectedTxs)
+
+return txs
+}
+
+func (ts *defaultTxSelector) Clear() {
+ ts.totalTxBytes = 0
+ ts.totalTxGas = 0
+ ts.selectedTxs = nil
+}
+
+func (ts *defaultTxSelector) SelectTxForProposal(_ context.Context, maxTxBytes, maxBlockGas uint64, memTx sdk.Tx, txBz []byte) bool {
+ txSize := uint64(cmttypes.ComputeProtoSizeForTxs([]cmttypes.Tx{
+ txBz
+}))
+
+var txGasLimit uint64
+ if memTx != nil {
+ if gasTx, ok := memTx.(GasTx); ok {
+ txGasLimit = gasTx.GetGas()
+}
+
+}
+
+ // only add the transaction to the proposal if we have enough capacity
+ if (txSize + ts.totalTxBytes) <= maxTxBytes {
+ // If there is a max block gas limit, add the tx only if the limit has
+ // not been met.
+ if maxBlockGas > 0 {
+ if (txGasLimit + ts.totalTxGas) <= maxBlockGas {
+ ts.totalTxGas += txGasLimit
+ ts.totalTxBytes += txSize
+ ts.selectedTxs = append(ts.selectedTxs, txBz)
+}
+
+} else {
+ ts.totalTxBytes += txSize
+ ts.selectedTxs = append(ts.selectedTxs, txBz)
+}
+
+}
+
+ // check if we've reached capacity; if so, we cannot select any more transactions
+ return ts.totalTxBytes >= maxTxBytes || (maxBlockGas > 0 && (ts.totalTxGas >= maxBlockGas))
+}
+```
diff --git a/sdk/next/learn/advanced/cli.mdx b/sdk/next/learn/advanced/cli.mdx
new file mode 100644
index 000000000..7fbe1b119
--- /dev/null
+++ b/sdk/next/learn/advanced/cli.mdx
@@ -0,0 +1,231 @@
+---
+title: Command-Line Interface
+---
+
+
+**Synopsis**
+This document describes how the command-line interface (CLI) works at a high level for an [**application**](/sdk/v0.53/learn/beginner/app-anatomy). A separate document for implementing a CLI for a Cosmos SDK [**module**](/sdk/v0.53/build/building-modules/intro) can be found [here](/sdk/v0.53/build/building-modules/module-interfaces#cli).
+
+
+## Command-Line Interface
+
+### Example Command
+
+There is no set way to create a CLI, but Cosmos SDK modules typically use the [Cobra Library](https://github.com/spf13/cobra). Building a CLI with Cobra entails defining commands, arguments, and flags. [**Commands**](#root-command) understand the actions users wish to take, such as `tx` for creating a transaction and `query` for querying the application. Each command can also have nested subcommands, necessary for naming the specific transaction type. Users also supply **Arguments**, such as account numbers to send coins to, and [**Flags**](#flags) to modify various aspects of the commands, such as gas prices or which node to broadcast to.
+
+Here is an example of a command a user might enter to interact with the simapp CLI `simd` in order to send some tokens:
+
+```bash
+simd tx bank send $MY_VALIDATOR_ADDRESS $RECIPIENT 1000stake --gas auto --gas-prices <gasPrices>
+```
+
+The first four strings specify the command:
+
+* The root command for the entire application `simd`.
+* The subcommand `tx`, which contains all commands that let users create transactions.
+* The subcommand `bank` to indicate which module to route the command to ([`x/bank`](/sdk/v0.53/build/modules/bank/README.mdx) module in this case).
+* The type of transaction `send`.
+
+The next three strings are arguments: the `from_address` the user wishes to send from, the `to_address` of the recipient, and the `amount` they want to send. Finally, the last few strings of the command are optional flags to indicate how much the user is willing to pay in fees (calculated using the amount of gas used to execute the transaction and the gas prices provided by the user).
+
+The CLI interacts with a [node](/sdk/v0.53/learn/advanced/node) to handle this command. The interface itself is defined in a `main.go` file.
+
+### Building the CLI
+
+The `main.go` file needs to have a `main()` function that creates a root command, to which all the application commands will be added as subcommands. The root command additionally handles:
+
+* **setting configurations** by reading in configuration files (e.g., the Cosmos SDK config file).
+* **adding any flags** to it, such as `--chain-id`.
+* **instantiating the `codec`** by injecting the application codecs. The [`codec`](/sdk/v0.53/learn/advanced/encoding) is used to encode and decode data structures for the application—stores can only persist `[]byte`s, so the developer must define a serialization format for their data structures or use the default, Protobuf.
+* **adding subcommands** for all the possible user interactions, including [transaction commands](#transaction-commands) and [query commands](#query-commands).
+
+The `main()` function finally creates an executor and [executes](https://pkg.go.dev/github.com/spf13/cobra#Command.Execute) the root command. See an example of a `main()` function from the `simapp` application:
+
+```go expandable
+package main
+
+import (
+
+ "fmt"
+ "os"
+
+ clientv2helpers "cosmossdk.io/client/v2/helpers"
+ "cosmossdk.io/simapp"
+ "cosmossdk.io/simapp/simd/cmd"
+
+ svrcmd "github.com/cosmos/cosmos-sdk/server/cmd"
+)
+
+func main() {
+ rootCmd := cmd.NewRootCmd()
+ if err := svrcmd.Execute(rootCmd, clientv2helpers.EnvPrefix, simapp.DefaultNodeHome); err != nil {
+ fmt.Fprintln(rootCmd.OutOrStderr(), err)
+
+os.Exit(1)
+}
+}
+```
+
+The rest of the document will detail what needs to be implemented for each step and include smaller portions of code from the `simapp` CLI files.
+
+## Adding Commands to the CLI
+
+Every application CLI first constructs a root command, then adds functionality by aggregating subcommands (often with further nested subcommands) using `rootCmd.AddCommand()`. The bulk of an application's unique capabilities lies in its transaction and query commands, called `TxCmd` and `QueryCmd` respectively.
+
+### Root Command
+
+The root command (called `rootCmd`) is what the user first types into the command line to indicate which application they wish to interact with. The string used to invoke the command (the "Use" field) is typically the name of the application suffixed with `-d`, e.g., `simd` or `gaiad`. The root command typically includes the following commands to support basic functionality in the application.
+
+* **Status** command from the Cosmos SDK rpc client tools, which prints information about the status of the connected [`Node`](/sdk/v0.53/learn/advanced/node). The status of a node includes `NodeInfo`, `SyncInfo`, and `ValidatorInfo`.
+* **Keys** [commands](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0/client/keys) from the Cosmos SDK client tools, which includes a collection of subcommands for using the key functions in the Cosmos SDK crypto tools, including adding a new key and saving it to the keyring, listing all public keys stored in the keyring, and deleting a key. For example, users can type `simd keys add <name>` to add a new key and save an encrypted copy to the keyring, using the flag `--recover` to recover a private key from a seed phrase or the flag `--multisig` to group multiple keys together to create a multisig key. For full details on the `add` key command, see the code [here](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0/client/keys/add.go). For more details about usage of `--keyring-backend` for storage of key credentials, look at the [keyring docs](/sdk/v0.53/user/run-node/keyring).
+* **Server** commands from the Cosmos SDK server package. These commands are responsible for providing the mechanisms necessary to start an ABCI CometBFT application and provide the CLI framework (based on [cobra](https://github.com/spf13/cobra)) necessary to fully bootstrap an application. The package exposes two core functions: `StartCmd` and `ExportCmd`, which create commands to start the application and export state respectively.
+ Learn more [here](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0/server).
+* [**Transaction**](#transaction-commands) commands.
+* [**Query**](#query-commands) commands.
+
+Next is an example `rootCmd` function from the `simapp` application. It instantiates the root command, adds a [*persistent* flag](#flags) and `PreRun` function to be run before every execution, and adds all of the necessary subcommands.
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.53.0/simapp/simd/cmd/root_v2.go#L47-L130
+```
+
+
+Use the `EnhanceRootCommand()` from the AutoCLI options to automatically add auto-generated commands from the modules to the root command.
+Additionally, it adds all manually defined module commands (`tx` and `query`) as well.
+Read more about [AutoCLI](/sdk/v0.53/learn/advanced/autocli) in its dedicated section.
+
+
+`rootCmd` has a function called `initAppConfig()`, which is useful for setting the application's custom configs.
+By default, the app uses the CometBFT app config template from Cosmos SDK, which can be overwritten via `initAppConfig()`.
+Here's an example code to override the default `app.toml` template.
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.53.0/simapp/simd/cmd/root_v2.go#L144-L199
+```
+
+The `initAppConfig()` also allows overriding the default Cosmos SDK [server config](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0/server/config/config.go#L231). One example is the `min-gas-prices` config, which defines the minimum gas prices a validator is willing to accept for processing a transaction. By default, the Cosmos SDK sets this parameter to `""` (empty string), which forces all validators to tweak their own `app.toml` and set a non-empty value, or else the node will halt on startup. This might not be the best UX for validators, so the chain developer can set a default `app.toml` value for validators inside this `initAppConfig()` function.
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.53.0/simapp/simd/cmd/root_v2.go#L164-L180
+```
+
+The root-level `status` and `keys` subcommands are common across most applications and do not interact with application state. The bulk of an application's functionality—what users can actually *do* with it—is enabled by its `tx` and `query` commands.
+
+### Transaction Commands
+
+[Transactions](/sdk/v0.53/learn/advanced/transactions) are objects wrapping [`Msg`s](/sdk/v0.53/build/building-modules/messages-and-queries#messages) that trigger state changes. To enable the creation of transactions using the CLI interface, a function `txCommand` is generally added to the `rootCmd`:
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.53.0/simapp/simd/cmd/root_v2.go#L222-L229
+```
+
+This `txCommand` function adds all the transactions available to end-users for the application. This typically includes:
+
+* **Sign command** from the [`auth`](/sdk/v0.53/build/modules/auth/auth) module that signs messages in a transaction. To enable multisig, add the `auth` module's `MultiSign` command. Since every transaction requires some sort of signature in order to be valid, the signing command is necessary for every application.
+* **Broadcast command** from the Cosmos SDK client tools, to broadcast transactions.
+* **All [module transaction commands](/sdk/v0.53/build/building-modules/module-interfaces#transaction-commands)** the application is dependent on, retrieved by using the [basic module manager's](/sdk/v0.53/build/building-modules/module-manager#basic-manager) `AddTxCommands()` function, or enhanced by [AutoCLI](/sdk/v0.53/learn/advanced/autocli).
+
+Here is an example of a `txCommand` aggregating these subcommands from the `simapp` application:
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.53.0/simapp/simd/cmd/root_v2.go#L270-L292
+```
+
+
+When using AutoCLI to generate module transaction commands, `EnhanceRootCommand()` automatically adds the module `tx` command to the root command.
+Read more about [AutoCLI](/sdk/v0.53/learn/advanced/autocli) in its dedicated section.
+
+
+### Query Commands
+
+[**Queries**](/sdk/v0.53/build/building-modules/messages-and-queries#queries) are objects that allow users to retrieve information about the application's state. To enable the creation of queries using the CLI interface, a function `queryCommand` is generally added to the `rootCmd`:
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.53.0/simapp/simd/cmd/root_v2.go#L222-L229
+```
+
+This `queryCommand` function adds all the queries available to end-users for the application. This typically includes:
+
+* **QueryTx** and/or other transaction query commands from the `auth` module, which allow the user to search for a transaction by inputting its hash, a list of tags, or a block height. These queries allow users to see if transactions have been included in a block.
+* **Account command** from the `auth` module, which displays the state (e.g., account balance) of an account given an address.
+* **Validator command** from the Cosmos SDK rpc client tools, which displays the validator set of a given height.
+* **Block command** from the Cosmos SDK RPC client tools, which displays the block data for a given height.
+* **All [module query commands](/sdk/v0.53/build/building-modules/module-interfaces#query-commands)** the application is dependent on, retrieved by using the [basic module manager's](/sdk/v0.53/build/building-modules/module-manager#basic-manager) `AddQueryCommands()` function, or enhanced by [AutoCLI](/sdk/v0.53/learn/advanced/autocli).
+
+Here is an example of a `queryCommand` aggregating subcommands from the `simapp` application:
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.53.0/simapp/simd/cmd/root_v2.go#L249-L268
+```
+
+
+When using AutoCLI to generate module query commands, `EnhanceRootCommand()` automatically adds the module `query` command to the root command.
+Read more about [AutoCLI](/sdk/v0.53/learn/advanced/autocli) in its dedicated section.
+
+
+## Flags
+
+Flags are used to modify commands; developers can include them in a `flags.go` file with their CLI. Users can explicitly include them in commands or pre-configure them inside their [`app.toml`](/sdk/v0.53/user/run-node/run-node#configuring-the-node-using-apptoml-and-configtoml). Commonly pre-configured flags include the `--node` to connect to and `--chain-id` of the blockchain the user wishes to interact with.
+
+A *persistent* flag (as opposed to a *local* flag) added to a command transcends all of its children: subcommands will inherit the configured values for these flags. Additionally, all flags have default values when they are added to commands; some toggle an option off, but others are empty values that the user needs to override to create valid commands. A flag can be explicitly marked as *required* so that an error is automatically thrown if the user does not provide a value, but it is also acceptable to handle unexpected missing flags differently.
+
+Flags are added to commands directly (generally in the [module's CLI file](/sdk/v0.53/build/building-modules/module-interfaces#flags) where module commands are defined), and no flag except for the `rootCmd` persistent flags has to be added at the application level. It is common to add a *persistent* flag for `--chain-id`, the unique identifier of the blockchain the application pertains to, to the root command. This flag can be added in the `main()` function, and doing so makes sense because the chain ID should not change across commands in this application CLI.
+
+## Environment variables
+
+Each flag is bound to its respective named environment variable. The name of the environment variable consists of two parts—capital case `basename` followed by the flag name. `-` must be substituted with `_`. For example, the flag `--node` for an application with basename `GAIA` is bound to `GAIA_NODE`. This allows reducing the number of flags typed for routine operations. For example, instead of:
+
+```shell
+gaia --home=./ --node=<node address> --chain-id="testchain-1" --keyring-backend=test tx ... --from=<from address>
+```
+
+this will be more convenient:
+
+```shell
+# define env variables in .env, .envrc etc
+GAIA_HOME=<path to home dir>
+GAIA_NODE=<node address>
+GAIA_CHAIN_ID="testchain-1"
+GAIA_KEYRING_BACKEND="test"
+
+# and later just use
+gaia tx ... --from=<from address>
+```
+
+## Configurations
+
+It is vital that the root command of an application uses the `PersistentPreRun()` cobra command property for executing the command, so all child commands have access to the server and client contexts. These contexts are set as their default values initially and may be modified, scoped to the command, in their respective `PersistentPreRun()` functions. Note that the `client.Context` is typically pre-populated with "default" values that may be useful for all commands to inherit and override if necessary.
+
+Here is an example of a `PersistentPreRun()` function from `simapp`:
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.53.0/simapp/simd/cmd/root_v2.go#L81-L120
+```
+
+The `SetCmdClientContextHandler` call reads persistent flags via `ReadPersistentCommandFlags`, which creates a `client.Context` and sets that on the root command's `Context`.
+
+The `InterceptConfigsPreRunHandler` call creates a viper literal, default `server.Context`, and a logger and sets that on the root command's `Context`. The `server.Context` will be modified and saved to disk. The internal `interceptConfigs` call reads or creates a CometBFT configuration based on the home path provided. In addition, `interceptConfigs` also reads and loads the application configuration, `app.toml`, and binds that to the `server.Context` viper literal. This is vital so the application can get access to not only the CLI flags but also to the application configuration values provided by this file.
+
+
+When configuring which logger is used, do not use `InterceptConfigsPreRunHandler`, which sets the default SDK logger, but instead use `InterceptConfigsAndCreateContext` and set the server context and the logger manually:
+
+```diff expandable
+-return server.InterceptConfigsPreRunHandler(cmd, customAppTemplate, customAppConfig, customCMTConfig)
+
++serverCtx, err := server.InterceptConfigsAndCreateContext(cmd, customAppTemplate, customAppConfig, customCMTConfig)
++if err != nil {
++ return err
++}
+
++// overwrite default server logger
++logger, err := server.CreateSDKLogger(serverCtx, cmd.OutOrStdout())
++if err != nil {
++ return err
++}
++serverCtx.Logger = logger.With(log.ModuleKey, "server")
+
++// set server context
++return server.SetCmdServerContext(cmd, serverCtx)
+```
+
+
diff --git a/sdk/next/learn/advanced/config.mdx b/sdk/next/learn/advanced/config.mdx
new file mode 100644
index 000000000..28bd6c1fd
--- /dev/null
+++ b/sdk/next/learn/advanced/config.mdx
@@ -0,0 +1,26 @@
+---
+title: Configuration
+description: >-
+ This documentation refers to the app.toml. If you'd like to read about the
+ config.toml, please visit the CometBFT docs.
+---
+
+This documentation refers to the app.toml. If you'd like to read about the config.toml, please visit [CometBFT docs](/cometbft/v0.38/docs/core/configuration).
+
+{/* the following is not a python reference, however syntax coloring makes the file more readable in the docs */}
+
+```python
+# Reference: https://github.com/cosmos/cosmos-sdk/blob/main/tools/confix/data/v0.47-app.toml
+```
+
+## inter-block-cache
+
+This feature will consume more RAM than a normal node if enabled.
+
+## iavl-cache-size
+
+Using this feature will increase RAM consumption.
+
+## iavl-lazy-loading
+
+This feature is to be used for archive nodes, allowing them to have a faster startup time.
diff --git a/sdk/next/learn/advanced/context.mdx b/sdk/next/learn/advanced/context.mdx
new file mode 100644
index 000000000..0183bd4ba
--- /dev/null
+++ b/sdk/next/learn/advanced/context.mdx
@@ -0,0 +1,822 @@
+---
+title: Context
+---
+
+
+**Synopsis**
+The `context` is a data structure intended to be passed from function to function that carries information about the current state of the application. It provides access to a branched storage (a safe branch of the entire state) as well as useful objects and information like `gasMeter`, `block height`, `consensus parameters` and more.
+
+
+
+**Prerequisite Readings**
+
+* [Anatomy of a Cosmos SDK Application](/sdk/v0.53/learn/beginner/app-anatomy)
+* [Lifecycle of a Transaction](/sdk/v0.53/learn/beginner/tx-lifecycle)
+
+
+
+## Context Definition
+
+The Cosmos SDK `Context` is a custom data structure that contains Go's stdlib [`context`](https://pkg.go.dev/context) as its base, and has many additional types within its definition that are specific to the Cosmos SDK. The `Context` is integral to transaction processing in that it allows modules to easily access their respective [store](/sdk/v0.53/learn/advanced/store#base-layer-kvstores) in the [`multistore`](/sdk/v0.53/learn/advanced/store#multistore) and retrieve transactional context such as the block header and gas meter.
+
+```go expandable
+package types
+
+import (
+
+ "context"
+ "time"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ "cosmossdk.io/core/comet"
+ "cosmossdk.io/core/header"
+ "cosmossdk.io/log"
+ "cosmossdk.io/store/gaskv"
+ storetypes "cosmossdk.io/store/types"
+)
+
+// ExecMode defines the execution mode which can be set on a Context.
+type ExecMode uint8
+
+// All possible execution modes.
+const (
+ ExecModeCheck ExecMode = iota
+ ExecModeReCheck
+ ExecModeSimulate
+ ExecModePrepareProposal
+ ExecModeProcessProposal
+ ExecModeVoteExtension
+ ExecModeVerifyVoteExtension
+ ExecModeFinalize
+)
+
+/*
+Context is an immutable object contains all information needed to
+process a request.
+
+It contains a context.Context object inside if you want to use that,
+but please do not over-use it. We try to keep all data structured
+and standard additions here would be better just to add to the Context struct
+*/
+type Context struct {
+ baseCtx context.Context
+ ms storetypes.MultiStore
+ // Deprecated: Use HeaderService for height, time, and chainID and CometService for the rest
+ header cmtproto.Header
+ // Deprecated: Use HeaderService for hash
+ headerHash []byte
+ // Deprecated: Use HeaderService for chainID and CometService for the rest
+ chainID string
+ txBytes []byte
+ logger log.Logger
+ voteInfo []abci.VoteInfo
+ gasMeter storetypes.GasMeter
+ blockGasMeter storetypes.GasMeter
+ checkTx bool
+ recheckTx bool // if recheckTx == true, then checkTx must also be true
+ sigverifyTx bool // when run simulation, because the private key corresponding to the account in the genesis.json randomly generated, we must skip the sigverify.
+ execMode ExecMode
+ minGasPrice DecCoins
+ consParams cmtproto.ConsensusParams
+ eventManager EventManagerI
+ priority int64 // The tx priority, only relevant in CheckTx
+ kvGasConfig storetypes.GasConfig
+ transientKVGasConfig storetypes.GasConfig
+ streamingManager storetypes.StreamingManager
+ cometInfo comet.BlockInfo
+ headerInfo header.Info
+}
+
+// Proposed rename, not done to avoid API breakage
+type Request = Context
+
+// Read-only accessors
+func (c Context)
+
+Context()
+
+context.Context {
+ return c.baseCtx
+}
+
+func (c Context)
+
+MultiStore()
+
+storetypes.MultiStore {
+ return c.ms
+}
+
+func (c Context)
+
+BlockHeight()
+
+int64 {
+ return c.header.Height
+}
+
+func (c Context)
+
+BlockTime()
+
+time.Time {
+ return c.header.Time
+}
+
+func (c Context)
+
+ChainID()
+
+string {
+ return c.chainID
+}
+
+func (c Context)
+
+TxBytes() []byte {
+ return c.txBytes
+}
+
+func (c Context)
+
+Logger()
+
+log.Logger {
+ return c.logger
+}
+
+func (c Context)
+
+VoteInfos() []abci.VoteInfo {
+ return c.voteInfo
+}
+
+func (c Context)
+
+GasMeter()
+
+storetypes.GasMeter {
+ return c.gasMeter
+}
+
+func (c Context)
+
+BlockGasMeter()
+
+storetypes.GasMeter {
+ return c.blockGasMeter
+}
+
+func (c Context)
+
+IsCheckTx()
+
+bool {
+ return c.checkTx
+}
+
+func (c Context)
+
+IsReCheckTx()
+
+bool {
+ return c.recheckTx
+}
+
+func (c Context)
+
+IsSigverifyTx()
+
+bool {
+ return c.sigverifyTx
+}
+
+func (c Context)
+
+ExecMode()
+
+ExecMode {
+ return c.execMode
+}
+
+func (c Context)
+
+MinGasPrices()
+
+DecCoins {
+ return c.minGasPrice
+}
+
+func (c Context)
+
+EventManager()
+
+EventManagerI {
+ return c.eventManager
+}
+
+func (c Context)
+
+Priority()
+
+int64 {
+ return c.priority
+}
+
+func (c Context)
+
+KVGasConfig()
+
+storetypes.GasConfig {
+ return c.kvGasConfig
+}
+
+func (c Context)
+
+TransientKVGasConfig()
+
+storetypes.GasConfig {
+ return c.transientKVGasConfig
+}
+
+func (c Context)
+
+StreamingManager()
+
+storetypes.StreamingManager {
+ return c.streamingManager
+}
+
+func (c Context)
+
+CometInfo()
+
+comet.BlockInfo {
+ return c.cometInfo
+}
+
+func (c Context)
+
+HeaderInfo()
+
+header.Info {
+ return c.headerInfo
+}
+
+// BlockHeader returns the header by value.
+func (c Context)
+
+BlockHeader()
+
+cmtproto.Header {
+ return c.header
+}
+
+// HeaderHash returns a copy of the header hash obtained during abci.RequestBeginBlock
+func (c Context)
+
+HeaderHash() []byte {
+ hash := make([]byte, len(c.headerHash))
+
+copy(hash, c.headerHash)
+
+return hash
+}
+
+func (c Context)
+
+ConsensusParams()
+
+cmtproto.ConsensusParams {
+ return c.consParams
+}
+
+func (c Context)
+
+Deadline() (deadline time.Time, ok bool) {
+ return c.baseCtx.Deadline()
+}
+
+func (c Context)
+
+Done() <-chan struct{
+} {
+ return c.baseCtx.Done()
+}
+
+func (c Context)
+
+Err()
+
+error {
+ return c.baseCtx.Err()
+}
+
+// create a new context
+func NewContext(ms storetypes.MultiStore, header cmtproto.Header, isCheckTx bool, logger log.Logger)
+
+Context {
+ // https://github.com/gogo/protobuf/issues/519
+ header.Time = header.Time.UTC()
+
+return Context{
+ baseCtx: context.Background(),
+ ms: ms,
+ header: header,
+ chainID: header.ChainID,
+ checkTx: isCheckTx,
+ sigverifyTx: true,
+ logger: logger,
+ gasMeter: storetypes.NewInfiniteGasMeter(),
+ minGasPrice: DecCoins{
+},
+ eventManager: NewEventManager(),
+ kvGasConfig: storetypes.KVGasConfig(),
+ transientKVGasConfig: storetypes.TransientGasConfig(),
+}
+}
+
+// WithContext returns a Context with an updated context.Context.
+func (c Context)
+
+WithContext(ctx context.Context)
+
+Context {
+ c.baseCtx = ctx
+ return c
+}
+
+// WithMultiStore returns a Context with an updated MultiStore.
+func (c Context)
+
+WithMultiStore(ms storetypes.MultiStore)
+
+Context {
+ c.ms = ms
+ return c
+}
+
+// WithBlockHeader returns a Context with an updated CometBFT block header in UTC time.
+func (c Context)
+
+WithBlockHeader(header cmtproto.Header)
+
+Context {
+ // https://github.com/gogo/protobuf/issues/519
+ header.Time = header.Time.UTC()
+
+c.header = header
+ return c
+}
+
+// WithHeaderHash returns a Context with an updated CometBFT block header hash.
+func (c Context)
+
+WithHeaderHash(hash []byte)
+
+Context {
+ temp := make([]byte, len(hash))
+
+copy(temp, hash)
+
+c.headerHash = temp
+ return c
+}
+
+// WithBlockTime returns a Context with an updated CometBFT block header time in UTC with no monotonic component.
+// Stripping the monotonic component is for time equality.
+func (c Context)
+
+WithBlockTime(newTime time.Time)
+
+Context {
+ newHeader := c.BlockHeader()
+ // https://github.com/gogo/protobuf/issues/519
+ newHeader.Time = newTime.Round(0).UTC()
+
+return c.WithBlockHeader(newHeader)
+}
+
+// WithProposer returns a Context with an updated proposer consensus address.
+func (c Context)
+
+WithProposer(addr ConsAddress)
+
+Context {
+ newHeader := c.BlockHeader()
+
+newHeader.ProposerAddress = addr.Bytes()
+
+return c.WithBlockHeader(newHeader)
+}
+
+// WithBlockHeight returns a Context with an updated block height.
+func (c Context)
+
+WithBlockHeight(height int64)
+
+Context {
+ newHeader := c.BlockHeader()
+
+newHeader.Height = height
+ return c.WithBlockHeader(newHeader)
+}
+
+// WithChainID returns a Context with an updated chain identifier.
+func (c Context)
+
+WithChainID(chainID string)
+
+Context {
+ c.chainID = chainID
+ return c
+}
+
+// WithTxBytes returns a Context with an updated txBytes.
+func (c Context)
+
+WithTxBytes(txBytes []byte)
+
+Context {
+ c.txBytes = txBytes
+ return c
+}
+
+// WithLogger returns a Context with an updated logger.
+func (c Context)
+
+WithLogger(logger log.Logger)
+
+Context {
+ c.logger = logger
+ return c
+}
+
+// WithVoteInfos returns a Context with an updated consensus VoteInfo.
+func (c Context)
+
+WithVoteInfos(voteInfo []abci.VoteInfo)
+
+Context {
+ c.voteInfo = voteInfo
+ return c
+}
+
+// WithGasMeter returns a Context with an updated transaction GasMeter.
+func (c Context)
+
+WithGasMeter(meter storetypes.GasMeter)
+
+Context {
+ c.gasMeter = meter
+ return c
+}
+
+// WithBlockGasMeter returns a Context with an updated block GasMeter
+func (c Context)
+
+WithBlockGasMeter(meter storetypes.GasMeter)
+
+Context {
+ c.blockGasMeter = meter
+ return c
+}
+
+// WithKVGasConfig returns a Context with an updated gas configuration for
+// the KVStore
+func (c Context)
+
+WithKVGasConfig(gasConfig storetypes.GasConfig)
+
+Context {
+ c.kvGasConfig = gasConfig
+ return c
+}
+
+// WithTransientKVGasConfig returns a Context with an updated gas configuration for
+// the transient KVStore
+func (c Context)
+
+WithTransientKVGasConfig(gasConfig storetypes.GasConfig)
+
+Context {
+ c.transientKVGasConfig = gasConfig
+ return c
+}
+
+// WithIsCheckTx enables or disables CheckTx value for verifying transactions and returns an updated Context
+func (c Context)
+
+WithIsCheckTx(isCheckTx bool)
+
+Context {
+ c.checkTx = isCheckTx
+ c.execMode = ExecModeCheck
+ return c
+}
+
+// WithIsRecheckTx called with true will also set true on checkTx in order to
+// enforce the invariant that if recheckTx = true then checkTx = true as well.
+func (c Context)
+
+WithIsReCheckTx(isRecheckTx bool)
+
+Context {
+ if isRecheckTx {
+ c.checkTx = true
+}
+
+c.recheckTx = isRecheckTx
+ c.execMode = ExecModeReCheck
+ return c
+}
+
+// WithIsSigverifyTx called with true will sigverify in auth module
+func (c Context)
+
+WithIsSigverifyTx(isSigverifyTx bool)
+
+Context {
+ c.sigverifyTx = isSigverifyTx
+ return c
+}
+
+// WithExecMode returns a Context with an updated ExecMode.
+func (c Context)
+
+WithExecMode(m ExecMode)
+
+Context {
+ c.execMode = m
+ return c
+}
+
+// WithMinGasPrices returns a Context with an updated minimum gas price value
+func (c Context)
+
+WithMinGasPrices(gasPrices DecCoins)
+
+Context {
+ c.minGasPrice = gasPrices
+ return c
+}
+
+// WithConsensusParams returns a Context with an updated consensus params
+func (c Context)
+
+WithConsensusParams(params cmtproto.ConsensusParams)
+
+Context {
+ c.consParams = params
+ return c
+}
+
+// WithEventManager returns a Context with an updated event manager
+func (c Context)
+
+WithEventManager(em EventManagerI)
+
+Context {
+ c.eventManager = em
+ return c
+}
+
+// WithPriority returns a Context with an updated tx priority
+func (c Context)
+
+WithPriority(p int64)
+
+Context {
+ c.priority = p
+ return c
+}
+
+// WithStreamingManager returns a Context with an updated streaming manager
+func (c Context)
+
+WithStreamingManager(sm storetypes.StreamingManager)
+
+Context {
+ c.streamingManager = sm
+ return c
+}
+
+// WithCometInfo returns a Context with an updated comet info
+func (c Context)
+
+WithCometInfo(cometInfo comet.BlockInfo)
+
+Context {
+ c.cometInfo = cometInfo
+ return c
+}
+
+// WithHeaderInfo returns a Context with an updated header info
+func (c Context)
+
+WithHeaderInfo(headerInfo header.Info)
+
+Context {
+ // Settime to UTC
+ headerInfo.Time = headerInfo.Time.UTC()
+
+c.headerInfo = headerInfo
+ return c
+}
+
+// IsZero reports whether the Context is uninitialized, i.e. carries no
+// MultiStore.
+// TODO: remove???
+func (c Context) IsZero() bool {
+	return c.ms == nil
+}
+
+// WithValue returns a copy of the Context whose embedded base context carries
+// the provided key/value pair.
+func (c Context) WithValue(key, value interface{}) Context {
+	c.baseCtx = context.WithValue(c.baseCtx, key, value)
+	return c
+}
+
+// Value returns the value associated with key. The Context returns itself for
+// SdkContextKey; all other keys are delegated to the base context.
+func (c Context) Value(key interface{}) interface{} {
+	if key == SdkContextKey {
+		return c
+	}
+
+	return c.baseCtx.Value(key)
+}
+
+// ----------------------------------------------------------------------------
+// Store / Caching
+// ----------------------------------------------------------------------------
+
+// KVStore fetches a KVStore from the MultiStore, wrapped with the context's
+// gas meter and KV gas config so reads/writes are metered.
+func (c Context) KVStore(key storetypes.StoreKey) storetypes.KVStore {
+	return gaskv.NewStore(c.ms.GetKVStore(key), c.gasMeter, c.kvGasConfig)
+}
+
+// TransientStore fetches a TransientStore from the MultiStore, wrapped with
+// the context's gas meter and transient KV gas config.
+func (c Context) TransientStore(key storetypes.StoreKey) storetypes.KVStore {
+	return gaskv.NewStore(c.ms.GetKVStore(key), c.gasMeter, c.transientKVGasConfig)
+}
+
+// CacheContext returns a new Context with the multi-store cached and a new
+// EventManager. The cached context is written to the context when writeCache
+// is called. Note, events are automatically emitted on the parent context's
+// EventManager when the caller executes the write.
+func (c Context) CacheContext() (cc Context, writeCache func()) {
+	cms := c.ms.CacheMultiStore()
+	cc = c.WithMultiStore(cms).WithEventManager(NewEventManager())
+
+	writeCache = func() {
+		// propagate events to the parent before committing the branched store
+		c.EventManager().EmitEvents(cc.EventManager().Events())
+		cms.Write()
+	}
+
+	return cc, writeCache
+}
+
+// Compile-time interface satisfaction checks.
+var (
+	_ context.Context    = Context{}
+	_ storetypes.Context = Context{}
+)
+
+// ContextKey defines a type alias for a stdlib Context key.
+type ContextKey string
+
+// SdkContextKey is the key in the context.Context which holds the sdk.Context.
+const SdkContextKey ContextKey = "sdk-context"
+
+// WrapSDKContext returns a stdlib context.Context with the provided sdk.Context's internal
+// context as a value. It is useful for passing an sdk.Context through methods that take a
+// stdlib context.Context parameter such as generated gRPC methods. To get the original
+// sdk.Context back, call UnwrapSDKContext.
+//
+// Deprecated: there is no need to wrap anymore as the Cosmos SDK context implements context.Context.
+func WrapSDKContext(ctx Context) context.Context {
+	return ctx
+}
+
+// UnwrapSDKContext retrieves a Context from a context.Context instance
+// attached with WrapSDKContext. It panics if a Context was not properly
+// attached.
+func UnwrapSDKContext(ctx context.Context) Context {
+	if sdkCtx, ok := ctx.(Context); ok {
+		return sdkCtx
+	}
+
+	return ctx.Value(SdkContextKey).(Context)
+}
+```
+
+* **Base Context:** The base type is a Go [Context](https://pkg.go.dev/context), which is explained further in the [Go Context Package](#go-context-package) section below.
+* **Multistore:** Every application's `BaseApp` contains a [`CommitMultiStore`](/sdk/v0.53/learn/advanced/store#multistore) which is provided when a `Context` is created. Calling the `KVStore()` and `TransientStore()` methods allows modules to fetch their respective [`KVStore`](/sdk/v0.53/learn/advanced/store#base-layer-kvstores) using their unique `StoreKey`.
+* **Header:** The [header](/cometbft/v0.38/spec/core/Data_structures#header) is a Blockchain type. It carries important information about the state of the blockchain, such as block height and proposer of the current block.
+* **Header Hash:** The current block header hash, obtained during `abci.FinalizeBlock`.
+* **Chain ID:** The unique identification number of the blockchain a block pertains to.
+* **Transaction Bytes:** The `[]byte` representation of a transaction being processed using the context. Every transaction is processed by various parts of the Cosmos SDK and consensus engine (e.g. CometBFT) throughout its [lifecycle](/sdk/v0.53/learn/beginner/tx-lifecycle), some of which do not have any understanding of transaction types. Thus, transactions are marshaled into the generic `[]byte` type using some kind of [encoding format](/sdk/v0.53/learn/advanced/encoding) such as [Amino](/sdk/v0.53/learn/advanced/encoding).
+* **Logger:** A `logger` from the CometBFT libraries. Learn more about logs [here](/cometbft/v0.38/docs/core/configuration). Modules call this method to create their own unique module-specific logger.
+* **VoteInfo:** A list of the ABCI type [`VoteInfo`](/cometbft/v0.38/spec/abci/Methods#voteinfo), which includes the name of a validator and a boolean indicating whether they have signed the block.
+* **Gas Meters:** Specifically, a [`gasMeter`](/sdk/v0.53/learn/beginner/gas-fees#main-gas-meter) for the transaction currently being processed using the context and a [`blockGasMeter`](/sdk/v0.53/learn/beginner/gas-fees#block-gas-meter) for the entire block it belongs to. Users specify how much in fees they wish to pay for the execution of their transaction; these gas meters keep track of how much [gas](/sdk/v0.53/learn/beginner/gas-fees) has been used in the transaction or block so far. If the gas meter runs out, execution halts.
+* **CheckTx Mode:** A boolean value indicating whether a transaction should be processed in `CheckTx` or `DeliverTx` mode.
+* **Min Gas Price:** The minimum [gas](/sdk/v0.53/learn/beginner/gas-fees) price a node is willing to take in order to include a transaction in its block. This price is a local value configured by each node individually, and should therefore **not be used in any functions used in sequences leading to state-transitions**.
+* **Consensus Params:** The ABCI type [Consensus Parameters](/cometbft/v0.38/spec/abci/Methods#consensus-parameters), which specify certain limits for the blockchain, such as maximum gas for a block.
+* **Event Manager:** The event manager allows any caller with access to a `Context` to emit [`Events`](/sdk/v0.53/learn/advanced/events). Modules may define module specific
+ `Events` by defining various `Types` and `Attributes` or use the common definitions found in `types/`. Clients can subscribe or query for these `Events`. These `Events` are collected throughout `FinalizeBlock` and are returned to CometBFT for indexing.
+* **Priority:** The transaction priority, only relevant in `CheckTx`.
+* **KV `GasConfig`:** Enables applications to set a custom `GasConfig` for the `KVStore`.
+* **Transient KV `GasConfig`:** Enables applications to set a custom `GasConfig` for the transient `KVStore`.
+* **StreamingManager:** The streamingManager field provides access to the streaming manager, which allows modules to subscribe to state changes emitted by the blockchain. The streaming manager is used by the state listening API, which is described in [ADR 038](/sdk/v0.53/build/architecture/adr-038-state-listening).
+* **CometInfo:** A lightweight field that contains information about the current block, such as the block height, time, and hash. This information can be used for validating evidence, providing historical data, and enhancing the user experience. For further details see [here](https://github.com/cosmos/cosmos-sdk/blob/main/core/comet/service.go#L14).
+* **HeaderInfo:** The `headerInfo` field contains information about the current block header, such as the chain ID, gas limit, and timestamp. For further details see [here](https://github.com/cosmos/cosmos-sdk/blob/main/core/header/service.go#L14).
+
+## Go Context Package
+
+A basic `Context` is defined in the [Golang Context Package](https://pkg.go.dev/context). A `Context`
+is an immutable data structure that carries request-scoped data across APIs and processes. Contexts
+are also designed to enable concurrency and to be used in goroutines.
+
+Contexts are intended to be **immutable**; they should never be edited. Instead, the convention is
+to create a child context from its parent using a `With` function. For example:
+
+```go
+childCtx = parentCtx.WithBlockHeader(header)
+```
+
+The [Golang Context Package](https://pkg.go.dev/context) documentation instructs developers to
+explicitly pass a context `ctx` as the first argument of a process.
+
+## Store branching
+
+The `Context` contains a `MultiStore`, which allows for branching and caching functionality using `CacheMultiStore`
+(queries in `CacheMultiStore` are cached to avoid future round trips).
+Each `KVStore` is branched in a safe and isolated ephemeral storage. Processes are free to write changes to
+the `CacheMultiStore`. If a state-transition sequence is performed without issue, the store branch can
+be committed to the underlying store at the end of the sequence, or disregarded if something
+goes wrong. The pattern of usage for a Context is as follows:
+
+1. A process receives a Context `ctx` from its parent process, which provides information needed to
+ perform the process.
+2. The `ctx.ms` is a **branched store**, i.e. a branch of the [multistore](/sdk/v0.53/learn/advanced/store#multistore) is made so that the process can make changes to the state as it executes, without changing the original `ctx.ms`. This is useful to protect the underlying multistore in case the changes need to be reverted at some point in the execution.
+3. The process may read and write from `ctx` as it is executing. It may call a subprocess and pass
+ `ctx` to it as needed.
+4. When a subprocess returns, it checks if the result is a success or failure. If a failure, nothing
+ needs to be done - the branch `ctx` is simply discarded. If successful, the changes made to
+ the `CacheMultiStore` can be committed to the original `ctx.ms` via `Write()`.
+
+For example, here is a snippet from the [`runTx`](/sdk/v0.53/learn/advanced/baseapp#runtx-antehandler-runmsgs-posthandler) function in [`baseapp`](/sdk/v0.53/learn/advanced/baseapp):
+
+```go
+// Branch the context/store before executing the messages.
+runMsgCtx, msCache := app.cacheTxContext(ctx, txBytes)
+
+result = app.runMsgs(runMsgCtx, msgs, mode)
+result.GasWanted = gasWanted
+
+// In CheckTx mode the branched writes are simply discarded.
+if mode != runTxModeDeliver {
+	return result
+}
+
+// Commit the branched store only when all messages succeeded.
+if result.IsOK() {
+	msCache.Write()
+}
+```
+
+Here is the process:
+
+1. Prior to calling `runMsgs` on the message(s) in the transaction, it uses `app.cacheTxContext()`
+ to branch and cache the context and multistore.
+2. `runMsgCtx` - the context with branched store, is used in `runMsgs` to return a result.
+3. If the process is running in [`checkTxMode`](/sdk/v0.53/learn/advanced/baseapp#checktx), there is no need to write the
+ changes - the result is returned immediately.
+4. If the process is running in [`deliverTxMode`](/sdk/v0.53/learn/advanced/baseapp#delivertx) and the result indicates
+ a successful run over all the messages, the branched multistore is written back to the original.
diff --git a/sdk/next/learn/advanced/encoding.mdx b/sdk/next/learn/advanced/encoding.mdx
new file mode 100644
index 000000000..9d81b561a
--- /dev/null
+++ b/sdk/next/learn/advanced/encoding.mdx
@@ -0,0 +1,1947 @@
+---
+title: Encoding
+---
+
+
+**Synopsis**
+While encoding in the Cosmos SDK used to be mainly handled by the `go-amino` codec, the Cosmos SDK is moving toward using `gogoprotobuf` for both state and client-side encoding.
+
+
+
+**Prerequisite Readings**
+
+* [Anatomy of a Cosmos SDK application](/sdk/v0.53/learn/beginner/app-anatomy)
+
+
+
+## Encoding
+
+The Cosmos SDK utilizes two binary wire encoding protocols: [Amino](https://github.com/tendermint/go-amino/), which is an object encoding specification, and [Protocol Buffers](https://developers.google.com/protocol-buffers), a subset of Proto3 with an extension for interface support. See the [Proto3 spec](https://developers.google.com/protocol-buffers/docs/proto3) for more information on Proto3, which Amino is largely compatible with (but not with Proto2).
+
+Due to Amino having significant performance drawbacks, being reflection-based, and not having any meaningful cross-language/client support, Protocol Buffers, specifically [gogoprotobuf](https://github.com/cosmos/gogoproto/), is being used in place of Amino. Note that this process of using Protocol Buffers over Amino is still ongoing.
+
+Binary wire encoding of types in the Cosmos SDK can be broken down into two main categories: client encoding and store encoding. Client encoding mainly revolves around transaction processing and signing, whereas store encoding revolves around types used in state-machine transitions and what is ultimately stored in the Merkle tree.
+
+For store encoding, protobuf definitions can exist for any type and will typically have an Amino-based "intermediary" type. Specifically, the protobuf-based type definition is used for serialization and persistence, whereas the Amino-based type is used for business logic in the state-machine where they may convert back and forth. Note that the Amino-based types may slowly be phased out in the future, so developers should take note to use the protobuf message definitions where possible.
+
+In the `codec` package, there exist two core interfaces, `BinaryCodec` and `JSONCodec`, where the former encapsulates the current Amino interface except it operates on types implementing the latter instead of generic `interface{}` types.
+
+The `ProtoCodec` handles both binary and JSON serialization via Protobuf. This means that modules may use Protobuf encoding, but the types must implement `ProtoMarshaler`. If modules wish to avoid implementing this interface for their types, it can be autogenerated via [buf](https://buf.build/).
+
+If modules use [Collections](/sdk/v0.53/build/packages/collections), encoding and decoding are handled automatically; marshal and unmarshal should not be handled manually unless for specific cases identified by the developer.
+
+### Gogoproto
+
+Modules are encouraged to utilize Protobuf encoding for their respective types. In the Cosmos SDK, we use the [Gogoproto](https://github.com/cosmos/gogoproto) specific implementation of the Protobuf spec that offers speed and developer experience improvements compared to the official [Google protobuf implementation](https://github.com/protocolbuffers/protobuf).
+
+### Guidelines for protobuf message definitions
+
+In addition to [following official Protocol Buffer guidelines](https://developers.google.com/protocol-buffers/docs/proto3#simple), we recommend using these annotations in `.proto` files when dealing with interfaces:
+
+* Use `cosmos_proto.accepts_interface` to annotate `Any` fields that accept interfaces:
+ * Pass the same fully qualified name as `protoName` to `InterfaceRegistry.RegisterInterface`.
+ * Example: `(cosmos_proto.accepts_interface) = "cosmos.gov.v1beta1.Content"` (not just `Content`).
+* Annotate interface implementations with `cosmos_proto.implements_interface`:
+ * Pass the same fully qualified name as `protoName` to `InterfaceRegistry.RegisterInterface`.
+ * Example: `(cosmos_proto.implements_interface) = "cosmos.authz.v1beta1.Authorization"` (not just `Authorization`).
+
+Code generators can then match the `accepts_interface` and `implements_interface` annotations to determine whether some Protobuf messages are allowed to be packed in a given `Any` field.
+
+### Transaction Encoding
+
+Another important use of Protobuf is the encoding and decoding of [transactions](/sdk/v0.53/learn/advanced/transactions). Transactions are defined by the application or the Cosmos SDK but are then passed to the underlying consensus engine to be relayed to other peers. Since the underlying consensus engine is agnostic to the application, the consensus engine accepts only transactions in the form of raw bytes.
+
+* The `TxEncoder` object performs the encoding.
+* The `TxDecoder` object performs the decoding.
+
+```go expandable
+package types
+
+import (
+	"encoding/json"
+	fmt "fmt"
+	strings "strings"
+	"time"
+
+	"github.com/cosmos/gogoproto/proto"
+	protov2 "google.golang.org/protobuf/proto"
+
+	"github.com/cosmos/cosmos-sdk/codec"
+	codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+	cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
+)
+
+type (
+	// Msg defines the interface a transaction message needed to fulfill.
+	Msg = proto.Message
+
+	// LegacyMsg defines the interface a transaction message needed to fulfill up through
+	// v0.47.
+	LegacyMsg interface {
+		Msg
+
+		// GetSigners returns the addrs of signers that must sign.
+		// CONTRACT: All signatures must be present to be valid.
+		// CONTRACT: Returns addrs in some deterministic order.
+		GetSigners() []AccAddress
+	}
+
+	// Fee defines an interface for an application-defined concrete
+	// transaction type to be able to set and return the transaction fee.
+	Fee interface {
+		GetGas() uint64
+		GetAmount() Coins
+	}
+
+	// Signature defines an interface for an application-defined
+	// concrete transaction type to be able to set and return transaction signatures.
+	Signature interface {
+		GetPubKey() cryptotypes.PubKey
+		GetSignature() []byte
+	}
+
+	// HasMsgs defines an interface a transaction must fulfill.
+	HasMsgs interface {
+		// GetMsgs gets all the transaction's messages.
+		GetMsgs() []Msg
+	}
+
+	// Tx defines an interface a transaction must fulfill.
+	Tx interface {
+		HasMsgs
+
+		// GetMsgsV2 gets the transaction's messages as google.golang.org/protobuf/proto.Message's.
+		GetMsgsV2() ([]protov2.Message, error)
+	}
+
+	// FeeTx defines the interface to be implemented by Tx to use the FeeDecorators
+	FeeTx interface {
+		Tx
+		GetGas() uint64
+		GetFee() Coins
+		FeePayer() []byte
+		FeeGranter() []byte
+	}
+
+	// TxWithMemo must have GetMemo() method to use ValidateMemoDecorator
+	TxWithMemo interface {
+		Tx
+		GetMemo() string
+	}
+
+	// TxWithTimeoutTimeStamp extends the Tx interface by allowing a transaction to
+	// set a timeout timestamp.
+	TxWithTimeoutTimeStamp interface {
+		Tx
+
+		GetTimeoutTimeStamp() time.Time
+	}
+
+	// TxWithTimeoutHeight extends the Tx interface by allowing a transaction to
+	// set a height timeout.
+	TxWithTimeoutHeight interface {
+		Tx
+
+		GetTimeoutHeight() uint64
+	}
+
+	// TxWithUnordered extends the Tx interface by allowing a transaction to set
+	// the unordered field, which implicitly relies on TxWithTimeoutTimeStamp.
+	TxWithUnordered interface {
+		TxWithTimeoutTimeStamp
+
+		GetUnordered() bool
+	}
+
+	// HasValidateBasic defines a type that has a ValidateBasic method.
+	// ValidateBasic is deprecated and now optional.
+	// Prefer validating messages directly in the msg server.
+	HasValidateBasic interface {
+		// ValidateBasic does a simple validation check that
+		// doesn't require access to any other information.
+		ValidateBasic() error
+	}
+)
+
+// TxDecoder unmarshals transaction bytes
+type TxDecoder func(txBytes []byte) (Tx, error)
+
+// TxEncoder marshals transaction to bytes
+type TxEncoder func(tx Tx) ([]byte, error)
+
+// MsgTypeURL returns the TypeURL of a `sdk.Msg`.
+var MsgTypeURL = codectypes.MsgTypeURL
+
+// GetMsgFromTypeURL returns a `sdk.Msg` message type from a type URL
+func GetMsgFromTypeURL(cdc codec.Codec, input string) (Msg, error) {
+	var msg Msg
+
+	// Wrap the URL in an Any-style JSON envelope so the codec can resolve it.
+	bz, err := json.Marshal(struct {
+		Type string `json:"@type"`
+	}{
+		Type: input,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	if err := cdc.UnmarshalInterfaceJSON(bz, &msg); err != nil {
+		return nil, fmt.Errorf("failed to determine sdk.Msg for %s URL : %w", input, err)
+	}
+
+	return msg, nil
+}
+
+// GetModuleNameFromTypeURL assumes that module name is the second element of the msg type URL
+// e.g. "cosmos.bank.v1beta1.MsgSend" => "bank"
+// It returns an empty string if the input is not a valid type URL
+func GetModuleNameFromTypeURL(input string) string {
+	moduleName := strings.Split(input, ".")
+	if len(moduleName) > 1 {
+		return moduleName[1]
+	}
+
+	return ""
+}
+```
+
+A standard implementation of both these objects can be found in the [`auth/tx` module](/sdk/v0.53/build/modules/auth/tx):
+
+```go expandable
+package tx
+
+import (
+	"fmt"
+
+	"google.golang.org/protobuf/encoding/protowire"
+
+	errorsmod "cosmossdk.io/errors"
+
+	"github.com/cosmos/cosmos-sdk/codec"
+	"github.com/cosmos/cosmos-sdk/codec/unknownproto"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+	"github.com/cosmos/cosmos-sdk/types/tx"
+)
+
+// DefaultTxDecoder returns a default protobuf TxDecoder using the provided Marshaler.
+func DefaultTxDecoder(cdc codec.Codec) sdk.TxDecoder {
+	return func(txBytes []byte) (sdk.Tx, error) {
+		// Make sure txBytes follow ADR-027.
+		err := rejectNonADR027TxRaw(txBytes)
+		if err != nil {
+			return nil, errorsmod.Wrap(sdkerrors.ErrTxDecode, err.Error())
+		}
+
+		var raw tx.TxRaw
+
+		// reject all unknown proto fields in the root TxRaw
+		err = unknownproto.RejectUnknownFieldsStrict(txBytes, &raw, cdc.InterfaceRegistry())
+		if err != nil {
+			return nil, errorsmod.Wrap(sdkerrors.ErrTxDecode, err.Error())
+		}
+
+		err = cdc.Unmarshal(txBytes, &raw)
+		if err != nil {
+			return nil, err
+		}
+
+		var body tx.TxBody
+
+		// allow non-critical unknown fields in TxBody
+		txBodyHasUnknownNonCriticals, err := unknownproto.RejectUnknownFields(raw.BodyBytes, &body, true, cdc.InterfaceRegistry())
+		if err != nil {
+			return nil, errorsmod.Wrap(sdkerrors.ErrTxDecode, err.Error())
+		}
+
+		err = cdc.Unmarshal(raw.BodyBytes, &body)
+		if err != nil {
+			return nil, errorsmod.Wrap(sdkerrors.ErrTxDecode, err.Error())
+		}
+
+		var authInfo tx.AuthInfo
+
+		// reject all unknown proto fields in AuthInfo
+		err = unknownproto.RejectUnknownFieldsStrict(raw.AuthInfoBytes, &authInfo, cdc.InterfaceRegistry())
+		if err != nil {
+			return nil, errorsmod.Wrap(sdkerrors.ErrTxDecode, err.Error())
+		}
+
+		err = cdc.Unmarshal(raw.AuthInfoBytes, &authInfo)
+		if err != nil {
+			return nil, errorsmod.Wrap(sdkerrors.ErrTxDecode, err.Error())
+		}
+
+		theTx := &tx.Tx{
+			Body:       &body,
+			AuthInfo:   &authInfo,
+			Signatures: raw.Signatures,
+		}
+
+		return &wrapper{
+			tx:                           theTx,
+			bodyBz:                       raw.BodyBytes,
+			authInfoBz:                   raw.AuthInfoBytes,
+			txBodyHasUnknownNonCriticals: txBodyHasUnknownNonCriticals,
+			cdc:                          cdc,
+		}, nil
+	}
+}
+
+// DefaultJSONTxDecoder returns a default protobuf JSON TxDecoder using the provided Marshaler.
+func DefaultJSONTxDecoder(cdc codec.Codec) sdk.TxDecoder {
+	return func(txBytes []byte) (sdk.Tx, error) {
+		var theTx tx.Tx
+		err := cdc.UnmarshalJSON(txBytes, &theTx)
+		if err != nil {
+			return nil, errorsmod.Wrap(sdkerrors.ErrTxDecode, err.Error())
+		}
+
+		return &wrapper{
+			tx:  &theTx,
+			cdc: cdc,
+		}, nil
+	}
+}
+
+// rejectNonADR027TxRaw rejects txBytes that do not follow ADR-027. This is NOT
+// a generic ADR-027 checker, it only applies to decoding TxRaw. Specifically, it
+// only checks that:
+// - field numbers are in ascending order (1, 2, and potentially multiple 3s),
+// - and varints are as short as possible.
+// All other ADR-027 edge cases (e.g. default values) are not applicable with
+// TxRaw.
+func rejectNonADR027TxRaw(txBytes []byte) error {
+	// Make sure all fields are ordered in ascending order with this variable.
+	prevTagNum := protowire.Number(0)
+
+	for len(txBytes) > 0 {
+		tagNum, wireType, m := protowire.ConsumeTag(txBytes)
+		if m < 0 {
+			return fmt.Errorf("invalid length; %w", protowire.ParseError(m))
+		}
+		// TxRaw only has bytes fields.
+		if wireType != protowire.BytesType {
+			return fmt.Errorf("expected %d wire type, got %d", protowire.BytesType, wireType)
+		}
+		// Make sure fields are ordered in ascending order.
+		if tagNum < prevTagNum {
+			return fmt.Errorf("txRaw must follow ADR-027, got tagNum %d after tagNum %d", tagNum, prevTagNum)
+		}
+
+		prevTagNum = tagNum
+
+		// All 3 fields of TxRaw have wireType == 2, so their next component
+		// is a varint, so we can safely call ConsumeVarint here.
+		// Byte structure: <varint of bytes length><bytes sequence>
+		// Inner fields are verified in `DefaultTxDecoder`
+		lengthPrefix, m := protowire.ConsumeVarint(txBytes[m:])
+		if m < 0 {
+			return fmt.Errorf("invalid length; %w", protowire.ParseError(m))
+		}
+		// We make sure that this varint is as short as possible.
+		n := varintMinLength(lengthPrefix)
+		if n != m {
+			return fmt.Errorf("length prefix varint for tagNum %d is not as short as possible, read %d, only need %d", tagNum, m, n)
+		}
+
+		// Skip over the bytes that store fieldNumber and wireType bytes.
+		_, _, m = protowire.ConsumeField(txBytes)
+		if m < 0 {
+			return fmt.Errorf("invalid length; %w", protowire.ParseError(m))
+		}
+
+		txBytes = txBytes[m:]
+	}
+
+	return nil
+}
+
+// varintMinLength returns the minimum number of bytes necessary to encode an
+// uint using varint encoding.
+func varintMinLength(n uint64) int {
+	switch {
+	// Note: 1<<N == 2**N.
+	case n < 1<<(7):
+		return 1
+	case n < 1<<(7*2):
+		return 2
+	case n < 1<<(7*3):
+		return 3
+	case n < 1<<(7*4):
+		return 4
+	case n < 1<<(7*5):
+		return 5
+	case n < 1<<(7*6):
+		return 6
+	case n < 1<<(7*7):
+		return 7
+	case n < 1<<(7*8):
+		return 8
+	case n < 1<<(7*9):
+		return 9
+	default:
+		return 10
+	}
+}
+
+// NOTE(review): the document content between the tx snippet above and the
+// staking validator snippet below was lost during extraction (an HTML-escaping
+// issue ate everything between two "<" characters). The Less method below is
+// reconstructed from the cosmos-sdk staking types — verify against source.
+
+// Less sorts validators by descending consensus power, breaking ties by
+// ascending consensus address.
+func (valz ValidatorsByVotingPower) Less(i, j int, r math.Int) bool {
+	if valz[i].ConsensusPower(r) == valz[j].ConsensusPower(r) {
+		addrI, errI := valz[i].GetConsAddr()
+		addrJ, errJ := valz[j].GetConsAddr()
+		if errI != nil || errJ != nil {
+			return false
+		}
+		return bytes.Compare(addrI, addrJ) == -1
+	}
+	return valz[i].ConsensusPower(r) > valz[j].ConsensusPower(r)
+}
+
+// Swap exchanges the validators at positions i and j.
+func (valz ValidatorsByVotingPower) Swap(i, j int) {
+	valz[i], valz[j] = valz[j], valz[i]
+}
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (v Validators) UnpackInterfaces(c codectypes.AnyUnpacker) error {
+	for i := range v.Validators {
+		if err := v.Validators[i].UnpackInterfaces(c); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// MustMarshalValidator marshals a validator to bytes, panicking on error.
+func MustMarshalValidator(cdc codec.BinaryCodec, validator *Validator) []byte {
+	return cdc.MustMarshal(validator)
+}
+
+// MustUnmarshalValidator unmarshals a validator from a store value, panicking
+// on error.
+func MustUnmarshalValidator(cdc codec.BinaryCodec, value []byte) Validator {
+	validator, err := UnmarshalValidator(cdc, value)
+	if err != nil {
+		panic(err)
+	}
+
+	return validator
+}
+
+// UnmarshalValidator unmarshals a validator from a store value.
+func UnmarshalValidator(cdc codec.BinaryCodec, value []byte) (v Validator, err error) {
+	err = cdc.Unmarshal(value, &v)
+	return v, err
+}
+
+// IsBonded checks if the validator status equals Bonded
+func (v Validator) IsBonded() bool {
+	return v.GetStatus() == Bonded
+}
+
+// IsUnbonded checks if the validator status equals Unbonded
+func (v Validator) IsUnbonded() bool {
+	return v.GetStatus() == Unbonded
+}
+
+// IsUnbonding checks if the validator status equals Unbonding
+func (v Validator) IsUnbonding() bool {
+	return v.GetStatus() == Unbonding
+}
+
+// DoNotModifyDesc is the constant used in flags to indicate that a description
+// field should not be updated.
+const DoNotModifyDesc = "[do-not-modify]"
+
+// NewDescription returns a Description populated with the provided fields.
+func NewDescription(moniker, identity, website, securityContact, details string) Description {
+	return Description{
+		Moniker:         moniker,
+		Identity:        identity,
+		Website:         website,
+		SecurityContact: securityContact,
+		Details:         details,
+	}
+}
+
+// UpdateDescription updates the fields of a given description. An error is
+// returned if the resulting description contains an invalid length.
+func (d Description) UpdateDescription(d2 Description) (Description, error) {
+	if d2.Moniker == DoNotModifyDesc {
+		d2.Moniker = d.Moniker
+	}
+
+	if d2.Identity == DoNotModifyDesc {
+		d2.Identity = d.Identity
+	}
+
+	if d2.Website == DoNotModifyDesc {
+		d2.Website = d.Website
+	}
+
+	if d2.SecurityContact == DoNotModifyDesc {
+		d2.SecurityContact = d.SecurityContact
+	}
+
+	if d2.Details == DoNotModifyDesc {
+		d2.Details = d.Details
+	}
+
+	return NewDescription(
+		d2.Moniker,
+		d2.Identity,
+		d2.Website,
+		d2.SecurityContact,
+		d2.Details,
+	).EnsureLength()
+}
+
+// EnsureLength ensures the length of a validator's description.
+func (d Description) EnsureLength() (Description, error) {
+	if len(d.Moniker) > MaxMonikerLength {
+		return d, errors.Wrapf(sdkerrors.ErrInvalidRequest, "invalid moniker length; got: %d, max: %d", len(d.Moniker), MaxMonikerLength)
+	}
+
+	if len(d.Identity) > MaxIdentityLength {
+		return d, errors.Wrapf(sdkerrors.ErrInvalidRequest, "invalid identity length; got: %d, max: %d", len(d.Identity), MaxIdentityLength)
+	}
+
+	if len(d.Website) > MaxWebsiteLength {
+		return d, errors.Wrapf(sdkerrors.ErrInvalidRequest, "invalid website length; got: %d, max: %d", len(d.Website), MaxWebsiteLength)
+	}
+
+	if len(d.SecurityContact) > MaxSecurityContactLength {
+		return d, errors.Wrapf(sdkerrors.ErrInvalidRequest, "invalid security contact length; got: %d, max: %d", len(d.SecurityContact), MaxSecurityContactLength)
+	}
+
+	if len(d.Details) > MaxDetailsLength {
+		return d, errors.Wrapf(sdkerrors.ErrInvalidRequest, "invalid details length; got: %d, max: %d", len(d.Details), MaxDetailsLength)
+	}
+
+	return d, nil
+}
+
+// ABCIValidatorUpdate returns an abci.ValidatorUpdate from a staking validator type
+// with the full validator power
+func (v Validator) ABCIValidatorUpdate(r math.Int) abci.ValidatorUpdate {
+	tmProtoPk, err := v.TmConsPublicKey()
+	if err != nil {
+		panic(err)
+	}
+
+	return abci.ValidatorUpdate{
+		PubKey: tmProtoPk,
+		Power:  v.ConsensusPower(r),
+	}
+}
+
+// ABCIValidatorUpdateZero returns an abci.ValidatorUpdate from a staking validator type
+// with zero power used for validator updates.
+func (v Validator) ABCIValidatorUpdateZero() abci.ValidatorUpdate {
+	tmProtoPk, err := v.TmConsPublicKey()
+	if err != nil {
+		panic(err)
+	}
+
+	return abci.ValidatorUpdate{
+		PubKey: tmProtoPk,
+		Power:  0,
+	}
+}
+
+// SetInitialCommission attempts to set a validator's initial commission. An
+// error is returned if the commission is invalid.
+func (v Validator) SetInitialCommission(commission Commission) (Validator, error) {
+	if err := commission.Validate(); err != nil {
+		return v, err
+	}
+
+	v.Commission = commission
+	return v, nil
+}
+
+// InvalidExRate reports whether the validator's exchange rate is invalid.
+// In some situations, the exchange rate becomes invalid, e.g. if
+// Validator loses all tokens due to slashing. In this case,
+// make all future delegations invalid.
+func (v Validator) InvalidExRate() bool {
+	return v.Tokens.IsZero() && v.DelegatorShares.IsPositive()
+}
+
+// TokensFromShares calculates the token worth of provided shares.
+func (v Validator) TokensFromShares(shares math.LegacyDec) math.LegacyDec {
+	return (shares.MulInt(v.Tokens)).Quo(v.DelegatorShares)
+}
+
+// TokensFromSharesTruncated calculates the token worth of provided shares,
+// truncated.
+func (v Validator) TokensFromSharesTruncated(shares math.LegacyDec) math.LegacyDec {
+	return (shares.MulInt(v.Tokens)).QuoTruncate(v.DelegatorShares)
+}
+
+// TokensFromSharesRoundUp returns the token worth of provided shares, rounded
+// up.
+func (v Validator) TokensFromSharesRoundUp(shares math.LegacyDec) math.LegacyDec {
+	return (shares.MulInt(v.Tokens)).QuoRoundUp(v.DelegatorShares)
+}
+
+// SharesFromTokens returns the shares of a delegation given a bond amount. It
+// returns an error if the validator has no tokens.
+func (v Validator) SharesFromTokens(amt math.Int) (math.LegacyDec, error) {
+	if v.Tokens.IsZero() {
+		return math.LegacyZeroDec(), ErrInsufficientShares
+	}
+
+	return v.GetDelegatorShares().MulInt(amt).QuoInt(v.GetTokens()), nil
+}
+
+// SharesFromTokensTruncated returns the truncated shares of a delegation given
+// a bond amount. It returns an error if the validator has no tokens.
+func (v Validator) SharesFromTokensTruncated(amt math.Int) (math.LegacyDec, error) {
+	if v.Tokens.IsZero() {
+		return math.LegacyZeroDec(), ErrInsufficientShares
+	}
+
+	return v.GetDelegatorShares().MulInt(amt).QuoTruncate(math.LegacyNewDecFromInt(v.GetTokens())), nil
+}
+
+// BondedTokens returns the bonded tokens which the validator holds; zero when
+// the validator is not bonded.
+func (v Validator) BondedTokens() math.Int {
+	if v.IsBonded() {
+		return v.Tokens
+	}
+
+	return math.ZeroInt()
+}
+
+// ConsensusPower gets the consensus-engine power. A reduction of 10^6 from
+// validator tokens is applied
+func (v Validator) ConsensusPower(r math.Int) int64 {
+	if v.IsBonded() {
+		return v.PotentialConsensusPower(r)
+	}
+
+	return 0
+}
+
+// PotentialConsensusPower returns the potential consensus-engine power.
+func (v Validator) PotentialConsensusPower(r math.Int) int64 {
+	return sdk.TokensToConsensusPower(v.Tokens, r)
+}
+
+// UpdateStatus updates the location of the shares within a validator
+// to reflect the new status
+func (v Validator) UpdateStatus(newStatus BondStatus) Validator {
+	v.Status = newStatus
+	return v
+}
+
+// AddTokensFromDel adds tokens to a validator and returns the updated
+// validator along with the delegator shares issued for those tokens.
+func (v Validator) AddTokensFromDel(amount math.Int) (Validator, math.LegacyDec) {
+	// calculate the shares to issue
+	var issuedShares math.LegacyDec
+	if v.DelegatorShares.IsZero() {
+		// the first delegation to a validator sets the exchange rate to one
+		issuedShares = math.LegacyNewDecFromInt(amount)
+	} else {
+		shares, err := v.SharesFromTokens(amount)
+		if err != nil {
+			panic(err)
+		}
+
+		issuedShares = shares
+	}
+
+	v.Tokens = v.Tokens.Add(amount)
+	v.DelegatorShares = v.DelegatorShares.Add(issuedShares)
+
+	return v, issuedShares
+}
+
+// RemoveTokens removes tokens from a validator
+func (v Validator) RemoveTokens(tokens math.Int) Validator {
+	if tokens.IsNegative() {
+		panic(fmt.Sprintf("should not happen: trying to remove negative tokens %v", tokens))
+	}
+
+	if v.Tokens.LT(tokens) {
+		panic(fmt.Sprintf("should not happen: only have %v tokens, trying to remove %v", v.Tokens, tokens))
+	}
+
+	v.Tokens = v.Tokens.Sub(tokens)
+
+	return v
+}
+
+// RemoveDelShares removes delegator shares from a validator.
+// NOTE: because token fractions are left in the validator,
+// the exchange rate of future shares of this validator can increase.
+func (v Validator) RemoveDelShares(delShares math.LegacyDec) (Validator, math.Int) {
+	remainingShares := v.DelegatorShares.Sub(delShares)
+
+	var issuedTokens math.Int
+	if remainingShares.IsZero() {
+		// last delegation share gets any trimmings
+		issuedTokens = v.Tokens
+		v.Tokens = math.ZeroInt()
+	} else {
+		// leave excess tokens in the validator
+		// however fully use all the delegator shares
+		issuedTokens = v.TokensFromShares(delShares).TruncateInt()
+
+		v.Tokens = v.Tokens.Sub(issuedTokens)
+		if v.Tokens.IsNegative() {
+			panic("attempting to remove more tokens than available in validator")
+		}
+	}
+
+	v.DelegatorShares = remainingShares
+
+	return v, issuedTokens
+}
+
+// MinEqual defines a more minimum set of equality conditions when comparing two
+// validators.
+func (v *Validator) MinEqual(other *Validator) bool {
+	return v.OperatorAddress == other.OperatorAddress &&
+		v.Status == other.Status &&
+		v.Tokens.Equal(other.Tokens) &&
+		v.DelegatorShares.Equal(other.DelegatorShares) &&
+		v.Description.Equal(other.Description) &&
+		v.Commission.Equal(other.Commission) &&
+		v.Jailed == other.Jailed &&
+		v.MinSelfDelegation.Equal(other.MinSelfDelegation) &&
+		v.ConsensusPubkey.Equal(other.ConsensusPubkey)
+}
+
+// Equal checks if the receiver equals the parameter
+func (v *Validator) Equal(v2 *Validator) bool {
+	return v.MinEqual(v2) &&
+		v.UnbondingHeight == v2.UnbondingHeight &&
+		v.UnbondingTime.Equal(v2.UnbondingTime)
+}
+
+// IsJailed returns whether the validator is jailed.
+func (v Validator) IsJailed() bool {
+	return v.Jailed
+}
+
+// GetMoniker returns the validator's moniker.
+func (v Validator) GetMoniker() string {
+	return v.Description.Moniker
+}
+
+// GetStatus returns the validator's bond status.
+func (v Validator) GetStatus() BondStatus {
+	return v.Status
+}
+
+// GetOperator returns the validator's operator address.
+func (v Validator) GetOperator() string {
+	return v.OperatorAddress
+}
+
+// ConsPubKey returns the validator PubKey as a cryptotypes.PubKey.
+func (v Validator)
+
+ConsPubKey() (cryptotypes.PubKey, error) {
+ pk, ok := v.ConsensusPubkey.GetCachedValue().(cryptotypes.PubKey)
+ if !ok {
+ return nil, errors.Wrapf(sdkerrors.ErrInvalidType, "expecting cryptotypes.PubKey, got %T", pk)
+}
+
+return pk, nil
+}
+
+// Deprecated: use CmtConsPublicKey instead
+func (v Validator)
+
+TmConsPublicKey() (cmtprotocrypto.PublicKey, error) {
+ return v.CmtConsPublicKey()
+}
+
+// CmtConsPublicKey casts Validator.ConsensusPubkey to cmtprotocrypto.PubKey.
+func (v Validator)
+
+CmtConsPublicKey() (cmtprotocrypto.PublicKey, error) {
+ pk, err := v.ConsPubKey()
+ if err != nil {
+ return cmtprotocrypto.PublicKey{
+}, err
+}
+
+tmPk, err := cryptocodec.ToCmtProtoPublicKey(pk)
+ if err != nil {
+ return cmtprotocrypto.PublicKey{
+}, err
+}
+
+return tmPk, nil
+}
+
+// GetConsAddr extracts Consensus key address
+func (v Validator)
+
+GetConsAddr() ([]byte, error) {
+ pk, ok := v.ConsensusPubkey.GetCachedValue().(cryptotypes.PubKey)
+ if !ok {
+ return nil, errors.Wrapf(sdkerrors.ErrInvalidType, "expecting cryptotypes.PubKey, got %T", pk)
+}
+
+return pk.Address().Bytes(), nil
+}
+
+func (v Validator)
+
+GetTokens()
+
+math.Int {
+ return v.Tokens
+}
+
+func (v Validator)
+
+GetBondedTokens()
+
+math.Int {
+ return v.BondedTokens()
+}
+
+func (v Validator)
+
+GetConsensusPower(r math.Int)
+
+int64 {
+ return v.ConsensusPower(r)
+}
+
+func (v Validator)
+
+GetCommission()
+
+math.LegacyDec {
+ return v.Commission.Rate
+}
+
+func (v Validator)
+
+GetMinSelfDelegation()
+
+math.Int {
+ return v.MinSelfDelegation
+}
+
+func (v Validator)
+
+GetDelegatorShares()
+
+math.LegacyDec {
+ return v.DelegatorShares
+}
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (v Validator)
+
+UnpackInterfaces(unpacker codectypes.AnyUnpacker)
+
+error {
+ var pk cryptotypes.PubKey
+ return unpacker.UnpackAny(v.ConsensusPubkey, &pk)
+}
+```
+
+#### `Any`'s TypeURL
+
+When packing a protobuf message inside an `Any`, the message's type is uniquely defined by its type URL, which is the message's fully qualified name prefixed by a `/` (slash) character. In some implementations of `Any`, like the gogoproto one, there's generally [a resolvable prefix, e.g. `type.googleapis.com`](https://github.com/gogo/protobuf/blob/b03c65ea87cdc3521ede29f62fe3ce239267c1bc/protobuf/google/protobuf/any.proto#L87-L91). However, in the Cosmos SDK, we made the decision not to include such a prefix, to have shorter type URLs. The Cosmos SDK's own `Any` implementation can be found in `github.com/cosmos/cosmos-sdk/codec/types`.
+
+The Cosmos SDK is also transitioning away from gogoproto to the official `google.golang.org/protobuf` (known as the Protobuf API v2). Its default `Any` implementation also contains the [`type.googleapis.com`](https://github.com/protocolbuffers/protobuf-go/blob/v1.28.1/types/known/anypb/any.pb.go#L266) prefix. To maintain compatibility with the SDK, the following methods from `"google.golang.org/protobuf/types/known/anypb"` should not be used:
+
+* `anypb.New`
+* `anypb.MarshalFrom`
+* `anypb.Any#MarshalFrom`
+
+Instead, the Cosmos SDK provides helper functions in `"github.com/cosmos/cosmos-proto/anyutil"`, which create an official `anypb.Any` without inserting the prefixes:
+
+* `anyutil.New`
+* `anyutil.MarshalFrom`
+
+For example, to pack a `sdk.Msg` called `internalMsg`, use:
+
+```diff
+import (
+- "google.golang.org/protobuf/types/known/anypb"
++ "github.com/cosmos/cosmos-proto/anyutil"
+)
+
+- anyMsg, err := anypb.New(internalMsg.Message().Interface())
++ anyMsg, err := anyutil.New(internalMsg.Message().Interface())
+
+- fmt.Println(anyMsg.TypeURL) // type.googleapis.com/cosmos.bank.v1beta1.MsgSend
++ fmt.Println(anyMsg.TypeURL) // /cosmos.bank.v1beta1.MsgSend
+```
+
+## FAQ
+
+### How to create modules using protobuf encoding
+
+#### Defining module types
+
+Protobuf types can be defined to encode:
+
+* state
+* [`Msg`s](/sdk/v0.53/build/building-modules/messages-and-queries#messages)
+* [Query services](/sdk/v0.53/build/building-modules/query-services)
+* [genesis](/sdk/v0.53/build/building-modules/genesis)
+
+#### Naming and conventions
+
+We encourage developers to follow industry guidelines: [Protocol Buffers style guide](https://developers.google.com/protocol-buffers/docs/style)
+and [Buf](https://buf.build/docs/style-guide). See more details in [ADR 023](https://github.com/cosmos/cosmos-sdk/blob/release/v0.53.x/docs/architecture/adr-023-protobuf-naming.md).
+
+### How to update modules to protobuf encoding
+
+If modules do not contain any interfaces (e.g. `Account` or `Content`), then they
+may simply migrate any existing types that are encoded and persisted via their concrete Amino codec to Protobuf (see 1. for further guidelines) and accept a `Marshaler` as the codec, which is implemented via the `ProtoCodec` without any further customization.
+
+However, if a module type composes an interface, it must wrap it in the `sdk.Any` (from the `/types` package) type. To do that, a module-level `.proto` file must use [`google.protobuf.Any`](https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/any.proto) for respective message type interface types.
+
+For example, the `x/evidence` module defines an `Evidence` interface, which is used by `MsgSubmitEvidence`. The structure definition must use `sdk.Any` to wrap the evidence file. In the proto file we define it as follows:
+
+```protobuf
+// proto/cosmos/evidence/v1beta1/tx.proto
+
+message MsgSubmitEvidence {
+ string submitter = 1;
+ google.protobuf.Any evidence = 2 [(cosmos_proto.accepts_interface) = "cosmos.evidence.v1beta1.Evidence"];
+}
+```
+
+The Cosmos SDK `codec.Codec` interface provides support methods `MarshalInterface` and `UnmarshalInterface` to easily encode state to `Any`.
+
+Modules should register interfaces using `InterfaceRegistry`, which provides a mechanism for registering interfaces: `RegisterInterface(protoName string, iface interface{}, impls ...proto.Message)` and implementations: `RegisterImplementations(iface interface{}, impls ...proto.Message)` that can be safely unpacked from `Any`, similarly to type registration with Amino:
+
+```go expandable
+package types
+
+import (
+
+ "errors"
+ "fmt"
+ "reflect"
+ "github.com/cosmos/gogoproto/jsonpb"
+ "github.com/cosmos/gogoproto/proto"
+ "google.golang.org/protobuf/reflect/protodesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "cosmossdk.io/x/tx/signing"
+)
+
+var (
+
+ // MaxUnpackAnySubCalls extension point that defines the maximum number of sub-calls allowed during the unpacking
+ // process of protobuf Any messages.
+ MaxUnpackAnySubCalls = 100
+
+ // MaxUnpackAnyRecursionDepth extension point that defines the maximum allowed recursion depth during protobuf Any
+ // message unpacking.
+ MaxUnpackAnyRecursionDepth = 10
+)
+
+// AnyUnpacker is an interface which allows safely unpacking types packed
+// in Any's against a whitelist of registered types
+type AnyUnpacker interface {
+ // UnpackAny unpacks the value in any to the interface pointer passed in as
+ // iface. Note that the type in any must have been registered in the
+ // underlying whitelist registry as a concrete type for that interface
+ // Ex:
+ // var msg sdk.Msg
+ // err := cdc.UnpackAny(any, &msg)
+ // ...
+ UnpackAny(any *Any, iface interface{
+})
+
+error
+}
+
+// InterfaceRegistry provides a mechanism for registering interfaces and
+// implementations that can be safely unpacked from Any
+type InterfaceRegistry interface {
+ AnyUnpacker
+ jsonpb.AnyResolver
+
+ // RegisterInterface associates protoName as the public name for the
+ // interface passed in as iface. This is to be used primarily to create
+ // a public facing registry of interface implementations for clients.
+ // protoName should be a well-chosen public facing name that remains stable.
+ // RegisterInterface takes an optional list of impls to be registered
+ // as implementations of iface.
+ //
+ // Ex:
+ // registry.RegisterInterface("cosmos.base.v1beta1.Msg", (*sdk.Msg)(nil))
+
+RegisterInterface(protoName string, iface interface{
+}, impls ...proto.Message)
+
+ // RegisterImplementations registers impls as concrete implementations of
+ // the interface iface.
+ //
+ // Ex:
+ // registry.RegisterImplementations((*sdk.Msg)(nil), &MsgSend{
+}, &MsgMultiSend{
+})
+
+RegisterImplementations(iface interface{
+}, impls ...proto.Message)
+
+ // ListAllInterfaces list the type URLs of all registered interfaces.
+ ListAllInterfaces() []string
+
+ // ListImplementations lists the valid type URLs for the given interface name that can be used
+ // for the provided interface type URL.
+ ListImplementations(ifaceTypeURL string) []string
+
+ // EnsureRegistered ensures there is a registered interface for the given concrete type.
+ EnsureRegistered(iface interface{
+})
+
+error
+
+ protodesc.Resolver
+
+ // RangeFiles iterates over all registered files and calls f on each one. This
+ // implements the part of protoregistry.Files that is needed for reflecting over
+ // the entire FileDescriptorSet.
+ RangeFiles(f func(protoreflect.FileDescriptor)
+
+bool)
+
+SigningContext() *signing.Context
+
+ // mustEmbedInterfaceRegistry requires that all implementations of InterfaceRegistry embed an official implementation
+ // from this package. This allows new methods to be added to the InterfaceRegistry interface without breaking
+ // backwards compatibility.
+ mustEmbedInterfaceRegistry()
+}
+
+// UnpackInterfacesMessage is meant to extend protobuf types (which implement
+// proto.Message) to support a post-deserialization phase which unpacks
+// types packed within Any's using the whitelist provided by AnyUnpacker
+type UnpackInterfacesMessage interface {
+ // UnpackInterfaces is implemented in order to unpack values packed within
+ // Any's using the AnyUnpacker. It should generally be implemented as
+ // follows:
+ // func (s *MyStruct)
+
+UnpackInterfaces(unpacker AnyUnpacker)
+
+error {
+ // var x AnyInterface
+ // // where X is an Any field on MyStruct
+ // err := unpacker.UnpackAny(s.X, &x)
+ // if err != nil {
+ // return err
+ //
+}
+ // // where Y is a field on MyStruct that implements UnpackInterfacesMessage itself
+ // err = s.Y.UnpackInterfaces(unpacker)
+ // if err != nil {
+ // return err
+ //
+}
+ // return nil
+ //
+}
+
+UnpackInterfaces(unpacker AnyUnpacker)
+
+error
+}
+
+type interfaceRegistry struct {
+ signing.ProtoFileResolver
+ interfaceNames map[string]reflect.Type
+ interfaceImpls map[reflect.Type]interfaceMap
+ implInterfaces map[reflect.Type]reflect.Type
+ typeURLMap map[string]reflect.Type
+ signingCtx *signing.Context
+}
+
+type interfaceMap = map[string]reflect.Type
+
+// NewInterfaceRegistry returns a new InterfaceRegistry
+func NewInterfaceRegistry()
+
+InterfaceRegistry {
+ registry, err := NewInterfaceRegistryWithOptions(InterfaceRegistryOptions{
+ ProtoFiles: proto.HybridResolver,
+ SigningOptions: signing.Options{
+ AddressCodec: failingAddressCodec{
+},
+ ValidatorAddressCodec: failingAddressCodec{
+},
+},
+})
+ if err != nil {
+ panic(err)
+}
+
+return registry
+}
+
+// InterfaceRegistryOptions are options for creating a new InterfaceRegistry.
+type InterfaceRegistryOptions struct {
+ // ProtoFiles is the set of files to use for the registry. It is required.
+ ProtoFiles signing.ProtoFileResolver
+
+ // SigningOptions are the signing options to use for the registry.
+ SigningOptions signing.Options
+}
+
+// NewInterfaceRegistryWithOptions returns a new InterfaceRegistry with the given options.
+func NewInterfaceRegistryWithOptions(options InterfaceRegistryOptions) (InterfaceRegistry, error) {
+ if options.ProtoFiles == nil {
+ return nil, fmt.Errorf("proto files must be provided")
+}
+
+options.SigningOptions.FileResolver = options.ProtoFiles
+ signingCtx, err := signing.NewContext(options.SigningOptions)
+ if err != nil {
+ return nil, err
+}
+
+return &interfaceRegistry{
+ interfaceNames: map[string]reflect.Type{
+},
+ interfaceImpls: map[reflect.Type]interfaceMap{
+},
+ implInterfaces: map[reflect.Type]reflect.Type{
+},
+ typeURLMap: map[string]reflect.Type{
+},
+ ProtoFileResolver: options.ProtoFiles,
+ signingCtx: signingCtx,
+}, nil
+}
+
+func (registry *interfaceRegistry)
+
+RegisterInterface(protoName string, iface interface{
+}, impls ...proto.Message) {
+ typ := reflect.TypeOf(iface)
+ if typ.Elem().Kind() != reflect.Interface {
+ panic(fmt.Errorf("%T is not an interface type", iface))
+}
+
+registry.interfaceNames[protoName] = typ
+ registry.RegisterImplementations(iface, impls...)
+}
+
+// EnsureRegistered ensures there is a registered interface for the given concrete type.
+//
+// Returns an error if not, and nil if so.
+func (registry *interfaceRegistry)
+
+EnsureRegistered(impl interface{
+})
+
+error {
+ if reflect.ValueOf(impl).Kind() != reflect.Ptr {
+ return fmt.Errorf("%T is not a pointer", impl)
+}
+ if _, found := registry.implInterfaces[reflect.TypeOf(impl)]; !found {
+ return fmt.Errorf("%T does not have a registered interface", impl)
+}
+
+return nil
+}
+
+// RegisterImplementations registers a concrete proto Message which implements
+// the given interface.
+//
+// This function PANICs if different concrete types are registered under the
+// same typeURL.
+func (registry *interfaceRegistry)
+
+RegisterImplementations(iface interface{
+}, impls ...proto.Message) {
+ for _, impl := range impls {
+ typeURL := MsgTypeURL(impl)
+
+registry.registerImpl(iface, typeURL, impl)
+}
+}
+
+// RegisterCustomTypeURL registers a concrete type which implements the given
+// interface under `typeURL`.
+//
+// This function PANICs if different concrete types are registered under the
+// same typeURL.
+func (registry *interfaceRegistry)
+
+RegisterCustomTypeURL(iface interface{
+}, typeURL string, impl proto.Message) {
+ registry.registerImpl(iface, typeURL, impl)
+}
+
+// registerImpl registers a concrete type which implements the given
+// interface under `typeURL`.
+//
+// This function PANICs if different concrete types are registered under the
+// same typeURL.
+func (registry *interfaceRegistry)
+
+registerImpl(iface interface{
+}, typeURL string, impl proto.Message) {
+ ityp := reflect.TypeOf(iface).Elem()
+
+imap, found := registry.interfaceImpls[ityp]
+ if !found {
+ imap = map[string]reflect.Type{
+}
+
+}
+ implType := reflect.TypeOf(impl)
+ if !implType.AssignableTo(ityp) {
+ panic(fmt.Errorf("type %T doesn't actually implement interface %+v", impl, ityp))
+}
+
+ // Check if we already registered something under the given typeURL. It's
+ // okay to register the same concrete type again, but if we are registering
+ // a new concrete type under the same typeURL, then we throw an error (here,
+ // we panic).
+ foundImplType, found := imap[typeURL]
+ if found && foundImplType != implType {
+ panic(
+ fmt.Errorf(
+ "concrete type %s has already been registered under typeURL %s, cannot register %s under same typeURL. "+
+ "This usually means that there are conflicting modules registering different concrete types "+
+ "for a same interface implementation",
+ foundImplType,
+ typeURL,
+ implType,
+ ),
+ )
+}
+
+imap[typeURL] = implType
+ registry.typeURLMap[typeURL] = implType
+ registry.implInterfaces[implType] = ityp
+ registry.interfaceImpls[ityp] = imap
+}
+
+func (registry *interfaceRegistry)
+
+ListAllInterfaces() []string {
+ interfaceNames := registry.interfaceNames
+ keys := make([]string, 0, len(interfaceNames))
+ for key := range interfaceNames {
+ keys = append(keys, key)
+}
+
+return keys
+}
+
+func (registry *interfaceRegistry)
+
+ListImplementations(ifaceName string) []string {
+ typ, ok := registry.interfaceNames[ifaceName]
+ if !ok {
+ return []string{
+}
+
+}
+
+impls, ok := registry.interfaceImpls[typ.Elem()]
+ if !ok {
+ return []string{
+}
+
+}
+ keys := make([]string, 0, len(impls))
+ for key := range impls {
+ keys = append(keys, key)
+}
+
+return keys
+}
+
+func (registry *interfaceRegistry)
+
+UnpackAny(any *Any, iface interface{
+})
+
+error {
+ unpacker := &statefulUnpacker{
+ registry: registry,
+ maxDepth: MaxUnpackAnyRecursionDepth,
+ maxCalls: &sharedCounter{
+ count: MaxUnpackAnySubCalls
+},
+}
+
+return unpacker.UnpackAny(any, iface)
+}
+
+// sharedCounter is a type that encapsulates a counter value
+type sharedCounter struct {
+ count int
+}
+
+// statefulUnpacker is a struct that helps in deserializing and unpacking
+// protobuf Any messages while maintaining certain stateful constraints.
+type statefulUnpacker struct {
+ registry *interfaceRegistry
+ maxDepth int
+ maxCalls *sharedCounter
+}
+
+// cloneForRecursion returns a new statefulUnpacker instance with maxDepth reduced by one, preserving the registry and maxCalls.
+func (r statefulUnpacker)
+
+cloneForRecursion() *statefulUnpacker {
+ return &statefulUnpacker{
+ registry: r.registry,
+ maxDepth: r.maxDepth - 1,
+ maxCalls: r.maxCalls,
+}
+}
+
+// UnpackAny deserializes a protobuf Any message into the provided interface, ensuring the interface is a pointer.
+// It applies stateful constraints such as max depth and call limits, and unpacks interfaces if required.
+func (r *statefulUnpacker)
+
+UnpackAny(any *Any, iface interface{
+})
+
+error {
+ if r.maxDepth == 0 {
+ return errors.New("max depth exceeded")
+}
+ if r.maxCalls.count == 0 {
+ return errors.New("call limit exceeded")
+}
+ // here we gracefully handle the case in which `any` itself is `nil`, which may occur in message decoding
+ if any == nil {
+ return nil
+}
+ if any.TypeUrl == "" {
+ // if TypeUrl is empty return nil because without it we can't actually unpack anything
+ return nil
+}
+
+r.maxCalls.count--
+ rv := reflect.ValueOf(iface)
+ if rv.Kind() != reflect.Ptr {
+ return fmt.Errorf("UnpackAny expects a pointer")
+}
+ rt := rv.Elem().Type()
+ cachedValue := any.cachedValue
+ if cachedValue != nil {
+ if reflect.TypeOf(cachedValue).AssignableTo(rt) {
+ rv.Elem().Set(reflect.ValueOf(cachedValue))
+
+return nil
+}
+
+}
+
+imap, found := r.registry.interfaceImpls[rt]
+ if !found {
+ return fmt.Errorf("no registered implementations of type %+v", rt)
+}
+
+typ, found := imap[any.TypeUrl]
+ if !found {
+ return fmt.Errorf("no concrete type registered for type URL %s against interface %T", any.TypeUrl, iface)
+}
+
+msg, ok := reflect.New(typ.Elem()).Interface().(proto.Message)
+ if !ok {
+ return fmt.Errorf("can't proto unmarshal %T", msg)
+}
+ err := proto.Unmarshal(any.Value, msg)
+ if err != nil {
+ return err
+}
+
+err = UnpackInterfaces(msg, r.cloneForRecursion())
+ if err != nil {
+ return err
+}
+
+rv.Elem().Set(reflect.ValueOf(msg))
+
+any.cachedValue = msg
+
+ return nil
+}
+
+// Resolve returns the proto message given its typeURL. It works with types
+// registered with RegisterInterface/RegisterImplementations, as well as those
+// registered with RegisterCustomTypeURL.
+func (registry *interfaceRegistry)
+
+Resolve(typeURL string) (proto.Message, error) {
+ typ, found := registry.typeURLMap[typeURL]
+ if !found {
+ return nil, fmt.Errorf("unable to resolve type URL %s", typeURL)
+}
+
+msg, ok := reflect.New(typ.Elem()).Interface().(proto.Message)
+ if !ok {
+ return nil, fmt.Errorf("can't resolve type URL %s", typeURL)
+}
+
+return msg, nil
+}
+
+func (registry *interfaceRegistry)
+
+SigningContext() *signing.Context {
+ return registry.signingCtx
+}
+
+func (registry *interfaceRegistry)
+
+mustEmbedInterfaceRegistry() {
+}
+
+// UnpackInterfaces is a convenience function that calls UnpackInterfaces
+// on x if x implements UnpackInterfacesMessage
+func UnpackInterfaces(x interface{
+}, unpacker AnyUnpacker)
+
+error {
+ if msg, ok := x.(UnpackInterfacesMessage); ok {
+ return msg.UnpackInterfaces(unpacker)
+}
+
+return nil
+}
+
+type failingAddressCodec struct{
+}
+
+func (f failingAddressCodec)
+
+StringToBytes(string) ([]byte, error) {
+ return nil, fmt.Errorf("InterfaceRegistry requires a proper address codec implementation to do address conversion")
+}
+
+func (f failingAddressCodec)
+
+BytesToString([]byte) (string, error) {
+ return "", fmt.Errorf("InterfaceRegistry requires a proper address codec implementation to do address conversion")
+}
+```
+
+In addition, an `UnpackInterfaces` phase should be introduced to deserialization to unpack interfaces before they're needed. Protobuf types that contain a protobuf `Any` either directly or via one of their members should implement the `UnpackInterfacesMessage` interface:
+
+```go
+type UnpackInterfacesMessage interface {
+ UnpackInterfaces(InterfaceUnpacker)
+
+error
+}
+```
diff --git a/sdk/next/learn/advanced/events.mdx b/sdk/next/learn/advanced/events.mdx
new file mode 100644
index 000000000..d66616fb7
--- /dev/null
+++ b/sdk/next/learn/advanced/events.mdx
@@ -0,0 +1,2329 @@
+---
+title: Events
+---
+
+
+**Synopsis**
+
+`Event`s are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions.
+
+
+
+**Prerequisite Readings**
+
+* [Anatomy of a Cosmos SDK application](/sdk/v0.53/learn/beginner/app-anatomy)
+* [CometBFT Documentation on Events](/cometbft/v0.38/spec/abci/Overview#events)
+
+
+
+## Events
+
+Events are implemented in the Cosmos SDK as an alias of the ABCI `Event` type and
+take the form of: `{eventType}.{attributeKey}={attributeValue}`.
+
+```protobuf
+// Reference: https://github.com/cometbft/cometbft/blob/v0.37.0/proto/tendermint/abci/types.proto#L334-L343
+```
+
+An Event contains:
+
+* A `type` to categorize the Event at a high level; for example, the Cosmos SDK uses the `"message"` type to filter Events by `Msg`s.
+* A list of `attributes` which are key-value pairs that give more information about the categorized Event. For example, for the `"message"` type, we can filter Events by key-value pairs using `message.action={some_action}`, `message.module={some_module}`, or `message.sender={some_sender}`.
+* A `msg_index` to identify which message within a transaction the Event relates to.
+
+
+To parse the attribute values as strings, make sure to add `'` (single quotes) around each attribute value.
+
+
+*Typed Events* are Protobuf-defined [messages](/sdk/v0.53/build/architecture/adr-032-typed-events) used by the Cosmos SDK
+for emitting and querying Events. They are defined in an `event.proto` file, on a **per-module basis**, and are read as `proto.Message`.
+*Legacy Events* are defined on a **per-module basis** in the module's `/types/events.go` file.
+They are triggered from the module's Protobuf [`Msg` service](/sdk/v0.53/build/building-modules/msg-services)
+by using the [`EventManager`](#eventmanager).
+
+In addition, each module documents its events in the `Events` section of its specs (x/`{moduleName}`/`README.md`).
+
+Lastly, Events are returned to the underlying consensus engine in the response of the following ABCI messages:
+
+* [`BeginBlock`](/sdk/v0.53/learn/advanced/baseapp#beginblock)
+* [`EndBlock`](/sdk/v0.53/learn/advanced/baseapp#endblock)
+* [`CheckTx`](/sdk/v0.53/learn/advanced/baseapp#checktx)
+* [`Transaction Execution`](/sdk/v0.53/learn/advanced/baseapp#transactionexecution)
+
+### Examples
+
+The following examples show how to query Events using the Cosmos SDK.
+
+| Event | Description |
+| ------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `tx.height=23` | Query all transactions at height 23 |
+| `message.action='/cosmos.bank.v1beta1.Msg/Send'` | Query all transactions containing an x/bank `Send` [Service `Msg`](/sdk/v0.53/build/building-modules/msg-services). Note the `'`s around the value. |
+| `message.module='bank'` | Query all transactions containing messages from the x/bank module. Note the `'`s around the value. |
+| `create_validator.validator='cosmosval1...'` | x/staking-specific Event, see [x/staking SPEC](/sdk/v0.53/build/modules/staking/README.mdx). |
+
+## EventManager
+
+In Cosmos SDK applications, Events are managed by an abstraction called the `EventManager`.
+Internally, the `EventManager` tracks a list of Events for the entire execution flow of `FinalizeBlock`
+(i.e., transaction execution, `BeginBlock`, `EndBlock`).
+
+```go expandable
+package types
+
+import (
+
+ "encoding/json"
+ "fmt"
+ "maps"
+ "reflect"
+ "slices"
+ "strings"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ "github.com/cosmos/gogoproto/jsonpb"
+ proto "github.com/cosmos/gogoproto/proto"
+ "github.com/cosmos/cosmos-sdk/codec"
+)
+
+type EventManagerI interface {
+ Events()
+
+Events
+ ABCIEvents() []abci.Event
+ EmitTypedEvent(tev proto.Message)
+
+error
+ EmitTypedEvents(tevs ...proto.Message)
+
+error
+ EmitEvent(event Event)
+
+EmitEvents(events Events)
+}
+
+// ----------------------------------------------------------------------------
+// Event Manager
+// ----------------------------------------------------------------------------
+
+var _ EventManagerI = (*EventManager)(nil)
+
+// EventManager implements a simple wrapper around a slice of Event objects that
+// can be emitted from.
+type EventManager struct {
+ events Events
+}
+
+func NewEventManager() *EventManager {
+ return &EventManager{
+ EmptyEvents()
+}
+}
+
+func (em *EventManager)
+
+Events()
+
+Events {
+ return em.events
+}
+
+// EmitEvent stores a single Event object.
+func (em *EventManager)
+
+EmitEvent(event Event) {
+ em.events = em.events.AppendEvent(event)
+}
+
+// EmitEvents stores a series of Event objects.
+func (em *EventManager)
+
+EmitEvents(events Events) {
+ em.events = em.events.AppendEvents(events)
+}
+
+// ABCIEvents returns all stored Event objects as abci.Event objects.
+func (em EventManager)
+
+ABCIEvents() []abci.Event {
+ return em.events.ToABCIEvents()
+}
+
+// EmitTypedEvent takes a typed event and emits it, converting it into an Event.
+func (em *EventManager)
+
+EmitTypedEvent(tev proto.Message)
+
+error {
+ event, err := TypedEventToEvent(tev)
+ if err != nil {
+ return err
+}
+
+em.EmitEvent(event)
+
+return nil
+}
+
+// EmitTypedEvents takes a series of typed events and emits them.
+func (em *EventManager)
+
+EmitTypedEvents(tevs ...proto.Message)
+
+error {
+ events := make(Events, len(tevs))
+ for i, tev := range tevs {
+ res, err := TypedEventToEvent(tev)
+ if err != nil {
+ return err
+}
+
+events[i] = res
+}
+
+em.EmitEvents(events)
+
+return nil
+}
+
+// TypedEventToEvent takes a typed event and converts it to an Event object.
+func TypedEventToEvent(tev proto.Message) (Event, error) {
+ evtType := proto.MessageName(tev)
+
+evtJSON, err := codec.ProtoMarshalJSON(tev, nil)
+ if err != nil {
+ return Event{
+}, err
+}
+
+var attrMap map[string]json.RawMessage
+ err = json.Unmarshal(evtJSON, &attrMap)
+ if err != nil {
+ return Event{
+}, err
+}
+
+ // sort the keys to ensure the order is always the same
+ keys := slices.Sorted(maps.Keys(attrMap))
+ attrs := make([]abci.EventAttribute, 0, len(attrMap))
+ for _, k := range keys {
+ v := attrMap[k]
+ attrs = append(attrs, abci.EventAttribute{
+ Key: k,
+ Value: string(v),
+})
+}
+
+return Event{
+ Type: evtType,
+ Attributes: attrs,
+}, nil
+}
+
+// ParseTypedEvent converts abci.Event back to a typed event.
+func ParseTypedEvent(event abci.Event) (proto.Message, error) {
+ concreteGoType := proto.MessageType(event.Type)
+ if concreteGoType == nil {
+ return nil, fmt.Errorf("failed to retrieve the message of type %q", event.Type)
+}
+
+var value reflect.Value
+ if concreteGoType.Kind() == reflect.Ptr {
+ value = reflect.New(concreteGoType.Elem())
+}
+
+else {
+ value = reflect.Zero(concreteGoType)
+}
+
+protoMsg, ok := value.Interface().(proto.Message)
+ if !ok {
+ return nil, fmt.Errorf("%q does not implement proto.Message", event.Type)
+}
+ attrMap := make(map[string]json.RawMessage)
+ for _, attr := range event.Attributes {
+ attrMap[attr.Key] = json.RawMessage(attr.Value)
+}
+
+attrBytes, err := json.Marshal(attrMap)
+ if err != nil {
+ return nil, err
+}
+ unmarshaler := jsonpb.Unmarshaler{
+ AllowUnknownFields: true
+}
+ if err := unmarshaler.Unmarshal(strings.NewReader(string(attrBytes)), protoMsg); err != nil {
+ return nil, err
+}
+
+return protoMsg, nil
+}
+
+// ----------------------------------------------------------------------------
+// Events
+// ----------------------------------------------------------------------------
+
+type (
+ // Event is a type alias for an ABCI Event
+ Event abci.Event
+
+ // Events defines a slice of Event objects
+ Events []Event
+)
+
+// NewEvent creates a new Event object with a given type and slice of one or more
+// attributes.
+func NewEvent(ty string, attrs ...Attribute)
+
+Event {
+ e := Event{
+ Type: ty
+}
+ for _, attr := range attrs {
+ e.Attributes = append(e.Attributes, attr.ToKVPair())
+}
+
+return e
+}
+
+// NewAttribute returns a new key/value Attribute object.
+func NewAttribute(k, v string)
+
+Attribute {
+ return Attribute{
+ k, v
+}
+}
+
+// EmptyEvents returns an empty slice of events.
+func EmptyEvents()
+
+Events {
+ return make(Events, 0)
+}
+
+func (a Attribute)
+
+String()
+
+string {
+ return fmt.Sprintf("%s: %s", a.Key, a.Value)
+}
+
+// ToKVPair converts an Attribute object into a CometBFT key/value pair.
+func (a Attribute)
+
+ToKVPair()
+
+abci.EventAttribute {
+ return abci.EventAttribute{
+ Key: a.Key,
+ Value: a.Value
+}
+}
+
+// AppendAttributes adds one or more attributes to an Event.
+func (e Event)
+
+AppendAttributes(attrs ...Attribute)
+
+Event {
+ for _, attr := range attrs {
+ e.Attributes = append(e.Attributes, attr.ToKVPair())
+}
+
+return e
+}
+
+// GetAttribute returns an attribute for a given key present in an event.
+// If the key is not found, the boolean value will be false.
+func (e Event)
+
+GetAttribute(key string) (Attribute, bool) {
+ for _, attr := range e.Attributes {
+ if attr.Key == key {
+ return Attribute{
+ Key: attr.Key,
+ Value: attr.Value
+}, true
+}
+
+}
+
+return Attribute{
+}, false
+}
+
+// AppendEvent adds an Event to a slice of events.
+func (e Events)
+
+AppendEvent(event Event)
+
+Events {
+ return append(e, event)
+}
+
+// AppendEvents adds a slice of Event objects to an existing slice of Event objects.
+func (e Events)
+
+AppendEvents(events Events)
+
+Events {
+ return append(e, events...)
+}
+
+// ToABCIEvents converts a slice of Event objects to a slice of abci.Event
+// objects.
+func (e Events)
+
+ToABCIEvents() []abci.Event {
+ res := make([]abci.Event, len(e))
+ for i, ev := range e {
+ res[i] = abci.Event{
+ Type: ev.Type,
+ Attributes: ev.Attributes
+}
+
+}
+
+return res
+}
+
+// GetAttributes returns all attributes matching a given key present in events.
+// If the key is not found, the boolean value will be false.
+func (e Events)
+
+GetAttributes(key string) ([]Attribute, bool) {
+ attrs := make([]Attribute, 0)
+ for _, event := range e {
+ if attr, found := event.GetAttribute(key); found {
+ attrs = append(attrs, attr)
+}
+
+}
+
+return attrs, len(attrs) > 0
+}
+
+// Common event types and attribute keys
+const (
+ EventTypeTx = "tx"
+
+ AttributeKeyAccountSequence = "acc_seq"
+ AttributeKeySignature = "signature"
+ AttributeKeyFee = "fee"
+ AttributeKeyFeePayer = "fee_payer"
+
+ EventTypeMessage = "message"
+
+ AttributeKeyAction = "action"
+ AttributeKeyModule = "module"
+ AttributeKeySender = "sender"
+ AttributeKeyAmount = "amount"
+)
+
+type (
+  // StringEvents defines a slice of StringEvent objects.
+ StringEvents []StringEvent
+)
+
+func (se StringEvents)
+
+String()
+
+string {
+ var sb strings.Builder
+ for _, e := range se {
+ fmt.Fprintf(&sb, "\t\t- %s\n", e.Type)
+ for _, attr := range e.Attributes {
+ fmt.Fprintf(&sb, "\t\t\t- %s\n", attr)
+}
+
+}
+
+return strings.TrimRight(sb.String(), "\n")
+}
+
+// StringifyEvent converts an Event object to a StringEvent object.
+func StringifyEvent(e abci.Event)
+
+StringEvent {
+ res := StringEvent{
+ Type: e.Type
+}
+ for _, attr := range e.Attributes {
+ res.Attributes = append(
+ res.Attributes,
+ Attribute{
+ Key: attr.Key,
+ Value: attr.Value
+},
+ )
+}
+
+return res
+}
+
+// StringifyEvents converts a slice of Event objects into a slice of StringEvent
+// objects.
+func StringifyEvents(events []abci.Event)
+
+StringEvents {
+ res := make(StringEvents, 0, len(events))
+ for _, e := range events {
+ res = append(res, StringifyEvent(e))
+}
+
+return res
+}
+
+// MarkEventsToIndex returns the set of ABCI events, where each event's attribute
+// has its index value marked based on the provided set of events to index.
+func MarkEventsToIndex(events []abci.Event, indexSet map[string]struct{
+}) []abci.Event {
+ indexAll := len(indexSet) == 0
+ updatedEvents := make([]abci.Event, len(events))
+ for i, e := range events {
+ updatedEvent := abci.Event{
+ Type: e.Type,
+ Attributes: make([]abci.EventAttribute, len(e.Attributes)),
+}
+ for j, attr := range e.Attributes {
+ _, index := indexSet[fmt.Sprintf("%s.%s", e.Type, attr.Key)]
+ updatedAttr := abci.EventAttribute{
+ Key: attr.Key,
+ Value: attr.Value,
+ Index: index || indexAll,
+}
+
+updatedEvent.Attributes[j] = updatedAttr
+}
+
+updatedEvents[i] = updatedEvent
+}
+
+return updatedEvents
+}
+```
+
+The `EventManager` comes with a set of useful methods to manage Events. The methods
+most used by module and application developers are `EmitTypedEvent` and `EmitEvent`, which track
+an Event in the `EventManager`.
+
+```go expandable
+package types
+
+import (
+
+ "encoding/json"
+ "fmt"
+ "maps"
+ "reflect"
+ "slices"
+ "strings"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ "github.com/cosmos/gogoproto/jsonpb"
+ proto "github.com/cosmos/gogoproto/proto"
+ "github.com/cosmos/cosmos-sdk/codec"
+)
+
+type EventManagerI interface {
+ Events()
+
+Events
+ ABCIEvents() []abci.Event
+ EmitTypedEvent(tev proto.Message)
+
+error
+ EmitTypedEvents(tevs ...proto.Message)
+
+error
+ EmitEvent(event Event)
+
+EmitEvents(events Events)
+}
+
+// ----------------------------------------------------------------------------
+// Event Manager
+// ----------------------------------------------------------------------------
+
+var _ EventManagerI = (*EventManager)(nil)
+
+// EventManager implements a simple wrapper around a slice of Event objects that
+// can be emitted from.
+type EventManager struct {
+ events Events
+}
+
+func NewEventManager() *EventManager {
+ return &EventManager{
+ EmptyEvents()
+}
+}
+
+func (em *EventManager)
+
+Events()
+
+Events {
+ return em.events
+}
+
+// EmitEvent stores a single Event object.
+func (em *EventManager)
+
+EmitEvent(event Event) {
+ em.events = em.events.AppendEvent(event)
+}
+
+// EmitEvents stores a series of Event objects.
+func (em *EventManager)
+
+EmitEvents(events Events) {
+ em.events = em.events.AppendEvents(events)
+}
+
+// ABCIEvents returns all stored Event objects as abci.Event objects.
+func (em EventManager)
+
+ABCIEvents() []abci.Event {
+ return em.events.ToABCIEvents()
+}
+
+// EmitTypedEvent takes a typed event and emits it after converting it into an Event.
+func (em *EventManager)
+
+EmitTypedEvent(tev proto.Message)
+
+error {
+ event, err := TypedEventToEvent(tev)
+ if err != nil {
+ return err
+}
+
+em.EmitEvent(event)
+
+return nil
+}
+
+// EmitTypedEvents takes a series of typed events and emits them.
+func (em *EventManager)
+
+EmitTypedEvents(tevs ...proto.Message)
+
+error {
+ events := make(Events, len(tevs))
+ for i, tev := range tevs {
+ res, err := TypedEventToEvent(tev)
+ if err != nil {
+ return err
+}
+
+events[i] = res
+}
+
+em.EmitEvents(events)
+
+return nil
+}
+
+// TypedEventToEvent takes typed event and converts to Event object
+func TypedEventToEvent(tev proto.Message) (Event, error) {
+ evtType := proto.MessageName(tev)
+
+evtJSON, err := codec.ProtoMarshalJSON(tev, nil)
+ if err != nil {
+ return Event{
+}, err
+}
+
+var attrMap map[string]json.RawMessage
+ err = json.Unmarshal(evtJSON, &attrMap)
+ if err != nil {
+ return Event{
+}, err
+}
+
+ // sort the keys to ensure the order is always the same
+ keys := slices.Sorted(maps.Keys(attrMap))
+ attrs := make([]abci.EventAttribute, 0, len(attrMap))
+ for _, k := range keys {
+ v := attrMap[k]
+ attrs = append(attrs, abci.EventAttribute{
+ Key: k,
+ Value: string(v),
+})
+}
+
+return Event{
+ Type: evtType,
+ Attributes: attrs,
+}, nil
+}
+
+// ParseTypedEvent converts abci.Event back to a typed event.
+func ParseTypedEvent(event abci.Event) (proto.Message, error) {
+ concreteGoType := proto.MessageType(event.Type)
+ if concreteGoType == nil {
+ return nil, fmt.Errorf("failed to retrieve the message of type %q", event.Type)
+}
+
+var value reflect.Value
+ if concreteGoType.Kind() == reflect.Ptr {
+ value = reflect.New(concreteGoType.Elem())
+}
+
+else {
+ value = reflect.Zero(concreteGoType)
+}
+
+protoMsg, ok := value.Interface().(proto.Message)
+ if !ok {
+ return nil, fmt.Errorf("%q does not implement proto.Message", event.Type)
+}
+ attrMap := make(map[string]json.RawMessage)
+ for _, attr := range event.Attributes {
+ attrMap[attr.Key] = json.RawMessage(attr.Value)
+}
+
+attrBytes, err := json.Marshal(attrMap)
+ if err != nil {
+ return nil, err
+}
+ unmarshaler := jsonpb.Unmarshaler{
+ AllowUnknownFields: true
+}
+ if err := unmarshaler.Unmarshal(strings.NewReader(string(attrBytes)), protoMsg); err != nil {
+ return nil, err
+}
+
+return protoMsg, nil
+}
+
+// ----------------------------------------------------------------------------
+// Events
+// ----------------------------------------------------------------------------
+
+type (
+ // Event is a type alias for an ABCI Event
+ Event abci.Event
+
+ // Events defines a slice of Event objects
+ Events []Event
+)
+
+// NewEvent creates a new Event object with a given type and slice of one or more
+// attributes.
+func NewEvent(ty string, attrs ...Attribute)
+
+Event {
+ e := Event{
+ Type: ty
+}
+ for _, attr := range attrs {
+ e.Attributes = append(e.Attributes, attr.ToKVPair())
+}
+
+return e
+}
+
+// NewAttribute returns a new key/value Attribute object.
+func NewAttribute(k, v string)
+
+Attribute {
+ return Attribute{
+ k, v
+}
+}
+
+// EmptyEvents returns an empty slice of events.
+func EmptyEvents()
+
+Events {
+ return make(Events, 0)
+}
+
+func (a Attribute)
+
+String()
+
+string {
+ return fmt.Sprintf("%s: %s", a.Key, a.Value)
+}
+
+// ToKVPair converts an Attribute object into a CometBFT key/value pair.
+func (a Attribute)
+
+ToKVPair()
+
+abci.EventAttribute {
+ return abci.EventAttribute{
+ Key: a.Key,
+ Value: a.Value
+}
+}
+
+// AppendAttributes adds one or more attributes to an Event.
+func (e Event)
+
+AppendAttributes(attrs ...Attribute)
+
+Event {
+ for _, attr := range attrs {
+ e.Attributes = append(e.Attributes, attr.ToKVPair())
+}
+
+return e
+}
+
+// GetAttribute returns an attribute for a given key present in an event.
+// If the key is not found, the boolean value will be false.
+func (e Event)
+
+GetAttribute(key string) (Attribute, bool) {
+ for _, attr := range e.Attributes {
+ if attr.Key == key {
+ return Attribute{
+ Key: attr.Key,
+ Value: attr.Value
+}, true
+}
+
+}
+
+return Attribute{
+}, false
+}
+
+// AppendEvent adds an Event to a slice of events.
+func (e Events)
+
+AppendEvent(event Event)
+
+Events {
+ return append(e, event)
+}
+
+// AppendEvents adds a slice of Event objects to an existing slice of Event objects.
+func (e Events)
+
+AppendEvents(events Events)
+
+Events {
+ return append(e, events...)
+}
+
+// ToABCIEvents converts a slice of Event objects to a slice of abci.Event
+// objects.
+func (e Events)
+
+ToABCIEvents() []abci.Event {
+ res := make([]abci.Event, len(e))
+ for i, ev := range e {
+ res[i] = abci.Event{
+ Type: ev.Type,
+ Attributes: ev.Attributes
+}
+
+}
+
+return res
+}
+
+// GetAttributes returns all attributes matching a given key present in events.
+// If the key is not found, the boolean value will be false.
+func (e Events)
+
+GetAttributes(key string) ([]Attribute, bool) {
+ attrs := make([]Attribute, 0)
+ for _, event := range e {
+ if attr, found := event.GetAttribute(key); found {
+ attrs = append(attrs, attr)
+}
+
+}
+
+return attrs, len(attrs) > 0
+}
+
+// Common event types and attribute keys
+const (
+ EventTypeTx = "tx"
+
+ AttributeKeyAccountSequence = "acc_seq"
+ AttributeKeySignature = "signature"
+ AttributeKeyFee = "fee"
+ AttributeKeyFeePayer = "fee_payer"
+
+ EventTypeMessage = "message"
+
+ AttributeKeyAction = "action"
+ AttributeKeyModule = "module"
+ AttributeKeySender = "sender"
+ AttributeKeyAmount = "amount"
+)
+
+type (
+  // StringEvents defines a slice of StringEvent objects.
+ StringEvents []StringEvent
+)
+
+func (se StringEvents)
+
+String()
+
+string {
+ var sb strings.Builder
+ for _, e := range se {
+ fmt.Fprintf(&sb, "\t\t- %s\n", e.Type)
+ for _, attr := range e.Attributes {
+ fmt.Fprintf(&sb, "\t\t\t- %s\n", attr)
+}
+
+}
+
+return strings.TrimRight(sb.String(), "\n")
+}
+
+// StringifyEvent converts an Event object to a StringEvent object.
+func StringifyEvent(e abci.Event)
+
+StringEvent {
+ res := StringEvent{
+ Type: e.Type
+}
+ for _, attr := range e.Attributes {
+ res.Attributes = append(
+ res.Attributes,
+ Attribute{
+ Key: attr.Key,
+ Value: attr.Value
+},
+ )
+}
+
+return res
+}
+
+// StringifyEvents converts a slice of Event objects into a slice of StringEvent
+// objects.
+func StringifyEvents(events []abci.Event)
+
+StringEvents {
+ res := make(StringEvents, 0, len(events))
+ for _, e := range events {
+ res = append(res, StringifyEvent(e))
+}
+
+return res
+}
+
+// MarkEventsToIndex returns the set of ABCI events, where each event's attribute
+// has its index value marked based on the provided set of events to index.
+func MarkEventsToIndex(events []abci.Event, indexSet map[string]struct{
+}) []abci.Event {
+ indexAll := len(indexSet) == 0
+ updatedEvents := make([]abci.Event, len(events))
+ for i, e := range events {
+ updatedEvent := abci.Event{
+ Type: e.Type,
+ Attributes: make([]abci.EventAttribute, len(e.Attributes)),
+}
+ for j, attr := range e.Attributes {
+ _, index := indexSet[fmt.Sprintf("%s.%s", e.Type, attr.Key)]
+ updatedAttr := abci.EventAttribute{
+ Key: attr.Key,
+ Value: attr.Value,
+ Index: index || indexAll,
+}
+
+updatedEvent.Attributes[j] = updatedAttr
+}
+
+updatedEvents[i] = updatedEvent
+}
+
+return updatedEvents
+}
+```
+
+Module developers should handle Event emission via `EventManager#EmitTypedEvent` or `EventManager#EmitEvent` in each message
+`Handler` and in each `BeginBlock`/`EndBlock` handler. The `EventManager` is accessed via
+the [`Context`](/sdk/v0.53/learn/advanced/context), where Events should be already registered and emitted like this:
+
+Typed events:
+
+```go expandable
+package keeper
+
+import (
+
+ "bytes"
+ "context"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "slices"
+ "strings"
+
+ errorsmod "cosmossdk.io/errors"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ "github.com/cosmos/cosmos-sdk/x/group"
+ "github.com/cosmos/cosmos-sdk/x/group/errors"
+ "github.com/cosmos/cosmos-sdk/x/group/internal/math"
+ "github.com/cosmos/cosmos-sdk/x/group/internal/orm"
+)
+
+var _ group.MsgServer = Keeper{
+}
+
+// TODO: Revisit this once we have proper gas fee framework.
+// Tracking issues https://github.com/cosmos/cosmos-sdk/issues/9054, https://github.com/cosmos/cosmos-sdk/discussions/9072
+const gasCostPerIteration = uint64(20)
+
+func (k Keeper)
+
+CreateGroup(goCtx context.Context, msg *group.MsgCreateGroup) (*group.MsgCreateGroupResponse, error) {
+ if _, err := k.accKeeper.AddressCodec().StringToBytes(msg.Admin); err != nil {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidAddress, "invalid admin address: %s", msg.Admin)
+}
+ if err := k.validateMembers(msg.Members); err != nil {
+ return nil, errorsmod.Wrap(err, "members")
+}
+ if err := k.assertMetadataLength(msg.Metadata, "group metadata"); err != nil {
+ return nil, err
+}
+ totalWeight := math.NewDecFromInt64(0)
+ for _, m := range msg.Members {
+ if err := k.assertMetadataLength(m.Metadata, "member metadata"); err != nil {
+ return nil, err
+}
+
+ // Members of a group must have a positive weight.
+  // NOTE: group members with zero weight are only allowed when updating group members.
+ // If the member has a zero weight, it will be removed from the group.
+ weight, err := math.NewPositiveDecFromString(m.Weight)
+ if err != nil {
+ return nil, err
+}
+
+ // Adding up members weights to compute group total weight.
+ totalWeight, err = totalWeight.Add(weight)
+ if err != nil {
+ return nil, err
+}
+
+}
+
+ // Create a new group in the groupTable.
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ groupInfo := &group.GroupInfo{
+ Id: k.groupTable.Sequence().PeekNextVal(ctx.KVStore(k.key)),
+ Admin: msg.Admin,
+ Metadata: msg.Metadata,
+ Version: 1,
+ TotalWeight: totalWeight.String(),
+ CreatedAt: ctx.BlockTime(),
+}
+
+groupID, err := k.groupTable.Create(ctx.KVStore(k.key), groupInfo)
+ if err != nil {
+ return nil, errorsmod.Wrap(err, "could not create group")
+}
+
+ // Create new group members in the groupMemberTable.
+ for i, m := range msg.Members {
+ err := k.groupMemberTable.Create(ctx.KVStore(k.key), &group.GroupMember{
+ GroupId: groupID,
+ Member: &group.Member{
+ Address: m.Address,
+ Weight: m.Weight,
+ Metadata: m.Metadata,
+ AddedAt: ctx.BlockTime(),
+},
+})
+ if err != nil {
+ return nil, errorsmod.Wrapf(err, "could not store member %d", i)
+}
+
+}
+ if err := ctx.EventManager().EmitTypedEvent(&group.EventCreateGroup{
+ GroupId: groupID
+}); err != nil {
+ return nil, err
+}
+
+return &group.MsgCreateGroupResponse{
+ GroupId: groupID
+}, nil
+}
+
+func (k Keeper)
+
+UpdateGroupMembers(goCtx context.Context, msg *group.MsgUpdateGroupMembers) (*group.MsgUpdateGroupMembersResponse, error) {
+ if msg.GroupId == 0 {
+ return nil, errorsmod.Wrap(errors.ErrEmpty, "group id")
+}
+ if len(msg.MemberUpdates) == 0 {
+ return nil, errorsmod.Wrap(errors.ErrEmpty, "member updates")
+}
+ if err := k.validateMembers(msg.MemberUpdates); err != nil {
+ return nil, errorsmod.Wrap(err, "members")
+}
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ action := func(g *group.GroupInfo)
+
+error {
+ totalWeight, err := math.NewNonNegativeDecFromString(g.TotalWeight)
+ if err != nil {
+ return errorsmod.Wrap(err, "group total weight")
+}
+ for _, member := range msg.MemberUpdates {
+ if err := k.assertMetadataLength(member.Metadata, "group member metadata"); err != nil {
+ return err
+}
+ groupMember := group.GroupMember{
+ GroupId: msg.GroupId,
+ Member: &group.Member{
+ Address: member.Address,
+ Weight: member.Weight,
+ Metadata: member.Metadata,
+},
+}
+
+ // Checking if the group member is already part of the group
+ var found bool
+ var prevGroupMember group.GroupMember
+ switch err := k.groupMemberTable.GetOne(ctx.KVStore(k.key), orm.PrimaryKey(&groupMember), &prevGroupMember); {
+ case err == nil:
+ found = true
+ case sdkerrors.ErrNotFound.Is(err):
+ found = false
+ default:
+ return errorsmod.Wrap(err, "get group member")
+}
+
+newMemberWeight, err := math.NewNonNegativeDecFromString(groupMember.Member.Weight)
+ if err != nil {
+ return err
+}
+
+ // Handle delete for members with zero weight.
+ if newMemberWeight.IsZero() {
+ // We can't delete a group member that doesn't already exist.
+ if !found {
+ return errorsmod.Wrap(sdkerrors.ErrNotFound, "unknown member")
+}
+
+previousMemberWeight, err := math.NewPositiveDecFromString(prevGroupMember.Member.Weight)
+ if err != nil {
+ return err
+}
+
+ // Subtract the weight of the group member to delete from the group total weight.
+ totalWeight, err = math.SubNonNegative(totalWeight, previousMemberWeight)
+ if err != nil {
+ return err
+}
+
+ // Delete group member in the groupMemberTable.
+ if err := k.groupMemberTable.Delete(ctx.KVStore(k.key), &groupMember); err != nil {
+ return errorsmod.Wrap(err, "delete member")
+}
+
+continue
+}
+ // If group member already exists, handle update
+ if found {
+ previousMemberWeight, err := math.NewPositiveDecFromString(prevGroupMember.Member.Weight)
+ if err != nil {
+ return err
+}
+ // Subtract previous weight from the group total weight.
+ totalWeight, err = math.SubNonNegative(totalWeight, previousMemberWeight)
+ if err != nil {
+ return err
+}
+ // Save updated group member in the groupMemberTable.
+ groupMember.Member.AddedAt = prevGroupMember.Member.AddedAt
+ if err := k.groupMemberTable.Update(ctx.KVStore(k.key), &groupMember); err != nil {
+ return errorsmod.Wrap(err, "add member")
+}
+
+}
+
+else { // else handle create.
+ groupMember.Member.AddedAt = ctx.BlockTime()
+ if err := k.groupMemberTable.Create(ctx.KVStore(k.key), &groupMember); err != nil {
+ return errorsmod.Wrap(err, "add member")
+}
+
+}
+ // In both cases (handle + update), we need to add the new member's weight to the group total weight.
+ totalWeight, err = totalWeight.Add(newMemberWeight)
+ if err != nil {
+ return err
+}
+
+}
+ // ensure that group has one or more members
+ if totalWeight.IsZero() {
+ return errorsmod.Wrap(errors.ErrInvalid, "group must not be empty")
+}
+ // Update group in the groupTable.
+ g.TotalWeight = totalWeight.String()
+
+g.Version++
+ if err := k.validateDecisionPolicies(ctx, *g); err != nil {
+ return err
+}
+
+return k.groupTable.Update(ctx.KVStore(k.key), g.Id, g)
+}
+ if err := k.doUpdateGroup(ctx, msg.GetGroupID(), msg.GetAdmin(), action, "members updated"); err != nil {
+ return nil, err
+}
+
+return &group.MsgUpdateGroupMembersResponse{
+}, nil
+}
+
+func (k Keeper)
+
+UpdateGroupAdmin(goCtx context.Context, msg *group.MsgUpdateGroupAdmin) (*group.MsgUpdateGroupAdminResponse, error) {
+ if msg.GroupId == 0 {
+ return nil, errorsmod.Wrap(errors.ErrEmpty, "group id")
+}
+ if strings.EqualFold(msg.Admin, msg.NewAdmin) {
+ return nil, errorsmod.Wrap(errors.ErrInvalid, "new and old admin are the same")
+}
+ if _, err := k.accKeeper.AddressCodec().StringToBytes(msg.Admin); err != nil {
+ return nil, errorsmod.Wrap(sdkerrors.ErrInvalidAddress, "admin address")
+}
+ if _, err := k.accKeeper.AddressCodec().StringToBytes(msg.NewAdmin); err != nil {
+ return nil, errorsmod.Wrap(sdkerrors.ErrInvalidAddress, "new admin address")
+}
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ action := func(g *group.GroupInfo)
+
+error {
+ g.Admin = msg.NewAdmin
+ g.Version++
+
+ return k.groupTable.Update(ctx.KVStore(k.key), g.Id, g)
+}
+ if err := k.doUpdateGroup(ctx, msg.GetGroupID(), msg.GetAdmin(), action, "admin updated"); err != nil {
+ return nil, err
+}
+
+return &group.MsgUpdateGroupAdminResponse{
+}, nil
+}
+
+func (k Keeper)
+
+UpdateGroupMetadata(goCtx context.Context, msg *group.MsgUpdateGroupMetadata) (*group.MsgUpdateGroupMetadataResponse, error) {
+ if msg.GroupId == 0 {
+ return nil, errorsmod.Wrap(errors.ErrEmpty, "group id")
+}
+ if err := k.assertMetadataLength(msg.Metadata, "group metadata"); err != nil {
+ return nil, err
+}
+ if _, err := k.accKeeper.AddressCodec().StringToBytes(msg.Admin); err != nil {
+ return nil, errorsmod.Wrap(sdkerrors.ErrInvalidAddress, "admin address")
+}
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ action := func(g *group.GroupInfo)
+
+error {
+ g.Metadata = msg.Metadata
+ g.Version++
+ return k.groupTable.Update(ctx.KVStore(k.key), g.Id, g)
+}
+ if err := k.doUpdateGroup(ctx, msg.GetGroupID(), msg.GetAdmin(), action, "metadata updated"); err != nil {
+ return nil, err
+}
+
+return &group.MsgUpdateGroupMetadataResponse{
+}, nil
+}
+
+func (k Keeper)
+
+CreateGroupWithPolicy(ctx context.Context, msg *group.MsgCreateGroupWithPolicy) (*group.MsgCreateGroupWithPolicyResponse, error) {
+ // NOTE: admin, and group message validation is performed in the CreateGroup method
+ groupRes, err := k.CreateGroup(ctx, &group.MsgCreateGroup{
+ Admin: msg.Admin,
+ Members: msg.Members,
+ Metadata: msg.GroupMetadata,
+})
+ if err != nil {
+ return nil, errorsmod.Wrap(err, "group response")
+}
+ groupID := groupRes.GroupId
+
+ // NOTE: group policy message validation is performed in the CreateGroupPolicy method
+ groupPolicyRes, err := k.CreateGroupPolicy(ctx, &group.MsgCreateGroupPolicy{
+ Admin: msg.Admin,
+ GroupId: groupID,
+ Metadata: msg.GroupPolicyMetadata,
+ DecisionPolicy: msg.DecisionPolicy,
+})
+ if err != nil {
+ return nil, errorsmod.Wrap(err, "group policy response")
+}
+ if msg.GroupPolicyAsAdmin {
+ updateAdminReq := &group.MsgUpdateGroupAdmin{
+ GroupId: groupID,
+ Admin: msg.Admin,
+ NewAdmin: groupPolicyRes.Address,
+}
+ _, err = k.UpdateGroupAdmin(ctx, updateAdminReq)
+ if err != nil {
+ return nil, err
+}
+ updatePolicyAddressReq := &group.MsgUpdateGroupPolicyAdmin{
+ Admin: msg.Admin,
+ GroupPolicyAddress: groupPolicyRes.Address,
+ NewAdmin: groupPolicyRes.Address,
+}
+ _, err = k.UpdateGroupPolicyAdmin(ctx, updatePolicyAddressReq)
+ if err != nil {
+ return nil, err
+}
+
+}
+
+return &group.MsgCreateGroupWithPolicyResponse{
+ GroupId: groupID,
+ GroupPolicyAddress: groupPolicyRes.Address
+}, nil
+}
+
+func (k Keeper)
+
+CreateGroupPolicy(goCtx context.Context, msg *group.MsgCreateGroupPolicy) (*group.MsgCreateGroupPolicyResponse, error) {
+ if msg.GroupId == 0 {
+ return nil, errorsmod.Wrap(errors.ErrEmpty, "group id")
+}
+ if err := k.assertMetadataLength(msg.GetMetadata(), "group policy metadata"); err != nil {
+ return nil, err
+}
+
+policy, err := msg.GetDecisionPolicy()
+ if err != nil {
+ return nil, errorsmod.Wrap(err, "request decision policy")
+}
+ if err := policy.ValidateBasic(); err != nil {
+ return nil, errorsmod.Wrap(err, "decision policy")
+}
+
+reqGroupAdmin, err := k.accKeeper.AddressCodec().StringToBytes(msg.GetAdmin())
+ if err != nil {
+ return nil, errorsmod.Wrap(err, "request admin")
+}
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+groupInfo, err := k.getGroupInfo(ctx, msg.GetGroupID())
+ if err != nil {
+ return nil, err
+}
+
+groupAdmin, err := k.accKeeper.AddressCodec().StringToBytes(groupInfo.Admin)
+ if err != nil {
+ return nil, errorsmod.Wrap(err, "group admin")
+}
+
+ // Only the current group admin is authorized to create a group policy for this group.
+ if !bytes.Equal(groupAdmin, reqGroupAdmin) {
+ return nil, errorsmod.Wrap(sdkerrors.ErrUnauthorized, "not group admin")
+}
+ if err := policy.Validate(groupInfo, k.config); err != nil {
+ return nil, err
+}
+
+ // Generate account address of group policy.
+ var accountAddr sdk.AccAddress
+ // loop here in the rare case where an ADR-028-derived address creates a
+ // collision with an existing address.
+ for {
+ nextAccVal := k.groupPolicySeq.NextVal(ctx.KVStore(k.key))
+ derivationKey := make([]byte, 8)
+
+binary.BigEndian.PutUint64(derivationKey, nextAccVal)
+
+ac, err := authtypes.NewModuleCredential(group.ModuleName, []byte{
+ GroupPolicyTablePrefix
+}, derivationKey)
+ if err != nil {
+ return nil, err
+}
+
+accountAddr = sdk.AccAddress(ac.Address())
+ if k.accKeeper.GetAccount(ctx, accountAddr) != nil {
+ // handle a rare collision, in which case we just go on to the
+ // next sequence value and derive a new address.
+ continue
+}
+
+ // group policy accounts are unclaimable base accounts
+ account, err := authtypes.NewBaseAccountWithPubKey(ac)
+ if err != nil {
+ return nil, errorsmod.Wrap(err, "could not create group policy account")
+}
+ acc := k.accKeeper.NewAccount(ctx, account)
+
+k.accKeeper.SetAccount(ctx, acc)
+
+break
+}
+
+groupPolicy, err := group.NewGroupPolicyInfo(
+ accountAddr,
+ msg.GetGroupID(),
+ reqGroupAdmin,
+ msg.GetMetadata(),
+ 1,
+ policy,
+ ctx.BlockTime(),
+ )
+ if err != nil {
+ return nil, err
+}
+ if err := k.groupPolicyTable.Create(ctx.KVStore(k.key), &groupPolicy); err != nil {
+ return nil, errorsmod.Wrap(err, "could not create group policy")
+}
+ if err := ctx.EventManager().EmitTypedEvent(&group.EventCreateGroupPolicy{
+ Address: accountAddr.String()
+}); err != nil {
+ return nil, err
+}
+
+return &group.MsgCreateGroupPolicyResponse{
+ Address: accountAddr.String()
+}, nil
+}
+
+func (k Keeper)
+
+UpdateGroupPolicyAdmin(goCtx context.Context, msg *group.MsgUpdateGroupPolicyAdmin) (*group.MsgUpdateGroupPolicyAdminResponse, error) {
+ if strings.EqualFold(msg.Admin, msg.NewAdmin) {
+ return nil, errorsmod.Wrap(errors.ErrInvalid, "new and old admin are same")
+}
+ if _, err := k.accKeeper.AddressCodec().StringToBytes(msg.NewAdmin); err != nil {
+ return nil, errorsmod.Wrap(sdkerrors.ErrInvalidAddress, "new admin address")
+}
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ action := func(groupPolicy *group.GroupPolicyInfo)
+
+error {
+ groupPolicy.Admin = msg.NewAdmin
+ groupPolicy.Version++
+ return k.groupPolicyTable.Update(ctx.KVStore(k.key), groupPolicy)
+}
+ if err := k.doUpdateGroupPolicy(ctx, msg.GroupPolicyAddress, msg.Admin, action, "group policy admin updated"); err != nil {
+ return nil, err
+}
+
+return &group.MsgUpdateGroupPolicyAdminResponse{
+}, nil
+}
+
+func (k Keeper)
+
+UpdateGroupPolicyDecisionPolicy(goCtx context.Context, msg *group.MsgUpdateGroupPolicyDecisionPolicy) (*group.MsgUpdateGroupPolicyDecisionPolicyResponse, error) {
+ policy, err := msg.GetDecisionPolicy()
+ if err != nil {
+ return nil, errorsmod.Wrap(err, "decision policy")
+}
+ if err := policy.ValidateBasic(); err != nil {
+ return nil, errorsmod.Wrap(err, "decision policy")
+}
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ action := func(groupPolicy *group.GroupPolicyInfo)
+
+error {
+ groupInfo, err := k.getGroupInfo(ctx, groupPolicy.GroupId)
+ if err != nil {
+ return err
+}
+
+err = policy.Validate(groupInfo, k.config)
+ if err != nil {
+ return err
+}
+
+err = groupPolicy.SetDecisionPolicy(policy)
+ if err != nil {
+ return err
+}
+
+groupPolicy.Version++
+ return k.groupPolicyTable.Update(ctx.KVStore(k.key), groupPolicy)
+}
+ if err = k.doUpdateGroupPolicy(ctx, msg.GroupPolicyAddress, msg.Admin, action, "group policy's decision policy updated"); err != nil {
+ return nil, err
+}
+
+return &group.MsgUpdateGroupPolicyDecisionPolicyResponse{
+}, nil
+}
+
+func (k Keeper)
+
+UpdateGroupPolicyMetadata(goCtx context.Context, msg *group.MsgUpdateGroupPolicyMetadata) (*group.MsgUpdateGroupPolicyMetadataResponse, error) {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ metadata := msg.GetMetadata()
+ action := func(groupPolicy *group.GroupPolicyInfo)
+
+error {
+ groupPolicy.Metadata = metadata
+ groupPolicy.Version++
+ return k.groupPolicyTable.Update(ctx.KVStore(k.key), groupPolicy)
+}
+ if err := k.assertMetadataLength(metadata, "group policy metadata"); err != nil {
+ return nil, err
+}
+ err := k.doUpdateGroupPolicy(ctx, msg.GroupPolicyAddress, msg.Admin, action, "group policy metadata updated")
+ if err != nil {
+ return nil, err
+}
+
+return &group.MsgUpdateGroupPolicyMetadataResponse{
+}, nil
+}
+
+func (k Keeper)
+
+SubmitProposal(goCtx context.Context, msg *group.MsgSubmitProposal) (*group.MsgSubmitProposalResponse, error) {
+ if len(msg.Proposers) == 0 {
+ return nil, errorsmod.Wrap(errors.ErrEmpty, "proposers")
+}
+ if err := k.validateProposers(msg.Proposers); err != nil {
+ return nil, err
+}
+
+groupPolicyAddr, err := k.accKeeper.AddressCodec().StringToBytes(msg.GroupPolicyAddress)
+ if err != nil {
+ return nil, errorsmod.Wrap(err, "request account address of group policy")
+}
+ if err := k.assertMetadataLength(msg.Title, "proposal Title"); err != nil {
+ return nil, err
+}
+ if err := k.assertSummaryLength(msg.Summary); err != nil {
+ return nil, err
+}
+ if err := k.assertMetadataLength(msg.Metadata, "metadata"); err != nil {
+ return nil, err
+}
+
+ // verify that if present, the metadata title and summary equals the proposal title and summary
+ if len(msg.Metadata) != 0 {
+ proposalMetadata := govtypes.ProposalMetadata{
+}
+ if err := json.Unmarshal([]byte(msg.Metadata), &proposalMetadata); err == nil {
+ if proposalMetadata.Title != msg.Title {
+ return nil, fmt.Errorf("metadata title '%s' must equal proposal title '%s'", proposalMetadata.Title, msg.Title)
+}
+ if proposalMetadata.Summary != msg.Summary {
+ return nil, fmt.Errorf("metadata summary '%s' must equal proposal summary '%s'", proposalMetadata.Summary, msg.Summary)
+}
+
+}
+
+ // if we can't unmarshal the metadata, this means the client didn't use the recommended metadata format
+ // nothing can be done here, and this is still a valid case, so we ignore the error
+}
+
+msgs, err := msg.GetMsgs()
+ if err != nil {
+ return nil, errorsmod.Wrap(err, "request msgs")
+}
+ if err := validateMsgs(msgs); err != nil {
+ return nil, err
+}
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+policyAcc, err := k.getGroupPolicyInfo(ctx, msg.GroupPolicyAddress)
+ if err != nil {
+ return nil, errorsmod.Wrapf(err, "load group policy: %s", msg.GroupPolicyAddress)
+}
+
+groupInfo, err := k.getGroupInfo(ctx, policyAcc.GroupId)
+ if err != nil {
+ return nil, errorsmod.Wrap(err, "get group by groupId of group policy")
+}
+
+ // Only members of the group can submit a new proposal.
+ for _, proposer := range msg.Proposers {
+ if !k.groupMemberTable.Has(ctx.KVStore(k.key), orm.PrimaryKey(&group.GroupMember{
+ GroupId: groupInfo.Id,
+ Member: &group.Member{
+ Address: proposer
+}})) {
+ return nil, errorsmod.Wrapf(errors.ErrUnauthorized, "not in group: %s", proposer)
+}
+
+}
+
+ // Check that if the messages require signers, they are all equal to the given account address of group policy.
+ if err := ensureMsgAuthZ(msgs, groupPolicyAddr, k.cdc); err != nil {
+ return nil, err
+}
+
+policy, err := policyAcc.GetDecisionPolicy()
+ if err != nil {
+ return nil, errorsmod.Wrap(err, "proposal group policy decision policy")
+}
+
+ // Prevent proposal that cannot succeed.
+ if err = policy.Validate(groupInfo, k.config); err != nil {
+ return nil, err
+}
+ m := &group.Proposal{
+ Id: k.proposalTable.Sequence().PeekNextVal(ctx.KVStore(k.key)),
+ GroupPolicyAddress: msg.GroupPolicyAddress,
+ Metadata: msg.Metadata,
+ Proposers: msg.Proposers,
+ SubmitTime: ctx.BlockTime(),
+ GroupVersion: groupInfo.Version,
+ GroupPolicyVersion: policyAcc.Version,
+ Status: group.PROPOSAL_STATUS_SUBMITTED,
+ ExecutorResult: group.PROPOSAL_EXECUTOR_RESULT_NOT_RUN,
+ VotingPeriodEnd: ctx.BlockTime().Add(policy.GetVotingPeriod()), // The voting window begins as soon as the proposal is submitted.
+ FinalTallyResult: group.DefaultTallyResult(),
+ Title: msg.Title,
+ Summary: msg.Summary,
+}
+ if err := m.SetMsgs(msgs); err != nil {
+ return nil, errorsmod.Wrap(err, "create proposal")
+}
+
+id, err := k.proposalTable.Create(ctx.KVStore(k.key), m)
+ if err != nil {
+ return nil, errorsmod.Wrap(err, "create proposal")
+}
+ if err := ctx.EventManager().EmitTypedEvent(&group.EventSubmitProposal{
+ ProposalId: id
+}); err != nil {
+ return nil, err
+}
+
+ // Try to execute proposal immediately
+ if msg.Exec == group.Exec_EXEC_TRY {
+ // Consider proposers as Yes votes
+ for _, proposer := range msg.Proposers {
+ ctx.GasMeter().ConsumeGas(gasCostPerIteration, "vote on proposal")
+ _, err = k.Vote(ctx, &group.MsgVote{
+ ProposalId: id,
+ Voter: proposer,
+ Option: group.VOTE_OPTION_YES,
+})
+ if err != nil {
+ return &group.MsgSubmitProposalResponse{
+ ProposalId: id
+}, errorsmod.Wrapf(err, "the proposal was created but failed on vote for voter %s", proposer)
+}
+
+}
+
+ // Then try to execute the proposal
+ _, err = k.Exec(ctx, &group.MsgExec{
+ ProposalId: id,
+ // We consider the first proposer as the MsgExecRequest signer
+ // but that could be revisited (eg using the group policy)
+
+Executor: msg.Proposers[0],
+})
+ if err != nil {
+ return &group.MsgSubmitProposalResponse{
+ ProposalId: id
+}, errorsmod.Wrap(err, "the proposal was created but failed on exec")
+}
+
+}
+
+return &group.MsgSubmitProposalResponse{
+ ProposalId: id
+}, nil
+}
+
+func (k Keeper)
+
+WithdrawProposal(goCtx context.Context, msg *group.MsgWithdrawProposal) (*group.MsgWithdrawProposalResponse, error) {
+ if msg.ProposalId == 0 {
+ return nil, errorsmod.Wrap(errors.ErrEmpty, "proposal id")
+}
+ if _, err := k.accKeeper.AddressCodec().StringToBytes(msg.Address); err != nil {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidAddress, "invalid group policy admin / proposer address: %s", msg.Address)
+}
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+proposal, err := k.getProposal(ctx, msg.ProposalId)
+ if err != nil {
+ return nil, err
+}
+
+ // Ensure the proposal can be withdrawn.
+ if proposal.Status != group.PROPOSAL_STATUS_SUBMITTED {
+ return nil, errorsmod.Wrapf(errors.ErrInvalid, "cannot withdraw a proposal with the status of %s", proposal.Status.String())
+}
+
+var policyInfo group.GroupPolicyInfo
+ if policyInfo, err = k.getGroupPolicyInfo(ctx, proposal.GroupPolicyAddress); err != nil {
+ return nil, errorsmod.Wrap(err, "load group policy")
+}
+
+ // Check that the address is the group policy admin or is in the proposers list.
+ if msg.Address != policyInfo.Admin && !isProposer(proposal, msg.Address) {
+ return nil, errorsmod.Wrapf(errors.ErrUnauthorized, "given address is neither group policy admin nor in proposers: %s", msg.Address)
+}
+
+proposal.Status = group.PROPOSAL_STATUS_WITHDRAWN
+ if err := k.proposalTable.Update(ctx.KVStore(k.key), msg.ProposalId, &proposal); err != nil {
+ return nil, err
+}
+ if err := ctx.EventManager().EmitTypedEvent(&group.EventWithdrawProposal{
+ ProposalId: msg.ProposalId
+}); err != nil {
+ return nil, err
+}
+
+return &group.MsgWithdrawProposalResponse{
+}, nil
+}
+
+func (k Keeper)
+
+Vote(goCtx context.Context, msg *group.MsgVote) (*group.MsgVoteResponse, error) {
+ if msg.ProposalId == 0 {
+ return nil, errorsmod.Wrap(errors.ErrEmpty, "proposal id")
+}
+
+ // verify vote options
+ if msg.Option == group.VOTE_OPTION_UNSPECIFIED {
+ return nil, errorsmod.Wrap(errors.ErrEmpty, "vote option")
+}
+ if _, ok := group.VoteOption_name[int32(msg.Option)]; !ok {
+ return nil, errorsmod.Wrap(errors.ErrInvalid, "vote option")
+}
+ if err := k.assertMetadataLength(msg.Metadata, "metadata"); err != nil {
+ return nil, err
+}
+ if _, err := k.accKeeper.AddressCodec().StringToBytes(msg.Voter); err != nil {
+ return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidAddress, "invalid voter address: %s", msg.Voter)
+}
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+proposal, err := k.getProposal(ctx, msg.ProposalId)
+ if err != nil {
+ return nil, err
+}
+
+ // Ensure that we can still accept votes for this proposal.
+ if proposal.Status != group.PROPOSAL_STATUS_SUBMITTED {
+ return nil, errorsmod.Wrap(errors.ErrInvalid, "proposal not open for voting")
+}
+ if ctx.BlockTime().After(proposal.VotingPeriodEnd) {
+ return nil, errorsmod.Wrap(errors.ErrExpired, "voting period has ended already")
+}
+
+policyInfo, err := k.getGroupPolicyInfo(ctx, proposal.GroupPolicyAddress)
+ if err != nil {
+ return nil, errorsmod.Wrap(err, "load group policy")
+}
+
+groupInfo, err := k.getGroupInfo(ctx, policyInfo.GroupId)
+ if err != nil {
+ return nil, err
+}
+
+ // Count and store votes.
+ voter := group.GroupMember{
+ GroupId: groupInfo.Id,
+ Member: &group.Member{
+ Address: msg.Voter
+}}
+ if err := k.groupMemberTable.GetOne(ctx.KVStore(k.key), orm.PrimaryKey(&voter), &voter); err != nil {
+ return nil, errorsmod.Wrapf(err, "voter address: %s", msg.Voter)
+}
+ newVote := group.Vote{
+ ProposalId: msg.ProposalId,
+ Voter: msg.Voter,
+ Option: msg.Option,
+ Metadata: msg.Metadata,
+ SubmitTime: ctx.BlockTime(),
+}
+
+ // The ORM will return an error if the vote already exists,
+ // making sure that a voter hasn't already voted.
+ if err := k.voteTable.Create(ctx.KVStore(k.key), &newVote); err != nil {
+ return nil, errorsmod.Wrap(err, "store vote")
+}
+ if err := ctx.EventManager().EmitTypedEvent(&group.EventVote{
+ ProposalId: msg.ProposalId
+}); err != nil {
+ return nil, err
+}
+
+ // Try to execute proposal immediately
+ if msg.Exec == group.Exec_EXEC_TRY {
+ _, err = k.Exec(ctx, &group.MsgExec{
+ ProposalId: msg.ProposalId,
+ Executor: msg.Voter
+})
+ if err != nil {
+ return nil, err
+}
+
+}
+
+return &group.MsgVoteResponse{
+}, nil
+}
+
+// doTallyAndUpdate performs a tally, and, if the tally result is final, then:
+// - updates the proposal's `Status` and `FinalTallyResult` fields,
+// - prune all the votes.
+func (k Keeper)
+
+doTallyAndUpdate(ctx sdk.Context, proposal *group.Proposal, groupInfo group.GroupInfo, policyInfo group.GroupPolicyInfo)
+
+error {
+ policy, err := policyInfo.GetDecisionPolicy()
+ if err != nil {
+ return err
+}
+
+var result group.DecisionPolicyResult
+ tallyResult, err := k.Tally(ctx, *proposal, policyInfo.GroupId)
+ if err == nil {
+ result, err = policy.Allow(tallyResult, groupInfo.TotalWeight)
+}
+ if err != nil {
+ if err := k.pruneVotes(ctx, proposal.Id); err != nil {
+ return err
+}
+
+proposal.Status = group.PROPOSAL_STATUS_REJECTED
+ return ctx.EventManager().EmitTypedEvents(
+ &group.EventTallyError{
+ ProposalId: proposal.Id,
+ ErrorMessage: err.Error(),
+})
+}
+
+ // If the result was final (i.e. enough votes to pass) or if the voting
+ // period ended, then we consider the proposal as final.
+ if isFinal := result.Final || ctx.BlockTime().After(proposal.VotingPeriodEnd); isFinal {
+ if err := k.pruneVotes(ctx, proposal.Id); err != nil {
+ return err
+}
+
+proposal.FinalTallyResult = tallyResult
+ if result.Allow {
+ proposal.Status = group.PROPOSAL_STATUS_ACCEPTED
+}
+
+else {
+ proposal.Status = group.PROPOSAL_STATUS_REJECTED
+}
+
+
+}
+
+return nil
+}
+
+// Exec executes the messages from a proposal.
+func (k Keeper)
+
+Exec(goCtx context.Context, msg *group.MsgExec) (*group.MsgExecResponse, error) {
+ if msg.ProposalId == 0 {
+ return nil, errorsmod.Wrap(errors.ErrEmpty, "proposal id")
+}
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+proposal, err := k.getProposal(ctx, msg.ProposalId)
+ if err != nil {
+ return nil, err
+}
+ if proposal.Status != group.PROPOSAL_STATUS_SUBMITTED && proposal.Status != group.PROPOSAL_STATUS_ACCEPTED {
+ return nil, errorsmod.Wrapf(errors.ErrInvalid, "not possible to exec with proposal status %s", proposal.Status.String())
+}
+
+policyInfo, err := k.getGroupPolicyInfo(ctx, proposal.GroupPolicyAddress)
+ if err != nil {
+ return nil, errorsmod.Wrap(err, "load group policy")
+}
+
+ // If proposal is still in SUBMITTED phase, it means that the voting period
+ // didn't end yet, and tallying hasn't been done. In this case, we need to
+ // tally first.
+ if proposal.Status == group.PROPOSAL_STATUS_SUBMITTED {
+ groupInfo, err := k.getGroupInfo(ctx, policyInfo.GroupId)
+ if err != nil {
+ return nil, errorsmod.Wrap(err, "load group")
+}
+ if err = k.doTallyAndUpdate(ctx, &proposal, groupInfo, policyInfo); err != nil {
+ return nil, err
+}
+
+}
+
+ // Execute proposal payload.
+ var logs string
+ if proposal.Status == group.PROPOSAL_STATUS_ACCEPTED && proposal.ExecutorResult != group.PROPOSAL_EXECUTOR_RESULT_SUCCESS {
+ // Caching context so that we don't update the store in case of failure.
+ cacheCtx, flush := ctx.CacheContext()
+
+addr, err := k.accKeeper.AddressCodec().StringToBytes(policyInfo.Address)
+ if err != nil {
+ return nil, err
+}
+ decisionPolicy := policyInfo.DecisionPolicy.GetCachedValue().(group.DecisionPolicy)
+ if results, err := k.doExecuteMsgs(cacheCtx, k.router, proposal, addr, decisionPolicy); err != nil {
+ proposal.ExecutorResult = group.PROPOSAL_EXECUTOR_RESULT_FAILURE
+ logs = fmt.Sprintf("proposal execution failed on proposal %d, because of error %s", proposal.Id, err.Error())
+
+k.Logger(ctx).Info("proposal execution failed", "cause", err, "proposalID", proposal.Id)
+}
+
+else {
+ proposal.ExecutorResult = group.PROPOSAL_EXECUTOR_RESULT_SUCCESS
+ flush()
+ for _, res := range results {
+ // NOTE: The sdk msg handler creates a new EventManager, so events must be correctly propagated back to the current context
+ ctx.EventManager().EmitEvents(res.GetEvents())
+}
+
+}
+
+}
+
+ // Update proposal in proposalTable
+ // If proposal has successfully run, delete it from state.
+ if proposal.ExecutorResult == group.PROPOSAL_EXECUTOR_RESULT_SUCCESS {
+ if err := k.pruneProposal(ctx, proposal.Id); err != nil {
+ return nil, err
+}
+
+ // Emit event for proposal finalized with its result
+ if err := ctx.EventManager().EmitTypedEvent(
+ &group.EventProposalPruned{
+ ProposalId: proposal.Id,
+ Status: proposal.Status,
+ TallyResult: &proposal.FinalTallyResult,
+}); err != nil {
+ return nil, err
+}
+
+}
+
+else {
+ store := ctx.KVStore(k.key)
+ if err := k.proposalTable.Update(store, proposal.Id, &proposal); err != nil {
+ return nil, err
+}
+
+}
+ if err := ctx.EventManager().EmitTypedEvent(&group.EventExec{
+ ProposalId: proposal.Id,
+ Logs: logs,
+ Result: proposal.ExecutorResult,
+}); err != nil {
+ return nil, err
+}
+
+return &group.MsgExecResponse{
+ Result: proposal.ExecutorResult,
+}, nil
+}
+
+// LeaveGroup implements the MsgServer/LeaveGroup method.
+func (k Keeper)
+
+LeaveGroup(goCtx context.Context, msg *group.MsgLeaveGroup) (*group.MsgLeaveGroupResponse, error) {
+ if msg.GroupId == 0 {
+ return nil, errorsmod.Wrap(errors.ErrEmpty, "group-id")
+}
+
+ _, err := k.accKeeper.AddressCodec().StringToBytes(msg.Address)
+ if err != nil {
+ return nil, errorsmod.Wrap(err, "group member")
+}
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+groupInfo, err := k.getGroupInfo(ctx, msg.GroupId)
+ if err != nil {
+ return nil, errorsmod.Wrap(err, "group")
+}
+
+groupWeight, err := math.NewNonNegativeDecFromString(groupInfo.TotalWeight)
+ if err != nil {
+ return nil, err
+}
+
+gm, err := k.getGroupMember(ctx, &group.GroupMember{
+ GroupId: msg.GroupId,
+ Member: &group.Member{
+ Address: msg.Address
+},
+})
+ if err != nil {
+ return nil, err
+}
+
+memberWeight, err := math.NewPositiveDecFromString(gm.Member.Weight)
+ if err != nil {
+ return nil, err
+}
+
+updatedWeight, err := math.SubNonNegative(groupWeight, memberWeight)
+ if err != nil {
+ return nil, err
+}
+
+ // delete group member in the groupMemberTable.
+ if err := k.groupMemberTable.Delete(ctx.KVStore(k.key), gm); err != nil {
+ return nil, errorsmod.Wrap(err, "group member")
+}
+
+ // update group weight
+ groupInfo.TotalWeight = updatedWeight.String()
+
+groupInfo.Version++
+ if err := k.validateDecisionPolicies(ctx, groupInfo); err != nil {
+ return nil, err
+}
+ if err := k.groupTable.Update(ctx.KVStore(k.key), groupInfo.Id, &groupInfo); err != nil {
+ return nil, err
+}
+ if err := ctx.EventManager().EmitTypedEvent(&group.EventLeaveGroup{
+ GroupId: msg.GroupId,
+ Address: msg.Address,
+}); err != nil {
+ return nil, err
+}
+
+return &group.MsgLeaveGroupResponse{
+}, nil
+}
+
+func (k Keeper)
+
+getGroupMember(ctx sdk.Context, member *group.GroupMember) (*group.GroupMember, error) {
+ var groupMember group.GroupMember
+ switch err := k.groupMemberTable.GetOne(ctx.KVStore(k.key),
+ orm.PrimaryKey(member), &groupMember); {
+ case err == nil:
+ break
+ case sdkerrors.ErrNotFound.Is(err):
+ return nil, sdkerrors.ErrNotFound.Wrapf("%s is not part of group %d", member.Member.Address, member.GroupId)
+
+default:
+ return nil, err
+}
+
+return &groupMember, nil
+}
+
+type (
+ actionFn func(m *group.GroupInfo)
+
+error
+ groupPolicyActionFn func(m *group.GroupPolicyInfo)
+
+error
+)
+
+// doUpdateGroupPolicy first makes sure that the group policy admin initiated the group policy update,
+// before performing the group policy update and emitting an event.
+func (k Keeper)
+
+doUpdateGroupPolicy(ctx sdk.Context, reqGroupPolicy, reqAdmin string, action groupPolicyActionFn, note string)
+
+error {
+ groupPolicyAddr, err := k.accKeeper.AddressCodec().StringToBytes(reqGroupPolicy)
+ if err != nil {
+ return errorsmod.Wrap(err, "group policy address")
+}
+
+ _, err = k.accKeeper.AddressCodec().StringToBytes(reqAdmin)
+ if err != nil {
+ return errorsmod.Wrap(err, "group policy admin")
+}
+
+groupPolicyInfo, err := k.getGroupPolicyInfo(ctx, reqGroupPolicy)
+ if err != nil {
+ return errorsmod.Wrap(err, "load group policy")
+}
+
+ // Only current group policy admin is authorized to update a group policy.
+ if reqAdmin != groupPolicyInfo.Admin {
+ return errorsmod.Wrap(sdkerrors.ErrUnauthorized, "not group policy admin")
+}
+ if err := action(&groupPolicyInfo); err != nil {
+ return errorsmod.Wrap(err, note)
+}
+ if err = k.abortProposals(ctx, groupPolicyAddr); err != nil {
+ return err
+}
+ if err = ctx.EventManager().EmitTypedEvent(&group.EventUpdateGroupPolicy{
+ Address: groupPolicyInfo.Address
+}); err != nil {
+ return err
+}
+
+return nil
+}
+
+// doUpdateGroup first makes sure that the group admin initiated the group update,
+// before performing the group update and emitting an event.
+func (k Keeper)
+
+doUpdateGroup(ctx sdk.Context, groupID uint64, reqGroupAdmin string, action actionFn, errNote string)
+
+error {
+ groupInfo, err := k.getGroupInfo(ctx, groupID)
+ if err != nil {
+ return err
+}
+ if !strings.EqualFold(groupInfo.Admin, reqGroupAdmin) {
+ return errorsmod.Wrapf(sdkerrors.ErrUnauthorized, "not group admin; got %s, expected %s", reqGroupAdmin, groupInfo.Admin)
+}
+ if err := action(&groupInfo); err != nil {
+ return errorsmod.Wrap(err, errNote)
+}
+ if err := ctx.EventManager().EmitTypedEvent(&group.EventUpdateGroup{
+ GroupId: groupID
+}); err != nil {
+ return err
+}
+
+return nil
+}
+
+// assertMetadataLength returns an error if given metadata length
+// is greater than a pre-defined maxMetadataLen.
+func (k Keeper)
+
+assertMetadataLength(metadata, description string)
+
+error {
+ if metadata != "" && uint64(len(metadata)) > k.config.MaxMetadataLen {
+ return errorsmod.Wrapf(errors.ErrMaxLimit, description)
+}
+
+return nil
+}
+
+// assertSummaryLength returns an error if given summary length
+// is greater than a pre-defined 40*MaxMetadataLen.
+func (k Keeper)
+
+assertSummaryLength(summary string)
+
+error {
+ if summary != "" && uint64(len(summary)) > 40*k.config.MaxMetadataLen {
+ return errorsmod.Wrapf(errors.ErrMaxLimit, "proposal summary is too long")
+}
+
+return nil
+}
+
+// validateDecisionPolicies loops through all decision policies from the group,
+// and calls each of their Validate() method.
+func (k Keeper)
+
+validateDecisionPolicies(ctx sdk.Context, g group.GroupInfo)
+
+error {
+ it, err := k.groupPolicyByGroupIndex.Get(ctx.KVStore(k.key), g.Id)
+ if err != nil {
+ return err
+}
+
+defer it.Close()
+ for {
+ var groupPolicy group.GroupPolicyInfo
+ _, err = it.LoadNext(&groupPolicy)
+ if errors.ErrORMIteratorDone.Is(err) {
+ break
+}
+ if err != nil {
+ return err
+}
+
+err = groupPolicy.DecisionPolicy.GetCachedValue().(group.DecisionPolicy).Validate(g, k.config)
+ if err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+// validateProposers checks that all proposers addresses are valid.
+// It also verifies that there is no duplicate address.
+func (k Keeper)
+
+validateProposers(proposers []string)
+
+error {
+ index := make(map[string]struct{
+}, len(proposers))
+ for _, proposer := range proposers {
+ if _, exists := index[proposer]; exists {
+ return errorsmod.Wrapf(errors.ErrDuplicate, "address: %s", proposer)
+}
+
+ _, err := k.accKeeper.AddressCodec().StringToBytes(proposer)
+ if err != nil {
+ return errorsmod.Wrapf(err, "proposer address %s", proposer)
+}
+
+index[proposer] = struct{
+}{
+}
+
+}
+
+return nil
+}
+
+// validateMembers checks that all members addresses are valid.
+// Additionally, it verifies that there is no duplicate address
+// and the member weight is non-negative.
+// Note: in state, a member's weight MUST be positive. However, in some Msgs,
+// it's possible to set a zero member weight, for example in
+// MsgUpdateGroupMembers to denote that we're removing a member.
+// It returns an error if any of the above conditions is not met.
+func (k Keeper)
+
+validateMembers(members []group.MemberRequest)
+
+error {
+ index := make(map[string]struct{
+}, len(members))
+ for _, member := range members {
+ if _, exists := index[member.Address]; exists {
+ return errorsmod.Wrapf(errors.ErrDuplicate, "address: %s", member.Address)
+}
+
+ _, err := k.accKeeper.AddressCodec().StringToBytes(member.Address)
+ if err != nil {
+ return errorsmod.Wrapf(err, "member address %s", member.Address)
+}
+ if _, err := math.NewNonNegativeDecFromString(member.Weight); err != nil {
+ return errorsmod.Wrap(err, "weight must be non negative")
+}
+
+index[member.Address] = struct{
+}{
+}
+
+}
+
+return nil
+}
+
+// isProposer checks that an address is a proposer of a given proposal.
+func isProposer(proposal group.Proposal, address string)
+
+bool {
+ return slices.Contains(proposal.Proposers, address)
+}
+
+func validateMsgs(msgs []sdk.Msg)
+
+error {
+ for i, msg := range msgs {
+ m, ok := msg.(sdk.HasValidateBasic)
+ if !ok {
+ continue
+}
+ if err := m.ValidateBasic(); err != nil {
+ return errorsmod.Wrapf(err, "msg %d", i)
+}
+
+}
+
+return nil
+}
+```
+
+**Legacy events:**
+
+```go
+ctx.EventManager().EmitEvent(
+ sdk.NewEvent(eventType, sdk.NewAttribute(attributeKey, attributeValue)),
+)
+```
+
+The `EventManager` is accessed via the [`Context`](/sdk/v0.53/learn/advanced/context).
+
+See the [`Msg` services](/sdk/v0.53/build/building-modules/msg-services) concept doc for a more detailed
+view on how to typically implement Events and use the `EventManager` in modules.
+
+## Subscribing to Events
+
+You can use CometBFT's [Websocket](/cometbft/v0.38/docs/core/Subscribing-to-events-via-Websocket) to subscribe to Events by calling the `subscribe` RPC method:
+
+```json
+{
+ "jsonrpc": "2.0",
+ "method": "subscribe",
+ "id": "0",
+ "params": {
+ "query": "tm.event='eventCategory' AND eventType.eventAttribute='attributeValue'"
+ }
+}
+```
+
+The main `eventCategory` values you can subscribe to are:
+
+* `NewBlock`: Contains Events triggered during `BeginBlock` and `EndBlock`.
+* `Tx`: Contains Events triggered during `DeliverTx` (i.e., transaction processing).
+* `ValidatorSetUpdates`: Contains validator set updates for the block.
+
+These Events are triggered from the `state` package after a block is committed. You can get the
+full list of Event categories [on the CometBFT Go documentation](https://pkg.go.dev/github.com/cometbft/cometbft/types#pkg-constants).
+
+The `type` and `attribute` value of the `query` allow you to filter the specific Event you are looking for. For example, a `Mint` transaction triggers an Event of type `EventMint` and has an `Id` and an `Owner` as `attributes` (as defined in the [`events.proto` file of the `NFT` module](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0-rc.2/proto/cosmos/nft/v1beta1/event.proto#L21-L31)).
+
+Subscribing to this Event would be done like so:
+
+```json
+{
+ "jsonrpc": "2.0",
+ "method": "subscribe",
+ "id": "0",
+ "params": {
+ "query": "tm.event='Tx' AND mint.owner='ownerAddress'"
+ }
+}
+```
+
+where `ownerAddress` is an address following the [`AccAddress`](/sdk/v0.53/learn/beginner/accounts#addresses) format.
+
+The same approach can be used to subscribe to [legacy events](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0-rc.2/x/bank/types/events.go).
+
+## Default Events
+
+There are a few events that are automatically emitted for all messages, directly from `baseapp`.
+
+* `message.action`: The name of the message type.
+* `message.sender`: The address of the message signer.
+* `message.module`: The name of the module that emitted the message.
+
+
+The module name is assumed by `baseapp` to be the second element of the message route: `"cosmos.bank.v1beta1.MsgSend" -> "bank"`.
+In case a module does not follow the standard message path (e.g., IBC), it is advised to keep emitting the module name event.
+`Baseapp` only emits that event if the module has not already done so.
+
diff --git a/sdk/next/learn/advanced/grpc_rest.mdx b/sdk/next/learn/advanced/grpc_rest.mdx
new file mode 100644
index 000000000..6889baf24
--- /dev/null
+++ b/sdk/next/learn/advanced/grpc_rest.mdx
@@ -0,0 +1,218 @@
+---
+title: 'gRPC, REST, and CometBFT Endpoints'
+---
+
+
+**Synopsis**
+This document presents an overview of all the endpoints a node exposes: gRPC, REST as well as some other endpoints.
+
+
+## An Overview of All Endpoints
+
+Each node exposes the following endpoints for users to interact with a node; each endpoint is served on a different port. Details on how to configure each endpoint are provided in the endpoint's own section.
+
+* the gRPC server (default port: `9090`),
+* the REST server (default port: `1317`),
+* the CometBFT RPC endpoint (default port: `26657`).
+
+
+The node also exposes some other endpoints, such as the CometBFT P2P endpoint, or the [Prometheus endpoint](/cometbft/v0.38/docs/core/metrics), which are not directly related to the Cosmos SDK. Please refer to the [CometBFT documentation](/cometbft/v0.38/docs/core/configuration) for more information about these endpoints.
+
+
+
+All endpoints are defaulted to localhost and must be modified to be exposed to the public internet.
+
+
+## gRPC Server
+
+In the Cosmos SDK, Protobuf is the main [encoding](/sdk/v0.53/learn/advanced/encoding) library. This brings a wide range of Protobuf-based tools that can be plugged into the Cosmos SDK. One such tool is [gRPC](https://grpc.io), a modern open-source high performance RPC framework that has decent client support in several languages.
+
+Each module exposes a [Protobuf `Query` service](/sdk/v0.53/build/building-modules/messages-and-queries#queries) that defines state queries. The `Query` services and a transaction service used to broadcast transactions are hooked up to the gRPC server via the following function inside the application:
+
+```go expandable
+package types
+
+import (
+
+ "encoding/json"
+ "io"
+
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ cmttypes "github.com/cometbft/cometbft/types"
+ dbm "github.com/cosmos/cosmos-db"
+ "github.com/cosmos/gogoproto/grpc"
+ "github.com/spf13/cobra"
+ "cosmossdk.io/log"
+ "cosmossdk.io/store/snapshots"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/server/api"
+ "github.com/cosmos/cosmos-sdk/server/config"
+)
+
+type (
+ // AppOptions defines an interface that is passed into an application
+ // constructor, typically used to set BaseApp options that are either supplied
+ // via config file or through CLI arguments/flags. The underlying implementation
+ // is defined by the server package and is typically implemented via a Viper
+ // literal defined on the server Context. Note, casting Get calls may not yield
+ // the expected types and could result in type assertion errors. It is recommend
+ // to either use the cast package or perform manual conversion for safety.
+ AppOptions interface {
+ Get(string)
+
+interface{
+}
+
+}
+
+ // Application defines an application interface that wraps abci.Application.
+ // The interface defines the necessary contracts to be implemented in order
+ // to fully bootstrap and start an application.
+ Application interface {
+ ABCI
+
+ RegisterAPIRoutes(*api.Server, config.APIConfig)
+
+ // RegisterGRPCServerWithSkipCheckHeader registers gRPC services directly with the gRPC
+ // server and bypass check header flag.
+ RegisterGRPCServerWithSkipCheckHeader(grpc.Server, bool)
+
+ // RegisterTxService registers the gRPC Query service for tx (such as tx
+ // simulation, fetching txs by hash...).
+ RegisterTxService(client.Context)
+
+ // RegisterTendermintService registers the gRPC Query service for CometBFT queries.
+ RegisterTendermintService(client.Context)
+
+ // RegisterNodeService registers the node gRPC Query service.
+ RegisterNodeService(client.Context, config.Config)
+
+ // CommitMultiStore return the multistore instance
+ CommitMultiStore()
+
+storetypes.CommitMultiStore
+
+ // Return the snapshot manager
+ SnapshotManager() *snapshots.Manager
+
+ // Close is called in start cmd to gracefully cleanup resources.
+ // Must be safe to be called multiple times.
+ Close()
+
+error
+}
+
+ // AppCreator is a function that allows us to lazily initialize an
+ // application using various configurations.
+ AppCreator func(log.Logger, dbm.DB, io.Writer, AppOptions)
+
+Application
+
+ // ModuleInitFlags takes a start command and adds modules specific init flags.
+ ModuleInitFlags func(startCmd *cobra.Command)
+
+ // ExportedApp represents an exported app state, along with
+ // validators, consensus params and latest app height.
+ ExportedApp struct {
+ // AppState is the application state as JSON.
+ AppState json.RawMessage
+ // Validators is the exported validator set.
+ Validators []cmttypes.GenesisValidator
+ // Height is the app's latest block height.
+ Height int64
+ // ConsensusParams are the exported consensus params for ABCI.
+ ConsensusParams cmtproto.ConsensusParams
+}
+
+ // AppExporter is a function that dumps all app state to
+ // JSON-serializable structure and returns the current validator set.
+ AppExporter func(
+ logger log.Logger,
+ db dbm.DB,
+ traceWriter io.Writer,
+ height int64,
+ forZeroHeight bool,
+ jailAllowedAddrs []string,
+ opts AppOptions,
+ modulesToExport []string,
+ ) (ExportedApp, error)
+)
+```
+
+Note: It is not possible to expose any [Protobuf `Msg` service](/sdk/v0.53/build/building-modules/messages-and-queries#messages) endpoints via gRPC. Transactions must be generated and signed using the CLI or programmatically before they can be broadcasted using gRPC. See [Generating, Signing, and Broadcasting Transactions](/sdk/v0.53/user/run-node/txs) for more information.
+
+The `grpc.Server` is a concrete gRPC server, which spawns and serves all gRPC query requests and a broadcast transaction request. This server can be configured inside `~/.simapp/config/app.toml`:
+
+* `grpc.enable = true|false` field defines if the gRPC server should be enabled. Defaults to `true`.
+* `grpc.address = {string}` field defines the `ip:port` the server should bind to. Defaults to `localhost:9090`.
+
+
+`~/.simapp` is the directory where the node's configuration and databases are stored. By default, it's set to `~/.{app_name}`.
+
+
+Once the gRPC server is started, you can send requests to it using a gRPC client. Some examples are given in our [Interact with the Node](/sdk/v0.53/user/run-node/interact-node#using-grpc) tutorial.
+
+An overview of all available gRPC endpoints shipped with the Cosmos SDK is available in the [Protobuf documentation](https://buf.build/cosmos/cosmos-sdk).
+
+
+As of v0.54, the gRPC server includes several enhancements:
+- New endpoints: `GetBlockResults` and `GetLatestBlockResults` expose CometBFT block results including finalize_block_events
+- Enhanced `GetSyncingResponse` with `earliest_block_height` and `latest_block_height` fields
+- Multi-client support for serving historical state with historical binaries
+
+
+## REST Server
+
+Cosmos SDK supports REST routes via gRPC-gateway.
+
+All routes are configured under the following fields in `~/.simapp/config/app.toml`:
+
+* `api.enable = true|false` field defines if the REST server should be enabled. Defaults to `false`.
+* `api.address = {string}` field defines the `ip:port` the server should bind to. Defaults to `tcp://localhost:1317`.
+* some additional API configuration options are defined in `~/.simapp/config/app.toml`, along with comments, please refer to that file directly.
+
+### gRPC-gateway REST Routes
+
+If, for various reasons, you cannot use gRPC (for example, you are building a web application, and browsers don't support HTTP2 on which gRPC is built), then the Cosmos SDK offers REST routes via gRPC-gateway.
+
+[gRPC-gateway](https://grpc-ecosystem.github.io/grpc-gateway/) is a tool to expose gRPC endpoints as REST endpoints. For each gRPC endpoint defined in a Protobuf `Query` service, the Cosmos SDK offers a REST equivalent. For instance, querying a balance could be done via the `/cosmos.bank.v1beta1.QueryAllBalances` gRPC endpoint, or alternatively via the gRPC-gateway `"/cosmos/bank/v1beta1/balances/{address}"` REST endpoint: both will return the same result. For each RPC method defined in a Protobuf `Query` service, the corresponding REST endpoint is defined as an option:
+
+```protobuf
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.53.0-rc.2/proto/cosmos/bank/v1beta1/query.proto#L23-L30
+```
+
+For application developers, gRPC-gateway REST routes need to be wired up to the REST server. This is done by calling the `RegisterGRPCGatewayRoutes` function on the ModuleManager.
+
+### Swagger
+
+A [Swagger](https://swagger.io/) (or OpenAPIv2) specification file is exposed under the `/swagger` route on the API server. Swagger is an open specification describing the API endpoints a server serves, including description, input arguments, return types and much more about each endpoint.
+
+Enabling the `/swagger` endpoint is configurable inside `~/.simapp/config/app.toml` via the `api.swagger` field, which is set to false by default.
+
+For application developers, you may want to generate your own Swagger definitions based on your custom modules.
+The Cosmos SDK's [Swagger generation script](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0-rc.2/scripts/protoc-swagger-gen.sh) is a good place to start.
+
+## CometBFT RPC
+
+Independently from the Cosmos SDK, CometBFT also exposes an RPC server. This RPC server can be configured by tuning parameters under the `rpc` table in `~/.simapp/config/config.toml`; the default listening address is `tcp://localhost:26657`. An OpenAPI specification of all CometBFT RPC endpoints is available [here](/cometbft/v0.38/docs/core/RPC).
+
+Some CometBFT RPC endpoints are directly related to the Cosmos SDK:
+
+* `/abci_query`: this endpoint will query the application for state. As the `path` parameter, you can send the following strings:
+ * any Protobuf fully-qualified service method, such as `/cosmos.bank.v1beta1.Query/AllBalances`. The `data` field should then include the method's request parameter(s) encoded as bytes using Protobuf.
+ * `/app/simulate`: this will simulate a transaction, and return some information such as gas used.
+ * `/app/version`: this will return the application's version.
+ * `/store/{storeName}/key`: this will directly query the named store for data associated with the key represented in the `data` parameter.
+ * `/store/{storeName}/subspace`: this will directly query the named store for key/value pairs in which the key has the value of the `data` parameter as a prefix.
+ * `/p2p/filter/addr/{port}`: this will return a filtered list of the node's P2P peers by address port.
+ * `/p2p/filter/id/{id}`: this will return a filtered list of the node's P2P peers by ID.
+* `/broadcast_tx_{sync,async,commit}`: these 3 endpoints will broadcast a transaction to other peers. CLI, gRPC and REST expose [a way to broadcast transactions](/sdk/v0.53/learn/advanced/transactions#broadcasting-the-transaction), but they all use these 3 CometBFT RPCs under the hood.
+
+## Comparison Table
+
+| Name | Advantages | Disadvantages |
+| ------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
+| gRPC | - can use code-generated stubs in various languages
- supports streaming and bidirectional communication (HTTP2)
- small wire binary sizes, faster transmission | - based on HTTP2, not available in browsers
- learning curve (mostly due to Protobuf) |
+| REST | - ubiquitous
- client libraries in all languages, faster implementation
| - only supports unary request-response communication (HTTP1.1)
- bigger over-the-wire message sizes (JSON) |
+| CometBFT RPC | - easy to use | - bigger over-the-wire message sizes (JSON) |
diff --git a/sdk/next/learn/advanced/node.mdx b/sdk/next/learn/advanced/node.mdx
new file mode 100644
index 000000000..c34952cbb
--- /dev/null
+++ b/sdk/next/learn/advanced/node.mdx
@@ -0,0 +1,4193 @@
+---
+title: Node Client (Daemon)
+---
+
+
+**Synopsis**
+The main endpoint of a Cosmos SDK application is the daemon client, otherwise known as the full-node client. The full-node runs the state-machine, starting from a genesis file. It connects to peers running the same client in order to receive and relay transactions, block proposals and signatures. The full-node is constituted of the application, defined with the Cosmos SDK, and of a consensus engine connected to the application via the ABCI.
+
+
+
+**Prerequisite Readings**
+
+* [Anatomy of an SDK application](/sdk/next/learn/beginner/app-anatomy)
+
+
+
+## `main` function
+
+The full-node client of any Cosmos SDK application is built by running a `main` function. The client is generally named by appending the `-d` suffix to the application name (e.g. `appd` for an application named `app`), and the `main` function is defined in a `./appd/cmd/main.go` file. Running this function creates an executable `appd` that comes with a set of commands. For an app named `app`, the main command is [`appd start`](#start-command), which starts the full-node.
+
+In general, developers will implement the `main.go` function with the following structure:
+
+* First, an [`encodingCodec`](/sdk/next/learn/advanced/encoding) is instantiated for the application.
+* Then, the `config` is retrieved and config parameters are set. This mainly involves setting the Bech32 prefixes for [addresses](/sdk/next/learn/beginner/accounts#addresses).
+
+```go expandable
+package types
+
+import (
+
+ "context"
+ "fmt"
+ "sync"
+ "github.com/cosmos/cosmos-sdk/version"
+)
+
+// DefaultKeyringServiceName defines a default service name for the keyring.
+const DefaultKeyringServiceName = "cosmos"
+
+// Config is the structure that holds the SDK configuration parameters.
+// This could be used to initialize certain configuration parameters for the SDK.
+type Config struct {
+ fullFundraiserPath string
+ bech32AddressPrefix map[string]string
+ txEncoder TxEncoder
+ addressVerifier func([]byte)
+
+error
+ mtx sync.RWMutex
+
+ // SLIP-44 related
+ purpose uint32
+ coinType uint32
+
+ sealed bool
+ sealedch chan struct{
+}
+}
+
+// cosmos-sdk wide global singleton
+var (
+ sdkConfig *Config
+ initConfig sync.Once
+)
+
+// New returns a new Config with default values.
+func NewConfig() *Config {
+ return &Config{
+ sealedch: make(chan struct{
+}),
+ bech32AddressPrefix: map[string]string{
+ "account_addr": Bech32PrefixAccAddr,
+ "validator_addr": Bech32PrefixValAddr,
+ "consensus_addr": Bech32PrefixConsAddr,
+ "account_pub": Bech32PrefixAccPub,
+ "validator_pub": Bech32PrefixValPub,
+ "consensus_pub": Bech32PrefixConsPub,
+},
+ fullFundraiserPath: FullFundraiserPath,
+
+ purpose: Purpose,
+ coinType: CoinType,
+ txEncoder: nil,
+}
+}
+
+// GetConfig returns the config instance for the SDK.
+func GetConfig() *Config {
+ initConfig.Do(func() {
+ sdkConfig = NewConfig()
+})
+
+return sdkConfig
+}
+
+// GetSealedConfig returns the config instance for the SDK if/once it is sealed.
+func GetSealedConfig(ctx context.Context) (*Config, error) {
+ config := GetConfig()
+
+select {
+ case <-config.sealedch:
+ return config, nil
+ case <-ctx.Done():
+ return nil, ctx.Err()
+}
+}
+
+func (config *Config)
+
+assertNotSealed() {
+ config.mtx.RLock()
+
+defer config.mtx.RUnlock()
+ if config.sealed {
+ panic("Config is sealed")
+}
+}
+
+// SetBech32PrefixForAccount builds the Config with Bech32 addressPrefix and publKeyPrefix for accounts
+// and returns the config instance
+func (config *Config)
+
+SetBech32PrefixForAccount(addressPrefix, pubKeyPrefix string) {
+ config.assertNotSealed()
+
+config.bech32AddressPrefix["account_addr"] = addressPrefix
+ config.bech32AddressPrefix["account_pub"] = pubKeyPrefix
+}
+
+// SetBech32PrefixForValidator builds the Config with Bech32 addressPrefix and publKeyPrefix for validators
+//
+// and returns the config instance
+func (config *Config)
+
+SetBech32PrefixForValidator(addressPrefix, pubKeyPrefix string) {
+ config.assertNotSealed()
+
+config.bech32AddressPrefix["validator_addr"] = addressPrefix
+ config.bech32AddressPrefix["validator_pub"] = pubKeyPrefix
+}
+
+// SetBech32PrefixForConsensusNode builds the Config with Bech32 addressPrefix and publKeyPrefix for consensus nodes
+// and returns the config instance
+func (config *Config)
+
+SetBech32PrefixForConsensusNode(addressPrefix, pubKeyPrefix string) {
+ config.assertNotSealed()
+
+config.bech32AddressPrefix["consensus_addr"] = addressPrefix
+ config.bech32AddressPrefix["consensus_pub"] = pubKeyPrefix
+}
+
+// SetTxEncoder builds the Config with TxEncoder used to marshal StdTx to bytes
+func (config *Config)
+
+SetTxEncoder(encoder TxEncoder) {
+ config.assertNotSealed()
+
+config.txEncoder = encoder
+}
+
+// SetAddressVerifier builds the Config with the provided function for verifying that addresses
+// have the correct format
+func (config *Config)
+
+SetAddressVerifier(addressVerifier func([]byte)
+
+error) {
+ config.assertNotSealed()
+
+config.addressVerifier = addressVerifier
+}
+
+// Set the FullFundraiserPath (BIP44Prefix)
+
+on the config.
+//
+// Deprecated: This method is supported for backward compatibility only and will be removed in a future release. Use SetPurpose and SetCoinType instead.
+func (config *Config)
+
+SetFullFundraiserPath(fullFundraiserPath string) {
+ config.assertNotSealed()
+
+config.fullFundraiserPath = fullFundraiserPath
+}
+
+// Set the BIP-0044 Purpose code on the config
+func (config *Config)
+
+SetPurpose(purpose uint32) {
+ config.assertNotSealed()
+
+config.purpose = purpose
+}
+
+// Set the BIP-0044 CoinType code on the config
+func (config *Config)
+
+SetCoinType(coinType uint32) {
+ config.assertNotSealed()
+
+config.coinType = coinType
+}
+
+// Seal seals the config such that the config state could not be modified further
+func (config *Config)
+
+Seal() *Config {
+ config.mtx.Lock()
+ if config.sealed {
+ config.mtx.Unlock()
+
+return config
+}
+
+ // signal sealed after state exposed/unlocked
+ config.sealed = true
+ config.mtx.Unlock()
+
+close(config.sealedch)
+
+return config
+}
+
+// GetBech32AccountAddrPrefix returns the Bech32 prefix for account address
+func (config *Config)
+
+GetBech32AccountAddrPrefix()
+
+string {
+ return config.bech32AddressPrefix["account_addr"]
+}
+
+// GetBech32ValidatorAddrPrefix returns the Bech32 prefix for validator address
+func (config *Config)
+
+GetBech32ValidatorAddrPrefix()
+
+string {
+ return config.bech32AddressPrefix["validator_addr"]
+}
+
+// GetBech32ConsensusAddrPrefix returns the Bech32 prefix for consensus node address
+func (config *Config)
+
+GetBech32ConsensusAddrPrefix()
+
+string {
+ return config.bech32AddressPrefix["consensus_addr"]
+}
+
+// GetBech32AccountPubPrefix returns the Bech32 prefix for account public key
+func (config *Config)
+
+GetBech32AccountPubPrefix()
+
+string {
+ return config.bech32AddressPrefix["account_pub"]
+}
+
+// GetBech32ValidatorPubPrefix returns the Bech32 prefix for validator public key
+func (config *Config)
+
+GetBech32ValidatorPubPrefix()
+
+string {
+ return config.bech32AddressPrefix["validator_pub"]
+}
+
+// GetBech32ConsensusPubPrefix returns the Bech32 prefix for consensus node public key
+func (config *Config)
+
+GetBech32ConsensusPubPrefix()
+
+string {
+ return config.bech32AddressPrefix["consensus_pub"]
+}
+
+// GetTxEncoder return function to encode transactions
+func (config *Config)
+
+GetTxEncoder()
+
+TxEncoder {
+ return config.txEncoder
+}
+
+// GetAddressVerifier returns the function to verify that addresses have the correct format
+func (config *Config)
+
+GetAddressVerifier()
+
+func([]byte)
+
+error {
+ return config.addressVerifier
+}
+
+// GetPurpose returns the BIP-0044 Purpose code on the config.
+func (config *Config)
+
+GetPurpose()
+
+uint32 {
+ return config.purpose
+}
+
+// GetCoinType returns the BIP-0044 CoinType code on the config.
+func (config *Config)
+
+GetCoinType()
+
+uint32 {
+ return config.coinType
+}
+
+// GetFullFundraiserPath returns the BIP44Prefix.
+//
+// Deprecated: This method is supported for backward compatibility only and will be removed in a future release. Use GetFullBIP44Path instead.
+func (config *Config)
+
+GetFullFundraiserPath()
+
+string {
+ return config.fullFundraiserPath
+}
+
+// GetFullBIP44Path returns the BIP44Prefix.
+func (config *Config)
+
+GetFullBIP44Path()
+
+string {
+ return fmt.Sprintf("m/%d'/%d'/0'/0/0", config.purpose, config.coinType)
+}
+
+func KeyringServiceName()
+
+string {
+ if len(version.Name) == 0 {
+ return DefaultKeyringServiceName
+}
+
+return version.Name
+}
+```
+
+* Using [cobra](https://github.com/spf13/cobra), the root command of the full-node client is created. After that, all the custom commands of the application are added using the `AddCommand()` method of `rootCmd`.
+* Add default server commands to `rootCmd` using the `server.AddCommands()` method. These commands are separated from the ones added above since they are standard and defined at Cosmos SDK level. They should be shared by all Cosmos SDK-based applications. They include the most important command: the [`start` command](#start-command).
+* Prepare and execute the `executor`.
+
+```go expandable
+package cli
+
+import (
+
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
+)
+
+const (
+ HomeFlag = "home"
+ TraceFlag = "trace"
+ OutputFlag = "output"
+ EncodingFlag = "encoding"
+)
+
+// Executable is the minimal interface to *corba.Command, so we can
+// wrap if desired before the test
+type Executable interface {
+ Execute()
+
+error
+}
+
+// PrepareBaseCmd is meant for CometBFT and other servers
+func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defaultHome string)
+
+Executor {
+ cobra.OnInitialize(func() {
+ initEnv(envPrefix)
+})
+
+cmd.PersistentFlags().StringP(HomeFlag, "", defaultHome, "directory for config and data")
+
+cmd.PersistentFlags().Bool(TraceFlag, false, "print out full stack trace on errors")
+
+cmd.PersistentPreRunE = concatCobraCmdFuncs(bindFlagsLoadViper, cmd.PersistentPreRunE)
+
+return Executor{
+ cmd, os.Exit
+}
+}
+
+// PrepareMainCmd is meant for client side libs that want some more flags
+//
+// This adds --encoding (hex, btc, base64)
+
+and --output (text, json)
+
+to
+// the command. These only really make sense in interactive commands.
+func PrepareMainCmd(cmd *cobra.Command, envPrefix, defaultHome string)
+
+Executor {
+ cmd.PersistentFlags().StringP(EncodingFlag, "e", "hex", "Binary encoding (hex|b64|btc)")
+
+cmd.PersistentFlags().StringP(OutputFlag, "o", "text", "Output format (text|json)")
+
+cmd.PersistentPreRunE = concatCobraCmdFuncs(validateOutput, cmd.PersistentPreRunE)
+
+return PrepareBaseCmd(cmd, envPrefix, defaultHome)
+}
+
+// initEnv sets to use ENV variables if set.
+func initEnv(prefix string) {
+ copyEnvVars(prefix)
+
+ // env variables with TM prefix (eg. TM_ROOT)
+
+viper.SetEnvPrefix(prefix)
+
+viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_"))
+
+viper.AutomaticEnv()
+}
+
+// This copies all variables like TMROOT to TM_ROOT,
+// so we can support both formats for the user
+func copyEnvVars(prefix string) {
+ prefix = strings.ToUpper(prefix)
+ ps := prefix + "_"
+ for _, e := range os.Environ() {
+ kv := strings.SplitN(e, "=", 2)
+ if len(kv) == 2 {
+ k, v := kv[0], kv[1]
+ if strings.HasPrefix(k, prefix) && !strings.HasPrefix(k, ps) {
+ k2 := strings.Replace(k, prefix, ps, 1)
+
+os.Setenv(k2, v)
+}
+
+}
+
+}
+}
+
+// Executor wraps the cobra Command with a nicer Execute method
+type Executor struct {
+ *cobra.Command
+ Exit func(int) // this is os.Exit by default, override in tests
+}
+
+type ExitCoder interface {
+ ExitCode()
+
+int
+}
+
+// execute adds all child commands to the root command sets flags appropriately.
+// This is called by main.main(). It only needs to happen once to the rootCmd.
+func (e Executor)
+
+Execute()
+
+error {
+ e.SilenceUsage = true
+ e.SilenceErrors = true
+ err := e.Command.Execute()
+ if err != nil {
+ if viper.GetBool(TraceFlag) {
+ const size = 64 << 10
+ buf := make([]byte, size)
+
+buf = buf[:runtime.Stack(buf, false)]
+ fmt.Fprintf(os.Stderr, "ERROR: %v\n%s\n", err, buf)
+}
+
+else {
+ fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
+}
+
+ // return error code 1 by default, can override it with a special error type
+ exitCode := 1
+ if ec, ok := err.(ExitCoder); ok {
+ exitCode = ec.ExitCode()
+}
+
+e.Exit(exitCode)
+}
+
+return err
+}
+
+type cobraCmdFunc func(cmd *cobra.Command, args []string)
+
+error
+
+// Returns a single function that calls each argument function in sequence
+// RunE, PreRunE, PersistentPreRunE, etc. all have this same signature
+func concatCobraCmdFuncs(fs ...cobraCmdFunc)
+
+cobraCmdFunc {
+ return func(cmd *cobra.Command, args []string)
+
+error {
+ for _, f := range fs {
+ if f != nil {
+ if err := f(cmd, args); err != nil {
+ return err
+}
+
+}
+
+}
+
+return nil
+}
+}
+
+// Bind all flags and read the config into viper
+func bindFlagsLoadViper(cmd *cobra.Command, args []string)
+
+error {
+ // cmd.Flags()
+
+includes flags from this command and all persistent flags from the parent
+ if err := viper.BindPFlags(cmd.Flags()); err != nil {
+ return err
+}
+ homeDir := viper.GetString(HomeFlag)
+
+viper.Set(HomeFlag, homeDir)
+
+viper.SetConfigName("config") // name of config file (without extension)
+
+viper.AddConfigPath(homeDir) // search root directory
+ viper.AddConfigPath(filepath.Join(homeDir, "config")) // search root directory /config
+
+ // If a config file is found, read it in.
+ if err := viper.ReadInConfig(); err == nil {
+ // stderr, so if we redirect output to json file, this doesn't appear
+ // fmt.Fprintln(os.Stderr, "Using config file:", viper.ConfigFileUsed())
+}
+
+else if _, ok := err.(viper.ConfigFileNotFoundError); !ok {
+ // ignore not found error, return other errors
+ return err
+}
+
+return nil
+}
+
+func validateOutput(cmd *cobra.Command, args []string)
+
+error {
+ // validate output format
+ output := viper.GetString(OutputFlag)
+ switch output {
+ case "text", "json":
+ default:
+ return fmt.Errorf("unsupported output format: %s", output)
+}
+
+return nil
+}
+```
+
+See an example of `main` function from the `simapp` application, the Cosmos SDK's application for demo purposes:
+
+```go expandable
+package main
+
+import (
+
+ "fmt"
+ "os"
+
+ clientv2helpers "cosmossdk.io/client/v2/helpers"
+ "cosmossdk.io/simapp"
+ "cosmossdk.io/simapp/simd/cmd"
+
+ svrcmd "github.com/cosmos/cosmos-sdk/server/cmd"
+)
+
+func main() {
+ rootCmd := cmd.NewRootCmd()
+ if err := svrcmd.Execute(rootCmd, clientv2helpers.EnvPrefix, simapp.DefaultNodeHome); err != nil {
+ fmt.Fprintln(rootCmd.OutOrStderr(), err)
+
+os.Exit(1)
+}
+}
+```
+
+## `start` command
+
+The `start` command is defined in the `/server` folder of the Cosmos SDK. It is added to the root command of the full-node client in the [`main` function](#main-function) and called by the end-user to start their node:
+
+```bash
+# For an example app named "app", the following command starts the full-node.
+appd start
+
+# Using the Cosmos SDK's own simapp, the following commands start the simapp node.
+simd start
+```
+
+As a reminder, the full-node is composed of three conceptual layers: the networking layer, the consensus layer and the application layer. The first two are generally bundled together in an entity called the consensus engine (CometBFT by default), while the third is the state-machine defined with the help of the Cosmos SDK. Currently, the Cosmos SDK uses CometBFT as the default consensus engine, meaning the start command is implemented to boot up a CometBFT node.
+
+The flow of the `start` command is pretty straightforward. First, it retrieves the `config` from the `context` in order to open the `db` (a [`leveldb`](https://github.com/syndtr/goleveldb) instance by default). This `db` contains the latest known state of the application (empty if the application is started for the first time).
+
+With the `db`, the `start` command creates a new instance of the application using an `appCreator` function:
+
+```go expandable
+package server
+
+import (
+
+ "bufio"
+ "context"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "path/filepath"
+ "runtime/pprof"
+ "strings"
+ "time"
+ "github.com/cometbft/cometbft/abci/server"
+ cmtcmd "github.com/cometbft/cometbft/cmd/cometbft/commands"
+ cmtcfg "github.com/cometbft/cometbft/config"
+ cmtjson "github.com/cometbft/cometbft/libs/json"
+ "github.com/cometbft/cometbft/node"
+ "github.com/cometbft/cometbft/p2p"
+ pvm "github.com/cometbft/cometbft/privval"
+ cmtstate "github.com/cometbft/cometbft/proto/tendermint/state"
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ "github.com/cometbft/cometbft/proxy"
+ rpchttp "github.com/cometbft/cometbft/rpc/client/http"
+ "github.com/cometbft/cometbft/rpc/client/local"
+ sm "github.com/cometbft/cometbft/state"
+ "github.com/cometbft/cometbft/store"
+ cmttypes "github.com/cometbft/cometbft/types"
+ dbm "github.com/cosmos/cosmos-db"
+ "github.com/hashicorp/go-metrics"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+ "golang.org/x/sync/errgroup"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+
+ pruningtypes "cosmossdk.io/store/pruning/types"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/server/api"
+ serverconfig "github.com/cosmos/cosmos-sdk/server/config"
+ servergrpc "github.com/cosmos/cosmos-sdk/server/grpc"
+ servercmtlog "github.com/cosmos/cosmos-sdk/server/log"
+ "github.com/cosmos/cosmos-sdk/server/types"
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ "github.com/cosmos/cosmos-sdk/types/mempool"
+ "github.com/cosmos/cosmos-sdk/version"
+ genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
+)
+
+const (
+ // CometBFT full-node start flags
+ flagWithComet = "with-comet"
+ flagAddress = "address"
+ flagTransport = "transport"
+ flagTraceStore = "trace-store"
+ flagCPUProfile = "cpu-profile"
+ FlagMinGasPrices = "minimum-gas-prices"
+ FlagQueryGasLimit = "query-gas-limit"
+ FlagHaltHeight = "halt-height"
+ FlagHaltTime = "halt-time"
+ FlagInterBlockCache = "inter-block-cache"
+ FlagUnsafeSkipUpgrades = "unsafe-skip-upgrades"
+ FlagTrace = "trace"
+ FlagInvCheckPeriod = "inv-check-period"
+
+ FlagPruning = "pruning"
+ FlagPruningKeepRecent = "pruning-keep-recent"
+ FlagPruningInterval = "pruning-interval"
+ FlagIndexEvents = "index-events"
+ FlagMinRetainBlocks = "min-retain-blocks"
+ FlagIAVLCacheSize = "iavl-cache-size"
+ FlagDisableIAVLFastNode = "iavl-disable-fastnode"
+ FlagIAVLSyncPruning = "iavl-sync-pruning"
+ FlagShutdownGrace = "shutdown-grace"
+
+ // state sync-related flags
+ FlagStateSyncSnapshotInterval = "state-sync.snapshot-interval"
+ FlagStateSyncSnapshotKeepRecent = "state-sync.snapshot-keep-recent"
+
+ // api-related flags
+ FlagAPIEnable = "api.enable"
+ FlagAPISwagger = "api.swagger"
+ FlagAPIAddress = "api.address"
+ FlagAPIMaxOpenConnections = "api.max-open-connections"
+ FlagRPCReadTimeout = "api.rpc-read-timeout"
+ FlagRPCWriteTimeout = "api.rpc-write-timeout"
+ FlagRPCMaxBodyBytes = "api.rpc-max-body-bytes"
+ FlagAPIEnableUnsafeCORS = "api.enabled-unsafe-cors"
+
+ // gRPC-related flags
+ flagGRPCOnly = "grpc-only"
+ flagGRPCEnable = "grpc.enable"
+ flagGRPCAddress = "grpc.address"
+ flagGRPCWebEnable = "grpc-web.enable"
+ flagGRPCSkipCheckHeader = "grpc.skip-check-header"
+
+ // mempool flags
+ FlagMempoolMaxTxs = "mempool.max-txs"
+
+ // testnet keys
+ KeyIsTestnet = "is-testnet"
+ KeyNewChainID = "new-chain-ID"
+ KeyNewOpAddr = "new-operator-addr"
+ KeyNewValAddr = "new-validator-addr"
+ KeyUserPubKey = "user-pub-key"
+ KeyTriggerTestnetUpgrade = "trigger-testnet-upgrade"
+)
+
+// StartCmdOptions defines options that can be customized in `StartCmdWithOptions`,
+type StartCmdOptions struct {
+ // DBOpener can be used to customize db opening, for example customize db options or support different db backends,
+ // default to the builtin db opener.
+ DBOpener func(rootDir string, backendType dbm.BackendType) (dbm.DB, error)
+ // PostSetup can be used to setup extra services under the same cancellable context,
+ // it's not called in stand-alone mode, only for in-process mode.
+ PostSetup func(svrCtx *Context, clientCtx client.Context, ctx context.Context, g *errgroup.Group)
+
+error
+ // PostSetupStandalone can be used to setup extra services under the same cancellable context,
+ PostSetupStandalone func(svrCtx *Context, clientCtx client.Context, ctx context.Context, g *errgroup.Group)
+
+error
+ // AddFlags add custom flags to start cmd
+ AddFlags func(cmd *cobra.Command)
+ // StartCommandHanlder can be used to customize the start command handler
+ StartCommandHandler func(svrCtx *Context, clientCtx client.Context, appCreator types.AppCreator, inProcessConsensus bool, opts StartCmdOptions)
+
+error
+}
+
+// StartCmd runs the service passed in, either stand-alone or in-process with
+// CometBFT.
+func StartCmd(appCreator types.AppCreator, defaultNodeHome string) *cobra.Command {
+ return StartCmdWithOptions(appCreator, defaultNodeHome, StartCmdOptions{
+})
+}
+
+// StartCmdWithOptions runs the service passed in, either stand-alone or in-process with
+// CometBFT.
+func StartCmdWithOptions(appCreator types.AppCreator, defaultNodeHome string, opts StartCmdOptions) *cobra.Command {
+ if opts.DBOpener == nil {
+ opts.DBOpener = openDB
+}
+ if opts.StartCommandHandler == nil {
+ opts.StartCommandHandler = start
+}
+ cmd := &cobra.Command{
+ Use: "start",
+ Short: "Run the full node",
+ Long: `Run the full node application with CometBFT in or out of process. By
+default, the application will run with CometBFT in process.
+
+Pruning options can be provided via the '--pruning' flag or alternatively with '--pruning-keep-recent', and
+'pruning-interval' together.
+
+For '--pruning' the options are as follows:
+
+default: the last 362880 states are kept, pruning at 10 block intervals
+nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node)
+
+everything: 2 latest states will be kept; pruning at 10 block intervals.
+custom: allow pruning options to be manually specified through 'pruning-keep-recent', and 'pruning-interval'
+
+Node halting configurations exist in the form of two flags: '--halt-height' and '--halt-time'. During
+the ABCI Commit phase, the node will check if the current block height is greater than or equal to
+the halt-height or if the current block time is greater than or equal to the halt-time. If so, the
+node will attempt to gracefully shutdown and the block will not be committed. In addition, the node
+will not be able to commit subsequent blocks.
+
+For profiling and benchmarking purposes, CPU profiling can be enabled via the '--cpu-profile' flag
+which accepts a path for the resulting pprof file.
+
+The node may be started in a 'query only' mode where only the gRPC and JSON HTTP
+API services are enabled via the 'grpc-only' flag. In this mode, CometBFT is
+bypassed and can be used when legacy queries are needed after an on-chain upgrade
+is performed. Note, when enabled, gRPC will also be automatically enabled.
+`,
+ RunE: func(cmd *cobra.Command, _ []string)
+
+error {
+ serverCtx := GetServerContextFromCmd(cmd)
+
+ _, err := GetPruningOptionsFromFlags(serverCtx.Viper)
+ if err != nil {
+ return err
+}
+
+clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+}
+
+withCMT, _ := cmd.Flags().GetBool(flagWithComet)
+ if !withCMT {
+ serverCtx.Logger.Info("starting ABCI without CometBFT")
+}
+
+err = wrapCPUProfile(serverCtx, func()
+
+error {
+ return opts.StartCommandHandler(serverCtx, clientCtx, appCreator, withCMT, opts)
+})
+
+serverCtx.Logger.Debug("received quit signal")
+
+graceDuration, _ := cmd.Flags().GetDuration(FlagShutdownGrace)
+ if graceDuration > 0 {
+ serverCtx.Logger.Info("graceful shutdown start", FlagShutdownGrace, graceDuration)
+ <-time.After(graceDuration)
+
+serverCtx.Logger.Info("graceful shutdown complete")
+}
+
+return err
+},
+}
+
+cmd.Flags().String(flags.FlagHome, defaultNodeHome, "The application home directory")
+
+addStartNodeFlags(cmd, opts)
+
+return cmd
+}
+
+func start(svrCtx *Context, clientCtx client.Context, appCreator types.AppCreator, withCmt bool, opts StartCmdOptions)
+
+error {
+ svrCfg, err := getAndValidateConfig(svrCtx)
+ if err != nil {
+ return err
+}
+
+app, appCleanupFn, err := startApp(svrCtx, appCreator, opts)
+ if err != nil {
+ return err
+}
+
+defer appCleanupFn()
+
+metrics, err := startTelemetry(svrCfg)
+ if err != nil {
+ return err
+}
+
+emitServerInfoMetrics()
+ if !withCmt {
+ return startStandAlone(svrCtx, svrCfg, clientCtx, app, metrics, opts)
+}
+
+return startInProcess(svrCtx, svrCfg, clientCtx, app, metrics, opts)
+}
+
+func startStandAlone(svrCtx *Context, svrCfg serverconfig.Config, clientCtx client.Context, app types.Application, metrics *telemetry.Metrics, opts StartCmdOptions)
+
+error {
+ addr := svrCtx.Viper.GetString(flagAddress)
+ transport := svrCtx.Viper.GetString(flagTransport)
+ cmtApp := NewCometABCIWrapper(app)
+
+svr, err := server.NewServer(addr, transport, cmtApp)
+ if err != nil {
+ return fmt.Errorf("error creating listener: %w", err)
+}
+
+svr.SetLogger(servercmtlog.CometLoggerWrapper{
+ Logger: svrCtx.Logger.With("module", "abci-server")
+})
+
+g, ctx := getCtx(svrCtx, false)
+
+ // Add the tx service to the gRPC router. We only need to register this
+ // service if API or gRPC is enabled, and avoid doing so in the general
+ // case, because it spawns a new local CometBFT RPC client.
+ if svrCfg.API.Enable || svrCfg.GRPC.Enable {
+ // create tendermint client
+ // assumes the rpc listen address is where tendermint has its rpc server
+ rpcclient, err := rpchttp.New(svrCtx.Config.RPC.ListenAddress, "/websocket")
+ if err != nil {
+ return err
+}
+ // re-assign for making the client available below
+ // do not use := to avoid shadowing clientCtx
+ clientCtx = clientCtx.WithClient(rpcclient)
+
+ // use the provided clientCtx to register the services
+ app.RegisterTxService(clientCtx)
+
+app.RegisterTendermintService(clientCtx)
+
+app.RegisterNodeService(clientCtx, svrCfg)
+}
+
+grpcSrv, clientCtx, err := startGrpcServer(ctx, g, svrCfg.GRPC, clientCtx, svrCtx, app)
+ if err != nil {
+ return err
+}
+
+err = startAPIServer(ctx, g, svrCfg, clientCtx, svrCtx, app, svrCtx.Config.RootDir, grpcSrv, metrics)
+ if err != nil {
+ return err
+}
+ if opts.PostSetupStandalone != nil {
+ if err := opts.PostSetupStandalone(svrCtx, clientCtx, ctx, g); err != nil {
+ return err
+}
+
+}
+
+g.Go(func()
+
+error {
+ if err := svr.Start(); err != nil {
+ svrCtx.Logger.Error("failed to start out-of-process ABCI server", "err", err)
+
+return err
+}
+
+ // Wait for the calling process to be canceled or close the provided context,
+ // so we can gracefully stop the ABCI server.
+ <-ctx.Done()
+
+svrCtx.Logger.Info("stopping the ABCI server...")
+
+return svr.Stop()
+})
+
+return g.Wait()
+}
+
+func startInProcess(svrCtx *Context, svrCfg serverconfig.Config, clientCtx client.Context, app types.Application,
+ metrics *telemetry.Metrics, opts StartCmdOptions,
+)
+
+error {
+ cmtCfg := svrCtx.Config
+ gRPCOnly := svrCtx.Viper.GetBool(flagGRPCOnly)
+
+g, ctx := getCtx(svrCtx, true)
+ if gRPCOnly {
+ // TODO: Generalize logic so that gRPC only is really in startStandAlone
+ svrCtx.Logger.Info("starting node in gRPC only mode; CometBFT is disabled")
+
+svrCfg.GRPC.Enable = true
+}
+
+else {
+ svrCtx.Logger.Info("starting node with ABCI CometBFT in-process")
+
+tmNode, cleanupFn, err := startCmtNode(ctx, cmtCfg, app, svrCtx)
+ if err != nil {
+ return err
+}
+
+defer cleanupFn()
+
+ // Add the tx service to the gRPC router. We only need to register this
+ // service if API or gRPC is enabled, and avoid doing so in the general
+ // case, because it spawns a new local CometBFT RPC client.
+ if svrCfg.API.Enable || svrCfg.GRPC.Enable {
+ // Re-assign for making the client available below do not use := to avoid
+ // shadowing the clientCtx variable.
+ clientCtx = clientCtx.WithClient(local.New(tmNode))
+
+app.RegisterTxService(clientCtx)
+
+app.RegisterTendermintService(clientCtx)
+
+app.RegisterNodeService(clientCtx, svrCfg)
+}
+
+}
+
+grpcSrv, clientCtx, err := startGrpcServer(ctx, g, svrCfg.GRPC, clientCtx, svrCtx, app)
+ if err != nil {
+ return err
+}
+
+err = startAPIServer(ctx, g, svrCfg, clientCtx, svrCtx, app, cmtCfg.RootDir, grpcSrv, metrics)
+ if err != nil {
+ return err
+}
+ if opts.PostSetup != nil {
+ if err := opts.PostSetup(svrCtx, clientCtx, ctx, g); err != nil {
+ return err
+}
+
+}
+
+ // wait for signal capture and gracefully return
+ // we are guaranteed to be waiting for the "ListenForQuitSignals" goroutine.
+ return g.Wait()
+}
+
+// TODO: Move nodeKey into being created within the function.
+func startCmtNode(
+ ctx context.Context,
+ cfg *cmtcfg.Config,
+ app types.Application,
+ svrCtx *Context,
+) (tmNode *node.Node, cleanupFn func(), err error) {
+ nodeKey, err := p2p.LoadOrGenNodeKey(cfg.NodeKeyFile())
+ if err != nil {
+ return nil, cleanupFn, err
+}
+ cmtApp := NewCometABCIWrapper(app)
+
+tmNode, err = node.NewNodeWithContext(
+ ctx,
+ cfg,
+ pvm.LoadOrGenFilePV(cfg.PrivValidatorKeyFile(), cfg.PrivValidatorStateFile()),
+ nodeKey,
+ proxy.NewLocalClientCreator(cmtApp),
+ getGenDocProvider(cfg),
+ cmtcfg.DefaultDBProvider,
+ node.DefaultMetricsProvider(cfg.Instrumentation),
+ servercmtlog.CometLoggerWrapper{
+ Logger: svrCtx.Logger
+},
+ )
+ if err != nil {
+ return tmNode, cleanupFn, err
+}
+ if err := tmNode.Start(); err != nil {
+ return tmNode, cleanupFn, err
+}
+
+cleanupFn = func() {
+ if tmNode != nil && tmNode.IsRunning() {
+ _ = tmNode.Stop()
+}
+
+}
+
+return tmNode, cleanupFn, nil
+}
+
+func getAndValidateConfig(svrCtx *Context) (serverconfig.Config, error) {
+ config, err := serverconfig.GetConfig(svrCtx.Viper)
+ if err != nil {
+ return config, err
+}
+ if err := config.ValidateBasic(); err != nil {
+ return config, err
+}
+
+return config, nil
+}
+
+// returns a function which returns the genesis doc from the genesis file.
+func getGenDocProvider(cfg *cmtcfg.Config)
+
+func() (*cmttypes.GenesisDoc, error) {
+ return func() (*cmttypes.GenesisDoc, error) {
+ appGenesis, err := genutiltypes.AppGenesisFromFile(cfg.GenesisFile())
+ if err != nil {
+ return nil, err
+}
+
+return appGenesis.ToGenesisDoc()
+}
+}
+
+func setupTraceWriter(svrCtx *Context) (traceWriter io.WriteCloser, cleanup func(), err error) {
+ // clean up the traceWriter when the server is shutting down
+ cleanup = func() {
+}
+ traceWriterFile := svrCtx.Viper.GetString(flagTraceStore)
+
+traceWriter, err = openTraceWriter(traceWriterFile)
+ if err != nil {
+ return traceWriter, cleanup, err
+}
+
+ // if flagTraceStore is not used then traceWriter is nil
+ if traceWriter != nil {
+ cleanup = func() {
+ if err = traceWriter.Close(); err != nil {
+ svrCtx.Logger.Error("failed to close trace writer", "err", err)
+}
+
+}
+
+}
+
+return traceWriter, cleanup, nil
+}
+
+func startGrpcServer(
+ ctx context.Context,
+ g *errgroup.Group,
+ config serverconfig.GRPCConfig,
+ clientCtx client.Context,
+ svrCtx *Context,
+ app types.Application,
+) (*grpc.Server, client.Context, error) {
+ if !config.Enable {
+ // return grpcServer as nil if gRPC is disabled
+ return nil, clientCtx, nil
+}
+ _, _, err := net.SplitHostPort(config.Address)
+ if err != nil {
+ return nil, clientCtx, err
+}
+ maxSendMsgSize := config.MaxSendMsgSize
+ if maxSendMsgSize == 0 {
+ maxSendMsgSize = serverconfig.DefaultGRPCMaxSendMsgSize
+}
+ maxRecvMsgSize := config.MaxRecvMsgSize
+ if maxRecvMsgSize == 0 {
+ maxRecvMsgSize = serverconfig.DefaultGRPCMaxRecvMsgSize
+}
+
+ // if gRPC is enabled, configure gRPC client for gRPC gateway
+ grpcClient, err := grpc.Dial( //nolint: staticcheck // ignore this line for this linter
+ config.Address,
+ grpc.WithTransportCredentials(insecure.NewCredentials()),
+ grpc.WithDefaultCallOptions(
+ grpc.ForceCodec(codec.NewProtoCodec(clientCtx.InterfaceRegistry).GRPCCodec()),
+ grpc.MaxCallRecvMsgSize(maxRecvMsgSize),
+ grpc.MaxCallSendMsgSize(maxSendMsgSize),
+ ),
+ )
+ if err != nil {
+ return nil, clientCtx, err
+}
+
+clientCtx = clientCtx.WithGRPCClient(grpcClient)
+
+svrCtx.Logger.Debug("gRPC client assigned to client context", "target", config.Address)
+
+grpcSrv, err := servergrpc.NewGRPCServer(clientCtx, app, config)
+ if err != nil {
+ return nil, clientCtx, err
+}
+
+ // Start the gRPC server in a goroutine. Note, the provided ctx will ensure
+ // that the server is gracefully shut down.
+ g.Go(func() error {
+
+
+ return servergrpc.StartGRPCServer(ctx, svrCtx.Logger.With("module", "grpc-server"), config, grpcSrv)
+})
+
+return grpcSrv, clientCtx, nil
+}
+
+func startAPIServer(
+ ctx context.Context,
+ g *errgroup.Group,
+ svrCfg serverconfig.Config,
+ clientCtx client.Context,
+ svrCtx *Context,
+ app types.Application,
+ home string,
+ grpcSrv *grpc.Server,
+ metrics *telemetry.Metrics,
+) error {
+
+
+ if !svrCfg.API.Enable {
+ return nil
+}
+
+clientCtx = clientCtx.WithHomeDir(home)
+ apiSrv := api.New(clientCtx, svrCtx.Logger.With("module", "api-server"), grpcSrv)
+
+app.RegisterAPIRoutes(apiSrv, svrCfg.API)
+ if svrCfg.Telemetry.Enabled {
+ apiSrv.SetTelemetry(metrics)
+}
+
+g.Go(func() error {
+
+
+ return apiSrv.Start(ctx, svrCfg)
+})
+
+return nil
+}
+
+func startTelemetry(cfg serverconfig.Config) (*telemetry.Metrics, error) {
+ return telemetry.New(cfg.Telemetry)
+}
+
+// wrapCPUProfile starts CPU profiling, if enabled, and executes the provided
+// callbackFn in a separate goroutine, then will wait for that callback to
+// return.
+//
+// NOTE: We expect the caller to handle graceful shutdown and signal handling.
+func wrapCPUProfile(svrCtx *Context, callbackFn func() error) error {
+
+
+
+
+ if cpuProfile := svrCtx.Viper.GetString(flagCPUProfile); cpuProfile != "" {
+ f, err := os.Create(cpuProfile)
+ if err != nil {
+ return err
+}
+
+svrCtx.Logger.Info("starting CPU profiler", "profile", cpuProfile)
+ if err := pprof.StartCPUProfile(f); err != nil {
+ return err
+}
+
+defer func() {
+ svrCtx.Logger.Info("stopping CPU profiler", "profile", cpuProfile)
+
+pprof.StopCPUProfile()
+ if err := f.Close(); err != nil {
+ svrCtx.Logger.Info("failed to close cpu-profile file", "profile", cpuProfile, "err", err.Error())
+}
+
+}()
+}
+
+return callbackFn()
+}
+
+// emitServerInfoMetrics emits server info related metrics using application telemetry.
+func emitServerInfoMetrics() {
+ var ls []metrics.Label
+ versionInfo := version.NewInfo()
+ if len(versionInfo.GoVersion) > 0 {
+ ls = append(ls, telemetry.NewLabel("go", versionInfo.GoVersion))
+}
+ if len(versionInfo.CosmosSdkVersion) > 0 {
+ ls = append(ls, telemetry.NewLabel("version", versionInfo.CosmosSdkVersion))
+}
+ if len(ls) == 0 {
+ return
+}
+
+telemetry.SetGaugeWithLabels([]string{"server", "info"}, 1, ls)
+
+}
+
+func getCtx(svrCtx *Context, block bool) (*errgroup.Group, context.Context) {
+ ctx, cancelFn := context.WithCancel(context.Background())
+
+g, ctx := errgroup.WithContext(ctx)
+ // listen for quit signals so the calling parent process can gracefully exit
+ ListenForQuitSignals(g, block, cancelFn, svrCtx.Logger)
+
+return g, ctx
+}
+
+func startApp(svrCtx *Context, appCreator types.AppCreator, opts StartCmdOptions) (app types.Application, cleanupFn func(), err error) {
+ traceWriter, traceCleanupFn, err := setupTraceWriter(svrCtx)
+ if err != nil {
+ return app, traceCleanupFn, err
+}
+ home := svrCtx.Config.RootDir
+ db, err := opts.DBOpener(home, GetAppDBBackend(svrCtx.Viper))
+ if err != nil {
+ return app, traceCleanupFn, err
+}
+ if isTestnet, ok := svrCtx.Viper.Get(KeyIsTestnet).(bool); ok && isTestnet {
+ app, err = testnetify(svrCtx, appCreator, db, traceWriter)
+ if err != nil {
+ return app, traceCleanupFn, err
+}
+
+} else {
+
+
+ app = appCreator(svrCtx.Logger, db, traceWriter, svrCtx.Viper)
+}
+
+cleanupFn = func() {
+ traceCleanupFn()
+ if localErr := app.Close(); localErr != nil {
+ svrCtx.Logger.Error(localErr.Error())
+}
+
+}
+
+return app, cleanupFn, nil
+}
+
+// InPlaceTestnetCreator utilizes the provided chainID and operatorAddress as well as the local private validator key to
+// control the network represented in the data folder. This is useful to create testnets nearly identical to your
+// mainnet environment.
+func InPlaceTestnetCreator(testnetAppCreator types.AppCreator) *cobra.Command {
+ opts := StartCmdOptions{
+}
+ if opts.DBOpener == nil {
+ opts.DBOpener = openDB
+}
+ if opts.StartCommandHandler == nil {
+ opts.StartCommandHandler = start
+}
+ cmd := &cobra.Command{
+ Use: "in-place-testnet [newChainID] [newOperatorAddress]",
+ Short: "Create and start a testnet from current local state",
+ Long: `Create and start a testnet from current local state.
+After utilizing this command the network will start. If the network is stopped,
+the normal "start" command should be used. Re-using this command on state that
+has already been modified by this command could result in unexpected behavior.
+
+Additionally, the first block may take up to one minute to be committed, depending
+on how old the block is. For instance, if a snapshot was taken weeks ago and we want
+to turn this into a testnet, it is possible lots of pending state needs to be committed
+(expiring locks, etc.). It is recommended that you should wait for this block to be committed
+before stopping the daemon.
+
+If the --trigger-testnet-upgrade flag is set, the upgrade handler specified by the flag will be run
+on the first block of the testnet.
+
+Regardless of whether the flag is set or not, if any new stores are introduced in the daemon being run,
+those stores will be registered in order to prevent panics. Therefore, you only need to set the flag if
+you want to test the upgrade handler itself.
+`,
+ Example: "in-place-testnet localosmosis osmo12smx2wdlyttvyzvzg54y2vnqwq2qjateuf7thj",
+ Args: cobra.ExactArgs(2),
+ RunE: func(cmd *cobra.Command, args []string) error {
+
+
+ serverCtx := GetServerContextFromCmd(cmd)
+ _, err := GetPruningOptionsFromFlags(serverCtx.Viper)
+ if err != nil {
+ return err
+}
+
+clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+}
+
+withCMT, _ := cmd.Flags().GetBool(flagWithComet)
+ if !withCMT {
+ serverCtx.Logger.Info("starting ABCI without CometBFT")
+}
+ newChainID := args[0]
+ newOperatorAddress := args[1]
+
+ skipConfirmation, _ := cmd.Flags().GetBool("skip-confirmation")
+ if !skipConfirmation {
+ // Confirmation prompt to prevent accidental modification of state.
+ reader := bufio.NewReader(os.Stdin)
+
+fmt.Println("This operation will modify state in your data folder and cannot be undone. Do you want to continue? (y/n)")
+
+text, _ := reader.ReadString('\n')
+ response := strings.TrimSpace(strings.ToLower(text))
+ if response != "y" && response != "yes" {
+ fmt.Println("Operation canceled.")
+
+return nil
+}
+
+}
+
+ // Set testnet keys to be used by the application.
+ // This is done to prevent changes to existing start API.
+ serverCtx.Viper.Set(KeyIsTestnet, true)
+
+serverCtx.Viper.Set(KeyNewChainID, newChainID)
+
+serverCtx.Viper.Set(KeyNewOpAddr, newOperatorAddress)
+
+err = wrapCPUProfile(serverCtx, func() error {
+
+
+ return opts.StartCommandHandler(serverCtx, clientCtx, testnetAppCreator, withCMT, opts)
+})
+
+serverCtx.Logger.Debug("received quit signal")
+
+graceDuration, _ := cmd.Flags().GetDuration(FlagShutdownGrace)
+ if graceDuration > 0 {
+ serverCtx.Logger.Info("graceful shutdown start", FlagShutdownGrace, graceDuration)
+ <-time.After(graceDuration)
+
+serverCtx.Logger.Info("graceful shutdown complete")
+}
+
+return err
+},
+}
+
+addStartNodeFlags(cmd, opts)
+
+cmd.Flags().String(KeyTriggerTestnetUpgrade, "", "If set (example: \"v21\"), triggers the v21 upgrade handler to run on the first block of the testnet")
+
+cmd.Flags().Bool("skip-confirmation", false, "Skip the confirmation prompt")
+
+return cmd
+}
+
+// testnetify modifies both state and blockStore, allowing the provided operator address and local validator key to control the network
+// that the state in the data folder represents. The chainID of the local genesis file is modified to match the provided chainID.
+func testnetify(ctx *Context, testnetAppCreator types.AppCreator, db dbm.DB, traceWriter io.WriteCloser) (types.Application, error) {
+ config := ctx.Config
+
+ newChainID, ok := ctx.Viper.Get(KeyNewChainID).(string)
+ if !ok {
+ return nil, fmt.Errorf("expected string for key %s", KeyNewChainID)
+}
+
+ // Modify app genesis chain ID and save to genesis file.
+ genFilePath := config.GenesisFile()
+
+appGen, err := genutiltypes.AppGenesisFromFile(genFilePath)
+ if err != nil {
+ return nil, err
+}
+
+appGen.ChainID = newChainID
+ if err := appGen.ValidateAndComplete(); err != nil {
+ return nil, err
+}
+ if err := appGen.SaveAs(genFilePath); err != nil {
+ return nil, err
+}
+
+ // Regenerate addrbook.json to prevent peers on old network from causing error logs.
+ addrBookPath := filepath.Join(config.RootDir, "config", "addrbook.json")
+ if err := os.Remove(addrBookPath); err != nil && !os.IsNotExist(err) {
+ return nil, fmt.Errorf("failed to remove existing addrbook.json: %w", err)
+}
+ emptyAddrBook := []byte("{}")
+
+ if err := os.WriteFile(addrBookPath, emptyAddrBook, 0o600); err != nil {
+ return nil, fmt.Errorf("failed to create empty addrbook.json: %w", err)
+}
+
+ // Load the comet genesis doc provider.
+ genDocProvider := node.DefaultGenesisDocProviderFunc(config)
+
+ // Initialize blockStore and stateDB.
+ blockStoreDB, err := cmtcfg.DefaultDBProvider(&cmtcfg.DBContext{
+ ID: "blockstore",
+ Config: config,
+})
+ if err != nil {
+ return nil, err
+}
+ blockStore := store.NewBlockStore(blockStoreDB)
+
+stateDB, err := cmtcfg.DefaultDBProvider(&cmtcfg.DBContext{
+ ID: "state",
+ Config: config,
+})
+ if err != nil {
+ return nil, err
+}
+
+defer blockStore.Close()
+
+defer stateDB.Close()
+ privValidator := pvm.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
+
+userPubKey, err := privValidator.GetPubKey()
+ if err != nil {
+ return nil, err
+}
+ validatorAddress := userPubKey.Address()
+ stateStore := sm.NewStore(stateDB, sm.StoreOptions{
+ DiscardABCIResponses: config.Storage.DiscardABCIResponses,
+})
+
+state, genDoc, err := node.LoadStateFromDBOrGenesisDocProvider(stateDB, genDocProvider)
+ if err != nil {
+ return nil, err
+}
+
+ctx.Viper.Set(KeyNewValAddr, validatorAddress)
+
+ctx.Viper.Set(KeyUserPubKey, userPubKey)
+ testnetApp := testnetAppCreator(ctx.Logger, db, traceWriter, ctx.Viper)
+
+ // We need to create a temporary proxyApp to get the initial state of the application.
+ // Depending on how the node was stopped, the application height can differ from the blockStore height.
+ // This height difference changes how we go about modifying the state.
+ cmtApp := NewCometABCIWrapper(testnetApp)
+ _, context := getCtx(ctx, true)
+ clientCreator := proxy.NewLocalClientCreator(cmtApp)
+ metrics := node.DefaultMetricsProvider(cmtcfg.DefaultConfig().Instrumentation)
+ _, _, _, _, proxyMetrics, _, _ := metrics(genDoc.ChainID)
+ proxyApp := proxy.NewAppConns(clientCreator, proxyMetrics)
+ if err := proxyApp.Start(); err != nil {
+ return nil, fmt.Errorf("error starting proxy app connections: %w", err)
+}
+
+res, err := proxyApp.Query().Info(context, proxy.RequestInfo)
+ if err != nil {
+ return nil, fmt.Errorf("error calling Info: %w", err)
+}
+
+err = proxyApp.Stop()
+ if err != nil {
+ return nil, err
+}
+ appHash := res.LastBlockAppHash
+ appHeight := res.LastBlockHeight
+
+ var block *cmttypes.Block
+ switch {
+ case appHeight == blockStore.Height():
+ block = blockStore.LoadBlock(blockStore.Height())
+ // If the state's last blockstore height does not match the app and blockstore height, we likely stopped with the halt height flag.
+ if state.LastBlockHeight != appHeight {
+ state.LastBlockHeight = appHeight
+ block.AppHash = appHash
+ state.AppHash = appHash
+} else {
+
+
+ // Node was likely stopped via SIGTERM, delete the next block's seen commit
+ err := blockStoreDB.Delete(fmt.Appendf(nil, "SC:%v", blockStore.Height()+1))
+ if err != nil {
+ return nil, err
+}
+
+}
+ case blockStore.Height() > state.LastBlockHeight:
+ // This state usually occurs when we gracefully stop the node.
+ err = blockStore.DeleteLatestBlock()
+ if err != nil {
+ return nil, err
+}
+
+block = blockStore.LoadBlock(blockStore.Height())
+
+default:
+ // If there is any other state, we just load the block
+ block = blockStore.LoadBlock(blockStore.Height())
+}
+
+block.ChainID = newChainID
+ state.ChainID = newChainID
+
+ block.LastBlockID = state.LastBlockID
+ block.LastCommit.BlockID = state.LastBlockID
+
+ // Create a vote from our validator
+ vote := cmttypes.Vote{
+ Type: cmtproto.PrecommitType,
+ Height: state.LastBlockHeight,
+ Round: 0,
+ BlockID: state.LastBlockID,
+ Timestamp: time.Now(),
+ ValidatorAddress: validatorAddress,
+ ValidatorIndex: 0,
+ Signature: []byte{
+},
+}
+
+ // Sign the vote, and copy the proto changes from the act of signing to the vote itself
+ voteProto := vote.ToProto()
+
+err = privValidator.SignVote(newChainID, voteProto)
+ if err != nil {
+ return nil, err
+}
+
+vote.Signature = voteProto.Signature
+ vote.Timestamp = voteProto.Timestamp
+
+ // Modify the block's lastCommit to be signed only by our validator
+ block.LastCommit.Signatures[0].ValidatorAddress = validatorAddress
+ block.LastCommit.Signatures[0].Signature = vote.Signature
+ block.LastCommit.Signatures = []cmttypes.CommitSig{
+ block.LastCommit.Signatures[0],
+}
+
+ // Load the seenCommit of the lastBlockHeight and modify it to be signed from our validator
+ seenCommit := blockStore.LoadSeenCommit(state.LastBlockHeight)
+
+seenCommit.BlockID = state.LastBlockID
+ seenCommit.Round = vote.Round
+ seenCommit.Signatures[0].Signature = vote.Signature
+ seenCommit.Signatures[0].ValidatorAddress = validatorAddress
+ seenCommit.Signatures[0].Timestamp = vote.Timestamp
+ seenCommit.Signatures = []cmttypes.CommitSig{
+ seenCommit.Signatures[0],
+}
+
+err = blockStore.SaveSeenCommit(state.LastBlockHeight, seenCommit)
+ if err != nil {
+ return nil, err
+}
+
+ // Create ValidatorSet struct containing just our validator.
+ newVal := &cmttypes.Validator{
+ Address: validatorAddress,
+ PubKey: userPubKey,
+ VotingPower: 900000000000000,
+}
+ newValSet := &cmttypes.ValidatorSet{
+ Validators: []*cmttypes.Validator{
+ newVal,
+},
+ Proposer: newVal,
+}
+
+ // Replace all valSets in state to be the valSet with just our validator.
+ state.Validators = newValSet
+ state.LastValidators = newValSet
+ state.NextValidators = newValSet
+ state.LastHeightValidatorsChanged = blockStore.Height()
+
+err = stateStore.Save(state)
+ if err != nil {
+ return nil, err
+}
+
+ // Create a ValidatorsInfo struct to store in stateDB.
+ valSet, err := state.Validators.ToProto()
+ if err != nil {
+ return nil, err
+}
+ valInfo := &cmtstate.ValidatorsInfo{
+ ValidatorSet: valSet,
+ LastHeightChanged: state.LastBlockHeight,
+}
+
+buf, err := valInfo.Marshal()
+ if err != nil {
+ return nil, err
+}
+
+ // Modify Validators stateDB entry.
+ err = stateDB.Set(fmt.Appendf(nil, "validatorsKey:%v", blockStore.Height()), buf)
+ if err != nil {
+ return nil, err
+}
+
+ // Modify LastValidators stateDB entry.
+ err = stateDB.Set(fmt.Appendf(nil, "validatorsKey:%v", blockStore.Height()-1), buf)
+ if err != nil {
+ return nil, err
+}
+
+ // Modify NextValidators stateDB entry.
+ err = stateDB.Set(fmt.Appendf(nil, "validatorsKey:%v", blockStore.Height()+1), buf)
+ if err != nil {
+ return nil, err
+}
+
+ // Since we modified the chainID, we set the new genesisDoc in the stateDB.
+ b, err := cmtjson.Marshal(genDoc)
+ if err != nil {
+ return nil, err
+}
+ if err := stateDB.SetSync([]byte("genesisDoc"), b); err != nil {
+ return nil, err
+}
+
+return testnetApp, err
+}
+
+// addStartNodeFlags should be added to any CLI commands that start the network.
+func addStartNodeFlags(cmd *cobra.Command, opts StartCmdOptions) {
+ cmd.Flags().Bool(flagWithComet, true, "Run abci app embedded in-process with CometBFT")
+
+cmd.Flags().String(flagAddress, "tcp://127.0.0.1:26658", "Listen address")
+
+cmd.Flags().String(flagTransport, "socket", "Transport protocol: socket, grpc")
+
+cmd.Flags().String(flagTraceStore, "", "Enable KVStore tracing to an output file")
+
+cmd.Flags().String(FlagMinGasPrices, "", "Minimum gas prices to accept for transactions; Any fee in a tx must meet this minimum (e.g. 0.01photino;0.0001stake)")
+
+cmd.Flags().Uint64(FlagQueryGasLimit, 0, "Maximum gas a Rest/Grpc query can consume. Blank and 0 imply unbounded.")
+
+cmd.Flags().IntSlice(FlagUnsafeSkipUpgrades, []int{
+}, "Skip a set of upgrade heights to continue the old binary")
+
+cmd.Flags().Uint64(FlagHaltHeight, 0, "Block height at which to gracefully halt the chain and shutdown the node")
+
+cmd.Flags().Uint64(FlagHaltTime, 0, "Minimum block time (in Unix seconds) at which to gracefully halt the chain and shutdown the node")
+
+
+
+cmd.Flags().Bool(FlagInterBlockCache, true, "Enable inter-block caching")
+
+cmd.Flags().String(flagCPUProfile, "", "Enable CPU profiling and write to the provided file")
+
+cmd.Flags().Bool(FlagTrace, false, "Provide full stack traces for errors in ABCI Log")
+
+cmd.Flags().String(FlagPruning, pruningtypes.PruningOptionDefault, "Pruning strategy (default|nothing|everything|custom)")
+
+cmd.Flags().Uint64(FlagPruningKeepRecent, 0, "Number of recent heights to keep on disk (ignored if pruning is not 'custom')")
+
+cmd.Flags().Uint64(FlagPruningInterval, 0, "Height interval at which pruned heights are removed from disk (ignored if pruning is not 'custom')")
+
+cmd.Flags().Uint(FlagInvCheckPeriod, 0, "Assert registered invariants every N blocks")
+
+cmd.Flags().Uint64(FlagMinRetainBlocks, 0, "Minimum block height offset during ABCI commit to prune CometBFT blocks")
+
+cmd.Flags().Bool(FlagAPIEnable, false, "Define if the API server should be enabled")
+
+cmd.Flags().Bool(FlagAPISwagger, false, "Define if swagger documentation should automatically be registered (Note: the API must also be enabled)")
+
+cmd.Flags().String(FlagAPIAddress, serverconfig.DefaultAPIAddress, "the API server address to listen on")
+
+cmd.Flags().Uint(FlagAPIMaxOpenConnections, 1000, "Define the number of maximum open connections")
+
+cmd.Flags().Uint(FlagRPCReadTimeout, 10, "Define the CometBFT RPC read timeout (in seconds)")
+
+cmd.Flags().Uint(FlagRPCWriteTimeout, 0, "Define the CometBFT RPC write timeout (in seconds)")
+
+cmd.Flags().Uint(FlagRPCMaxBodyBytes, 1000000, "Define the CometBFT maximum request body (in bytes)")
+
+cmd.Flags().Bool(FlagAPIEnableUnsafeCORS, false, "Define if CORS should be enabled (unsafe - use it at your own risk)")
+
+cmd.Flags().Bool(flagGRPCOnly, false, "Start the node in gRPC query only mode (no CometBFT process is started)")
+
+cmd.Flags().Bool(flagGRPCEnable, true, "Define if the gRPC server should be enabled")
+
+cmd.Flags().String(flagGRPCAddress, serverconfig.DefaultGRPCAddress, "the gRPC server address to listen on")
+
+cmd.Flags().Bool(flagGRPCWebEnable, true, "Define if the gRPC-Web server should be enabled. (Note: gRPC must also be enabled)")
+
+cmd.Flags().Uint64(FlagStateSyncSnapshotInterval, 0, "State sync snapshot interval")
+
+cmd.Flags().Uint32(FlagStateSyncSnapshotKeepRecent, 2, "State sync snapshot to keep")
+
+cmd.Flags().Bool(FlagDisableIAVLFastNode, false, "Disable fast node for IAVL tree")
+
+cmd.Flags().Int(FlagMempoolMaxTxs, mempool.DefaultMaxTx, "Sets MaxTx value for the app-side mempool")
+
+cmd.Flags().Duration(FlagShutdownGrace, 0*time.Second, "On Shutdown, duration to wait for resource clean up")
+
+ // support old flags name for backwards compatibility
+ cmd.Flags().SetNormalizeFunc(func(f *pflag.FlagSet, name string) pflag.NormalizedName {
+
+
+ if name == "with-tendermint" {
+ name = flagWithComet
+}
+
+return pflag.NormalizedName(name)
+})
+
+ // add support for all CometBFT-specific command line options
+ cmtcmd.AddNodeFlags(cmd)
+ if opts.AddFlags != nil {
+ opts.AddFlags(cmd)
+}
+}
+```
+
+Note that an `appCreator` is a function that fulfills the `AppCreator` signature:
+
+```go expandable
+package types
+
+import (
+
+ "encoding/json"
+ "io"
+
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ cmttypes "github.com/cometbft/cometbft/types"
+ dbm "github.com/cosmos/cosmos-db"
+ "github.com/cosmos/gogoproto/grpc"
+ "github.com/spf13/cobra"
+ "cosmossdk.io/log"
+ "cosmossdk.io/store/snapshots"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/server/api"
+ "github.com/cosmos/cosmos-sdk/server/config"
+)
+
+type (
+ // AppOptions defines an interface that is passed into an application
+ // constructor, typically used to set BaseApp options that are either supplied
+ // via config file or through CLI arguments/flags. The underlying implementation
+ // is defined by the server package and is typically implemented via a Viper
+ // literal defined on the server Context. Note, casting Get calls may not yield
+ // the expected types and could result in type assertion errors. It is recommend
+ // to either use the cast package or perform manual conversion for safety.
+ AppOptions interface {
+ Get(string) any
+
+
+}
+
+ // Application defines an application interface that wraps abci.Application.
+ // The interface defines the necessary contracts to be implemented in order
+ // to fully bootstrap and start an application.
+ Application interface {
+ ABCI
+
+ RegisterAPIRoutes(*api.Server, config.APIConfig)
+
+ // RegisterGRPCServerWithSkipCheckHeader registers gRPC services directly with the gRPC
+ // server and bypass check header flag.
+ RegisterGRPCServerWithSkipCheckHeader(grpc.Server, bool)
+
+ // RegisterTxService registers the gRPC Query service for tx (such as tx
+ // simulation, fetching txs by hash...).
+ RegisterTxService(client.Context)
+
+ // RegisterTendermintService registers the gRPC Query service for CometBFT queries.
+ RegisterTendermintService(client.Context)
+
+ // RegisterNodeService registers the node gRPC Query service.
+ RegisterNodeService(client.Context, config.Config)
+
+ // CommitMultiStore return the multistore instance
+ CommitMultiStore() storetypes.CommitMultiStore
+
+
+
+ // Return the snapshot manager
+ SnapshotManager() *snapshots.Manager
+
+ // Close is called in start cmd to gracefully cleanup resources.
+ // Must be safe to be called multiple times.
+ Close() error
+
+
+}
+
+ // AppCreator is a function that allows us to lazily initialize an
+ // application using various configurations.
+ AppCreator func(log.Logger, dbm.DB, io.Writer, AppOptions) Application
+
+
+
+ // ModuleInitFlags takes a start command and adds modules specific init flags.
+ ModuleInitFlags func(startCmd *cobra.Command)
+
+ // ExportedApp represents an exported app state, along with
+ // validators, consensus params and latest app height.
+ ExportedApp struct {
+ // AppState is the application state as JSON.
+ AppState json.RawMessage
+ // Validators is the exported validator set.
+ Validators []cmttypes.GenesisValidator
+ // Height is the app's latest block height.
+ Height int64
+ // ConsensusParams are the exported consensus params for ABCI.
+ ConsensusParams cmtproto.ConsensusParams
+}
+
+ // AppExporter is a function that dumps all app state to
+ // JSON-serializable structure and returns the current validator set.
+ AppExporter func(
+ logger log.Logger,
+ db dbm.DB,
+ traceWriter io.Writer,
+ height int64,
+ forZeroHeight bool,
+ jailAllowedAddrs []string,
+ opts AppOptions,
+ modulesToExport []string,
+ ) (ExportedApp, error)
+)
+```
+
+In practice, the [constructor of the application](/sdk/v0.53/learn/beginner/app-anatomy#constructor-function) is passed as the `appCreator`.
+
+```go
+// Reference: https://github.com/cosmos/cosmos-sdk/blob/v0.53.0/simapp/simd/cmd/root_v2.go#L294-L308
+```
+
+Then, the instance of `app` is used to instantiate a new CometBFT node:
+
+```go expandable
+package server
+
+import (
+
+ "bufio"
+ "context"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "path/filepath"
+ "runtime/pprof"
+ "strings"
+ "time"
+ "github.com/cometbft/cometbft/abci/server"
+ cmtcmd "github.com/cometbft/cometbft/cmd/cometbft/commands"
+ cmtcfg "github.com/cometbft/cometbft/config"
+ cmtjson "github.com/cometbft/cometbft/libs/json"
+ "github.com/cometbft/cometbft/node"
+ "github.com/cometbft/cometbft/p2p"
+ pvm "github.com/cometbft/cometbft/privval"
+ cmtstate "github.com/cometbft/cometbft/proto/tendermint/state"
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ "github.com/cometbft/cometbft/proxy"
+ rpchttp "github.com/cometbft/cometbft/rpc/client/http"
+ "github.com/cometbft/cometbft/rpc/client/local"
+ sm "github.com/cometbft/cometbft/state"
+ "github.com/cometbft/cometbft/store"
+ cmttypes "github.com/cometbft/cometbft/types"
+ dbm "github.com/cosmos/cosmos-db"
+ "github.com/hashicorp/go-metrics"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+ "golang.org/x/sync/errgroup"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+
+ pruningtypes "cosmossdk.io/store/pruning/types"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/server/api"
+ serverconfig "github.com/cosmos/cosmos-sdk/server/config"
+ servergrpc "github.com/cosmos/cosmos-sdk/server/grpc"
+ servercmtlog "github.com/cosmos/cosmos-sdk/server/log"
+ "github.com/cosmos/cosmos-sdk/server/types"
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ "github.com/cosmos/cosmos-sdk/types/mempool"
+ "github.com/cosmos/cosmos-sdk/version"
+ genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
+)
+
+const (
+ // CometBFT full-node start flags
+ flagWithComet = "with-comet"
+ flagAddress = "address"
+ flagTransport = "transport"
+ flagTraceStore = "trace-store"
+ flagCPUProfile = "cpu-profile"
+ FlagMinGasPrices = "minimum-gas-prices"
+ FlagQueryGasLimit = "query-gas-limit"
+ FlagHaltHeight = "halt-height"
+ FlagHaltTime = "halt-time"
+ FlagInterBlockCache = "inter-block-cache"
+ FlagUnsafeSkipUpgrades = "unsafe-skip-upgrades"
+ FlagTrace = "trace"
+ FlagInvCheckPeriod = "inv-check-period"
+
+ FlagPruning = "pruning"
+ FlagPruningKeepRecent = "pruning-keep-recent"
+ FlagPruningInterval = "pruning-interval"
+ FlagIndexEvents = "index-events"
+ FlagMinRetainBlocks = "min-retain-blocks"
+ FlagIAVLCacheSize = "iavl-cache-size"
+ FlagDisableIAVLFastNode = "iavl-disable-fastnode"
+ FlagIAVLSyncPruning = "iavl-sync-pruning"
+ FlagShutdownGrace = "shutdown-grace"
+
+ // state sync-related flags
+ FlagStateSyncSnapshotInterval = "state-sync.snapshot-interval"
+ FlagStateSyncSnapshotKeepRecent = "state-sync.snapshot-keep-recent"
+
+ // api-related flags
+ FlagAPIEnable = "api.enable"
+ FlagAPISwagger = "api.swagger"
+ FlagAPIAddress = "api.address"
+ FlagAPIMaxOpenConnections = "api.max-open-connections"
+ FlagRPCReadTimeout = "api.rpc-read-timeout"
+ FlagRPCWriteTimeout = "api.rpc-write-timeout"
+ FlagRPCMaxBodyBytes = "api.rpc-max-body-bytes"
+ FlagAPIEnableUnsafeCORS = "api.enabled-unsafe-cors"
+
+ // gRPC-related flags
+ flagGRPCOnly = "grpc-only"
+ flagGRPCEnable = "grpc.enable"
+ flagGRPCAddress = "grpc.address"
+ flagGRPCWebEnable = "grpc-web.enable"
+ flagGRPCSkipCheckHeader = "grpc.skip-check-header"
+
+ // mempool flags
+ FlagMempoolMaxTxs = "mempool.max-txs"
+
+ // testnet keys
+ KeyIsTestnet = "is-testnet"
+ KeyNewChainID = "new-chain-ID"
+ KeyNewOpAddr = "new-operator-addr"
+ KeyNewValAddr = "new-validator-addr"
+ KeyUserPubKey = "user-pub-key"
+ KeyTriggerTestnetUpgrade = "trigger-testnet-upgrade"
+)
+
+// StartCmdOptions defines options that can be customized in `StartCmdWithOptions`.
+type StartCmdOptions struct {
+ // DBOpener can be used to customize db opening, for example customize db options or support different db backends,
+ // default to the builtin db opener.
+ DBOpener func(rootDir string, backendType dbm.BackendType) (dbm.DB, error)
+ // PostSetup can be used to setup extra services under the same cancellable context,
+ // it's not called in stand-alone mode, only for in-process mode.
+ PostSetup func(svrCtx *Context, clientCtx client.Context, ctx context.Context, g *errgroup.Group) error
+
+
+ // PostSetupStandalone can be used to setup extra services under the same cancellable context,
+ PostSetupStandalone func(svrCtx *Context, clientCtx client.Context, ctx context.Context, g *errgroup.Group) error
+
+
+ // AddFlags add custom flags to start cmd
+ AddFlags func(cmd *cobra.Command)
+ // StartCommandHandler can be used to customize the start command handler
+ StartCommandHandler func(svrCtx *Context, clientCtx client.Context, appCreator types.AppCreator, inProcessConsensus bool, opts StartCmdOptions) error
+
+
+}
+
+// StartCmd runs the service passed in, either stand-alone or in-process with
+// CometBFT.
+func StartCmd(appCreator types.AppCreator, defaultNodeHome string) *cobra.Command {
+ return StartCmdWithOptions(appCreator, defaultNodeHome, StartCmdOptions{
+})
+}
+
+// StartCmdWithOptions runs the service passed in, either stand-alone or in-process with
+// CometBFT.
+func StartCmdWithOptions(appCreator types.AppCreator, defaultNodeHome string, opts StartCmdOptions) *cobra.Command {
+ if opts.DBOpener == nil {
+ opts.DBOpener = openDB
+}
+ if opts.StartCommandHandler == nil {
+ opts.StartCommandHandler = start
+}
+ cmd := &cobra.Command{
+ Use: "start",
+ Short: "Run the full node",
+ Long: `Run the full node application with CometBFT in or out of process. By
+default, the application will run with CometBFT in process.
+
+Pruning options can be provided via the '--pruning' flag or alternatively with '--pruning-keep-recent', and
+'pruning-interval' together.
+
+For '--pruning' the options are as follows:
+
+default: the last 362880 states are kept, pruning at 10 block intervals
+nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node)
+
+everything: 2 latest states will be kept; pruning at 10 block intervals.
+custom: allow pruning options to be manually specified through 'pruning-keep-recent', and 'pruning-interval'
+
+Node halting configurations exist in the form of two flags: '--halt-height' and '--halt-time'. During
+the ABCI Commit phase, the node will check if the current block height is greater than or equal to
+the halt-height or if the current block time is greater than or equal to the halt-time. If so, the
+node will attempt to gracefully shutdown and the block will not be committed. In addition, the node
+will not be able to commit subsequent blocks.
+
+For profiling and benchmarking purposes, CPU profiling can be enabled via the '--cpu-profile' flag
+which accepts a path for the resulting pprof file.
+
+The node may be started in a 'query only' mode where only the gRPC and JSON HTTP
+API services are enabled via the 'grpc-only' flag. In this mode, CometBFT is
+bypassed and can be used when legacy queries are needed after an on-chain upgrade
+is performed. Note, when enabled, gRPC will also be automatically enabled.
+`,
+ RunE: func(cmd *cobra.Command, _ []string) error {
+
+
+ serverCtx := GetServerContextFromCmd(cmd)
+
+ _, err := GetPruningOptionsFromFlags(serverCtx.Viper)
+ if err != nil {
+ return err
+}
+
+clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+}
+
+withCMT, _ := cmd.Flags().GetBool(flagWithComet)
+ if !withCMT {
+ serverCtx.Logger.Info("starting ABCI without CometBFT")
+}
+
+err = wrapCPUProfile(serverCtx, func() error {
+
+
+ return opts.StartCommandHandler(serverCtx, clientCtx, appCreator, withCMT, opts)
+})
+
+serverCtx.Logger.Debug("received quit signal")
+
+graceDuration, _ := cmd.Flags().GetDuration(FlagShutdownGrace)
+ if graceDuration > 0 {
+ serverCtx.Logger.Info("graceful shutdown start", FlagShutdownGrace, graceDuration)
+ <-time.After(graceDuration)
+
+serverCtx.Logger.Info("graceful shutdown complete")
+}
+
+return err
+},
+}
+
+cmd.Flags().String(flags.FlagHome, defaultNodeHome, "The application home directory")
+
+addStartNodeFlags(cmd, opts)
+
+return cmd
+}
+
+// start runs the application. Depending on withCmt it either launches an
+// in-process CometBFT node alongside the app or runs the app as a
+// stand-alone ABCI server. It blocks until shutdown.
+func start(svrCtx *Context, clientCtx client.Context, appCreator types.AppCreator, withCmt bool, opts StartCmdOptions) error {
+    svrCfg, err := getAndValidateConfig(svrCtx)
+    if err != nil {
+        return err
+    }
+
+    app, appCleanupFn, err := startApp(svrCtx, appCreator, opts)
+    if err != nil {
+        return err
+    }
+    defer appCleanupFn()
+
+    metrics, err := startTelemetry(svrCfg)
+    if err != nil {
+        return err
+    }
+
+    emitServerInfoMetrics()
+
+    if !withCmt {
+        return startStandAlone(svrCtx, svrCfg, clientCtx, app, metrics, opts)
+    }
+    return startInProcess(svrCtx, svrCfg, clientCtx, app, metrics, opts)
+}
+
+// startStandAlone runs the ABCI application as a stand-alone server without
+// an in-process CometBFT node, optionally starting the gRPC and API servers,
+// and blocks until the error group finishes.
+func startStandAlone(svrCtx *Context, svrCfg serverconfig.Config, clientCtx client.Context, app types.Application, metrics *telemetry.Metrics, opts StartCmdOptions) error {
+    addr := svrCtx.Viper.GetString(flagAddress)
+    transport := svrCtx.Viper.GetString(flagTransport)
+    cmtApp := NewCometABCIWrapper(app)
+
+    svr, err := server.NewServer(addr, transport, cmtApp)
+    if err != nil {
+        return fmt.Errorf("error creating listener: %w", err)
+    }
+
+    svr.SetLogger(servercmtlog.CometLoggerWrapper{Logger: svrCtx.Logger.With("module", "abci-server")})
+
+    g, ctx := getCtx(svrCtx, false)
+
+    // Add the tx service to the gRPC router. We only need to register this
+    // service if API or gRPC is enabled, and avoid doing so in the general
+    // case, because it spawns a new local CometBFT RPC client.
+    if svrCfg.API.Enable || svrCfg.GRPC.Enable {
+        // create tendermint client
+        // assumes the rpc listen address is where tendermint has its rpc server
+        rpcclient, err := rpchttp.New(svrCtx.Config.RPC.ListenAddress, "/websocket")
+        if err != nil {
+            return err
+        }
+
+        // re-assign for making the client available below
+        // do not use := to avoid shadowing clientCtx
+        clientCtx = clientCtx.WithClient(rpcclient)
+
+        // use the provided clientCtx to register the services
+        app.RegisterTxService(clientCtx)
+        app.RegisterTendermintService(clientCtx)
+        app.RegisterNodeService(clientCtx, svrCfg)
+    }
+
+    grpcSrv, clientCtx, err := startGrpcServer(ctx, g, svrCfg.GRPC, clientCtx, svrCtx, app)
+    if err != nil {
+        return err
+    }
+
+    err = startAPIServer(ctx, g, svrCfg, clientCtx, svrCtx, app, svrCtx.Config.RootDir, grpcSrv, metrics)
+    if err != nil {
+        return err
+    }
+
+    if opts.PostSetupStandalone != nil {
+        if err := opts.PostSetupStandalone(svrCtx, clientCtx, ctx, g); err != nil {
+            return err
+        }
+    }
+
+    g.Go(func() error {
+        if err := svr.Start(); err != nil {
+            svrCtx.Logger.Error("failed to start out-of-process ABCI server", "err", err)
+            return err
+        }
+
+        // Wait for the calling process to be canceled or close the provided context,
+        // so we can gracefully stop the ABCI server.
+        <-ctx.Done()
+        svrCtx.Logger.Info("stopping the ABCI server...")
+        return svr.Stop()
+    })
+
+    return g.Wait()
+}
+
+// startInProcess starts the application together with an in-process CometBFT
+// node (unless grpc-only mode is requested), plus the optional gRPC and API
+// servers, and blocks until the error group finishes.
+func startInProcess(svrCtx *Context, svrCfg serverconfig.Config, clientCtx client.Context, app types.Application,
+    metrics *telemetry.Metrics, opts StartCmdOptions,
+) error {
+    cmtCfg := svrCtx.Config
+    gRPCOnly := svrCtx.Viper.GetBool(flagGRPCOnly)
+
+    g, ctx := getCtx(svrCtx, true)
+
+    if gRPCOnly {
+        // TODO: Generalize logic so that gRPC only is really in startStandAlone
+        svrCtx.Logger.Info("starting node in gRPC only mode; CometBFT is disabled")
+        svrCfg.GRPC.Enable = true
+    } else {
+        svrCtx.Logger.Info("starting node with ABCI CometBFT in-process")
+
+        tmNode, cleanupFn, err := startCmtNode(ctx, cmtCfg, app, svrCtx)
+        if err != nil {
+            return err
+        }
+        defer cleanupFn()
+
+        // Add the tx service to the gRPC router. We only need to register this
+        // service if API or gRPC is enabled, and avoid doing so in the general
+        // case, because it spawns a new local CometBFT RPC client.
+        if svrCfg.API.Enable || svrCfg.GRPC.Enable {
+            // Re-assign for making the client available below do not use := to avoid
+            // shadowing the clientCtx variable.
+            clientCtx = clientCtx.WithClient(local.New(tmNode))
+
+            app.RegisterTxService(clientCtx)
+            app.RegisterTendermintService(clientCtx)
+            app.RegisterNodeService(clientCtx, svrCfg)
+        }
+    }
+
+    grpcSrv, clientCtx, err := startGrpcServer(ctx, g, svrCfg.GRPC, clientCtx, svrCtx, app)
+    if err != nil {
+        return err
+    }
+
+    err = startAPIServer(ctx, g, svrCfg, clientCtx, svrCtx, app, cmtCfg.RootDir, grpcSrv, metrics)
+    if err != nil {
+        return err
+    }
+
+    if opts.PostSetup != nil {
+        if err := opts.PostSetup(svrCtx, clientCtx, ctx, g); err != nil {
+            return err
+        }
+    }
+
+    // wait for signal capture and gracefully return
+    // we are guaranteed to be waiting for the "ListenForQuitSignals" goroutine.
+    return g.Wait()
+}
+
+// TODO: Move nodeKey into being created within the function.
+// startCmtNode creates and starts an in-process CometBFT node and returns it
+// along with a cleanup function that stops the node if it is still running.
+func startCmtNode(
+    ctx context.Context,
+    cfg *cmtcfg.Config,
+    app types.Application,
+    svrCtx *Context,
+) (tmNode *node.Node, cleanupFn func(), err error) {
+    nodeKey, err := p2p.LoadOrGenNodeKey(cfg.NodeKeyFile())
+    if err != nil {
+        return nil, cleanupFn, err
+    }
+
+    cmtApp := NewCometABCIWrapper(app)
+    tmNode, err = node.NewNodeWithContext(
+        ctx,
+        cfg,
+        pvm.LoadOrGenFilePV(cfg.PrivValidatorKeyFile(), cfg.PrivValidatorStateFile()),
+        nodeKey,
+        proxy.NewLocalClientCreator(cmtApp),
+        getGenDocProvider(cfg),
+        cmtcfg.DefaultDBProvider,
+        node.DefaultMetricsProvider(cfg.Instrumentation),
+        servercmtlog.CometLoggerWrapper{Logger: svrCtx.Logger},
+    )
+    if err != nil {
+        return tmNode, cleanupFn, err
+    }
+
+    if err := tmNode.Start(); err != nil {
+        return tmNode, cleanupFn, err
+    }
+
+    cleanupFn = func() {
+        if tmNode != nil && tmNode.IsRunning() {
+            _ = tmNode.Stop()
+        }
+    }
+
+    return tmNode, cleanupFn, nil
+}
+
+// getAndValidateConfig reads the server configuration from Viper and runs its
+// basic validation, returning the config and any error encountered.
+func getAndValidateConfig(svrCtx *Context) (serverconfig.Config, error) {
+    config, err := serverconfig.GetConfig(svrCtx.Viper)
+    if err != nil {
+        return config, err
+    }
+
+    if err := config.ValidateBasic(); err != nil {
+        return config, err
+    }
+
+    return config, nil
+}
+
+// getGenDocProvider returns a function which returns the genesis doc from the
+// genesis file.
+func getGenDocProvider(cfg *cmtcfg.Config) func() (*cmttypes.GenesisDoc, error) {
+    return func() (*cmttypes.GenesisDoc, error) {
+        appGenesis, err := genutiltypes.AppGenesisFromFile(cfg.GenesisFile())
+        if err != nil {
+            return nil, err
+        }
+
+        return appGenesis.ToGenesisDoc()
+    }
+}
+
+// setupTraceWriter opens the KVStore trace writer if one is configured via the
+// trace-store flag, and returns a cleanup function that closes it. When the
+// flag is unset, traceWriter is nil and cleanup is a no-op.
+func setupTraceWriter(svrCtx *Context) (traceWriter io.WriteCloser, cleanup func(), err error) {
+    // clean up the traceWriter when the server is shutting down
+    cleanup = func() {}
+
+    traceWriterFile := svrCtx.Viper.GetString(flagTraceStore)
+    traceWriter, err = openTraceWriter(traceWriterFile)
+    if err != nil {
+        return traceWriter, cleanup, err
+    }
+
+    // if flagTraceStore is not used then traceWriter is nil
+    if traceWriter != nil {
+        cleanup = func() {
+            if err = traceWriter.Close(); err != nil {
+                svrCtx.Logger.Error("failed to close trace writer", "err", err)
+            }
+        }
+    }
+
+    return traceWriter, cleanup, nil
+}
+
+// startGrpcServer starts the gRPC server if it is enabled, wiring a gRPC
+// client into the returned client.Context for use by the gRPC gateway.
+// Returns a nil *grpc.Server when gRPC is disabled.
+func startGrpcServer(
+    ctx context.Context,
+    g *errgroup.Group,
+    config serverconfig.GRPCConfig,
+    clientCtx client.Context,
+    svrCtx *Context,
+    app types.Application,
+) (*grpc.Server, client.Context, error) {
+    if !config.Enable {
+        // return grpcServer as nil if gRPC is disabled
+        return nil, clientCtx, nil
+    }
+
+    _, _, err := net.SplitHostPort(config.Address)
+    if err != nil {
+        return nil, clientCtx, err
+    }
+
+    maxSendMsgSize := config.MaxSendMsgSize
+    if maxSendMsgSize == 0 {
+        maxSendMsgSize = serverconfig.DefaultGRPCMaxSendMsgSize
+    }
+
+    maxRecvMsgSize := config.MaxRecvMsgSize
+    if maxRecvMsgSize == 0 {
+        maxRecvMsgSize = serverconfig.DefaultGRPCMaxRecvMsgSize
+    }
+
+    // if gRPC is enabled, configure gRPC client for gRPC gateway
+    grpcClient, err := grpc.Dial( //nolint: staticcheck // ignore this line for this linter
+        config.Address,
+        grpc.WithTransportCredentials(insecure.NewCredentials()),
+        grpc.WithDefaultCallOptions(
+            grpc.ForceCodec(codec.NewProtoCodec(clientCtx.InterfaceRegistry).GRPCCodec()),
+            grpc.MaxCallRecvMsgSize(maxRecvMsgSize),
+            grpc.MaxCallSendMsgSize(maxSendMsgSize),
+        ),
+    )
+    if err != nil {
+        return nil, clientCtx, err
+    }
+
+    clientCtx = clientCtx.WithGRPCClient(grpcClient)
+    svrCtx.Logger.Debug("gRPC client assigned to client context", "target", config.Address)
+
+    grpcSrv, err := servergrpc.NewGRPCServer(clientCtx, app, config)
+    if err != nil {
+        return nil, clientCtx, err
+    }
+
+    // Start the gRPC server in a goroutine. Note, the provided ctx will ensure
+    // that the server is gracefully shut down.
+    g.Go(func() error {
+        return servergrpc.StartGRPCServer(ctx, svrCtx.Logger.With("module", "grpc-server"), config, grpcSrv)
+    })
+
+    return grpcSrv, clientCtx, nil
+}
+
+// startAPIServer starts the REST API server if it is enabled, registering the
+// application's API routes and optional telemetry before launching it on the
+// shared error group. Returns nil immediately when the API is disabled.
+func startAPIServer(
+    ctx context.Context,
+    g *errgroup.Group,
+    svrCfg serverconfig.Config,
+    clientCtx client.Context,
+    svrCtx *Context,
+    app types.Application,
+    home string,
+    grpcSrv *grpc.Server,
+    metrics *telemetry.Metrics,
+) error {
+    if !svrCfg.API.Enable {
+        return nil
+    }
+
+    clientCtx = clientCtx.WithHomeDir(home)
+
+    apiSrv := api.New(clientCtx, svrCtx.Logger.With("module", "api-server"), grpcSrv)
+    app.RegisterAPIRoutes(apiSrv, svrCfg.API)
+
+    if svrCfg.Telemetry.Enabled {
+        apiSrv.SetTelemetry(metrics)
+    }
+
+    g.Go(func() error {
+        return apiSrv.Start(ctx, svrCfg)
+    })
+
+    return nil
+}
+
+// startTelemetry creates the telemetry metrics sink from the server's
+// telemetry configuration.
+func startTelemetry(cfg serverconfig.Config) (*telemetry.Metrics, error) {
+    return telemetry.New(cfg.Telemetry)
+}
+
+// wrapCPUProfile starts CPU profiling, if enabled via the cpu-profile flag,
+// and executes the provided callbackFn, stopping the profiler and closing the
+// profile file when the callback returns.
+//
+// NOTE: We expect the caller to handle graceful shutdown and signal handling.
+func wrapCPUProfile(svrCtx *Context, callbackFn func() error) error {
+    if cpuProfile := svrCtx.Viper.GetString(flagCPUProfile); cpuProfile != "" {
+        f, err := os.Create(cpuProfile)
+        if err != nil {
+            return err
+        }
+
+        svrCtx.Logger.Info("starting CPU profiler", "profile", cpuProfile)
+        if err := pprof.StartCPUProfile(f); err != nil {
+            return err
+        }
+
+        defer func() {
+            svrCtx.Logger.Info("stopping CPU profiler", "profile", cpuProfile)
+            pprof.StopCPUProfile()
+
+            if err := f.Close(); err != nil {
+                svrCtx.Logger.Info("failed to close cpu-profile file", "profile", cpuProfile, "err", err.Error())
+            }
+        }()
+    }
+
+    return callbackFn()
+}
+
+// emitServerInfoMetrics emits server info related metrics using application telemetry.
+func emitServerInfoMetrics() {
+    var ls []metrics.Label
+
+    versionInfo := version.NewInfo()
+    if len(versionInfo.GoVersion) > 0 {
+        ls = append(ls, telemetry.NewLabel("go", versionInfo.GoVersion))
+    }
+    if len(versionInfo.CosmosSdkVersion) > 0 {
+        ls = append(ls, telemetry.NewLabel("version", versionInfo.CosmosSdkVersion))
+    }
+
+    // nothing to emit when no labels are available
+    if len(ls) == 0 {
+        return
+    }
+
+    telemetry.SetGaugeWithLabels([]string{"server", "info"}, 1, ls)
+}
+
+// getCtx returns an error group and cancellable context; quit signals trigger
+// the cancel function so the server can shut down gracefully.
+func getCtx(svrCtx *Context, block bool) (*errgroup.Group, context.Context) {
+    ctx, cancelFn := context.WithCancel(context.Background())
+    g, ctx := errgroup.WithContext(ctx)
+
+    // listen for quit signals so the calling parent process can gracefully exit
+    ListenForQuitSignals(g, block, cancelFn, svrCtx.Logger)
+
+    return g, ctx
+}
+
+// startApp opens the application database and trace writer, then constructs
+// the application, either via the regular appCreator or via testnetify when
+// the in-place testnet key is set. The returned cleanup function closes the
+// trace writer and the application.
+func startApp(svrCtx *Context, appCreator types.AppCreator, opts StartCmdOptions) (app types.Application, cleanupFn func(), err error) {
+    traceWriter, traceCleanupFn, err := setupTraceWriter(svrCtx)
+    if err != nil {
+        return app, traceCleanupFn, err
+    }
+
+    home := svrCtx.Config.RootDir
+    db, err := opts.DBOpener(home, GetAppDBBackend(svrCtx.Viper))
+    if err != nil {
+        return app, traceCleanupFn, err
+    }
+
+    if isTestnet, ok := svrCtx.Viper.Get(KeyIsTestnet).(bool); ok && isTestnet {
+        app, err = testnetify(svrCtx, appCreator, db, traceWriter)
+        if err != nil {
+            return app, traceCleanupFn, err
+        }
+    } else {
+        app = appCreator(svrCtx.Logger, db, traceWriter, svrCtx.Viper)
+    }
+
+    cleanupFn = func() {
+        traceCleanupFn()
+
+        if localErr := app.Close(); localErr != nil {
+            svrCtx.Logger.Error(localErr.Error())
+        }
+    }
+
+    return app, cleanupFn, nil
+}
+
+// InPlaceTestnetCreator utilizes the provided chainID and operatorAddress as well as the local private validator key to
+// control the network represented in the data folder. This is useful to create testnets nearly identical to your
+// mainnet environment.
+func InPlaceTestnetCreator(testnetAppCreator types.AppCreator) *cobra.Command {
+    opts := StartCmdOptions{}
+    if opts.DBOpener == nil {
+        opts.DBOpener = openDB
+    }
+
+    if opts.StartCommandHandler == nil {
+        opts.StartCommandHandler = start
+    }
+
+    cmd := &cobra.Command{
+        Use:   "in-place-testnet [newChainID] [newOperatorAddress]",
+        Short: "Create and start a testnet from current local state",
+        Long: `Create and start a testnet from current local state.
+After utilizing this command the network will start. If the network is stopped,
+the normal "start" command should be used. Re-using this command on state that
+has already been modified by this command could result in unexpected behavior.
+
+Additionally, the first block may take up to one minute to be committed, depending
+on how old the block is. For instance, if a snapshot was taken weeks ago and we want
+to turn this into a testnet, it is possible lots of pending state needs to be committed
+(expiring locks, etc.). It is recommended that you should wait for this block to be committed
+before stopping the daemon.
+
+If the --trigger-testnet-upgrade flag is set, the upgrade handler specified by the flag will be run
+on the first block of the testnet.
+
+Regardless of whether the flag is set or not, if any new stores are introduced in the daemon being run,
+those stores will be registered in order to prevent panics. Therefore, you only need to set the flag if
+you want to test the upgrade handler itself.
+`,
+        Example: "in-place-testnet localosmosis osmo12smx2wdlyttvyzvzg54y2vnqwq2qjateuf7thj",
+        Args:    cobra.ExactArgs(2),
+        RunE: func(cmd *cobra.Command, args []string) error {
+            serverCtx := GetServerContextFromCmd(cmd)
+            _, err := GetPruningOptionsFromFlags(serverCtx.Viper)
+            if err != nil {
+                return err
+            }
+
+            clientCtx, err := client.GetClientQueryContext(cmd)
+            if err != nil {
+                return err
+            }
+
+            withCMT, _ := cmd.Flags().GetBool(flagWithComet)
+            if !withCMT {
+                serverCtx.Logger.Info("starting ABCI without CometBFT")
+            }
+
+            newChainID := args[0]
+            newOperatorAddress := args[1]
+
+            skipConfirmation, _ := cmd.Flags().GetBool("skip-confirmation")
+            if !skipConfirmation {
+                // Confirmation prompt to prevent accidental modification of state.
+                reader := bufio.NewReader(os.Stdin)
+                fmt.Println("This operation will modify state in your data folder and cannot be undone. Do you want to continue? (y/n)")
+
+                text, _ := reader.ReadString('\n')
+                response := strings.TrimSpace(strings.ToLower(text))
+                if response != "y" && response != "yes" {
+                    fmt.Println("Operation canceled.")
+                    return nil
+                }
+            }
+
+            // Set testnet keys to be used by the application.
+            // This is done to prevent changes to existing start API.
+            serverCtx.Viper.Set(KeyIsTestnet, true)
+            serverCtx.Viper.Set(KeyNewChainID, newChainID)
+            serverCtx.Viper.Set(KeyNewOpAddr, newOperatorAddress)
+
+            err = wrapCPUProfile(serverCtx, func() error {
+                return opts.StartCommandHandler(serverCtx, clientCtx, testnetAppCreator, withCMT, opts)
+            })
+
+            serverCtx.Logger.Debug("received quit signal")
+
+            graceDuration, _ := cmd.Flags().GetDuration(FlagShutdownGrace)
+            if graceDuration > 0 {
+                serverCtx.Logger.Info("graceful shutdown start", FlagShutdownGrace, graceDuration)
+                <-time.After(graceDuration)
+                serverCtx.Logger.Info("graceful shutdown complete")
+            }
+
+            return err
+        },
+    }
+
+    addStartNodeFlags(cmd, opts)
+    cmd.Flags().String(KeyTriggerTestnetUpgrade, "", "If set (example: \"v21\"), triggers the v21 upgrade handler to run on the first block of the testnet")
+    cmd.Flags().Bool("skip-confirmation", false, "Skip the confirmation prompt")
+
+    return cmd
+}
+
+// testnetify modifies both state and blockStore, allowing the provided operator address and local validator key to control the network
+// that the state in the data folder represents. The chainID of the local genesis file is modified to match the provided chainID.
+func testnetify(ctx *Context, testnetAppCreator types.AppCreator, db dbm.DB, traceWriter io.WriteCloser) (types.Application, error) {
+    config := ctx.Config
+
+    newChainID, ok := ctx.Viper.Get(KeyNewChainID).(string)
+    if !ok {
+        return nil, fmt.Errorf("expected string for key %s", KeyNewChainID)
+    }
+
+    // Modify app genesis chain ID and save to genesis file.
+    genFilePath := config.GenesisFile()
+    appGen, err := genutiltypes.AppGenesisFromFile(genFilePath)
+    if err != nil {
+        return nil, err
+    }
+
+    appGen.ChainID = newChainID
+    if err := appGen.ValidateAndComplete(); err != nil {
+        return nil, err
+    }
+
+    if err := appGen.SaveAs(genFilePath); err != nil {
+        return nil, err
+    }
+
+    // Regenerate addrbook.json to prevent peers on old network from causing error logs.
+    addrBookPath := filepath.Join(config.RootDir, "config", "addrbook.json")
+    if err := os.Remove(addrBookPath); err != nil && !os.IsNotExist(err) {
+        return nil, fmt.Errorf("failed to remove existing addrbook.json: %w", err)
+    }
+
+    emptyAddrBook := []byte("{}")
+    if err := os.WriteFile(addrBookPath, emptyAddrBook, 0o600); err != nil {
+        return nil, fmt.Errorf("failed to create empty addrbook.json: %w", err)
+    }
+
+    // Load the comet genesis doc provider.
+    genDocProvider := node.DefaultGenesisDocProviderFunc(config)
+
+    // Initialize blockStore and stateDB.
+    blockStoreDB, err := cmtcfg.DefaultDBProvider(&cmtcfg.DBContext{ID: "blockstore", Config: config})
+    if err != nil {
+        return nil, err
+    }
+    blockStore := store.NewBlockStore(blockStoreDB)
+
+    stateDB, err := cmtcfg.DefaultDBProvider(&cmtcfg.DBContext{ID: "state", Config: config})
+    if err != nil {
+        return nil, err
+    }
+
+    defer blockStore.Close()
+    defer stateDB.Close()
+
+    privValidator := pvm.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
+    userPubKey, err := privValidator.GetPubKey()
+    if err != nil {
+        return nil, err
+    }
+    validatorAddress := userPubKey.Address()
+
+    stateStore := sm.NewStore(stateDB, sm.StoreOptions{
+        DiscardABCIResponses: config.Storage.DiscardABCIResponses,
+    })
+
+    state, genDoc, err := node.LoadStateFromDBOrGenesisDocProvider(stateDB, genDocProvider)
+    if err != nil {
+        return nil, err
+    }
+
+    ctx.Viper.Set(KeyNewValAddr, validatorAddress)
+    ctx.Viper.Set(KeyUserPubKey, userPubKey)
+    testnetApp := testnetAppCreator(ctx.Logger, db, traceWriter, ctx.Viper)
+
+    // We need to create a temporary proxyApp to get the initial state of the application.
+    // Depending on how the node was stopped, the application height can differ from the blockStore height.
+    // This height difference changes how we go about modifying the state.
+    cmtApp := NewCometABCIWrapper(testnetApp)
+    _, context := getCtx(ctx, true)
+    clientCreator := proxy.NewLocalClientCreator(cmtApp)
+    metrics := node.DefaultMetricsProvider(cmtcfg.DefaultConfig().Instrumentation)
+    _, _, _, _, proxyMetrics, _, _ := metrics(genDoc.ChainID)
+    proxyApp := proxy.NewAppConns(clientCreator, proxyMetrics)
+    if err := proxyApp.Start(); err != nil {
+        return nil, fmt.Errorf("error starting proxy app connections: %w", err)
+    }
+
+    res, err := proxyApp.Query().Info(context, proxy.RequestInfo)
+    if err != nil {
+        return nil, fmt.Errorf("error calling Info: %w", err)
+    }
+
+    err = proxyApp.Stop()
+    if err != nil {
+        return nil, err
+    }
+
+    appHash := res.LastBlockAppHash
+    appHeight := res.LastBlockHeight
+
+    var block *cmttypes.Block
+    switch {
+    case appHeight == blockStore.Height():
+        block = blockStore.LoadBlock(blockStore.Height())
+        // If the state's last blockstore height does not match the app and blockstore height, we likely stopped with the halt height flag.
+        if state.LastBlockHeight != appHeight {
+            state.LastBlockHeight = appHeight
+            block.AppHash = appHash
+            state.AppHash = appHash
+        } else {
+            // Node was likely stopped via SIGTERM, delete the next block's seen commit
+            err := blockStoreDB.Delete(fmt.Appendf(nil, "SC:%v", blockStore.Height()+1))
+            if err != nil {
+                return nil, err
+            }
+        }
+    case blockStore.Height() > state.LastBlockHeight:
+        // This state usually occurs when we gracefully stop the node.
+        err = blockStore.DeleteLatestBlock()
+        if err != nil {
+            return nil, err
+        }
+        block = blockStore.LoadBlock(blockStore.Height())
+    default:
+        // If there is any other state, we just load the block
+        block = blockStore.LoadBlock(blockStore.Height())
+    }
+
+    block.ChainID = newChainID
+    state.ChainID = newChainID
+
+    block.LastBlockID = state.LastBlockID
+    block.LastCommit.BlockID = state.LastBlockID
+
+    // Create a vote from our validator
+    vote := cmttypes.Vote{
+        Type:             cmtproto.PrecommitType,
+        Height:           state.LastBlockHeight,
+        Round:            0,
+        BlockID:          state.LastBlockID,
+        Timestamp:        time.Now(),
+        ValidatorAddress: validatorAddress,
+        ValidatorIndex:   0,
+        Signature:        []byte{},
+    }
+
+    // Sign the vote, and copy the proto changes from the act of signing to the vote itself
+    voteProto := vote.ToProto()
+    err = privValidator.SignVote(newChainID, voteProto)
+    if err != nil {
+        return nil, err
+    }
+
+    vote.Signature = voteProto.Signature
+    vote.Timestamp = voteProto.Timestamp
+
+    // Modify the block's lastCommit to be signed only by our validator
+    block.LastCommit.Signatures[0].ValidatorAddress = validatorAddress
+    block.LastCommit.Signatures[0].Signature = vote.Signature
+    block.LastCommit.Signatures = []cmttypes.CommitSig{block.LastCommit.Signatures[0]}
+
+    // Load the seenCommit of the lastBlockHeight and modify it to be signed from our validator
+    seenCommit := blockStore.LoadSeenCommit(state.LastBlockHeight)
+    seenCommit.BlockID = state.LastBlockID
+    seenCommit.Round = vote.Round
+    seenCommit.Signatures[0].Signature = vote.Signature
+    seenCommit.Signatures[0].ValidatorAddress = validatorAddress
+    seenCommit.Signatures[0].Timestamp = vote.Timestamp
+    seenCommit.Signatures = []cmttypes.CommitSig{seenCommit.Signatures[0]}
+    err = blockStore.SaveSeenCommit(state.LastBlockHeight, seenCommit)
+    if err != nil {
+        return nil, err
+    }
+
+    // Create ValidatorSet struct containing just our validator.
+    newVal := &cmttypes.Validator{
+        Address:     validatorAddress,
+        PubKey:      userPubKey,
+        VotingPower: 900000000000000,
+    }
+    newValSet := &cmttypes.ValidatorSet{
+        Validators: []*cmttypes.Validator{newVal},
+        Proposer:   newVal,
+    }
+
+    // Replace all valSets in state to be the valSet with just our validator.
+    state.Validators = newValSet
+    state.LastValidators = newValSet
+    state.NextValidators = newValSet
+    state.LastHeightValidatorsChanged = blockStore.Height()
+
+    err = stateStore.Save(state)
+    if err != nil {
+        return nil, err
+    }
+
+    // Create a ValidatorsInfo struct to store in stateDB.
+    valSet, err := state.Validators.ToProto()
+    if err != nil {
+        return nil, err
+    }
+    valInfo := &cmtstate.ValidatorsInfo{
+        ValidatorSet:      valSet,
+        LastHeightChanged: state.LastBlockHeight,
+    }
+    buf, err := valInfo.Marshal()
+    if err != nil {
+        return nil, err
+    }
+
+    // Modify Validators stateDB entry.
+    err = stateDB.Set(fmt.Appendf(nil, "validatorsKey:%v", blockStore.Height()), buf)
+    if err != nil {
+        return nil, err
+    }
+
+    // Modify LastValidators stateDB entry.
+    err = stateDB.Set(fmt.Appendf(nil, "validatorsKey:%v", blockStore.Height()-1), buf)
+    if err != nil {
+        return nil, err
+    }
+
+    // Modify NextValidators stateDB entry.
+    err = stateDB.Set(fmt.Appendf(nil, "validatorsKey:%v", blockStore.Height()+1), buf)
+    if err != nil {
+        return nil, err
+    }
+
+    // Since we modified the chainID, we set the new genesisDoc in the stateDB.
+    b, err := cmtjson.Marshal(genDoc)
+    if err != nil {
+        return nil, err
+    }
+    if err := stateDB.SetSync([]byte("genesisDoc"), b); err != nil {
+        return nil, err
+    }
+
+    return testnetApp, err
+}
+
+// addStartNodeFlags should be added to any CLI commands that start the network.
+func addStartNodeFlags(cmd *cobra.Command, opts StartCmdOptions) {
+ cmd.Flags().Bool(flagWithComet, true, "Run abci app embedded in-process with CometBFT")
+
+cmd.Flags().String(flagAddress, "tcp://127.0.0.1:26658", "Listen address")
+
+cmd.Flags().String(flagTransport, "socket", "Transport protocol: socket, grpc")
+
+cmd.Flags().String(flagTraceStore, "", "Enable KVStore tracing to an output file")
+
+cmd.Flags().String(FlagMinGasPrices, "", "Minimum gas prices to accept for transactions; Any fee in a tx must meet this minimum (e.g. 0.01photino;0.0001stake)")
+
+cmd.Flags().Uint64(FlagQueryGasLimit, 0, "Maximum gas a Rest/Grpc query can consume. Blank and 0 imply unbounded.")
+
+cmd.Flags().IntSlice(FlagUnsafeSkipUpgrades, []int{
+}, "Skip a set of upgrade heights to continue the old binary")
+
+cmd.Flags().Uint64(FlagHaltHeight, 0, "Block height at which to gracefully halt the chain and shutdown the node")
+
+cmd.Flags().Uint64(FlagHaltTime, 0, "Minimum block time (in Unix seconds)
+
+at which to gracefully halt the chain and shutdown the node")
+
+cmd.Flags().Bool(FlagInterBlockCache, true, "Enable inter-block caching")
+
+cmd.Flags().String(flagCPUProfile, "", "Enable CPU profiling and write to the provided file")
+
+cmd.Flags().Bool(FlagTrace, false, "Provide full stack traces for errors in ABCI Log")
+
+cmd.Flags().String(FlagPruning, pruningtypes.PruningOptionDefault, "Pruning strategy (default|nothing|everything|custom)")
+
+cmd.Flags().Uint64(FlagPruningKeepRecent, 0, "Number of recent heights to keep on disk (ignored if pruning is not 'custom')")
+
+cmd.Flags().Uint64(FlagPruningInterval, 0, "Height interval at which pruned heights are removed from disk (ignored if pruning is not 'custom')")
+
+cmd.Flags().Uint(FlagInvCheckPeriod, 0, "Assert registered invariants every N blocks")
+
+cmd.Flags().Uint64(FlagMinRetainBlocks, 0, "Minimum block height offset during ABCI commit to prune CometBFT blocks")
+
+cmd.Flags().Bool(FlagAPIEnable, false, "Define if the API server should be enabled")
+
+cmd.Flags().Bool(FlagAPISwagger, false, "Define if swagger documentation should automatically be registered (Note: the API must also be enabled)")
+
+cmd.Flags().String(FlagAPIAddress, serverconfig.DefaultAPIAddress, "the API server address to listen on")
+
+cmd.Flags().Uint(FlagAPIMaxOpenConnections, 1000, "Define the number of maximum open connections")
+
+cmd.Flags().Uint(FlagRPCReadTimeout, 10, "Define the CometBFT RPC read timeout (in seconds)")
+
+cmd.Flags().Uint(FlagRPCWriteTimeout, 0, "Define the CometBFT RPC write timeout (in seconds)")
+
+cmd.Flags().Uint(FlagRPCMaxBodyBytes, 1000000, "Define the CometBFT maximum request body (in bytes)")
+
+cmd.Flags().Bool(FlagAPIEnableUnsafeCORS, false, "Define if CORS should be enabled (unsafe - use it at your own risk)")
+
+cmd.Flags().Bool(flagGRPCOnly, false, "Start the node in gRPC query only mode (no CometBFT process is started)")
+
+cmd.Flags().Bool(flagGRPCEnable, true, "Define if the gRPC server should be enabled")
+
+cmd.Flags().String(flagGRPCAddress, serverconfig.DefaultGRPCAddress, "the gRPC server address to listen on")
+
+cmd.Flags().Bool(flagGRPCWebEnable, true, "Define if the gRPC-Web server should be enabled. (Note: gRPC must also be enabled)")
+
+cmd.Flags().Uint64(FlagStateSyncSnapshotInterval, 0, "State sync snapshot interval")
+
+cmd.Flags().Uint32(FlagStateSyncSnapshotKeepRecent, 2, "State sync snapshot to keep")
+
+cmd.Flags().Bool(FlagDisableIAVLFastNode, false, "Disable fast node for IAVL tree")
+
+cmd.Flags().Int(FlagMempoolMaxTxs, mempool.DefaultMaxTx, "Sets MaxTx value for the app-side mempool")
+
+cmd.Flags().Duration(FlagShutdownGrace, 0*time.Second, "On Shutdown, duration to wait for resource clean up")
+
+ // support old flags name for backwards compatibility
+ cmd.Flags().SetNormalizeFunc(func(f *pflag.FlagSet, name string)
+
+pflag.NormalizedName {
+ if name == "with-tendermint" {
+ name = flagWithComet
+}
+
+return pflag.NormalizedName(name)
+})
+
+ // add support for all CometBFT-specific command line options
+ cmtcmd.AddNodeFlags(cmd)
+ if opts.AddFlags != nil {
+ opts.AddFlags(cmd)
+}
+}
+```
+
+The CometBFT node can be created with `app` because the latter satisfies the [`abci.Application` interface](https://github.com/cometbft/cometbft/blob/v0.37.0/abci/types/application.go#L9-L35) (given that `app` extends [`baseapp`](/sdk/v0.53/learn/advanced/baseapp)). As part of the `node.New` method, CometBFT makes sure that the height of the application (i.e. number of blocks since genesis) is equal to the height of the CometBFT node. The difference between these two heights should always be negative or zero. If it is strictly negative, `node.New` will replay blocks until the height of the application reaches the height of the CometBFT node. Finally, if the height of the application is `0`, the CometBFT node will call [`InitChain`](/sdk/v0.53/learn/advanced/baseapp#initchain) on the application to initialize the state from the genesis file.
+
+Once the CometBFT node is instantiated and in sync with the application, the node can be started:
+
+```go expandable
+package server
+
+import (
+
+ "bufio"
+ "context"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "path/filepath"
+ "runtime/pprof"
+ "strings"
+ "time"
+ "github.com/cometbft/cometbft/abci/server"
+ cmtcmd "github.com/cometbft/cometbft/cmd/cometbft/commands"
+ cmtcfg "github.com/cometbft/cometbft/config"
+ cmtjson "github.com/cometbft/cometbft/libs/json"
+ "github.com/cometbft/cometbft/node"
+ "github.com/cometbft/cometbft/p2p"
+ pvm "github.com/cometbft/cometbft/privval"
+ cmtstate "github.com/cometbft/cometbft/proto/tendermint/state"
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ "github.com/cometbft/cometbft/proxy"
+ rpchttp "github.com/cometbft/cometbft/rpc/client/http"
+ "github.com/cometbft/cometbft/rpc/client/local"
+ sm "github.com/cometbft/cometbft/state"
+ "github.com/cometbft/cometbft/store"
+ cmttypes "github.com/cometbft/cometbft/types"
+ dbm "github.com/cosmos/cosmos-db"
+ "github.com/hashicorp/go-metrics"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+ "golang.org/x/sync/errgroup"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+
+ pruningtypes "cosmossdk.io/store/pruning/types"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/server/api"
+ serverconfig "github.com/cosmos/cosmos-sdk/server/config"
+ servergrpc "github.com/cosmos/cosmos-sdk/server/grpc"
+ servercmtlog "github.com/cosmos/cosmos-sdk/server/log"
+ "github.com/cosmos/cosmos-sdk/server/types"
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ "github.com/cosmos/cosmos-sdk/types/mempool"
+ "github.com/cosmos/cosmos-sdk/version"
+ genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
+)
+
+// Flag names and Viper keys used by the start command and the in-place
+// testnet tooling.
+const (
+    // CometBFT full-node start flags
+    flagWithComet     = "with-comet"
+    flagAddress       = "address"
+    flagTransport     = "transport"
+    flagTraceStore    = "trace-store"
+    flagCPUProfile    = "cpu-profile"
+    FlagMinGasPrices  = "minimum-gas-prices"
+    FlagQueryGasLimit = "query-gas-limit"
+    FlagHaltHeight    = "halt-height"
+    FlagHaltTime      = "halt-time"
+    FlagInterBlockCache    = "inter-block-cache"
+    FlagUnsafeSkipUpgrades = "unsafe-skip-upgrades"
+    FlagTrace              = "trace"
+    FlagInvCheckPeriod     = "inv-check-period"
+
+    FlagPruning             = "pruning"
+    FlagPruningKeepRecent   = "pruning-keep-recent"
+    FlagPruningInterval     = "pruning-interval"
+    FlagIndexEvents         = "index-events"
+    FlagMinRetainBlocks     = "min-retain-blocks"
+    FlagIAVLCacheSize       = "iavl-cache-size"
+    FlagDisableIAVLFastNode = "iavl-disable-fastnode"
+    FlagIAVLSyncPruning     = "iavl-sync-pruning"
+    FlagShutdownGrace       = "shutdown-grace"
+
+    // state sync-related flags
+    FlagStateSyncSnapshotInterval   = "state-sync.snapshot-interval"
+    FlagStateSyncSnapshotKeepRecent = "state-sync.snapshot-keep-recent"
+
+    // api-related flags
+    FlagAPIEnable             = "api.enable"
+    FlagAPISwagger            = "api.swagger"
+    FlagAPIAddress            = "api.address"
+    FlagAPIMaxOpenConnections = "api.max-open-connections"
+    FlagRPCReadTimeout        = "api.rpc-read-timeout"
+    FlagRPCWriteTimeout       = "api.rpc-write-timeout"
+    FlagRPCMaxBodyBytes       = "api.rpc-max-body-bytes"
+    FlagAPIEnableUnsafeCORS   = "api.enabled-unsafe-cors"
+
+    // gRPC-related flags
+    flagGRPCOnly            = "grpc-only"
+    flagGRPCEnable          = "grpc.enable"
+    flagGRPCAddress         = "grpc.address"
+    flagGRPCWebEnable       = "grpc-web.enable"
+    flagGRPCSkipCheckHeader = "grpc.skip-check-header"
+
+    // mempool flags
+    FlagMempoolMaxTxs = "mempool.max-txs"
+
+    // testnet keys
+    KeyIsTestnet             = "is-testnet"
+    KeyNewChainID            = "new-chain-ID"
+    KeyNewOpAddr             = "new-operator-addr"
+    KeyNewValAddr            = "new-validator-addr"
+    KeyUserPubKey            = "user-pub-key"
+    KeyTriggerTestnetUpgrade = "trigger-testnet-upgrade"
+)
+
+// StartCmdOptions defines options that can be customized in `StartCmdWithOptions`.
+type StartCmdOptions struct {
+    // DBOpener can be used to customize db opening, for example customize db options or support different db backends,
+    // default to the builtin db opener.
+    DBOpener func(rootDir string, backendType dbm.BackendType) (dbm.DB, error)
+    // PostSetup can be used to setup extra services under the same cancellable context,
+    // it's not called in stand-alone mode, only for in-process mode.
+    PostSetup func(svrCtx *Context, clientCtx client.Context, ctx context.Context, g *errgroup.Group) error
+    // PostSetupStandalone can be used to setup extra services under the same cancellable context,
+    // it's only called in stand-alone mode.
+    PostSetupStandalone func(svrCtx *Context, clientCtx client.Context, ctx context.Context, g *errgroup.Group) error
+    // AddFlags adds custom flags to the start cmd.
+    AddFlags func(cmd *cobra.Command)
+    // StartCommandHandler can be used to customize the start command handler.
+    StartCommandHandler func(svrCtx *Context, clientCtx client.Context, appCreator types.AppCreator, inProcessConsensus bool, opts StartCmdOptions) error
+}
+
+// StartCmd runs the service passed in, either stand-alone or in-process with
+// CometBFT, using the default StartCmdOptions.
+func StartCmd(appCreator types.AppCreator, defaultNodeHome string) *cobra.Command {
+    return StartCmdWithOptions(appCreator, defaultNodeHome, StartCmdOptions{})
+}
+
+// StartCmdWithOptions runs the service passed in, either stand-alone or in-process with
+// CometBFT.
+func StartCmdWithOptions(appCreator types.AppCreator, defaultNodeHome string, opts StartCmdOptions) *cobra.Command {
+    if opts.DBOpener == nil {
+        opts.DBOpener = openDB
+    }
+
+    if opts.StartCommandHandler == nil {
+        opts.StartCommandHandler = start
+    }
+
+    cmd := &cobra.Command{
+        Use:   "start",
+        Short: "Run the full node",
+        Long: `Run the full node application with CometBFT in or out of process. By
+default, the application will run with CometBFT in process.
+
+Pruning options can be provided via the '--pruning' flag or alternatively with '--pruning-keep-recent', and
+'pruning-interval' together.
+
+For '--pruning' the options are as follows:
+
+default: the last 362880 states are kept, pruning at 10 block intervals
+nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node)
+everything: 2 latest states will be kept; pruning at 10 block intervals.
+custom: allow pruning options to be manually specified through 'pruning-keep-recent', and 'pruning-interval'
+
+Node halting configurations exist in the form of two flags: '--halt-height' and '--halt-time'. During
+the ABCI Commit phase, the node will check if the current block height is greater than or equal to
+the halt-height or if the current block time is greater than or equal to the halt-time. If so, the
+node will attempt to gracefully shutdown and the block will not be committed. In addition, the node
+will not be able to commit subsequent blocks.
+
+For profiling and benchmarking purposes, CPU profiling can be enabled via the '--cpu-profile' flag
+which accepts a path for the resulting pprof file.
+
+The node may be started in a 'query only' mode where only the gRPC and JSON HTTP
+API services are enabled via the 'grpc-only' flag. In this mode, CometBFT is
+bypassed and can be used when legacy queries are needed after an on-chain upgrade
+is performed. Note, when enabled, gRPC will also be automatically enabled.
+`,
+        RunE: func(cmd *cobra.Command, _ []string) error {
+            serverCtx := GetServerContextFromCmd(cmd)
+
+            _, err := GetPruningOptionsFromFlags(serverCtx.Viper)
+            if err != nil {
+                return err
+            }
+
+            clientCtx, err := client.GetClientQueryContext(cmd)
+            if err != nil {
+                return err
+            }
+
+            withCMT, _ := cmd.Flags().GetBool(flagWithComet)
+            if !withCMT {
+                serverCtx.Logger.Info("starting ABCI without CometBFT")
+            }
+
+            err = wrapCPUProfile(serverCtx, func() error {
+                return opts.StartCommandHandler(serverCtx, clientCtx, appCreator, withCMT, opts)
+            })
+
+            serverCtx.Logger.Debug("received quit signal")
+
+            // Optionally wait a grace period before returning so that extra
+            // services have time to clean up.
+            graceDuration, _ := cmd.Flags().GetDuration(FlagShutdownGrace)
+            if graceDuration > 0 {
+                serverCtx.Logger.Info("graceful shutdown start", FlagShutdownGrace, graceDuration)
+                <-time.After(graceDuration)
+                serverCtx.Logger.Info("graceful shutdown complete")
+            }
+
+            return err
+        },
+    }
+
+    cmd.Flags().String(flags.FlagHome, defaultNodeHome, "The application home directory")
+    addStartNodeFlags(cmd, opts)
+    return cmd
+}
+
+// start loads and validates the server config, creates the application and
+// telemetry, then dispatches to the stand-alone or in-process start path.
+func start(svrCtx *Context, clientCtx client.Context, appCreator types.AppCreator, withCmt bool, opts StartCmdOptions) error {
+    svrCfg, err := getAndValidateConfig(svrCtx)
+    if err != nil {
+        return err
+    }
+
+    app, appCleanupFn, err := startApp(svrCtx, appCreator, opts)
+    if err != nil {
+        return err
+    }
+    defer appCleanupFn()
+
+    metrics, err := startTelemetry(svrCfg)
+    if err != nil {
+        return err
+    }
+
+    emitServerInfoMetrics()
+
+    if !withCmt {
+        return startStandAlone(svrCtx, svrCfg, clientCtx, app, metrics, opts)
+    }
+    return startInProcess(svrCtx, svrCfg, clientCtx, app, metrics, opts)
+}
+
+// startStandAlone runs the application as an out-of-process ABCI server,
+// optionally exposing the gRPC and API services under the same errgroup.
+func startStandAlone(svrCtx *Context, svrCfg serverconfig.Config, clientCtx client.Context, app types.Application, metrics *telemetry.Metrics, opts StartCmdOptions) error {
+    addr := svrCtx.Viper.GetString(flagAddress)
+    transport := svrCtx.Viper.GetString(flagTransport)
+
+    cmtApp := NewCometABCIWrapper(app)
+    svr, err := server.NewServer(addr, transport, cmtApp)
+    if err != nil {
+        return fmt.Errorf("error creating listener: %w", err)
+    }
+
+    svr.SetLogger(servercmtlog.CometLoggerWrapper{Logger: svrCtx.Logger.With("module", "abci-server")})
+
+    g, ctx := getCtx(svrCtx, false)
+
+    // Add the tx service to the gRPC router. We only need to register this
+    // service if API or gRPC is enabled, and avoid doing so in the general
+    // case, because it spawns a new local CometBFT RPC client.
+    if svrCfg.API.Enable || svrCfg.GRPC.Enable {
+        // create tendermint client
+        // assumes the rpc listen address is where tendermint has its rpc server
+        rpcclient, err := rpchttp.New(svrCtx.Config.RPC.ListenAddress, "/websocket")
+        if err != nil {
+            return err
+        }
+
+        // re-assign for making the client available below
+        // do not use := to avoid shadowing clientCtx
+        clientCtx = clientCtx.WithClient(rpcclient)
+
+        // use the provided clientCtx to register the services
+        app.RegisterTxService(clientCtx)
+        app.RegisterTendermintService(clientCtx)
+        app.RegisterNodeService(clientCtx, svrCfg)
+    }
+
+    grpcSrv, clientCtx, err := startGrpcServer(ctx, g, svrCfg.GRPC, clientCtx, svrCtx, app)
+    if err != nil {
+        return err
+    }
+
+    err = startAPIServer(ctx, g, svrCfg, clientCtx, svrCtx, app, svrCtx.Config.RootDir, grpcSrv, metrics)
+    if err != nil {
+        return err
+    }
+
+    if opts.PostSetupStandalone != nil {
+        if err := opts.PostSetupStandalone(svrCtx, clientCtx, ctx, g); err != nil {
+            return err
+        }
+    }
+
+    g.Go(func() error {
+        if err := svr.Start(); err != nil {
+            svrCtx.Logger.Error("failed to start out-of-process ABCI server", "err", err)
+            return err
+        }
+
+        // Wait for the calling process to be canceled or close the provided context,
+        // so we can gracefully stop the ABCI server.
+        <-ctx.Done()
+        svrCtx.Logger.Info("stopping the ABCI server...")
+        return svr.Stop()
+    })
+
+    return g.Wait()
+}
+
+// startInProcess runs the application with an embedded in-process CometBFT
+// node, or with CometBFT disabled when the grpc-only flag is set.
+func startInProcess(svrCtx *Context, svrCfg serverconfig.Config, clientCtx client.Context, app types.Application,
+    metrics *telemetry.Metrics, opts StartCmdOptions,
+) error {
+    cmtCfg := svrCtx.Config
+    gRPCOnly := svrCtx.Viper.GetBool(flagGRPCOnly)
+
+    g, ctx := getCtx(svrCtx, true)
+
+    if gRPCOnly {
+        // TODO: Generalize logic so that gRPC only is really in startStandAlone
+        svrCtx.Logger.Info("starting node in gRPC only mode; CometBFT is disabled")
+        svrCfg.GRPC.Enable = true
+    } else {
+        svrCtx.Logger.Info("starting node with ABCI CometBFT in-process")
+        tmNode, cleanupFn, err := startCmtNode(ctx, cmtCfg, app, svrCtx)
+        if err != nil {
+            return err
+        }
+        defer cleanupFn()
+
+        // Add the tx service to the gRPC router. We only need to register this
+        // service if API or gRPC is enabled, and avoid doing so in the general
+        // case, because it spawns a new local CometBFT RPC client.
+        if svrCfg.API.Enable || svrCfg.GRPC.Enable {
+            // Re-assign for making the client available below do not use := to avoid
+            // shadowing the clientCtx variable.
+            clientCtx = clientCtx.WithClient(local.New(tmNode))
+
+            app.RegisterTxService(clientCtx)
+            app.RegisterTendermintService(clientCtx)
+            app.RegisterNodeService(clientCtx, svrCfg)
+        }
+    }
+
+    grpcSrv, clientCtx, err := startGrpcServer(ctx, g, svrCfg.GRPC, clientCtx, svrCtx, app)
+    if err != nil {
+        return err
+    }
+
+    err = startAPIServer(ctx, g, svrCfg, clientCtx, svrCtx, app, cmtCfg.RootDir, grpcSrv, metrics)
+    if err != nil {
+        return err
+    }
+
+    if opts.PostSetup != nil {
+        if err := opts.PostSetup(svrCtx, clientCtx, ctx, g); err != nil {
+            return err
+        }
+    }
+
+    // wait for signal capture and gracefully return
+    // we are guaranteed to be waiting for the "ListenForQuitSignals" goroutine.
+    return g.Wait()
+}
+
+// startCmtNode creates and starts an in-process CometBFT node wired to the
+// given ABCI application; the returned cleanup function stops the node.
+//
+// TODO: Move nodeKey into being created within the function.
+func startCmtNode(
+    ctx context.Context,
+    cfg *cmtcfg.Config,
+    app types.Application,
+    svrCtx *Context,
+) (tmNode *node.Node, cleanupFn func(), err error) {
+    nodeKey, err := p2p.LoadOrGenNodeKey(cfg.NodeKeyFile())
+    if err != nil {
+        return nil, cleanupFn, err
+    }
+
+    cmtApp := NewCometABCIWrapper(app)
+    tmNode, err = node.NewNodeWithContext(
+        ctx,
+        cfg,
+        pvm.LoadOrGenFilePV(cfg.PrivValidatorKeyFile(), cfg.PrivValidatorStateFile()),
+        nodeKey,
+        proxy.NewLocalClientCreator(cmtApp),
+        getGenDocProvider(cfg),
+        cmtcfg.DefaultDBProvider,
+        node.DefaultMetricsProvider(cfg.Instrumentation),
+        servercmtlog.CometLoggerWrapper{Logger: svrCtx.Logger},
+    )
+    if err != nil {
+        return tmNode, cleanupFn, err
+    }
+
+    if err := tmNode.Start(); err != nil {
+        return tmNode, cleanupFn, err
+    }
+
+    cleanupFn = func() {
+        if tmNode != nil && tmNode.IsRunning() {
+            _ = tmNode.Stop()
+        }
+    }
+
+    return tmNode, cleanupFn, nil
+}
+
+// getAndValidateConfig reads the server config from Viper and validates it.
+func getAndValidateConfig(svrCtx *Context) (serverconfig.Config, error) {
+    config, err := serverconfig.GetConfig(svrCtx.Viper)
+    if err != nil {
+        return config, err
+    }
+
+    if err := config.ValidateBasic(); err != nil {
+        return config, err
+    }
+    return config, nil
+}
+
+// getGenDocProvider returns a function which returns the genesis doc from the
+// genesis file.
+func getGenDocProvider(cfg *cmtcfg.Config) func() (*cmttypes.GenesisDoc, error) {
+    return func() (*cmttypes.GenesisDoc, error) {
+        appGenesis, err := genutiltypes.AppGenesisFromFile(cfg.GenesisFile())
+        if err != nil {
+            return nil, err
+        }
+        return appGenesis.ToGenesisDoc()
+    }
+}
+
+// setupTraceWriter opens the KVStore trace writer when the trace-store flag is
+// set; the returned cleanup function closes the writer (and is a no-op
+// otherwise).
+func setupTraceWriter(svrCtx *Context) (traceWriter io.WriteCloser, cleanup func(), err error) {
+    // clean up the traceWriter when the server is shutting down
+    cleanup = func() {}
+
+    traceWriterFile := svrCtx.Viper.GetString(flagTraceStore)
+    traceWriter, err = openTraceWriter(traceWriterFile)
+    if err != nil {
+        return traceWriter, cleanup, err
+    }
+
+    // if flagTraceStore is not used then traceWriter is nil
+    if traceWriter != nil {
+        cleanup = func() {
+            if err = traceWriter.Close(); err != nil {
+                svrCtx.Logger.Error("failed to close trace writer", "err", err)
+            }
+        }
+    }
+
+    return traceWriter, cleanup, nil
+}
+
+// startGrpcServer starts the gRPC server if enabled and wires a gRPC client
+// (used by the gRPC gateway) into the returned client context. When gRPC is
+// disabled, the returned *grpc.Server is nil.
+func startGrpcServer(
+    ctx context.Context,
+    g *errgroup.Group,
+    config serverconfig.GRPCConfig,
+    clientCtx client.Context,
+    svrCtx *Context,
+    app types.Application,
+) (*grpc.Server, client.Context, error) {
+    if !config.Enable {
+        // return grpcServer as nil if gRPC is disabled
+        return nil, clientCtx, nil
+    }
+
+    _, _, err := net.SplitHostPort(config.Address)
+    if err != nil {
+        return nil, clientCtx, err
+    }
+
+    maxSendMsgSize := config.MaxSendMsgSize
+    if maxSendMsgSize == 0 {
+        maxSendMsgSize = serverconfig.DefaultGRPCMaxSendMsgSize
+    }
+
+    maxRecvMsgSize := config.MaxRecvMsgSize
+    if maxRecvMsgSize == 0 {
+        maxRecvMsgSize = serverconfig.DefaultGRPCMaxRecvMsgSize
+    }
+
+    // if gRPC is enabled, configure gRPC client for gRPC gateway
+    grpcClient, err := grpc.Dial( //nolint: staticcheck // ignore this line for this linter
+        config.Address,
+        grpc.WithTransportCredentials(insecure.NewCredentials()),
+        grpc.WithDefaultCallOptions(
+            grpc.ForceCodec(codec.NewProtoCodec(clientCtx.InterfaceRegistry).GRPCCodec()),
+            grpc.MaxCallRecvMsgSize(maxRecvMsgSize),
+            grpc.MaxCallSendMsgSize(maxSendMsgSize),
+        ),
+    )
+    if err != nil {
+        return nil, clientCtx, err
+    }
+
+    clientCtx = clientCtx.WithGRPCClient(grpcClient)
+    svrCtx.Logger.Debug("gRPC client assigned to client context", "target", config.Address)
+
+    grpcSrv, err := servergrpc.NewGRPCServer(clientCtx, app, config)
+    if err != nil {
+        return nil, clientCtx, err
+    }
+
+    // Start the gRPC server in a goroutine. Note, the provided ctx will ensure
+    // that the server is gracefully shut down.
+    g.Go(func() error {
+        return servergrpc.StartGRPCServer(ctx, svrCtx.Logger.With("module", "grpc-server"), config, grpcSrv)
+    })
+
+    return grpcSrv, clientCtx, nil
+}
+
+// startAPIServer starts the REST API server if enabled, registering the
+// application's API routes and, when telemetry is enabled, the metrics sink.
+func startAPIServer(
+    ctx context.Context,
+    g *errgroup.Group,
+    svrCfg serverconfig.Config,
+    clientCtx client.Context,
+    svrCtx *Context,
+    app types.Application,
+    home string,
+    grpcSrv *grpc.Server,
+    metrics *telemetry.Metrics,
+) error {
+    if !svrCfg.API.Enable {
+        return nil
+    }
+
+    clientCtx = clientCtx.WithHomeDir(home)
+
+    apiSrv := api.New(clientCtx, svrCtx.Logger.With("module", "api-server"), grpcSrv)
+    app.RegisterAPIRoutes(apiSrv, svrCfg.API)
+
+    if svrCfg.Telemetry.Enabled {
+        apiSrv.SetTelemetry(metrics)
+    }
+
+    g.Go(func() error {
+        return apiSrv.Start(ctx, svrCfg)
+    })
+
+    return nil
+}
+
+// startTelemetry constructs the application telemetry instance from the
+// server config's Telemetry section.
+func startTelemetry(cfg serverconfig.Config) (*telemetry.Metrics, error) {
+ return telemetry.New(cfg.Telemetry)
+}
+
+// wrapCPUProfile starts CPU profiling, if enabled via the cpu-profile flag,
+// executes the provided callbackFn, and stops the profiler (flushing and
+// closing the profile file) when the callback returns.
+//
+// NOTE: We expect the caller to handle graceful shutdown and signal handling.
+func wrapCPUProfile(svrCtx *Context, callbackFn func() error) error {
+    if cpuProfile := svrCtx.Viper.GetString(flagCPUProfile); cpuProfile != "" {
+        f, err := os.Create(cpuProfile)
+        if err != nil {
+            return err
+        }
+
+        svrCtx.Logger.Info("starting CPU profiler", "profile", cpuProfile)
+        if err := pprof.StartCPUProfile(f); err != nil {
+            return err
+        }
+
+        defer func() {
+            svrCtx.Logger.Info("stopping CPU profiler", "profile", cpuProfile)
+            pprof.StopCPUProfile()
+
+            if err := f.Close(); err != nil {
+                svrCtx.Logger.Info("failed to close cpu-profile file", "profile", cpuProfile, "err", err.Error())
+            }
+        }()
+    }
+
+    return callbackFn()
+}
+
+// emitServerInfoMetrics emits server info related metrics using application telemetry.
+func emitServerInfoMetrics() {
+    var ls []metrics.Label
+
+    versionInfo := version.NewInfo()
+    if len(versionInfo.GoVersion) > 0 {
+        ls = append(ls, telemetry.NewLabel("go", versionInfo.GoVersion))
+    }
+    if len(versionInfo.CosmosSdkVersion) > 0 {
+        ls = append(ls, telemetry.NewLabel("version", versionInfo.CosmosSdkVersion))
+    }
+
+    if len(ls) == 0 {
+        return
+    }
+
+    telemetry.SetGaugeWithLabels([]string{"server", "info"}, 1, ls)
+}
+
+// getCtx returns an errgroup bound to a cancellable context; the cancel
+// function and the block argument are forwarded to ListenForQuitSignals.
+func getCtx(svrCtx *Context, block bool) (*errgroup.Group, context.Context) {
+    ctx, cancelFn := context.WithCancel(context.Background())
+    g, ctx := errgroup.WithContext(ctx)
+
+    // listen for quit signals so the calling parent process can gracefully exit
+    ListenForQuitSignals(g, block, cancelFn, svrCtx.Logger)
+    return g, ctx
+}
+
+// startApp opens the trace writer and database, then creates the application
+// (or a testnetified application when the is-testnet Viper key is set). The
+// returned cleanup function closes the trace writer and the application.
+func startApp(svrCtx *Context, appCreator types.AppCreator, opts StartCmdOptions) (app types.Application, cleanupFn func(), err error) {
+    traceWriter, traceCleanupFn, err := setupTraceWriter(svrCtx)
+    if err != nil {
+        return app, traceCleanupFn, err
+    }
+
+    home := svrCtx.Config.RootDir
+    db, err := opts.DBOpener(home, GetAppDBBackend(svrCtx.Viper))
+    if err != nil {
+        return app, traceCleanupFn, err
+    }
+
+    if isTestnet, ok := svrCtx.Viper.Get(KeyIsTestnet).(bool); ok && isTestnet {
+        app, err = testnetify(svrCtx, appCreator, db, traceWriter)
+        if err != nil {
+            return app, traceCleanupFn, err
+        }
+    } else {
+        app = appCreator(svrCtx.Logger, db, traceWriter, svrCtx.Viper)
+    }
+
+    cleanupFn = func() {
+        traceCleanupFn()
+        if localErr := app.Close(); localErr != nil {
+            svrCtx.Logger.Error(localErr.Error())
+        }
+    }
+
+    return app, cleanupFn, nil
+}
+
+// InPlaceTestnetCreator utilizes the provided chainID and operatorAddress as well as the local private validator key to
+// control the network represented in the data folder. This is useful to create testnets nearly identical to your
+// mainnet environment.
+func InPlaceTestnetCreator(testnetAppCreator types.AppCreator) *cobra.Command {
+    opts := StartCmdOptions{}
+    if opts.DBOpener == nil {
+        opts.DBOpener = openDB
+    }
+
+    if opts.StartCommandHandler == nil {
+        opts.StartCommandHandler = start
+    }
+
+    cmd := &cobra.Command{
+        Use:   "in-place-testnet [newChainID] [newOperatorAddress]",
+        Short: "Create and start a testnet from current local state",
+        Long: `Create and start a testnet from current local state.
+After utilizing this command the network will start. If the network is stopped,
+the normal "start" command should be used. Re-using this command on state that
+has already been modified by this command could result in unexpected behavior.
+
+Additionally, the first block may take up to one minute to be committed, depending
+on how old the block is. For instance, if a snapshot was taken weeks ago and we want
+to turn this into a testnet, it is possible lots of pending state needs to be committed
+(expiring locks, etc.). It is recommended that you should wait for this block to be committed
+before stopping the daemon.
+
+If the --trigger-testnet-upgrade flag is set, the upgrade handler specified by the flag will be run
+on the first block of the testnet.
+
+Regardless of whether the flag is set or not, if any new stores are introduced in the daemon being run,
+those stores will be registered in order to prevent panics. Therefore, you only need to set the flag if
+you want to test the upgrade handler itself.
+`,
+        Example: "in-place-testnet localosmosis osmo12smx2wdlyttvyzvzg54y2vnqwq2qjateuf7thj",
+        Args:    cobra.ExactArgs(2),
+        RunE: func(cmd *cobra.Command, args []string) error {
+            serverCtx := GetServerContextFromCmd(cmd)
+
+            _, err := GetPruningOptionsFromFlags(serverCtx.Viper)
+            if err != nil {
+                return err
+            }
+
+            clientCtx, err := client.GetClientQueryContext(cmd)
+            if err != nil {
+                return err
+            }
+
+            withCMT, _ := cmd.Flags().GetBool(flagWithComet)
+            if !withCMT {
+                serverCtx.Logger.Info("starting ABCI without CometBFT")
+            }
+
+            newChainID := args[0]
+            newOperatorAddress := args[1]
+
+            skipConfirmation, _ := cmd.Flags().GetBool("skip-confirmation")
+            if !skipConfirmation {
+                // Confirmation prompt to prevent accidental modification of state.
+                reader := bufio.NewReader(os.Stdin)
+                fmt.Println("This operation will modify state in your data folder and cannot be undone. Do you want to continue? (y/n)")
+
+                text, _ := reader.ReadString('\n')
+                response := strings.TrimSpace(strings.ToLower(text))
+                if response != "y" && response != "yes" {
+                    fmt.Println("Operation canceled.")
+                    return nil
+                }
+            }
+
+            // Set testnet keys to be used by the application.
+            // This is done to prevent changes to existing start API.
+            serverCtx.Viper.Set(KeyIsTestnet, true)
+            serverCtx.Viper.Set(KeyNewChainID, newChainID)
+            serverCtx.Viper.Set(KeyNewOpAddr, newOperatorAddress)
+
+            err = wrapCPUProfile(serverCtx, func() error {
+                return opts.StartCommandHandler(serverCtx, clientCtx, testnetAppCreator, withCMT, opts)
+            })
+
+            serverCtx.Logger.Debug("received quit signal")
+
+            graceDuration, _ := cmd.Flags().GetDuration(FlagShutdownGrace)
+            if graceDuration > 0 {
+                serverCtx.Logger.Info("graceful shutdown start", FlagShutdownGrace, graceDuration)
+                <-time.After(graceDuration)
+                serverCtx.Logger.Info("graceful shutdown complete")
+            }
+
+            return err
+        },
+    }
+
+    addStartNodeFlags(cmd, opts)
+    cmd.Flags().String(KeyTriggerTestnetUpgrade, "", "If set (example: \"v21\"), triggers the v21 upgrade handler to run on the first block of the testnet")
+    cmd.Flags().Bool("skip-confirmation", false, "Skip the confirmation prompt")
+    return cmd
+}
+
+// testnetify modifies both state and blockStore, allowing the provided operator address and local validator key to control the network
+// that the state in the data folder represents. The chainID of the local genesis file is modified to match the provided chainID.
+func testnetify(ctx *Context, testnetAppCreator types.AppCreator, db dbm.DB, traceWriter io.WriteCloser) (types.Application, error) {
+    config := ctx.Config
+
+    newChainID, ok := ctx.Viper.Get(KeyNewChainID).(string)
+    if !ok {
+        return nil, fmt.Errorf("expected string for key %s", KeyNewChainID)
+    }
+
+    // Modify app genesis chain ID and save to genesis file.
+    genFilePath := config.GenesisFile()
+    appGen, err := genutiltypes.AppGenesisFromFile(genFilePath)
+    if err != nil {
+        return nil, err
+    }
+
+    appGen.ChainID = newChainID
+    if err := appGen.ValidateAndComplete(); err != nil {
+        return nil, err
+    }
+
+    if err := appGen.SaveAs(genFilePath); err != nil {
+        return nil, err
+    }
+
+    // Regenerate addrbook.json to prevent peers on old network from causing error logs.
+    addrBookPath := filepath.Join(config.RootDir, "config", "addrbook.json")
+    if err := os.Remove(addrBookPath); err != nil && !os.IsNotExist(err) {
+        return nil, fmt.Errorf("failed to remove existing addrbook.json: %w", err)
+    }
+
+    emptyAddrBook := []byte("{}")
+    if err := os.WriteFile(addrBookPath, emptyAddrBook, 0o600); err != nil {
+        return nil, fmt.Errorf("failed to create empty addrbook.json: %w", err)
+    }
+
+    // Load the comet genesis doc provider.
+    genDocProvider := node.DefaultGenesisDocProviderFunc(config)
+
+    // Initialize blockStore and stateDB.
+    blockStoreDB, err := cmtcfg.DefaultDBProvider(&cmtcfg.DBContext{ID: "blockstore", Config: config})
+    if err != nil {
+        return nil, err
+    }
+    blockStore := store.NewBlockStore(blockStoreDB)
+
+    stateDB, err := cmtcfg.DefaultDBProvider(&cmtcfg.DBContext{ID: "state", Config: config})
+    if err != nil {
+        return nil, err
+    }
+
+    defer blockStore.Close()
+    defer stateDB.Close()
+
+    privValidator := pvm.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
+    userPubKey, err := privValidator.GetPubKey()
+    if err != nil {
+        return nil, err
+    }
+    validatorAddress := userPubKey.Address()
+
+    stateStore := sm.NewStore(stateDB, sm.StoreOptions{
+        DiscardABCIResponses: config.Storage.DiscardABCIResponses,
+    })
+
+    state, genDoc, err := node.LoadStateFromDBOrGenesisDocProvider(stateDB, genDocProvider)
+    if err != nil {
+        return nil, err
+    }
+
+    ctx.Viper.Set(KeyNewValAddr, validatorAddress)
+    ctx.Viper.Set(KeyUserPubKey, userPubKey)
+    testnetApp := testnetAppCreator(ctx.Logger, db, traceWriter, ctx.Viper)
+
+    // We need to create a temporary proxyApp to get the initial state of the application.
+    // Depending on how the node was stopped, the application height can differ from the blockStore height.
+    // This height difference changes how we go about modifying the state.
+    cmtApp := NewCometABCIWrapper(testnetApp)
+    _, context := getCtx(ctx, true)
+    clientCreator := proxy.NewLocalClientCreator(cmtApp)
+    metrics := node.DefaultMetricsProvider(cmtcfg.DefaultConfig().Instrumentation)
+    _, _, _, _, proxyMetrics, _, _ := metrics(genDoc.ChainID)
+    proxyApp := proxy.NewAppConns(clientCreator, proxyMetrics)
+    if err := proxyApp.Start(); err != nil {
+        return nil, fmt.Errorf("error starting proxy app connections: %w", err)
+    }
+
+    res, err := proxyApp.Query().Info(context, proxy.RequestInfo)
+    if err != nil {
+        return nil, fmt.Errorf("error calling Info: %w", err)
+    }
+
+    err = proxyApp.Stop()
+    if err != nil {
+        return nil, err
+    }
+
+    appHash := res.LastBlockAppHash
+    appHeight := res.LastBlockHeight
+
+    var block *cmttypes.Block
+    switch {
+    case appHeight == blockStore.Height():
+        block = blockStore.LoadBlock(blockStore.Height())
+        // If the state's last blockstore height does not match the app and blockstore height, we likely stopped with the halt height flag.
+        if state.LastBlockHeight != appHeight {
+            state.LastBlockHeight = appHeight
+            block.AppHash = appHash
+            state.AppHash = appHash
+        } else {
+            // Node was likely stopped via SIGTERM, delete the next block's seen commit
+            err := blockStoreDB.Delete(fmt.Appendf(nil, "SC:%v", blockStore.Height()+1))
+            if err != nil {
+                return nil, err
+            }
+        }
+    case blockStore.Height() > state.LastBlockHeight:
+        // This state usually occurs when we gracefully stop the node.
+        err = blockStore.DeleteLatestBlock()
+        if err != nil {
+            return nil, err
+        }
+        block = blockStore.LoadBlock(blockStore.Height())
+    default:
+        // If there is any other state, we just load the block
+        block = blockStore.LoadBlock(blockStore.Height())
+    }
+
+    block.ChainID = newChainID
+    state.ChainID = newChainID
+
+    block.LastBlockID = state.LastBlockID
+    block.LastCommit.BlockID = state.LastBlockID
+
+    // Create a vote from our validator
+    vote := cmttypes.Vote{
+        Type:             cmtproto.PrecommitType,
+        Height:           state.LastBlockHeight,
+        Round:            0,
+        BlockID:          state.LastBlockID,
+        Timestamp:        time.Now(),
+        ValidatorAddress: validatorAddress,
+        ValidatorIndex:   0,
+        Signature:        []byte{},
+    }
+
+    // Sign the vote, and copy the proto changes from the act of signing to the vote itself
+    voteProto := vote.ToProto()
+    err = privValidator.SignVote(newChainID, voteProto)
+    if err != nil {
+        return nil, err
+    }
+
+    vote.Signature = voteProto.Signature
+    vote.Timestamp = voteProto.Timestamp
+
+    // Modify the block's lastCommit to be signed only by our validator
+    block.LastCommit.Signatures[0].ValidatorAddress = validatorAddress
+    block.LastCommit.Signatures[0].Signature = vote.Signature
+    block.LastCommit.Signatures = []cmttypes.CommitSig{block.LastCommit.Signatures[0]}
+
+    // Load the seenCommit of the lastBlockHeight and modify it to be signed from our validator
+    seenCommit := blockStore.LoadSeenCommit(state.LastBlockHeight)
+    seenCommit.BlockID = state.LastBlockID
+    seenCommit.Round = vote.Round
+    seenCommit.Signatures[0].Signature = vote.Signature
+    seenCommit.Signatures[0].ValidatorAddress = validatorAddress
+    seenCommit.Signatures[0].Timestamp = vote.Timestamp
+    seenCommit.Signatures = []cmttypes.CommitSig{seenCommit.Signatures[0]}
+    err = blockStore.SaveSeenCommit(state.LastBlockHeight, seenCommit)
+    if err != nil {
+        return nil, err
+    }
+
+    // Create ValidatorSet struct containing just our validator.
+    newVal := &cmttypes.Validator{
+        Address:     validatorAddress,
+        PubKey:      userPubKey,
+        VotingPower: 900000000000000,
+    }
+    newValSet := &cmttypes.ValidatorSet{
+        Validators: []*cmttypes.Validator{newVal},
+        Proposer:   newVal,
+    }
+
+    // Replace all valSets in state to be the valSet with just our validator.
+    state.Validators = newValSet
+    state.LastValidators = newValSet
+    state.NextValidators = newValSet
+    state.LastHeightValidatorsChanged = blockStore.Height()
+
+    err = stateStore.Save(state)
+    if err != nil {
+        return nil, err
+    }
+
+    // Create a ValidatorsInfo struct to store in stateDB.
+    valSet, err := state.Validators.ToProto()
+    if err != nil {
+        return nil, err
+    }
+    valInfo := &cmtstate.ValidatorsInfo{
+        ValidatorSet:      valSet,
+        LastHeightChanged: state.LastBlockHeight,
+    }
+    buf, err := valInfo.Marshal()
+    if err != nil {
+        return nil, err
+    }
+
+    // Modify Validators stateDB entry.
+    err = stateDB.Set(fmt.Appendf(nil, "validatorsKey:%v", blockStore.Height()), buf)
+    if err != nil {
+        return nil, err
+    }
+
+    // Modify LastValidators stateDB entry.
+    err = stateDB.Set(fmt.Appendf(nil, "validatorsKey:%v", blockStore.Height()-1), buf)
+    if err != nil {
+        return nil, err
+    }
+
+    // Modify NextValidators stateDB entry.
+    err = stateDB.Set(fmt.Appendf(nil, "validatorsKey:%v", blockStore.Height()+1), buf)
+    if err != nil {
+        return nil, err
+    }
+
+    // Since we modified the chainID, we set the new genesisDoc in the stateDB.
+    b, err := cmtjson.Marshal(genDoc)
+    if err != nil {
+        return nil, err
+    }
+
+    if err := stateDB.SetSync([]byte("genesisDoc"), b); err != nil {
+        return nil, err
+    }
+
+    return testnetApp, err
+}
+
+// addStartNodeFlags should be added to any CLI commands that start the network.
+func addStartNodeFlags(cmd *cobra.Command, opts StartCmdOptions) {
+ cmd.Flags().Bool(flagWithComet, true, "Run abci app embedded in-process with CometBFT")
+
+cmd.Flags().String(flagAddress, "tcp://127.0.0.1:26658", "Listen address")
+
+cmd.Flags().String(flagTransport, "socket", "Transport protocol: socket, grpc")
+
+cmd.Flags().String(flagTraceStore, "", "Enable KVStore tracing to an output file")
+
+cmd.Flags().String(FlagMinGasPrices, "", "Minimum gas prices to accept for transactions; Any fee in a tx must meet this minimum (e.g. 0.01photino;0.0001stake)")
+
+cmd.Flags().Uint64(FlagQueryGasLimit, 0, "Maximum gas a Rest/Grpc query can consume. Blank and 0 imply unbounded.")
+
+cmd.Flags().IntSlice(FlagUnsafeSkipUpgrades, []int{
+}, "Skip a set of upgrade heights to continue the old binary")
+
+cmd.Flags().Uint64(FlagHaltHeight, 0, "Block height at which to gracefully halt the chain and shutdown the node")
+
+cmd.Flags().Uint64(FlagHaltTime, 0, "Minimum block time (in Unix seconds)
+
+at which to gracefully halt the chain and shutdown the node")
+
+cmd.Flags().Bool(FlagInterBlockCache, true, "Enable inter-block caching")
+
+cmd.Flags().String(flagCPUProfile, "", "Enable CPU profiling and write to the provided file")
+
+cmd.Flags().Bool(FlagTrace, false, "Provide full stack traces for errors in ABCI Log")
+
+cmd.Flags().String(FlagPruning, pruningtypes.PruningOptionDefault, "Pruning strategy (default|nothing|everything|custom)")
+
+cmd.Flags().Uint64(FlagPruningKeepRecent, 0, "Number of recent heights to keep on disk (ignored if pruning is not 'custom')")
+
+cmd.Flags().Uint64(FlagPruningInterval, 0, "Height interval at which pruned heights are removed from disk (ignored if pruning is not 'custom')")
+
+cmd.Flags().Uint(FlagInvCheckPeriod, 0, "Assert registered invariants every N blocks")
+
+cmd.Flags().Uint64(FlagMinRetainBlocks, 0, "Minimum block height offset during ABCI commit to prune CometBFT blocks")
+
+cmd.Flags().Bool(FlagAPIEnable, false, "Define if the API server should be enabled")
+
+cmd.Flags().Bool(FlagAPISwagger, false, "Define if swagger documentation should automatically be registered (Note: the API must also be enabled)")
+
+cmd.Flags().String(FlagAPIAddress, serverconfig.DefaultAPIAddress, "the API server address to listen on")
+
+cmd.Flags().Uint(FlagAPIMaxOpenConnections, 1000, "Define the number of maximum open connections")
+
+cmd.Flags().Uint(FlagRPCReadTimeout, 10, "Define the CometBFT RPC read timeout (in seconds)")
+
+cmd.Flags().Uint(FlagRPCWriteTimeout, 0, "Define the CometBFT RPC write timeout (in seconds)")
+
+cmd.Flags().Uint(FlagRPCMaxBodyBytes, 1000000, "Define the CometBFT maximum request body (in bytes)")
+
+cmd.Flags().Bool(FlagAPIEnableUnsafeCORS, false, "Define if CORS should be enabled (unsafe - use it at your own risk)")
+
+cmd.Flags().Bool(flagGRPCOnly, false, "Start the node in gRPC query only mode (no CometBFT process is started)")
+
+cmd.Flags().Bool(flagGRPCEnable, true, "Define if the gRPC server should be enabled")
+
+cmd.Flags().String(flagGRPCAddress, serverconfig.DefaultGRPCAddress, "the gRPC server address to listen on")
+
+cmd.Flags().Bool(flagGRPCWebEnable, true, "Define if the gRPC-Web server should be enabled. (Note: gRPC must also be enabled)")
+
+cmd.Flags().Uint64(FlagStateSyncSnapshotInterval, 0, "State sync snapshot interval")
+
+cmd.Flags().Uint32(FlagStateSyncSnapshotKeepRecent, 2, "State sync snapshot to keep")
+
+cmd.Flags().Bool(FlagDisableIAVLFastNode, false, "Disable fast node for IAVL tree")
+
+cmd.Flags().Int(FlagMempoolMaxTxs, mempool.DefaultMaxTx, "Sets MaxTx value for the app-side mempool")
+
+cmd.Flags().Duration(FlagShutdownGrace, 0*time.Second, "On Shutdown, duration to wait for resource clean up")
+
+ // support old flags name for backwards compatibility
+ cmd.Flags().SetNormalizeFunc(func(f *pflag.FlagSet, name string)
+
+pflag.NormalizedName {
+ if name == "with-tendermint" {
+ name = flagWithComet
+}
+
+return pflag.NormalizedName(name)
+})
+
+ // add support for all CometBFT-specific command line options
+ cmtcmd.AddNodeFlags(cmd)
+ if opts.AddFlags != nil {
+ opts.AddFlags(cmd)
+}
+}
+```
+
+Upon starting, the node will bootstrap its RPC and P2P server and start dialing peers. During handshake with its peers, if the node realizes they are ahead, it will query all the blocks sequentially in order to catch up. Then, it will wait for new block proposals and block signatures from validators in order to make progress.
+
+## Other commands
+
+To discover how to concretely run a node and interact with it, please refer to our [Running a Node, API and CLI](/sdk/v0.53/user/run-node/run-node) guide.
diff --git a/sdk/next/learn/advanced/ocap.mdx b/sdk/next/learn/advanced/ocap.mdx
new file mode 100644
index 000000000..133a041fb
--- /dev/null
+++ b/sdk/next/learn/advanced/ocap.mdx
@@ -0,0 +1,1098 @@
+---
+title: Object-Capability Model
+description: >-
+ When thinking about security, it is good to start with a specific threat
+ model. Our threat model is the following:
+---
+
+## Intro
+
+When thinking about security, it is good to start with a specific threat model. Our threat model is the following:
+
+> We assume that a thriving ecosystem of Cosmos SDK modules that are easy to compose into a blockchain application will contain faulty or malicious modules.
+
+The Cosmos SDK is designed to address this threat by being the
+foundation of an object capability system.
+
+> The structural properties of object capability systems favor
+> modularity in code design and ensure reliable encapsulation in
+> code implementation.
+>
+> These structural properties facilitate the analysis of some
+> security properties of an object-capability program or operating
+> system. Some of these — in particular, information flow properties
+> — can be analyzed at the level of object references and
+> connectivity, independent of any knowledge or analysis of the code
+> that determines the behavior of the objects.
+>
+> As a consequence, these security properties can be established
+> and maintained in the presence of new objects that contain unknown
+> and possibly malicious code.
+>
+> These structural properties stem from the two rules governing
+> access to existing objects:
+>
+> 1. An object A can send a message to B only if object A holds a
+> reference to B.
+> 2. An object A can obtain a reference to C only
+> if object A receives a message containing a reference to C. As a
+> consequence of these two rules, an object can obtain a reference
+> to another object only through a preexisting chain of references.
+> In short, "Only connectivity begets connectivity."
+
+For an introduction to object-capabilities, see this [Wikipedia article](https://en.wikipedia.org/wiki/Object-capability_model).
+
+## Ocaps in practice
+
+The idea is to only reveal what is necessary to get the work done.
+
+For example, the following code snippet violates the object capabilities
+principle:
+
+```go
+type AppAccount struct {...
+}
+ account := &AppAccount{
+ Address: pub.Address(),
+ Coins: sdk.Coins{
+ sdk.NewInt64Coin("ATM", 100)
+},
+}
+ sumValue := externalModule.ComputeSumValue(account)
+```
+
+The method `ComputeSumValue` implies a pure function, yet the implied
+capability of accepting a pointer value is the capability to modify that
+value. The preferred method signature should take a copy instead.
+
+```go
+sumValue := externalModule.ComputeSumValue(*account)
+```
+
+In the Cosmos SDK, you can see the application of this principle in simapp.
+
+```go expandable
+//go:build app_v1
+
+package simapp
+
+import (
+
+ "encoding/json"
+ "fmt"
+ "io"
+ "maps"
+ "os"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ dbm "github.com/cosmos/cosmos-db"
+ "github.com/cosmos/gogoproto/proto"
+ "github.com/spf13/cast"
+
+ autocliv1 "cosmossdk.io/api/cosmos/autocli/v1"
+ reflectionv1 "cosmossdk.io/api/cosmos/reflection/v1"
+ "cosmossdk.io/client/v2/autocli"
+ clienthelpers "cosmossdk.io/client/v2/helpers"
+ "cosmossdk.io/core/appmodule"
+ "cosmossdk.io/log"
+ storetypes "cosmossdk.io/store/types"
+ "cosmossdk.io/x/circuit"
+ circuitkeeper "cosmossdk.io/x/circuit/keeper"
+ circuittypes "cosmossdk.io/x/circuit/types"
+ "cosmossdk.io/x/evidence"
+ evidencekeeper "cosmossdk.io/x/evidence/keeper"
+ evidencetypes "cosmossdk.io/x/evidence/types"
+ "cosmossdk.io/x/feegrant"
+ feegrantkeeper "cosmossdk.io/x/feegrant/keeper"
+ feegrantmodule "cosmossdk.io/x/feegrant/module"
+ "cosmossdk.io/x/nft"
+ nftkeeper "cosmossdk.io/x/nft/keeper"
+ nftmodule "cosmossdk.io/x/nft/module"
+ "cosmossdk.io/x/tx/signing"
+ "cosmossdk.io/x/upgrade"
+ upgradekeeper "cosmossdk.io/x/upgrade/keeper"
+ upgradetypes "cosmossdk.io/x/upgrade/types"
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/client/grpc/cmtservice"
+ nodeservice "github.com/cosmos/cosmos-sdk/client/grpc/node"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/codec/address"
+ "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/runtime"
+ runtimeservices "github.com/cosmos/cosmos-sdk/runtime/services"
+ "github.com/cosmos/cosmos-sdk/server"
+ "github.com/cosmos/cosmos-sdk/server/api"
+ "github.com/cosmos/cosmos-sdk/server/config"
+ servertypes "github.com/cosmos/cosmos-sdk/server/types"
+ "github.com/cosmos/cosmos-sdk/std"
+ testdata_pulsar "github.com/cosmos/cosmos-sdk/testutil/testdata/testpb"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ "github.com/cosmos/cosmos-sdk/types/msgservice"
+ sigtypes "github.com/cosmos/cosmos-sdk/types/tx/signing"
+ "github.com/cosmos/cosmos-sdk/version"
+ "github.com/cosmos/cosmos-sdk/x/auth"
+ "github.com/cosmos/cosmos-sdk/x/auth/ante"
+ authcodec "github.com/cosmos/cosmos-sdk/x/auth/codec"
+ authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
+ "github.com/cosmos/cosmos-sdk/x/auth/posthandler"
+ authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation"
+ "github.com/cosmos/cosmos-sdk/x/auth/tx"
+ authtx "github.com/cosmos/cosmos-sdk/x/auth/tx"
+ txmodule "github.com/cosmos/cosmos-sdk/x/auth/tx/config"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ "github.com/cosmos/cosmos-sdk/x/auth/vesting"
+ vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
+ "github.com/cosmos/cosmos-sdk/x/authz"
+ authzkeeper "github.com/cosmos/cosmos-sdk/x/authz/keeper"
+ authzmodule "github.com/cosmos/cosmos-sdk/x/authz/module"
+ "github.com/cosmos/cosmos-sdk/x/bank"
+ bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ consensus "github.com/cosmos/cosmos-sdk/x/consensus"
+ consensusparamkeeper "github.com/cosmos/cosmos-sdk/x/consensus/keeper"
+ consensusparamtypes "github.com/cosmos/cosmos-sdk/x/consensus/types"
+ distr "github.com/cosmos/cosmos-sdk/x/distribution"
+ distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper"
+ distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
+ "github.com/cosmos/cosmos-sdk/x/epochs"
+ epochskeeper "github.com/cosmos/cosmos-sdk/x/epochs/keeper"
+ epochstypes "github.com/cosmos/cosmos-sdk/x/epochs/types"
+ "github.com/cosmos/cosmos-sdk/x/genutil"
+ genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
+ "github.com/cosmos/cosmos-sdk/x/gov"
+ govclient "github.com/cosmos/cosmos-sdk/x/gov/client"
+ govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ govv1beta1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1"
+ "github.com/cosmos/cosmos-sdk/x/group"
+ groupkeeper "github.com/cosmos/cosmos-sdk/x/group/keeper"
+ groupmodule "github.com/cosmos/cosmos-sdk/x/group/module"
+ "github.com/cosmos/cosmos-sdk/x/mint"
+ mintkeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper"
+ minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
+ "github.com/cosmos/cosmos-sdk/x/protocolpool"
+ protocolpoolkeeper "github.com/cosmos/cosmos-sdk/x/protocolpool/keeper"
+ protocolpooltypes "github.com/cosmos/cosmos-sdk/x/protocolpool/types"
+ "github.com/cosmos/cosmos-sdk/x/slashing"
+ slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper"
+ slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types"
+ "github.com/cosmos/cosmos-sdk/x/staking"
+ stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper"
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+)
+
+const appName = "SimApp"
+
+var (
+ // DefaultNodeHome default home directories for the application daemon
+ DefaultNodeHome string
+
+ // module account permissions
+ maccPerms = map[string][]string{
+ authtypes.FeeCollectorName: nil,
+ distrtypes.ModuleName: nil,
+ minttypes.ModuleName: {
+ authtypes.Minter
+},
+ stakingtypes.BondedPoolName: {
+ authtypes.Burner, authtypes.Staking
+},
+ stakingtypes.NotBondedPoolName: {
+ authtypes.Burner, authtypes.Staking
+},
+ govtypes.ModuleName: {
+ authtypes.Burner
+},
+ nft.ModuleName: nil,
+ protocolpooltypes.ModuleName: nil,
+ protocolpooltypes.ProtocolPoolEscrowAccount: nil
+}
+)
+
+var (
+ _ runtime.AppI = (*SimApp)(nil)
+ _ servertypes.Application = (*SimApp)(nil)
+)
+
+// SimApp extends an ABCI application, but with most of its parameters exported.
+// They are exported for convenience in creating helper functions, as object
+// capabilities aren't needed for testing.
+type SimApp struct {
+ *baseapp.BaseApp
+ legacyAmino *codec.LegacyAmino
+ appCodec codec.Codec
+ txConfig client.TxConfig
+ interfaceRegistry types.InterfaceRegistry
+
+ // keys to access the substores
+ keys map[string]*storetypes.KVStoreKey
+
+ // essential keepers
+ AccountKeeper authkeeper.AccountKeeper
+ BankKeeper bankkeeper.BaseKeeper
+ StakingKeeper *stakingkeeper.Keeper
+ SlashingKeeper slashingkeeper.Keeper
+ MintKeeper mintkeeper.Keeper
+ DistrKeeper distrkeeper.Keeper
+ GovKeeper govkeeper.Keeper
+ UpgradeKeeper *upgradekeeper.Keeper
+ EvidenceKeeper evidencekeeper.Keeper
+ ConsensusParamsKeeper consensusparamkeeper.Keeper
+ CircuitKeeper circuitkeeper.Keeper
+
+ // supplementary keepers
+ FeeGrantKeeper feegrantkeeper.Keeper
+ GroupKeeper groupkeeper.Keeper
+ AuthzKeeper authzkeeper.Keeper
+ NFTKeeper nftkeeper.Keeper
+ EpochsKeeper epochskeeper.Keeper
+ ProtocolPoolKeeper protocolpoolkeeper.Keeper
+
+ // the module manager
+ ModuleManager *module.Manager
+ BasicModuleManager module.BasicManager
+
+ // simulation manager
+ sm *module.SimulationManager
+
+ // module configurator
+ configurator module.Configurator
+}
+
+func init() {
+ var err error
+ DefaultNodeHome, err = clienthelpers.GetNodeHomeDirectory(".simapp")
+ if err != nil {
+ panic(err)
+}
+}
+
+// NewSimApp returns a reference to an initialized SimApp.
+func NewSimApp(
+ logger log.Logger,
+ db dbm.DB,
+ traceStore io.Writer,
+ loadLatest bool,
+ appOpts servertypes.AppOptions,
+ baseAppOptions ...func(*baseapp.BaseApp),
+) *SimApp {
+ interfaceRegistry, _ := types.NewInterfaceRegistryWithOptions(types.InterfaceRegistryOptions{
+ ProtoFiles: proto.HybridResolver,
+ SigningOptions: signing.Options{
+ AddressCodec: address.Bech32Codec{
+ Bech32Prefix: sdk.GetConfig().GetBech32AccountAddrPrefix(),
+},
+ ValidatorAddressCodec: address.Bech32Codec{
+ Bech32Prefix: sdk.GetConfig().GetBech32ValidatorAddrPrefix(),
+},
+},
+})
+ appCodec := codec.NewProtoCodec(interfaceRegistry)
+ legacyAmino := codec.NewLegacyAmino()
+ txConfig := tx.NewTxConfig(appCodec, tx.DefaultSignModes)
+ if err := interfaceRegistry.SigningContext().Validate(); err != nil {
+ panic(err)
+}
+
+std.RegisterLegacyAminoCodec(legacyAmino)
+
+std.RegisterInterfaces(interfaceRegistry)
+
+ // Below we could construct and set an application specific mempool and
+ // ABCI 1.0 PrepareProposal and ProcessProposal handlers. These defaults are
+ // already set in the SDK's BaseApp, this shows an example of how to override
+ // them.
+ //
+ // Example:
+ //
+ // bApp := baseapp.NewBaseApp(...)
+ // nonceMempool := mempool.NewSenderNonceMempool()
+ // abciPropHandler := NewDefaultProposalHandler(nonceMempool, bApp)
+ //
+ // bApp.SetMempool(nonceMempool)
+ // bApp.SetPrepareProposal(abciPropHandler.PrepareProposalHandler())
+ // bApp.SetProcessProposal(abciPropHandler.ProcessProposalHandler())
+ //
+ // Alternatively, you can construct BaseApp options, append those to
+ // baseAppOptions and pass them to NewBaseApp.
+ //
+ // Example:
+ //
+ // prepareOpt = func(app *baseapp.BaseApp) {
+ // abciPropHandler := baseapp.NewDefaultProposalHandler(nonceMempool, app)
+ // app.SetPrepareProposal(abciPropHandler.PrepareProposalHandler())
+ //
+}
+ // baseAppOptions = append(baseAppOptions, prepareOpt)
+
+ // create and set dummy vote extension handler
+ voteExtOp := func(bApp *baseapp.BaseApp) {
+ voteExtHandler := NewVoteExtensionHandler()
+
+voteExtHandler.SetHandlers(bApp)
+}
+
+baseAppOptions = append(baseAppOptions, voteExtOp, baseapp.SetOptimisticExecution())
+ bApp := baseapp.NewBaseApp(appName, logger, db, txConfig.TxDecoder(), baseAppOptions...)
+
+bApp.SetCommitMultiStoreTracer(traceStore)
+
+bApp.SetVersion(version.Version)
+
+bApp.SetInterfaceRegistry(interfaceRegistry)
+
+bApp.SetTxEncoder(txConfig.TxEncoder())
+ keys := storetypes.NewKVStoreKeys(
+ authtypes.StoreKey,
+ banktypes.StoreKey,
+ stakingtypes.StoreKey,
+ minttypes.StoreKey,
+ distrtypes.StoreKey,
+ slashingtypes.StoreKey,
+ govtypes.StoreKey,
+ consensusparamtypes.StoreKey,
+ upgradetypes.StoreKey,
+ feegrant.StoreKey,
+ evidencetypes.StoreKey,
+ circuittypes.StoreKey,
+ authzkeeper.StoreKey,
+ nftkeeper.StoreKey,
+ group.StoreKey,
+ epochstypes.StoreKey,
+ protocolpooltypes.StoreKey,
+ )
+
+ // register streaming services
+ if err := bApp.RegisterStreamingServices(appOpts, keys); err != nil {
+ panic(err)
+}
+ app := &SimApp{
+ BaseApp: bApp,
+ legacyAmino: legacyAmino,
+ appCodec: appCodec,
+ txConfig: txConfig,
+ interfaceRegistry: interfaceRegistry,
+ keys: keys,
+}
+
+ // set the BaseApp's parameter store
+ app.ConsensusParamsKeeper = consensusparamkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[consensusparamtypes.StoreKey]),
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ runtime.EventService{
+},
+ )
+
+bApp.SetParamStore(app.ConsensusParamsKeeper.ParamsStore)
+
+ // add keepers
+ app.AccountKeeper = authkeeper.NewAccountKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[authtypes.StoreKey]),
+ authtypes.ProtoBaseAccount,
+ maccPerms,
+ authcodec.NewBech32Codec(sdk.Bech32MainPrefix),
+ sdk.Bech32MainPrefix,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ )
+
+app.BankKeeper = bankkeeper.NewBaseKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[banktypes.StoreKey]),
+ app.AccountKeeper,
+ BlockedAddresses(),
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ logger,
+ )
+
+ // optional: enable sign mode textual by overwriting the default tx config (after setting the bank keeper)
+ enabledSignModes := append(tx.DefaultSignModes, sigtypes.SignMode_SIGN_MODE_TEXTUAL)
+ txConfigOpts := tx.ConfigOptions{
+ EnabledSignModes: enabledSignModes,
+ TextualCoinMetadataQueryFn: txmodule.NewBankKeeperCoinMetadataQueryFn(app.BankKeeper),
+}
+
+txConfig, err := tx.NewTxConfigWithOptions(
+ appCodec,
+ txConfigOpts,
+ )
+ if err != nil {
+ panic(err)
+}
+
+app.txConfig = txConfig
+
+ app.StakingKeeper = stakingkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[stakingtypes.StoreKey]),
+ app.AccountKeeper,
+ app.BankKeeper,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ authcodec.NewBech32Codec(sdk.Bech32PrefixValAddr),
+ authcodec.NewBech32Codec(sdk.Bech32PrefixConsAddr),
+ )
+
+app.MintKeeper = mintkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[minttypes.StoreKey]),
+ app.StakingKeeper,
+ app.AccountKeeper,
+ app.BankKeeper,
+ authtypes.FeeCollectorName,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ )
+
+app.ProtocolPoolKeeper = protocolpoolkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[protocolpooltypes.StoreKey]),
+ app.AccountKeeper,
+ app.BankKeeper,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ )
+
+app.DistrKeeper = distrkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[distrtypes.StoreKey]),
+ app.AccountKeeper,
+ app.BankKeeper,
+ app.StakingKeeper,
+ authtypes.FeeCollectorName,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ distrkeeper.WithExternalCommunityPool(app.ProtocolPoolKeeper),
+ )
+
+app.SlashingKeeper = slashingkeeper.NewKeeper(
+ appCodec,
+ legacyAmino,
+ runtime.NewKVStoreService(keys[slashingtypes.StoreKey]),
+ app.StakingKeeper,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ )
+
+app.FeeGrantKeeper = feegrantkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[feegrant.StoreKey]),
+ app.AccountKeeper,
+ )
+
+ // register the staking hooks
+ // NOTE: stakingKeeper above is passed by reference, so that it will contain these hooks
+ app.StakingKeeper.SetHooks(
+ stakingtypes.NewMultiStakingHooks(
+ app.DistrKeeper.Hooks(),
+ app.SlashingKeeper.Hooks(),
+ ),
+ )
+
+app.CircuitKeeper = circuitkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[circuittypes.StoreKey]),
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ app.AccountKeeper.AddressCodec(),
+ )
+
+app.BaseApp.SetCircuitBreaker(&app.CircuitKeeper)
+
+app.AuthzKeeper = authzkeeper.NewKeeper(
+ runtime.NewKVStoreService(keys[authzkeeper.StoreKey]),
+ appCodec,
+ app.MsgServiceRouter(),
+ app.AccountKeeper,
+ )
+ groupConfig := group.DefaultConfig()
+ /*
+ Example of setting group params:
+ groupConfig.MaxMetadataLen = 1000
+ */
+ app.GroupKeeper = groupkeeper.NewKeeper(
+ keys[group.StoreKey],
+ appCodec,
+ app.MsgServiceRouter(),
+ app.AccountKeeper,
+ groupConfig,
+ )
+
+ // get skipUpgradeHeights from the app options
+ skipUpgradeHeights := map[int64]bool{
+}
+ for _, h := range cast.ToIntSlice(appOpts.Get(server.FlagUnsafeSkipUpgrades)) {
+ skipUpgradeHeights[int64(h)] = true
+}
+ homePath := cast.ToString(appOpts.Get(flags.FlagHome))
+ // set the governance module account as the authority for conducting upgrades
+ app.UpgradeKeeper = upgradekeeper.NewKeeper(
+ skipUpgradeHeights,
+ runtime.NewKVStoreService(keys[upgradetypes.StoreKey]),
+ appCodec,
+ homePath,
+ app.BaseApp,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ )
+
+ // Register the proposal types
+ // Deprecated: Avoid adding new handlers, instead use the new proposal flow
+ // by granting the governance module the right to execute the message.
+ // See: /sdk/v0.53/build/modules/gov#proposal-messages
+ govRouter := govv1beta1.NewRouter()
+
+govRouter.AddRoute(govtypes.RouterKey, govv1beta1.ProposalHandler)
+ govConfig := govtypes.DefaultConfig()
+ /*
+ Example of setting gov params:
+ govConfig.MaxMetadataLen = 10000
+ */
+ govKeeper := govkeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[govtypes.StoreKey]),
+ app.AccountKeeper,
+ app.BankKeeper,
+ app.StakingKeeper,
+ app.DistrKeeper,
+ app.MsgServiceRouter(),
+ govConfig,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ )
+
+ // Set legacy router for backwards compatibility with gov v1beta1
+ govKeeper.SetLegacyRouter(govRouter)
+
+app.GovKeeper = *govKeeper.SetHooks(
+ govtypes.NewMultiGovHooks(
+ // register the governance hooks
+ ),
+ )
+
+app.NFTKeeper = nftkeeper.NewKeeper(
+ runtime.NewKVStoreService(keys[nftkeeper.StoreKey]),
+ appCodec,
+ app.AccountKeeper,
+ app.BankKeeper,
+ )
+
+ // create evidence keeper with router
+ evidenceKeeper := evidencekeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[evidencetypes.StoreKey]),
+ app.StakingKeeper,
+ app.SlashingKeeper,
+ app.AccountKeeper.AddressCodec(),
+ runtime.ProvideCometInfoService(),
+ )
+ // If evidence needs to be handled for the app, set routes in router here and seal
+ app.EvidenceKeeper = *evidenceKeeper
+
+ app.EpochsKeeper = epochskeeper.NewKeeper(
+ runtime.NewKVStoreService(keys[epochstypes.StoreKey]),
+ appCodec,
+ )
+
+app.EpochsKeeper.SetHooks(
+ epochstypes.NewMultiEpochHooks(
+ // insert epoch hooks receivers here
+ ),
+ )
+
+ /**** Module Options ****/
+
+ // NOTE: Any module instantiated in the module manager that is later modified
+ // must be passed by reference here.
+ app.ModuleManager = module.NewManager(
+ genutil.NewAppModule(
+ app.AccountKeeper, app.StakingKeeper, app,
+ txConfig,
+ ),
+ auth.NewAppModule(appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, nil),
+ vesting.NewAppModule(app.AccountKeeper, app.BankKeeper),
+ bank.NewAppModule(appCodec, app.BankKeeper, app.AccountKeeper, nil),
+ feegrantmodule.NewAppModule(appCodec, app.AccountKeeper, app.BankKeeper, app.FeeGrantKeeper, app.interfaceRegistry),
+ gov.NewAppModule(appCodec, &app.GovKeeper, app.AccountKeeper, app.BankKeeper, nil),
+ mint.NewAppModule(appCodec, app.MintKeeper, app.AccountKeeper, nil, nil),
+ slashing.NewAppModule(appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, nil, app.interfaceRegistry),
+ distr.NewAppModule(appCodec, app.DistrKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, nil),
+ staking.NewAppModule(appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper, nil),
+ upgrade.NewAppModule(app.UpgradeKeeper, app.AccountKeeper.AddressCodec()),
+ evidence.NewAppModule(app.EvidenceKeeper),
+ authzmodule.NewAppModule(appCodec, app.AuthzKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry),
+ groupmodule.NewAppModule(appCodec, app.GroupKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry),
+ nftmodule.NewAppModule(appCodec, app.NFTKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry),
+ consensus.NewAppModule(appCodec, app.ConsensusParamsKeeper),
+ circuit.NewAppModule(appCodec, app.CircuitKeeper),
+ epochs.NewAppModule(appCodec, app.EpochsKeeper),
+ protocolpool.NewAppModule(appCodec, app.ProtocolPoolKeeper, app.AccountKeeper, app.BankKeeper),
+ )
+
+ // BasicModuleManager defines the module BasicManager is in charge of setting up basic,
+ // non-dependent module elements, such as codec registration and genesis verification.
+ // By default it is composed of all the modules from the module manager.
+ // Additionally, app module basics can be overwritten by passing them as argument.
+ app.BasicModuleManager = module.NewBasicManagerFromManager(
+ app.ModuleManager,
+ map[string]module.AppModuleBasic{
+ genutiltypes.ModuleName: genutil.NewAppModuleBasic(genutiltypes.DefaultMessageValidator),
+ govtypes.ModuleName: gov.NewAppModuleBasic(
+ []govclient.ProposalHandler{
+},
+ ),
+})
+
+app.BasicModuleManager.RegisterLegacyAminoCodec(legacyAmino)
+
+app.BasicModuleManager.RegisterInterfaces(interfaceRegistry)
+
+ // NOTE: upgrade module is required to be prioritized
+ app.ModuleManager.SetOrderPreBlockers(
+ upgradetypes.ModuleName,
+ authtypes.ModuleName,
+ )
+ // During begin block slashing happens after distr.BeginBlocker so that
+ // there is nothing left over in the validator fee pool, so as to keep the
+ // CanWithdrawInvariant invariant.
+ // NOTE: staking module is required if HistoricalEntries param > 0
+ app.ModuleManager.SetOrderBeginBlockers(
+ minttypes.ModuleName,
+ distrtypes.ModuleName,
+ protocolpooltypes.ModuleName,
+ slashingtypes.ModuleName,
+ evidencetypes.ModuleName,
+ stakingtypes.ModuleName,
+ genutiltypes.ModuleName,
+ authz.ModuleName,
+ epochstypes.ModuleName,
+ )
+
+app.ModuleManager.SetOrderEndBlockers(
+ govtypes.ModuleName,
+ stakingtypes.ModuleName,
+ genutiltypes.ModuleName,
+ feegrant.ModuleName,
+ group.ModuleName,
+ protocolpooltypes.ModuleName,
+ )
+
+ // NOTE: The genutils module must occur after staking so that pools are
+ // properly initialized with tokens from genesis accounts.
+ // NOTE: The genutils module must also occur after auth so that it can access the params from auth.
+ genesisModuleOrder := []string{
+ authtypes.ModuleName,
+ banktypes.ModuleName,
+ distrtypes.ModuleName,
+ stakingtypes.ModuleName,
+ slashingtypes.ModuleName,
+ govtypes.ModuleName,
+ minttypes.ModuleName,
+ genutiltypes.ModuleName,
+ evidencetypes.ModuleName,
+ authz.ModuleName,
+ feegrant.ModuleName,
+ nft.ModuleName,
+ group.ModuleName,
+ upgradetypes.ModuleName,
+ vestingtypes.ModuleName,
+ consensusparamtypes.ModuleName,
+ circuittypes.ModuleName,
+ epochstypes.ModuleName,
+ protocolpooltypes.ModuleName,
+}
+ exportModuleOrder := []string{
+ consensusparamtypes.ModuleName,
+ authtypes.ModuleName,
+ protocolpooltypes.ModuleName, // Must be exported before bank
+ banktypes.ModuleName,
+ distrtypes.ModuleName,
+ stakingtypes.ModuleName,
+ slashingtypes.ModuleName,
+ govtypes.ModuleName,
+ minttypes.ModuleName,
+ genutiltypes.ModuleName,
+ evidencetypes.ModuleName,
+ authz.ModuleName,
+ feegrant.ModuleName,
+ nft.ModuleName,
+ group.ModuleName,
+ upgradetypes.ModuleName,
+ vestingtypes.ModuleName,
+ circuittypes.ModuleName,
+ epochstypes.ModuleName,
+}
+
+app.ModuleManager.SetOrderInitGenesis(genesisModuleOrder...)
+
+app.ModuleManager.SetOrderExportGenesis(exportModuleOrder...)
+
+ // Uncomment if you want to set a custom migration order here.
+ // app.ModuleManager.SetOrderMigrations(custom order)
+
+app.configurator = module.NewConfigurator(app.appCodec, app.MsgServiceRouter(), app.GRPCQueryRouter())
+
+err = app.ModuleManager.RegisterServices(app.configurator)
+ if err != nil {
+ panic(err)
+}
+
+ // RegisterUpgradeHandlers is used for registering any on-chain upgrades.
+ // Make sure it's called after `app.ModuleManager` and `app.configurator` are set.
+ app.RegisterUpgradeHandlers()
+
+autocliv1.RegisterQueryServer(app.GRPCQueryRouter(), runtimeservices.NewAutoCLIQueryService(app.ModuleManager.Modules))
+
+reflectionSvc, err := runtimeservices.NewReflectionService()
+ if err != nil {
+ panic(err)
+}
+
+reflectionv1.RegisterReflectionServiceServer(app.GRPCQueryRouter(), reflectionSvc)
+
+ // add test gRPC service for testing gRPC queries in isolation
+ testdata_pulsar.RegisterQueryServer(app.GRPCQueryRouter(), testdata_pulsar.QueryImpl{
+})
+
+ // create the simulation manager and define the order of the modules for deterministic simulations
+ //
+ // NOTE: this is not required for apps that don't use the simulator for fuzz testing
+ // transactions
+ overrideModules := map[string]module.AppModuleSimulation{
+ authtypes.ModuleName: auth.NewAppModule(app.appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, nil),
+}
+
+app.sm = module.NewSimulationManagerFromAppModules(app.ModuleManager.Modules, overrideModules)
+
+app.sm.RegisterStoreDecoders()
+
+ // initialize stores
+ app.MountKVStores(keys)
+
+ // initialize BaseApp
+ app.SetInitChainer(app.InitChainer)
+
+app.SetPreBlocker(app.PreBlocker)
+
+app.SetBeginBlocker(app.BeginBlocker)
+
+app.SetEndBlocker(app.EndBlocker)
+
+app.setAnteHandler(txConfig)
+
+ // In v0.46, the SDK introduces _postHandlers_. PostHandlers are like
+ // antehandlers, but are run _after_ the `runMsgs` execution. They are also
+ // defined as a chain, and have the same signature as antehandlers.
+ //
+ // In baseapp, postHandlers are run in the same store branch as `runMsgs`,
+ // meaning that both `runMsgs` and `postHandler` state will be committed if
+ // both are successful, and both will be reverted if any of the two fails.
+ //
+ // The SDK exposes a default postHandlers chain
+ //
+ // Please note that changing any of the anteHandler or postHandler chain is
+ // likely to be a state-machine breaking change, which needs a coordinated
+ // upgrade.
+ app.setPostHandler()
+
+ // At startup, after all modules have been registered, check that all proto
+ // annotations are correct.
+ protoFiles, err := proto.MergedRegistry()
+ if err != nil {
+ panic(err)
+}
+
+err = msgservice.ValidateProtoAnnotations(protoFiles)
+ if err != nil {
+ // Once we switch to using protoreflect-based antehandlers, we might
+ // want to panic here instead of logging a warning.
+ fmt.Fprintln(os.Stderr, err.Error())
+}
+ if loadLatest {
+ if err := app.LoadLatestVersion(); err != nil {
+ panic(fmt.Errorf("error loading last version: %w", err))
+}
+
+}
+
+return app
+}
+
+func (app *SimApp)
+
+setAnteHandler(txConfig client.TxConfig) {
+ anteHandler, err := NewAnteHandler(
+ HandlerOptions{
+ ante.HandlerOptions{
+ UnorderedNonceManager: app.AccountKeeper,
+ AccountKeeper: app.AccountKeeper,
+ BankKeeper: app.BankKeeper,
+ SignModeHandler: txConfig.SignModeHandler(),
+ FeegrantKeeper: app.FeeGrantKeeper,
+ SigGasConsumer: ante.DefaultSigVerificationGasConsumer,
+},
+ &app.CircuitKeeper,
+},
+ )
+ if err != nil {
+ panic(err)
+}
+
+ // Set the AnteHandler for the app
+ app.SetAnteHandler(anteHandler)
+}
+
+func (app *SimApp)
+
+setPostHandler() {
+ postHandler, err := posthandler.NewPostHandler(
+ posthandler.HandlerOptions{
+},
+ )
+ if err != nil {
+ panic(err)
+}
+
+app.SetPostHandler(postHandler)
+}
+
+// Name returns the name of the App
+func (app *SimApp)
+
+Name()
+
+string {
+ return app.BaseApp.Name()
+}
+
+// PreBlocker application updates every pre block
+func (app *SimApp)
+
+PreBlocker(ctx sdk.Context, _ *abci.RequestFinalizeBlock) (*sdk.ResponsePreBlock, error) {
+ return app.ModuleManager.PreBlock(ctx)
+}
+
+// BeginBlocker application updates every begin block
+func (app *SimApp)
+
+BeginBlocker(ctx sdk.Context) (sdk.BeginBlock, error) {
+ return app.ModuleManager.BeginBlock(ctx)
+}
+
+// EndBlocker application updates every end block
+func (app *SimApp)
+
+EndBlocker(ctx sdk.Context) (sdk.EndBlock, error) {
+ return app.ModuleManager.EndBlock(ctx)
+}
+
+func (a *SimApp)
+
+Configurator()
+
+module.Configurator {
+ return a.configurator
+}
+
+// InitChainer application update at chain initialization
+func (app *SimApp)
+
+InitChainer(ctx sdk.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) {
+ var genesisState GenesisState
+ if err := json.Unmarshal(req.AppStateBytes, &genesisState); err != nil {
+ panic(err)
+}
+
+app.UpgradeKeeper.SetModuleVersionMap(ctx, app.ModuleManager.GetVersionMap())
+
+return app.ModuleManager.InitGenesis(ctx, app.appCodec, genesisState)
+}
+
+// LoadHeight loads a particular height
+func (app *SimApp)
+
+LoadHeight(height int64)
+
+error {
+ return app.LoadVersion(height)
+}
+
+// LegacyAmino returns SimApp's amino codec.
+//
+// NOTE: This is solely to be used for testing purposes as it may be desirable
+// for modules to register their own custom testing types.
+func (app *SimApp)
+
+LegacyAmino() *codec.LegacyAmino {
+ return app.legacyAmino
+}
+
+// AppCodec returns SimApp's app codec.
+//
+// NOTE: This is solely to be used for testing purposes as it may be desirable
+// for modules to register their own custom testing types.
+func (app *SimApp)
+
+AppCodec()
+
+codec.Codec {
+ return app.appCodec
+}
+
+// InterfaceRegistry returns SimApp's InterfaceRegistry
+func (app *SimApp)
+
+InterfaceRegistry()
+
+types.InterfaceRegistry {
+ return app.interfaceRegistry
+}
+
+// TxConfig returns SimApp's TxConfig
+func (app *SimApp)
+
+TxConfig()
+
+client.TxConfig {
+ return app.txConfig
+}
+
+// AutoCliOpts returns the autocli options for the app.
+func (app *SimApp)
+
+AutoCliOpts()
+
+autocli.AppOptions {
+ modules := make(map[string]appmodule.AppModule, 0)
+ for _, m := range app.ModuleManager.Modules {
+ if moduleWithName, ok := m.(module.HasName); ok {
+ moduleName := moduleWithName.Name()
+ if appModule, ok := moduleWithName.(appmodule.AppModule); ok {
+ modules[moduleName] = appModule
+}
+
+}
+
+}
+
+return autocli.AppOptions{
+ Modules: modules,
+ ModuleOptions: runtimeservices.ExtractAutoCLIOptions(app.ModuleManager.Modules),
+ AddressCodec: authcodec.NewBech32Codec(sdk.GetConfig().GetBech32AccountAddrPrefix()),
+ ValidatorAddressCodec: authcodec.NewBech32Codec(sdk.GetConfig().GetBech32ValidatorAddrPrefix()),
+ ConsensusAddressCodec: authcodec.NewBech32Codec(sdk.GetConfig().GetBech32ConsensusAddrPrefix()),
+}
+}
+
+// DefaultGenesis returns a default genesis from the registered AppModuleBasic's.
+func (a *SimApp)
+
+DefaultGenesis()
+
+map[string]json.RawMessage {
+ return a.BasicModuleManager.DefaultGenesis(a.appCodec)
+}
+
+// GetKey returns the KVStoreKey for the provided store key.
+//
+// NOTE: This is solely to be used for testing purposes.
+func (app *SimApp)
+
+GetKey(storeKey string) *storetypes.KVStoreKey {
+ return app.keys[storeKey]
+}
+
+// GetStoreKeys returns all the stored store keys.
+func (app *SimApp)
+
+GetStoreKeys() []storetypes.StoreKey {
+ keys := make([]storetypes.StoreKey, 0, len(app.keys))
+ for _, key := range app.keys {
+ keys = append(keys, key)
+}
+
+return keys
+}
+
+// SimulationManager implements the SimulationApp interface
+func (app *SimApp)
+
+SimulationManager() *module.SimulationManager {
+ return app.sm
+}
+
+// RegisterAPIRoutes registers all application module routes with the provided
+// API server.
+func (app *SimApp)
+
+RegisterAPIRoutes(apiSvr *api.Server, apiConfig config.APIConfig) {
+ clientCtx := apiSvr.ClientCtx
+ // Register new tx routes from grpc-gateway.
+ authtx.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter)
+
+ // Register new CometBFT queries routes from grpc-gateway.
+ cmtservice.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter)
+
+ // Register node gRPC service for grpc-gateway.
+ nodeservice.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter)
+
+ // Register grpc-gateway routes for all modules.
+ app.BasicModuleManager.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter)
+
+ // register swagger API from root so that other applications can override easily
+ if err := server.RegisterSwaggerAPI(apiSvr.ClientCtx, apiSvr.Router, apiConfig.Swagger); err != nil {
+ panic(err)
+}
+}
+
+// RegisterTxService implements the Application.RegisterTxService method.
+func (app *SimApp)
+
+RegisterTxService(clientCtx client.Context) {
+ authtx.RegisterTxService(app.BaseApp.GRPCQueryRouter(), clientCtx, app.BaseApp.Simulate, app.interfaceRegistry)
+}
+
+// RegisterTendermintService implements the Application.RegisterTendermintService method.
+func (app *SimApp)
+
+RegisterTendermintService(clientCtx client.Context) {
+ cmtApp := server.NewCometABCIWrapper(app)
+
+cmtservice.RegisterTendermintService(
+ clientCtx,
+ app.BaseApp.GRPCQueryRouter(),
+ app.interfaceRegistry,
+ cmtApp.Query,
+ )
+}
+
+func (app *SimApp)
+
+RegisterNodeService(clientCtx client.Context, cfg config.Config) {
+ nodeservice.RegisterNodeService(clientCtx, app.GRPCQueryRouter(), cfg)
+}
+
+// GetMaccPerms returns a copy of the module account permissions
+//
+// NOTE: This is solely to be used for testing purposes.
+func GetMaccPerms()
+
+map[string][]string {
+ return maps.Clone(maccPerms)
+}
+
+// BlockedAddresses returns all the app's blocked account addresses.
+func BlockedAddresses()
+
+map[string]bool {
+ modAccAddrs := make(map[string]bool)
+ for acc := range GetMaccPerms() {
+ modAccAddrs[authtypes.NewModuleAddress(acc).String()] = true
+}
+
+ // allow the following addresses to receive funds
+ delete(modAccAddrs, authtypes.NewModuleAddress(govtypes.ModuleName).String())
+
+return modAccAddrs
+}
+```
+
+The following diagram shows the current dependencies between keepers.
+
+
diff --git a/sdk/next/learn/advanced/proto-docs.mdx b/sdk/next/learn/advanced/proto-docs.mdx
new file mode 100644
index 000000000..0f3593eba
--- /dev/null
+++ b/sdk/next/learn/advanced/proto-docs.mdx
@@ -0,0 +1,6 @@
+---
+title: Protobuf Documentation
+description: See Cosmos SDK Buf Proto-docs
+---
+
+See [Cosmos SDK Buf Proto-docs](https://buf.build/cosmos/cosmos-sdk/docs/main)
diff --git a/sdk/next/learn/advanced/runtx_middleware.mdx b/sdk/next/learn/advanced/runtx_middleware.mdx
new file mode 100644
index 000000000..e69e61f8b
--- /dev/null
+++ b/sdk/next/learn/advanced/runtx_middleware.mdx
@@ -0,0 +1,179 @@
+---
+title: RunTx recovery middleware
+---
+
+`BaseApp.runTx()` function handles Go panics that might occur during transaction execution, for example, when a keeper has faced an invalid state and panicked.
+Depending on the panic type, a different handler is used; for instance, the default one prints an error log message.
+Recovery middleware is used to add custom panic recovery for Cosmos SDK application developers.
+
+More context can be found in the corresponding [ADR-022](/sdk/v0.53/build/architecture/adr-022-custom-panic-handling) and the implementation in [recovery.go](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0-rc.2/baseapp/recovery.go).
+
+## Interface
+
+```go expandable
+package baseapp
+
+import (
+
+ "fmt"
+ "runtime/debug"
+
+ errorsmod "cosmossdk.io/errors"
+ storetypes "cosmossdk.io/store/types"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// RecoveryHandler handles recovery()
+
+object.
+// Return a non-nil error if recoveryObj was processed.
+// Return nil if recoveryObj was not processed.
+type RecoveryHandler func(recoveryObj interface{
+})
+
+error
+
+// recoveryMiddleware is wrapper for RecoveryHandler to create chained recovery handling.
+// returns (recoveryMiddleware, nil)
+ if recoveryObj was not processed and should be passed to the next middleware in chain.
+// returns (nil, error)
+ if recoveryObj was processed and middleware chain processing should be stopped.
+type recoveryMiddleware func(recoveryObj interface{
+}) (recoveryMiddleware, error)
+
+// processRecovery processes recoveryMiddleware chain for recovery()
+
+object.
+// Chain processing stops on non-nil error or when chain is processed.
+func processRecovery(recoveryObj interface{
+}, middleware recoveryMiddleware)
+
+error {
+ if middleware == nil {
+ return nil
+}
+
+next, err := middleware(recoveryObj)
+ if err != nil {
+ return err
+}
+
+return processRecovery(recoveryObj, next)
+}
+
+// newRecoveryMiddleware creates a RecoveryHandler middleware.
+func newRecoveryMiddleware(handler RecoveryHandler, next recoveryMiddleware)
+
+recoveryMiddleware {
+ return func(recoveryObj interface{
+}) (recoveryMiddleware, error) {
+ if err := handler(recoveryObj); err != nil {
+ return nil, err
+}
+
+return next, nil
+}
+}
+
+// newOutOfGasRecoveryMiddleware creates a standard OutOfGas recovery middleware for app.runTx method.
+func newOutOfGasRecoveryMiddleware(gasWanted uint64, ctx sdk.Context, next recoveryMiddleware)
+
+recoveryMiddleware {
+ handler := func(recoveryObj interface{
+})
+
+error {
+ err, ok := recoveryObj.(storetypes.ErrorOutOfGas)
+ if !ok {
+ return nil
+}
+
+return errorsmod.Wrap(
+ sdkerrors.ErrOutOfGas, fmt.Sprintf(
+ "out of gas in location: %v; gasWanted: %d, gasUsed: %d",
+ err.Descriptor, gasWanted, ctx.GasMeter().GasConsumed(),
+ ),
+ )
+}
+
+return newRecoveryMiddleware(handler, next)
+}
+
+// newDefaultRecoveryMiddleware creates a default (last in chain)
+
+recovery middleware for app.runTx method.
+func newDefaultRecoveryMiddleware()
+
+recoveryMiddleware {
+ handler := func(recoveryObj interface{
+})
+
+error {
+ return errorsmod.Wrap(
+ sdkerrors.ErrPanic, fmt.Sprintf(
+ "recovered: %v\nstack:\n%v", recoveryObj, string(debug.Stack()),
+ ),
+ )
+}
+
+return newRecoveryMiddleware(handler, nil)
+}
+```
+
+`recoveryObj` is a return value for `recover()` function from the `builtin` Go package.
+
+Contract:
+
+* RecoveryHandler returns `nil` if `recoveryObj` wasn't handled and should be passed to the next recovery middleware.
+* RecoveryHandler returns a non-nil `error` if `recoveryObj` was handled.
+
+## Custom RecoveryHandler register
+
+`BaseApp.AddRunTxRecoveryHandler(handlers ...RecoveryHandler)`
+
+BaseApp method adds recovery middleware to the default recovery chain.
+
+## Example
+
+Let's assume we want to emit the "Consensus failure" chain state if some particular error occurs.
+
+We have a module keeper that panics:
+
+```go
+func (k FooKeeper)
+
+Do(obj interface{
+}) {
+ if obj == nil {
+ // that shouldn't happen, we need to crash the app
+ err := errorsmod.Wrap(fooTypes.InternalError, "obj is nil")
+
+panic(err)
+}
+}
+```
+
+By default, that panic would be recovered and an error message would be printed to the log. To override that behavior, register a custom RecoveryHandler:
+
+```go expandable
+// Cosmos SDK application constructor
+ customHandler := func(recoveryObj interface{
+})
+
+error {
+ err, ok := recoveryObj.(error)
+ if !ok {
+ return nil
+}
+ if fooTypes.InternalError.Is(err) {
+ panic(fmt.Errorf("FooKeeper did panic with error: %w", err))
+}
+
+return nil
+}
+ baseApp := baseapp.NewBaseApp(...)
+
+baseApp.AddRunTxRecoveryHandler(customHandler)
+```
diff --git a/sdk/next/learn/advanced/simulation.mdx b/sdk/next/learn/advanced/simulation.mdx
new file mode 100644
index 000000000..60800c81a
--- /dev/null
+++ b/sdk/next/learn/advanced/simulation.mdx
@@ -0,0 +1,95 @@
+---
+title: Cosmos Blockchain Simulator
+description: >-
+ The Cosmos SDK offers a full-fledged simulation framework to fuzz test every
+ message defined by a module.
+---
+
+The Cosmos SDK offers a full-fledged simulation framework to fuzz test every
+message defined by a module.
+
+On the Cosmos SDK, this functionality is provided by [`SimApp`](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0-rc.2/simapp/app_di.go), which is a
+`BaseApp` application that is used for running the [`simulation`](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0-rc.2/x/simulation) module.
+This module defines all the simulation logic as well as the operations for
+randomized parameters like accounts, balances etc.
+
+## Goals
+
+The blockchain simulator tests how the blockchain application would behave under
+real life circumstances by generating and sending randomized messages.
+The goal of this is to detect and debug failures that could halt a live chain,
+by providing logs and statistics about the operations run by the simulator as
+well as exporting the latest application state when a failure was found.
+
+Its main difference from integration testing is that the simulator app allows
+you to pass parameters to customize the chain that's being simulated.
+This comes in handy when trying to reproduce bugs that were generated in the
+provided operations (randomized or not).
+
+## Simulation commands
+
+The simulation app has different commands, each of which tests a different
+failure type:
+
+* `AppImportExport`: The simulator exports the initial app state and then it
+ creates a new app with the exported `genesis.json` as an input, checking for
+ inconsistencies between the stores.
+* `AppSimulationAfterImport`: Queues two simulations together. The first one provides the app state (i.e. genesis) to the second. Useful to test software upgrades or hard-forks from a live chain.
+* `AppStateDeterminism`: Checks that all the nodes return the same values, in the same order.
+* `FullAppSimulation`: General simulation mode. Runs the chain and the specified operations for a given number of blocks. Tests that there are no `panics` on the simulation.
+
+Each simulation must receive a set of inputs (i.e. flags) such as the number of
+blocks that the simulation is run, seed, block size, etc.
+Check the full list of flags [here](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0-rc.2/x/simulation/client/cli/flags.go#L43-L70).
+
+## Simulator Modes
+
+In addition to the various inputs and commands, the simulator runs in three modes:
+
+1. Completely random where the initial state, module parameters and simulation
+   parameters are pseudo-randomly generated.
+2. From a `genesis.json` file where the initial state and the module parameters are defined.
+   This mode is helpful for running simulations on a known state such as a live network export where a new (most likely breaking) version of the application needs to be tested.
+3. From a `params.json` file where the initial state is pseudo-randomly generated but the module and simulation parameters can be provided manually.
+ This allows for a more controlled and deterministic simulation setup while allowing the state space to still be pseudo-randomly simulated.
+ The list of available parameters are listed [here](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0-rc.2/x/simulation/client/cli/flags.go#L72-L90).
+
+
+These modes are not mutually exclusive. So you can for example run a randomly
+generated genesis state (`1`) with manually generated simulation params (`3`).
+
+
+## Usage
+
+This is a general example of how simulations are run. For more specific examples
+check the Cosmos SDK [Makefile](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0-rc.2/Makefile#L285-L320).
+
+```bash
+ $ go test -mod=readonly github.com/cosmos/cosmos-sdk/simapp \
+ -run=TestApp \
+ ...
+ -v -timeout 24h
+```
+
+## Debugging Tips
+
+Here are some suggestions when encountering a simulation failure:
+
+* Export the app state at the height where the failure was found. You can do this
+ by passing the `-ExportStatePath` flag to the simulator.
+* Use `-Verbose` logs. They could give you a better hint on all the operations
+ involved.
+* Try using another `-Seed`. If it can reproduce the same error and if it fails
+ sooner, you will spend less time running the simulations.
+* Reduce the `-NumBlocks` value. How does the app state look at the height previous
+  to the failure?
+* Try adding logs to operations that are not logged. You will have to define a
+ [Logger](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0-rc.2/x/staking/keeper/keeper.go#L77-L81) on your `Keeper`.
+
+## Use simulation in your Cosmos SDK-based application
+
+Learn how you can build the simulation into your Cosmos SDK-based application:
+
+* Application Simulation Manager
+* [Building modules: Simulator](/sdk/v0.53/build/building-modules/simulator)
+* Simulator tests
diff --git a/sdk/next/learn/advanced/store.mdx b/sdk/next/learn/advanced/store.mdx
new file mode 100644
index 000000000..6f9592307
--- /dev/null
+++ b/sdk/next/learn/advanced/store.mdx
@@ -0,0 +1,11856 @@
+---
+title: Store
+---
+
+
+**Synopsis**
+A store is a data structure that holds the state of the application.
+
+
+
+**Prerequisite Readings**
+
+* [Anatomy of a Cosmos SDK application](/sdk/v0.53/learn/beginner/app-anatomy)
+
+
+
+## Introduction to Cosmos SDK Stores
+
+The Cosmos SDK comes with a large set of stores to persist the state of applications. By default, the main store of Cosmos SDK applications is a `multistore`, i.e. a store of stores. Developers can add any number of key-value stores to the multistore, depending on their application needs. The multistore exists to support the modularity of the Cosmos SDK, as it lets each module declare and manage its own subset of the state. Key-value stores in the multistore can only be accessed with a specific capability `key`, which is typically held in the [`keeper`](/sdk/v0.53/build/building-modules/keeper) of the module that declared the store.
+
+```text expandable
++-----------------------------------------------------+
+| |
+| +--------------------------------------------+ |
+| | | |
+| |  KVStore 1 - Manage by keeper of Module 1  | |
+| | | |
+| +--------------------------------------------+ |
+| |
+| +--------------------------------------------+ |
+| | | |
+| | KVStore 2 - Manage by keeper of Module 2 | |
+| | | |
+| +--------------------------------------------+ |
+| |
+| +--------------------------------------------+ |
+| | | |
+| | KVStore 3 - Manage by keeper of Module 2 | |
+| | | |
+| +--------------------------------------------+ |
+| |
+| +--------------------------------------------+ |
+| | | |
+| | KVStore 4 - Manage by keeper of Module 3 | |
+| | | |
+| +--------------------------------------------+ |
+| |
+| +--------------------------------------------+ |
+| | | |
+| | KVStore 5 - Manage by keeper of Module 4 | |
+| | | |
+| +--------------------------------------------+ |
+| |
+| Main Multistore |
+| |
++-----------------------------------------------------+
+
+ Application's State
+```
+
+### Store Interface
+
+At its very core, a Cosmos SDK `store` is an object that holds a `CacheWrapper` and has a `GetStoreType()` method:
+
+```go expandable
+package types
+
+import (
+
+ "fmt"
+ "io"
+ "maps"
+ "slices"
+ "github.com/cometbft/cometbft/proto/tendermint/crypto"
+ dbm "github.com/cosmos/cosmos-db"
+ "cosmossdk.io/store/metrics"
+ pruningtypes "cosmossdk.io/store/pruning/types"
+ snapshottypes "cosmossdk.io/store/snapshots/types"
+)
+
+type Store interface {
+ GetStoreType()
+
+StoreType
+ CacheWrapper
+}
+
+// something that can persist to disk
+type Committer interface {
+ Commit()
+
+CommitID
+ LastCommitID()
+
+CommitID
+
+ // WorkingHash returns the hash of the KVStore's state before commit.
+ WorkingHash() []byte
+
+ SetPruning(pruningtypes.PruningOptions)
+
+GetPruning()
+
+pruningtypes.PruningOptions
+}
+
+// Stores of MultiStore must implement CommitStore.
+type CommitStore interface {
+ Committer
+ Store
+}
+
+// Queryable allows a Store to expose internal state to the abci.Query
+// interface. Multistore can route requests to the proper Store.
+//
+// This is an optional, but useful extension to any CommitStore
+type Queryable interface {
+ Query(*RequestQuery) (*ResponseQuery, error)
+}
+
+type RequestQuery struct {
+ Data []byte
+ Path string
+ Height int64
+ Prove bool
+}
+
+type ResponseQuery struct {
+ Code uint32
+ Log string
+ Info string
+ Index int64
+ Key []byte
+ Value []byte
+ ProofOps *crypto.ProofOps
+ Height int64
+ Codespace string
+}
+
+//----------------------------------------
+// MultiStore
+
+// StoreUpgrades defines a series of transformations to apply the multistore db upon load
+type StoreUpgrades struct {
+ Added []string `json:"added"`
+ Renamed []StoreRename `json:"renamed"`
+ Deleted []string `json:"deleted"`
+}
+
+// StoreRename defines a name change of a sub-store.
+// All data previously under a PrefixStore with OldKey will be copied
+// to a PrefixStore with NewKey, then deleted from OldKey store.
+type StoreRename struct {
+ OldKey string `json:"old_key"`
+ NewKey string `json:"new_key"`
+}
+
+// IsAdded returns true if the given key should be added
+func (s *StoreUpgrades)
+
+IsAdded(key string)
+
+bool {
+ if s == nil {
+ return false
+}
+
+return slices.Contains(s.Added, key)
+}
+
+// IsDeleted returns true if the given key should be deleted
+func (s *StoreUpgrades)
+
+IsDeleted(key string)
+
+bool {
+ if s == nil {
+ return false
+}
+
+return slices.Contains(s.Deleted, key)
+}
+
+// RenamedFrom returns the oldKey if it was renamed
+// Returns "" if it was not renamed
+func (s *StoreUpgrades)
+
+RenamedFrom(key string)
+
+string {
+ if s == nil {
+ return ""
+}
+ for _, re := range s.Renamed {
+ if re.NewKey == key {
+ return re.OldKey
+}
+
+}
+
+return ""
+}
+
+type MultiStore interface {
+ Store
+
+ // Branches MultiStore into a cached storage object.
+ // NOTE: Caller should probably not call .Write()
+
+on each, but
+ // call CacheMultiStore.Write().
+ CacheMultiStore()
+
+CacheMultiStore
+
+ // CacheMultiStoreWithVersion branches the underlying MultiStore where
+ // each stored is loaded at a specific version (height).
+ CacheMultiStoreWithVersion(version int64) (CacheMultiStore, error)
+
+ // Convenience for fetching substores.
+ // If the store does not exist, panics.
+ GetStore(StoreKey)
+
+Store
+ GetKVStore(StoreKey)
+
+KVStore
+
+ // TracingEnabled returns if tracing is enabled for the MultiStore.
+ TracingEnabled()
+
+bool
+
+ // SetTracer sets the tracer for the MultiStore that the underlying
+ // stores will utilize to trace operations. The modified MultiStore is
+ // returned.
+ SetTracer(w io.Writer)
+
+MultiStore
+
+ // SetTracingContext sets the tracing context for a MultiStore. It is
+ // implied that the caller should update the context when necessary between
+ // tracing operations. The modified MultiStore is returned.
+ SetTracingContext(TraceContext)
+
+MultiStore
+
+ // LatestVersion returns the latest version in the store
+ LatestVersion()
+
+int64
+}
+
+// From MultiStore.CacheMultiStore()....
+type CacheMultiStore interface {
+ MultiStore
+ Write() // Writes operations to underlying KVStore
+}
+
+// CommitMultiStore is an interface for a MultiStore without cache capabilities.
+type CommitMultiStore interface {
+ Committer
+ MultiStore
+ snapshottypes.Snapshotter
+
+ // Mount a store of type using the given db.
+ // If db == nil, the new store will use the CommitMultiStore db.
+ MountStoreWithDB(key StoreKey, typ StoreType, db dbm.DB)
+
+ // Panics on a nil key.
+ GetCommitStore(key StoreKey)
+
+CommitStore
+
+ // Panics on a nil key.
+ GetCommitKVStore(key StoreKey)
+
+CommitKVStore
+
+ // Load the latest persisted version. Called once after all calls to
+ // Mount*Store()
+
+are complete.
+ LoadLatestVersion()
+
+error
+
+ // LoadLatestVersionAndUpgrade will load the latest version, but also
+ // rename/delete/create sub-store keys, before registering all the keys
+ // in order to handle breaking formats in migrations
+ LoadLatestVersionAndUpgrade(upgrades *StoreUpgrades)
+
+error
+
+ // LoadVersionAndUpgrade will load the named version, but also
+ // rename/delete/create sub-store keys, before registering all the keys
+ // in order to handle breaking formats in migrations
+ LoadVersionAndUpgrade(ver int64, upgrades *StoreUpgrades)
+
+error
+
+ // Load a specific persisted version. When you load an old version, or when
+ // the last commit attempt didn't complete, the next commit after loading
+ // must be idempotent (return the same commit id). Otherwise the behavior is
+ // undefined.
+ LoadVersion(ver int64)
+
+error
+
+ // Set an inter-block (persistent)
+
+cache that maintains a mapping from
+ // StoreKeys to CommitKVStores.
+ SetInterBlockCache(MultiStorePersistentCache)
+
+ // SetInitialVersion sets the initial version of the IAVL tree. It is used when
+ // starting a new chain at an arbitrary height.
+ SetInitialVersion(version int64)
+
+error
+
+ // SetIAVLCacheSize sets the cache size of the IAVL tree.
+ SetIAVLCacheSize(size int)
+
+ // SetIAVLDisableFastNode enables/disables fastnode feature on iavl.
+ SetIAVLDisableFastNode(disable bool)
+
+ // SetIAVLSyncPruning set sync/async pruning on iavl.
+ // It is not recommended to use this option.
+ // It is here to enable the prune command to force this to true, allowing the command to wait
+ // for the pruning to finish before returning.
+ SetIAVLSyncPruning(sync bool)
+
+ // RollbackToVersion rollback the db to specific version(height).
+ RollbackToVersion(version int64)
+
+error
+
+ // ListeningEnabled returns if listening is enabled for the KVStore belonging the provided StoreKey
+ ListeningEnabled(key StoreKey)
+
+bool
+
+ // AddListeners adds a listener for the KVStore belonging to the provided StoreKey
+ AddListeners(keys []StoreKey)
+
+ // PopStateCache returns the accumulated state change messages from the CommitMultiStore
+ PopStateCache() []*StoreKVPair
+
+ // SetMetrics sets the metrics for the KVStore
+ SetMetrics(metrics metrics.StoreMetrics)
+}
+
+//---------subsp-------------------------------
+// KVStore
+
+// BasicKVStore is a simple interface to get/set data
+type BasicKVStore interface {
+ // Get returns nil if key doesn't exist. Panics on nil key.
+ Get(key []byte) []byte
+
+ // Has checks if a key exists. Panics on nil key.
+ Has(key []byte)
+
+bool
+
+ // Set sets the key. Panics on nil key or value.
+ Set(key, value []byte)
+
+ // Delete deletes the key. Panics on nil key.
+ Delete(key []byte)
+}
+
+// KVStore additionally provides iteration and deletion
+type KVStore interface {
+ Store
+ BasicKVStore
+
+ // Iterator over a domain of keys in ascending order. End is exclusive.
+ // Start must be less than end, or the Iterator is invalid.
+ // Iterator must be closed by caller.
+ // To iterate over entire domain, use store.Iterator(nil, nil)
+ // CONTRACT: No writes may happen within a domain while an iterator exists over it.
+ // Exceptionally allowed for cachekv.Store, safe to write in the modules.
+ Iterator(start, end []byte)
+
+Iterator
+
+ // Iterator over a domain of keys in descending order. End is exclusive.
+ // Start must be less than end, or the Iterator is invalid.
+ // Iterator must be closed by caller.
+ // CONTRACT: No writes may happen within a domain while an iterator exists over it.
+ // Exceptionally allowed for cachekv.Store, safe to write in the modules.
+ ReverseIterator(start, end []byte)
+
+Iterator
+}
+
+// Iterator is an alias db's Iterator for convenience.
+type Iterator = dbm.Iterator
+
+// CacheKVStore branches a KVStore and provides read cache functionality.
+// After calling .Write()
+
+on the CacheKVStore, all previously created
+// CacheKVStores on the object expire.
+type CacheKVStore interface {
+ KVStore
+
+ // Writes operations to underlying KVStore
+ Write()
+}
+
+// CommitKVStore is an interface for MultiStore.
+type CommitKVStore interface {
+ Committer
+ KVStore
+}
+
+//----------------------------------------
+// CacheWrap
+
+// CacheWrap is the most appropriate interface for store ephemeral branching and cache.
+// For example, IAVLStore.CacheWrap()
+
+returns a CacheKVStore. CacheWrap should not return
+// a Committer, since Commit ephemeral store make no sense. It can return KVStore,
+// HeapStore, SpaceStore, etc.
+type CacheWrap interface {
+ // Write syncs with the underlying store.
+ Write()
+
+ // CacheWrap recursively wraps again.
+ CacheWrap()
+
+CacheWrap
+
+ // CacheWrapWithTrace recursively wraps again with tracing enabled.
+ CacheWrapWithTrace(w io.Writer, tc TraceContext)
+
+CacheWrap
+}
+
+type CacheWrapper interface {
+ // CacheWrap branches a store.
+ CacheWrap()
+
+CacheWrap
+
+ // CacheWrapWithTrace branches a store with tracing enabled.
+ CacheWrapWithTrace(w io.Writer, tc TraceContext)
+
+CacheWrap
+}
+
+func (cid CommitID)
+
+IsZero()
+
+bool {
+ return cid.Version == 0 && len(cid.Hash) == 0
+}
+
+func (cid CommitID)
+
+String()
+
+string {
+ return fmt.Sprintf("CommitID{%v:%X
+}", cid.Hash, cid.Version)
+}
+
+//----------------------------------------
+// Store types
+
+// kind of store
+type StoreType int
+
+const (
+ StoreTypeMulti StoreType = iota
+ StoreTypeDB
+ StoreTypeIAVL
+ StoreTypeTransient
+ StoreTypeMemory
+ StoreTypeSMT
+ StoreTypePersistent
+)
+
+func (st StoreType)
+
+String()
+
+string {
+ switch st {
+ case StoreTypeMulti:
+ return "StoreTypeMulti"
+ case StoreTypeDB:
+ return "StoreTypeDB"
+ case StoreTypeIAVL:
+ return "StoreTypeIAVL"
+ case StoreTypeTransient:
+ return "StoreTypeTransient"
+ case StoreTypeMemory:
+ return "StoreTypeMemory"
+ case StoreTypeSMT:
+ return "StoreTypeSMT"
+ case StoreTypePersistent:
+ return "StoreTypePersistent"
+}
+
+return "unknown store type"
+}
+
+//----------------------------------------
+// Keys for accessing substores
+
+// StoreKey is a key used to index stores in a MultiStore.
+type StoreKey interface {
+ Name()
+
+string
+ String()
+
+string
+}
+
+// CapabilityKey represent the Cosmos SDK keys for object-capability
+// generation in the IBC protocol as defined in https://github.com/cosmos/ibc/tree/master/spec/core/ics-005-port-allocation#data-structures
+type CapabilityKey StoreKey
+
+// KVStoreKey is used for accessing substores.
+// Only the pointer value should ever be used - it functions as a capabilities key.
+type KVStoreKey struct {
+ name string
+}
+
+// NewKVStoreKey returns a new pointer to a KVStoreKey.
+// Use a pointer so keys don't collide.
+func NewKVStoreKey(name string) *KVStoreKey {
+ if name == "" {
+ panic("empty key name not allowed")
+}
+
+return &KVStoreKey{
+ name: name,
+}
+}
+
+// NewKVStoreKeys returns a map of new pointers to KVStoreKey's.
+// The function will panic if there is a potential conflict in names (see `assertNoPrefix`
+// function for more details).
+func NewKVStoreKeys(names ...string)
+
+map[string]*KVStoreKey {
+ assertNoCommonPrefix(names)
+ keys := make(map[string]*KVStoreKey, len(names))
+ for _, n := range names {
+ keys[n] = NewKVStoreKey(n)
+}
+
+return keys
+}
+
+func (key *KVStoreKey)
+
+Name()
+
+string {
+ return key.name
+}
+
+func (key *KVStoreKey)
+
+String()
+
+string {
+ return fmt.Sprintf("KVStoreKey{%p, %s
+}", key, key.name)
+}
+
+// TransientStoreKey is used for indexing transient stores in a MultiStore
+type TransientStoreKey struct {
+ name string
+}
+
+// Constructs new TransientStoreKey
+// Must return a pointer according to the ocap principle
+func NewTransientStoreKey(name string) *TransientStoreKey {
+ return &TransientStoreKey{
+ name: name,
+}
+}
+
+// Implements StoreKey
+func (key *TransientStoreKey)
+
+Name()
+
+string {
+ return key.name
+}
+
+// Implements StoreKey
+func (key *TransientStoreKey)
+
+String()
+
+string {
+ return fmt.Sprintf("TransientStoreKey{%p, %s
+}", key, key.name)
+}
+
+// MemoryStoreKey defines a typed key to be used with an in-memory KVStore.
+type MemoryStoreKey struct {
+ name string
+}
+
+func NewMemoryStoreKey(name string) *MemoryStoreKey {
+ return &MemoryStoreKey{
+ name: name
+}
+}
+
+// Name returns the name of the MemoryStoreKey.
+func (key *MemoryStoreKey)
+
+Name()
+
+string {
+ return key.name
+}
+
+// String returns a stringified representation of the MemoryStoreKey.
+func (key *MemoryStoreKey)
+
+String()
+
+string {
+ return fmt.Sprintf("MemoryStoreKey{%p, %s
+}", key, key.name)
+}
+
+//----------------------------------------
+
+// TraceContext contains TraceKVStore context data. It will be written with
+// every trace operation.
+type TraceContext map[string]interface{
+}
+
+// Clone clones tc into another instance of TraceContext.
+func (tc TraceContext)
+
+Clone()
+
+TraceContext {
+ ret := TraceContext{
+}
+
+maps.Copy(ret, tc)
+
+return ret
+}
+
+// Merge merges value of newTc into tc.
+func (tc TraceContext)
+
+Merge(newTc TraceContext)
+
+TraceContext {
+ if tc == nil {
+ tc = TraceContext{
+}
+
+}
+
+maps.Copy(tc, newTc)
+
+return tc
+}
+
+// MultiStorePersistentCache defines an interface which provides inter-block
+// (persistent)
+
+caching capabilities for multiple CommitKVStores based on StoreKeys.
+type MultiStorePersistentCache interface {
+ // Wrap and return the provided CommitKVStore with an inter-block (persistent)
+ // cache.
+ GetStoreCache(key StoreKey, store CommitKVStore)
+
+CommitKVStore
+
+ // Return the underlying CommitKVStore for a StoreKey.
+ Unwrap(key StoreKey)
+
+CommitKVStore
+
+ // Reset the entire set of internal caches.
+ Reset()
+}
+
+// StoreWithInitialVersion is a store that can have an arbitrary initial
+// version.
+type StoreWithInitialVersion interface {
+ // SetInitialVersion sets the initial version of the IAVL tree. It is used when
+ // starting a new chain at an arbitrary height.
+ SetInitialVersion(version int64)
+}
+
+// NewTransientStoreKeys constructs a new map of TransientStoreKey's
+// Must return pointers according to the ocap principle
+// The function will panic if there is a potential conflict in names
+// see `assertNoCommonPrefix` function for more details.
+func NewTransientStoreKeys(names ...string)
+
+map[string]*TransientStoreKey {
+ assertNoCommonPrefix(names)
+ keys := make(map[string]*TransientStoreKey)
+ for _, n := range names {
+ keys[n] = NewTransientStoreKey(n)
+}
+
+return keys
+}
+
+// NewMemoryStoreKeys constructs a new map matching store key names to their
+// respective MemoryStoreKey references.
+// The function will panic if there is a potential conflict in names (see `assertNoPrefix`
+// function for more details).
+func NewMemoryStoreKeys(names ...string)
+
+map[string]*MemoryStoreKey {
+ assertNoCommonPrefix(names)
+ keys := make(map[string]*MemoryStoreKey)
+ for _, n := range names {
+ keys[n] = NewMemoryStoreKey(n)
+}
+
+return keys
+}
+```
+
+The `GetStoreType` is a simple method that returns the type of store, whereas a `CacheWrapper` is a simple interface that implements store read caching and write branching through `Write` method:
+
+```go expandable
+package types
+
+import (
+
+ "fmt"
+ "io"
+ "maps"
+ "slices"
+ "github.com/cometbft/cometbft/proto/tendermint/crypto"
+ dbm "github.com/cosmos/cosmos-db"
+ "cosmossdk.io/store/metrics"
+ pruningtypes "cosmossdk.io/store/pruning/types"
+ snapshottypes "cosmossdk.io/store/snapshots/types"
+)
+
+type Store interface {
+ GetStoreType()
+
+StoreType
+ CacheWrapper
+}
+
+// something that can persist to disk
+type Committer interface {
+ Commit()
+
+CommitID
+ LastCommitID()
+
+CommitID
+
+ // WorkingHash returns the hash of the KVStore's state before commit.
+ WorkingHash() []byte
+
+ SetPruning(pruningtypes.PruningOptions)
+
+GetPruning()
+
+pruningtypes.PruningOptions
+}
+
+// Stores of MultiStore must implement CommitStore.
+type CommitStore interface {
+ Committer
+ Store
+}
+
+// Queryable allows a Store to expose internal state to the abci.Query
+// interface. Multistore can route requests to the proper Store.
+//
+// This is an optional, but useful extension to any CommitStore
+type Queryable interface {
+ Query(*RequestQuery) (*ResponseQuery, error)
+}
+
+type RequestQuery struct {
+ Data []byte
+ Path string
+ Height int64
+ Prove bool
+}
+
+type ResponseQuery struct {
+ Code uint32
+ Log string
+ Info string
+ Index int64
+ Key []byte
+ Value []byte
+ ProofOps *crypto.ProofOps
+ Height int64
+ Codespace string
+}
+
+//----------------------------------------
+// MultiStore
+
+// StoreUpgrades defines a series of transformations to apply the multistore db upon load
+type StoreUpgrades struct {
+ Added []string `json:"added"`
+ Renamed []StoreRename `json:"renamed"`
+ Deleted []string `json:"deleted"`
+}
+
+// StoreRename defines a name change of a sub-store.
+// All data previously under a PrefixStore with OldKey will be copied
+// to a PrefixStore with NewKey, then deleted from OldKey store.
+type StoreRename struct {
+ OldKey string `json:"old_key"`
+ NewKey string `json:"new_key"`
+}
+
+// IsAdded returns true if the given key should be added
+func (s *StoreUpgrades)
+
+IsAdded(key string)
+
+bool {
+ if s == nil {
+ return false
+}
+
+return slices.Contains(s.Added, key)
+}
+
+// IsDeleted returns true if the given key should be deleted
+func (s *StoreUpgrades)
+
+IsDeleted(key string)
+
+bool {
+ if s == nil {
+ return false
+}
+
+return slices.Contains(s.Deleted, key)
+}
+
+// RenamedFrom returns the oldKey if it was renamed
+// Returns "" if it was not renamed
+func (s *StoreUpgrades)
+
+RenamedFrom(key string)
+
+string {
+ if s == nil {
+ return ""
+}
+ for _, re := range s.Renamed {
+ if re.NewKey == key {
+ return re.OldKey
+}
+
+}
+
+return ""
+}
+
+type MultiStore interface {
+ Store
+
+ // Branches MultiStore into a cached storage object.
+ // NOTE: Caller should probably not call .Write()
+
+on each, but
+ // call CacheMultiStore.Write().
+ CacheMultiStore()
+
+CacheMultiStore
+
+ // CacheMultiStoreWithVersion branches the underlying MultiStore where
+ // each stored is loaded at a specific version (height).
+ CacheMultiStoreWithVersion(version int64) (CacheMultiStore, error)
+
+ // Convenience for fetching substores.
+ // If the store does not exist, panics.
+ GetStore(StoreKey)
+
+Store
+ GetKVStore(StoreKey)
+
+KVStore
+
+ // TracingEnabled returns if tracing is enabled for the MultiStore.
+ TracingEnabled()
+
+bool
+
+ // SetTracer sets the tracer for the MultiStore that the underlying
+ // stores will utilize to trace operations. The modified MultiStore is
+ // returned.
+ SetTracer(w io.Writer)
+
+MultiStore
+
+ // SetTracingContext sets the tracing context for a MultiStore. It is
+ // implied that the caller should update the context when necessary between
+ // tracing operations. The modified MultiStore is returned.
+ SetTracingContext(TraceContext)
+
+MultiStore
+
+ // LatestVersion returns the latest version in the store
+ LatestVersion()
+
+int64
+}
+
+// From MultiStore.CacheMultiStore()....
+type CacheMultiStore interface {
+ MultiStore
+ Write() // Writes operations to underlying KVStore
+}
+
+// CommitMultiStore is an interface for a MultiStore without cache capabilities.
+type CommitMultiStore interface {
+ Committer
+ MultiStore
+ snapshottypes.Snapshotter
+
+ // Mount a store of type using the given db.
+ // If db == nil, the new store will use the CommitMultiStore db.
+ MountStoreWithDB(key StoreKey, typ StoreType, db dbm.DB)
+
+ // Panics on a nil key.
+ GetCommitStore(key StoreKey)
+
+CommitStore
+
+ // Panics on a nil key.
+ GetCommitKVStore(key StoreKey)
+
+CommitKVStore
+
+ // Load the latest persisted version. Called once after all calls to
+ // Mount*Store()
+
+are complete.
+ LoadLatestVersion()
+
+error
+
+ // LoadLatestVersionAndUpgrade will load the latest version, but also
+ // rename/delete/create sub-store keys, before registering all the keys
+ // in order to handle breaking formats in migrations
+ LoadLatestVersionAndUpgrade(upgrades *StoreUpgrades)
+
+error
+
+ // LoadVersionAndUpgrade will load the named version, but also
+ // rename/delete/create sub-store keys, before registering all the keys
+ // in order to handle breaking formats in migrations
+ LoadVersionAndUpgrade(ver int64, upgrades *StoreUpgrades)
+
+error
+
+ // Load a specific persisted version. When you load an old version, or when
+ // the last commit attempt didn't complete, the next commit after loading
+ // must be idempotent (return the same commit id). Otherwise the behavior is
+ // undefined.
+ LoadVersion(ver int64)
+
+error
+
+ // Set an inter-block (persistent)
+
+cache that maintains a mapping from
+ // StoreKeys to CommitKVStores.
+ SetInterBlockCache(MultiStorePersistentCache)
+
+ // SetInitialVersion sets the initial version of the IAVL tree. It is used when
+ // starting a new chain at an arbitrary height.
+ SetInitialVersion(version int64)
+
+error
+
+ // SetIAVLCacheSize sets the cache size of the IAVL tree.
+ SetIAVLCacheSize(size int)
+
+ // SetIAVLDisableFastNode enables/disables fastnode feature on iavl.
+ SetIAVLDisableFastNode(disable bool)
+
+ // SetIAVLSyncPruning set sync/async pruning on iavl.
+ // It is not recommended to use this option.
+ // It is here to enable the prune command to force this to true, allowing the command to wait
+ // for the pruning to finish before returning.
+ SetIAVLSyncPruning(sync bool)
+
+ // RollbackToVersion rollback the db to specific version(height).
+ RollbackToVersion(version int64)
+
+error
+
+ // ListeningEnabled returns if listening is enabled for the KVStore belonging the provided StoreKey
+ ListeningEnabled(key StoreKey)
+
+bool
+
+ // AddListeners adds a listener for the KVStore belonging to the provided StoreKey
+ AddListeners(keys []StoreKey)
+
+ // PopStateCache returns the accumulated state change messages from the CommitMultiStore
+ PopStateCache() []*StoreKVPair
+
+ // SetMetrics sets the metrics for the KVStore
+ SetMetrics(metrics metrics.StoreMetrics)
+}
+
+//---------subsp-------------------------------
+// KVStore
+
+// BasicKVStore is a simple interface to get/set data
+type BasicKVStore interface {
+ // Get returns nil if key doesn't exist. Panics on nil key.
+ Get(key []byte) []byte
+
+ // Has checks if a key exists. Panics on nil key.
+ Has(key []byte)
+
+bool
+
+ // Set sets the key. Panics on nil key or value.
+ Set(key, value []byte)
+
+ // Delete deletes the key. Panics on nil key.
+ Delete(key []byte)
+}
+
+// KVStore additionally provides iteration and deletion
+type KVStore interface {
+ Store
+ BasicKVStore
+
+ // Iterator over a domain of keys in ascending order. End is exclusive.
+ // Start must be less than end, or the Iterator is invalid.
+ // Iterator must be closed by caller.
+ // To iterate over entire domain, use store.Iterator(nil, nil)
+ // CONTRACT: No writes may happen within a domain while an iterator exists over it.
+ // Exceptionally allowed for cachekv.Store, safe to write in the modules.
+ Iterator(start, end []byte)
+
+Iterator
+
+ // Iterator over a domain of keys in descending order. End is exclusive.
+ // Start must be less than end, or the Iterator is invalid.
+ // Iterator must be closed by caller.
+ // CONTRACT: No writes may happen within a domain while an iterator exists over it.
+ // Exceptionally allowed for cachekv.Store, safe to write in the modules.
+ ReverseIterator(start, end []byte)
+
+Iterator
+}
+
+// Iterator is an alias db's Iterator for convenience.
+type Iterator = dbm.Iterator
+
+// CacheKVStore branches a KVStore and provides read cache functionality.
+// After calling .Write()
+
+on the CacheKVStore, all previously created
+// CacheKVStores on the object expire.
+type CacheKVStore interface {
+ KVStore
+
+ // Writes operations to underlying KVStore
+ Write()
+}
+
+// CommitKVStore is an interface for MultiStore.
+type CommitKVStore interface {
+ Committer
+ KVStore
+}
+
+//----------------------------------------
+// CacheWrap
+
+// CacheWrap is the most appropriate interface for store ephemeral branching and cache.
+// For example, IAVLStore.CacheWrap()
+
+returns a CacheKVStore. CacheWrap should not return
+// a Committer, since Commit ephemeral store make no sense. It can return KVStore,
+// HeapStore, SpaceStore, etc.
+type CacheWrap interface {
+ // Write syncs with the underlying store.
+ Write()
+
+ // CacheWrap recursively wraps again.
+ CacheWrap()
+
+CacheWrap
+
+ // CacheWrapWithTrace recursively wraps again with tracing enabled.
+ CacheWrapWithTrace(w io.Writer, tc TraceContext)
+
+CacheWrap
+}
+
+type CacheWrapper interface {
+ // CacheWrap branches a store.
+ CacheWrap()
+
+CacheWrap
+
+ // CacheWrapWithTrace branches a store with tracing enabled.
+ CacheWrapWithTrace(w io.Writer, tc TraceContext)
+
+CacheWrap
+}
+
+func (cid CommitID)
+
+IsZero()
+
+bool {
+ return cid.Version == 0 && len(cid.Hash) == 0
+}
+
+func (cid CommitID)
+
+String()
+
+string {
+ return fmt.Sprintf("CommitID{%v:%X
+}", cid.Hash, cid.Version)
+}
+
+//----------------------------------------
+// Store types
+
+// kind of store
+type StoreType int
+
+const (
+ StoreTypeMulti StoreType = iota
+ StoreTypeDB
+ StoreTypeIAVL
+ StoreTypeTransient
+ StoreTypeMemory
+ StoreTypeSMT
+ StoreTypePersistent
+)
+
+func (st StoreType)
+
+String()
+
+string {
+ switch st {
+ case StoreTypeMulti:
+ return "StoreTypeMulti"
+ case StoreTypeDB:
+ return "StoreTypeDB"
+ case StoreTypeIAVL:
+ return "StoreTypeIAVL"
+ case StoreTypeTransient:
+ return "StoreTypeTransient"
+ case StoreTypeMemory:
+ return "StoreTypeMemory"
+ case StoreTypeSMT:
+ return "StoreTypeSMT"
+ case StoreTypePersistent:
+ return "StoreTypePersistent"
+}
+
+return "unknown store type"
+}
+
+//----------------------------------------
+// Keys for accessing substores
+
+// StoreKey is a key used to index stores in a MultiStore.
+type StoreKey interface {
+ Name()
+
+string
+ String()
+
+string
+}
+
+// CapabilityKey represent the Cosmos SDK keys for object-capability
+// generation in the IBC protocol as defined in https://github.com/cosmos/ibc/tree/master/spec/core/ics-005-port-allocation#data-structures
+type CapabilityKey StoreKey
+
+// KVStoreKey is used for accessing substores.
+// Only the pointer value should ever be used - it functions as a capabilities key.
+type KVStoreKey struct {
+ name string
+}
+
+// NewKVStoreKey returns a new pointer to a KVStoreKey.
+// Use a pointer so keys don't collide.
+func NewKVStoreKey(name string) *KVStoreKey {
+ if name == "" {
+ panic("empty key name not allowed")
+}
+
+return &KVStoreKey{
+ name: name,
+}
+}
+
+// NewKVStoreKeys returns a map of new pointers to KVStoreKey's.
+// The function will panic if there is a potential conflict in names (see `assertNoPrefix`
+// function for more details).
+func NewKVStoreKeys(names ...string)
+
+map[string]*KVStoreKey {
+ assertNoCommonPrefix(names)
+ keys := make(map[string]*KVStoreKey, len(names))
+ for _, n := range names {
+ keys[n] = NewKVStoreKey(n)
+}
+
+return keys
+}
+
+func (key *KVStoreKey)
+
+Name()
+
+string {
+ return key.name
+}
+
+func (key *KVStoreKey)
+
+String()
+
+string {
+ return fmt.Sprintf("KVStoreKey{%p, %s
+}", key, key.name)
+}
+
+// TransientStoreKey is used for indexing transient stores in a MultiStore
+type TransientStoreKey struct {
+ name string
+}
+
+// Constructs new TransientStoreKey
+// Must return a pointer according to the ocap principle
+func NewTransientStoreKey(name string) *TransientStoreKey {
+ return &TransientStoreKey{
+ name: name,
+}
+}
+
+// Implements StoreKey
+func (key *TransientStoreKey)
+
+Name()
+
+string {
+ return key.name
+}
+
+// Implements StoreKey
+func (key *TransientStoreKey)
+
+String()
+
+string {
+ return fmt.Sprintf("TransientStoreKey{%p, %s
+}", key, key.name)
+}
+
+// MemoryStoreKey defines a typed key to be used with an in-memory KVStore.
+type MemoryStoreKey struct {
+ name string
+}
+
+func NewMemoryStoreKey(name string) *MemoryStoreKey {
+ return &MemoryStoreKey{
+ name: name
+}
+}
+
+// Name returns the name of the MemoryStoreKey.
+func (key *MemoryStoreKey)
+
+Name()
+
+string {
+ return key.name
+}
+
+// String returns a stringified representation of the MemoryStoreKey.
+func (key *MemoryStoreKey)
+
+String()
+
+string {
+ return fmt.Sprintf("MemoryStoreKey{%p, %s
+}", key, key.name)
+}
+
+//----------------------------------------
+
+// TraceContext contains TraceKVStore context data. It will be written with
+// every trace operation.
+type TraceContext map[string]interface{
+}
+
+// Clone clones tc into another instance of TraceContext.
+func (tc TraceContext)
+
+Clone()
+
+TraceContext {
+ ret := TraceContext{
+}
+
+maps.Copy(ret, tc)
+
+return ret
+}
+
+// Merge merges value of newTc into tc.
+func (tc TraceContext)
+
+Merge(newTc TraceContext)
+
+TraceContext {
+ if tc == nil {
+ tc = TraceContext{
+}
+
+}
+
+maps.Copy(tc, newTc)
+
+return tc
+}
+
+// MultiStorePersistentCache defines an interface which provides inter-block
+// (persistent)
+
+caching capabilities for multiple CommitKVStores based on StoreKeys.
+type MultiStorePersistentCache interface {
+ // Wrap and return the provided CommitKVStore with an inter-block (persistent)
+ // cache.
+ GetStoreCache(key StoreKey, store CommitKVStore)
+
+CommitKVStore
+
+ // Return the underlying CommitKVStore for a StoreKey.
+ Unwrap(key StoreKey)
+
+CommitKVStore
+
+ // Reset the entire set of internal caches.
+ Reset()
+}
+
+// StoreWithInitialVersion is a store that can have an arbitrary initial
+// version.
+type StoreWithInitialVersion interface {
+ // SetInitialVersion sets the initial version of the IAVL tree. It is used when
+ // starting a new chain at an arbitrary height.
+ SetInitialVersion(version int64)
+}
+
+// NewTransientStoreKeys constructs a new map of TransientStoreKey's
+// Must return pointers according to the ocap principle
+// The function will panic if there is a potential conflict in names
+// see `assertNoCommonPrefix` function for more details.
+func NewTransientStoreKeys(names ...string)
+
+map[string]*TransientStoreKey {
+ assertNoCommonPrefix(names)
+ keys := make(map[string]*TransientStoreKey)
+ for _, n := range names {
+ keys[n] = NewTransientStoreKey(n)
+}
+
+return keys
+}
+
+// NewMemoryStoreKeys constructs a new map matching store key names to their
+// respective MemoryStoreKey references.
+// The function will panic if there is a potential conflict in names (see `assertNoPrefix`
+// function for more details).
+func NewMemoryStoreKeys(names ...string)
+
+map[string]*MemoryStoreKey {
+ assertNoCommonPrefix(names)
+ keys := make(map[string]*MemoryStoreKey)
+ for _, n := range names {
+ keys[n] = NewMemoryStoreKey(n)
+}
+
+return keys
+}
+```
+
+ Branching and caching are used ubiquitously in the Cosmos SDK and are required to be implemented on every store type. A storage branch creates an isolated, ephemeral branch of a store that can be passed around and updated without affecting the main underlying store. This is used to trigger temporary state-transitions that may be reverted later should an error occur. Read more about it in [context](/sdk/v0.53/learn/advanced/context#Store-branching).
+
+### Commit Store
+
+A commit store is a store that can commit changes made to the underlying tree or db. The Cosmos SDK differentiates simple stores from commit stores by extending the basic store interfaces with a `Committer`:
+
+```go expandable
+package types
+
+import (
+
+ "fmt"
+ "io"
+ "maps"
+ "slices"
+ "github.com/cometbft/cometbft/proto/tendermint/crypto"
+ dbm "github.com/cosmos/cosmos-db"
+ "cosmossdk.io/store/metrics"
+ pruningtypes "cosmossdk.io/store/pruning/types"
+ snapshottypes "cosmossdk.io/store/snapshots/types"
+)
+
+type Store interface {
+ GetStoreType()
+
+StoreType
+ CacheWrapper
+}
+
+// something that can persist to disk
+type Committer interface {
+ Commit()
+
+CommitID
+ LastCommitID()
+
+CommitID
+
+ // WorkingHash returns the hash of the KVStore's state before commit.
+ WorkingHash() []byte
+
+ SetPruning(pruningtypes.PruningOptions)
+
+GetPruning()
+
+pruningtypes.PruningOptions
+}
+
+// Stores of MultiStore must implement CommitStore.
+type CommitStore interface {
+ Committer
+ Store
+}
+
+// Queryable allows a Store to expose internal state to the abci.Query
+// interface. Multistore can route requests to the proper Store.
+//
+// This is an optional, but useful extension to any CommitStore
+type Queryable interface {
+ Query(*RequestQuery) (*ResponseQuery, error)
+}
+
+type RequestQuery struct {
+ Data []byte
+ Path string
+ Height int64
+ Prove bool
+}
+
+type ResponseQuery struct {
+ Code uint32
+ Log string
+ Info string
+ Index int64
+ Key []byte
+ Value []byte
+ ProofOps *crypto.ProofOps
+ Height int64
+ Codespace string
+}
+
+//----------------------------------------
+// MultiStore
+
+// StoreUpgrades defines a series of transformations to apply the multistore db upon load
+type StoreUpgrades struct {
+ Added []string `json:"added"`
+ Renamed []StoreRename `json:"renamed"`
+ Deleted []string `json:"deleted"`
+}
+
+// StoreRename defines a name change of a sub-store.
+// All data previously under a PrefixStore with OldKey will be copied
+// to a PrefixStore with NewKey, then deleted from OldKey store.
+type StoreRename struct {
+ OldKey string `json:"old_key"`
+ NewKey string `json:"new_key"`
+}
+
+// IsAdded returns true if the given key should be added
+func (s *StoreUpgrades)
+
+IsAdded(key string)
+
+bool {
+ if s == nil {
+ return false
+}
+
+return slices.Contains(s.Added, key)
+}
+
+// IsDeleted returns true if the given key should be deleted
+func (s *StoreUpgrades)
+
+IsDeleted(key string)
+
+bool {
+ if s == nil {
+ return false
+}
+
+return slices.Contains(s.Deleted, key)
+}
+
+// RenamedFrom returns the oldKey if it was renamed
+// Returns "" if it was not renamed
+func (s *StoreUpgrades)
+
+RenamedFrom(key string)
+
+string {
+ if s == nil {
+ return ""
+}
+ for _, re := range s.Renamed {
+ if re.NewKey == key {
+ return re.OldKey
+}
+
+}
+
+return ""
+}
+
+type MultiStore interface {
+ Store
+
+ // Branches MultiStore into a cached storage object.
+ // NOTE: Caller should probably not call .Write()
+
+on each, but
+ // call CacheMultiStore.Write().
+ CacheMultiStore()
+
+CacheMultiStore
+
+ // CacheMultiStoreWithVersion branches the underlying MultiStore where
+ // each stored is loaded at a specific version (height).
+ CacheMultiStoreWithVersion(version int64) (CacheMultiStore, error)
+
+ // Convenience for fetching substores.
+ // If the store does not exist, panics.
+ GetStore(StoreKey)
+
+Store
+ GetKVStore(StoreKey)
+
+KVStore
+
+ // TracingEnabled returns if tracing is enabled for the MultiStore.
+ TracingEnabled()
+
+bool
+
+ // SetTracer sets the tracer for the MultiStore that the underlying
+ // stores will utilize to trace operations. The modified MultiStore is
+ // returned.
+ SetTracer(w io.Writer)
+
+MultiStore
+
+ // SetTracingContext sets the tracing context for a MultiStore. It is
+ // implied that the caller should update the context when necessary between
+ // tracing operations. The modified MultiStore is returned.
+ SetTracingContext(TraceContext)
+
+MultiStore
+
+ // LatestVersion returns the latest version in the store
+ LatestVersion()
+
+int64
+}
+
+// From MultiStore.CacheMultiStore()....
+type CacheMultiStore interface {
+ MultiStore
+ Write() // Writes operations to underlying KVStore
+}
+
+// CommitMultiStore is an interface for a MultiStore without cache capabilities.
+type CommitMultiStore interface {
+ Committer
+ MultiStore
+ snapshottypes.Snapshotter
+
+ // Mount a store of type using the given db.
+ // If db == nil, the new store will use the CommitMultiStore db.
+ MountStoreWithDB(key StoreKey, typ StoreType, db dbm.DB)
+
+ // Panics on a nil key.
+ GetCommitStore(key StoreKey)
+
+CommitStore
+
+ // Panics on a nil key.
+ GetCommitKVStore(key StoreKey)
+
+CommitKVStore
+
+ // Load the latest persisted version. Called once after all calls to
+ // Mount*Store()
+
+are complete.
+ LoadLatestVersion()
+
+error
+
+ // LoadLatestVersionAndUpgrade will load the latest version, but also
+ // rename/delete/create sub-store keys, before registering all the keys
+ // in order to handle breaking formats in migrations
+ LoadLatestVersionAndUpgrade(upgrades *StoreUpgrades)
+
+error
+
+ // LoadVersionAndUpgrade will load the named version, but also
+ // rename/delete/create sub-store keys, before registering all the keys
+ // in order to handle breaking formats in migrations
+ LoadVersionAndUpgrade(ver int64, upgrades *StoreUpgrades)
+
+error
+
+ // Load a specific persisted version. When you load an old version, or when
+ // the last commit attempt didn't complete, the next commit after loading
+ // must be idempotent (return the same commit id). Otherwise the behavior is
+ // undefined.
+ LoadVersion(ver int64)
+
+error
+
+ // Set an inter-block (persistent)
+
+cache that maintains a mapping from
+ // StoreKeys to CommitKVStores.
+ SetInterBlockCache(MultiStorePersistentCache)
+
+ // SetInitialVersion sets the initial version of the IAVL tree. It is used when
+ // starting a new chain at an arbitrary height.
+ SetInitialVersion(version int64)
+
+error
+
+ // SetIAVLCacheSize sets the cache size of the IAVL tree.
+ SetIAVLCacheSize(size int)
+
+ // SetIAVLDisableFastNode enables/disables fastnode feature on iavl.
+ SetIAVLDisableFastNode(disable bool)
+
+ // SetIAVLSyncPruning set sync/async pruning on iavl.
+ // It is not recommended to use this option.
+ // It is here to enable the prune command to force this to true, allowing the command to wait
+ // for the pruning to finish before returning.
+ SetIAVLSyncPruning(sync bool)
+
+ // RollbackToVersion rollback the db to specific version(height).
+ RollbackToVersion(version int64)
+
+error
+
+ // ListeningEnabled returns if listening is enabled for the KVStore belonging the provided StoreKey
+ ListeningEnabled(key StoreKey)
+
+bool
+
+ // AddListeners adds a listener for the KVStore belonging to the provided StoreKey
+ AddListeners(keys []StoreKey)
+
+ // PopStateCache returns the accumulated state change messages from the CommitMultiStore
+ PopStateCache() []*StoreKVPair
+
+ // SetMetrics sets the metrics for the KVStore
+ SetMetrics(metrics metrics.StoreMetrics)
+}
+
+//---------subsp-------------------------------
+// KVStore
+
+// BasicKVStore is a simple interface to get/set data
+type BasicKVStore interface {
+ // Get returns nil if key doesn't exist. Panics on nil key.
+ Get(key []byte) []byte
+
+ // Has checks if a key exists. Panics on nil key.
+ Has(key []byte)
+
+bool
+
+ // Set sets the key. Panics on nil key or value.
+ Set(key, value []byte)
+
+ // Delete deletes the key. Panics on nil key.
+ Delete(key []byte)
+}
+
+// KVStore additionally provides iteration and deletion
+type KVStore interface {
+ Store
+ BasicKVStore
+
+ // Iterator over a domain of keys in ascending order. End is exclusive.
+ // Start must be less than end, or the Iterator is invalid.
+ // Iterator must be closed by caller.
+ // To iterate over entire domain, use store.Iterator(nil, nil)
+ // CONTRACT: No writes may happen within a domain while an iterator exists over it.
+ // Exceptionally allowed for cachekv.Store, safe to write in the modules.
+ Iterator(start, end []byte)
+
+Iterator
+
+ // Iterator over a domain of keys in descending order. End is exclusive.
+ // Start must be less than end, or the Iterator is invalid.
+ // Iterator must be closed by caller.
+ // CONTRACT: No writes may happen within a domain while an iterator exists over it.
+ // Exceptionally allowed for cachekv.Store, safe to write in the modules.
+ ReverseIterator(start, end []byte)
+
+Iterator
+}
+
+// Iterator is an alias db's Iterator for convenience.
+type Iterator = dbm.Iterator
+
+// CacheKVStore branches a KVStore and provides read cache functionality.
+// After calling .Write()
+
+on the CacheKVStore, all previously created
+// CacheKVStores on the object expire.
+type CacheKVStore interface {
+ KVStore
+
+ // Writes operations to underlying KVStore
+ Write()
+}
+
+// CommitKVStore is an interface for MultiStore.
+type CommitKVStore interface {
+ Committer
+ KVStore
+}
+
+//----------------------------------------
+// CacheWrap
+
+// CacheWrap is the most appropriate interface for store ephemeral branching and cache.
+// For example, IAVLStore.CacheWrap()
+
+returns a CacheKVStore. CacheWrap should not return
+// a Committer, since Commit ephemeral store make no sense. It can return KVStore,
+// HeapStore, SpaceStore, etc.
+type CacheWrap interface {
+ // Write syncs with the underlying store.
+ Write()
+
+ // CacheWrap recursively wraps again.
+ CacheWrap()
+
+CacheWrap
+
+ // CacheWrapWithTrace recursively wraps again with tracing enabled.
+ CacheWrapWithTrace(w io.Writer, tc TraceContext)
+
+CacheWrap
+}
+
+type CacheWrapper interface {
+ // CacheWrap branches a store.
+ CacheWrap()
+
+CacheWrap
+
+ // CacheWrapWithTrace branches a store with tracing enabled.
+ CacheWrapWithTrace(w io.Writer, tc TraceContext)
+
+CacheWrap
+}
+
+func (cid CommitID)
+
+IsZero()
+
+bool {
+ return cid.Version == 0 && len(cid.Hash) == 0
+}
+
+func (cid CommitID)
+
+String()
+
+string {
+ return fmt.Sprintf("CommitID{%v:%X
+}", cid.Hash, cid.Version)
+}
+
+//----------------------------------------
+// Store types
+
+// kind of store
+type StoreType int
+
+const (
+ StoreTypeMulti StoreType = iota
+ StoreTypeDB
+ StoreTypeIAVL
+ StoreTypeTransient
+ StoreTypeMemory
+ StoreTypeSMT
+ StoreTypePersistent
+)
+
+func (st StoreType)
+
+String()
+
+string {
+ switch st {
+ case StoreTypeMulti:
+ return "StoreTypeMulti"
+ case StoreTypeDB:
+ return "StoreTypeDB"
+ case StoreTypeIAVL:
+ return "StoreTypeIAVL"
+ case StoreTypeTransient:
+ return "StoreTypeTransient"
+ case StoreTypeMemory:
+ return "StoreTypeMemory"
+ case StoreTypeSMT:
+ return "StoreTypeSMT"
+ case StoreTypePersistent:
+ return "StoreTypePersistent"
+}
+
+return "unknown store type"
+}
+
+//----------------------------------------
+// Keys for accessing substores
+
+// StoreKey is a key used to index stores in a MultiStore.
+type StoreKey interface {
+ Name()
+
+string
+ String()
+
+string
+}
+
+// CapabilityKey represent the Cosmos SDK keys for object-capability
+// generation in the IBC protocol as defined in https://github.com/cosmos/ibc/tree/master/spec/core/ics-005-port-allocation#data-structures
+type CapabilityKey StoreKey
+
+// KVStoreKey is used for accessing substores.
+// Only the pointer value should ever be used - it functions as a capabilities key.
+type KVStoreKey struct {
+ name string
+}
+
+// NewKVStoreKey returns a new pointer to a KVStoreKey.
+// Use a pointer so keys don't collide.
+func NewKVStoreKey(name string) *KVStoreKey {
+ if name == "" {
+ panic("empty key name not allowed")
+}
+
+return &KVStoreKey{
+ name: name,
+}
+}
+
+// NewKVStoreKeys returns a map of new pointers to KVStoreKey's.
+// The function will panic if there is a potential conflict in names (see `assertNoPrefix`
+// function for more details).
+func NewKVStoreKeys(names ...string)
+
+map[string]*KVStoreKey {
+ assertNoCommonPrefix(names)
+ keys := make(map[string]*KVStoreKey, len(names))
+ for _, n := range names {
+ keys[n] = NewKVStoreKey(n)
+}
+
+return keys
+}
+
+func (key *KVStoreKey)
+
+Name()
+
+string {
+ return key.name
+}
+
+func (key *KVStoreKey)
+
+String()
+
+string {
+ return fmt.Sprintf("KVStoreKey{%p, %s
+}", key, key.name)
+}
+
+// TransientStoreKey is used for indexing transient stores in a MultiStore
+type TransientStoreKey struct {
+ name string
+}
+
+// Constructs new TransientStoreKey
+// Must return a pointer according to the ocap principle
+func NewTransientStoreKey(name string) *TransientStoreKey {
+ return &TransientStoreKey{
+ name: name,
+}
+}
+
+// Implements StoreKey
+func (key *TransientStoreKey)
+
+Name()
+
+string {
+ return key.name
+}
+
+// Implements StoreKey
+func (key *TransientStoreKey)
+
+String()
+
+string {
+ return fmt.Sprintf("TransientStoreKey{%p, %s
+}", key, key.name)
+}
+
+// MemoryStoreKey defines a typed key to be used with an in-memory KVStore.
+type MemoryStoreKey struct {
+ name string
+}
+
+func NewMemoryStoreKey(name string) *MemoryStoreKey {
+ return &MemoryStoreKey{
+ name: name
+}
+}
+
+// Name returns the name of the MemoryStoreKey.
+func (key *MemoryStoreKey)
+
+Name()
+
+string {
+ return key.name
+}
+
+// String returns a stringified representation of the MemoryStoreKey.
+func (key *MemoryStoreKey)
+
+String()
+
+string {
+ return fmt.Sprintf("MemoryStoreKey{%p, %s
+}", key, key.name)
+}
+
+//----------------------------------------
+
+// TraceContext contains TraceKVStore context data. It will be written with
+// every trace operation.
+type TraceContext map[string]interface{
+}
+
+// Clone clones tc into another instance of TraceContext.
+func (tc TraceContext)
+
+Clone()
+
+TraceContext {
+ ret := TraceContext{
+}
+
+maps.Copy(ret, tc)
+
+return ret
+}
+
+// Merge merges value of newTc into tc.
+func (tc TraceContext)
+
+Merge(newTc TraceContext)
+
+TraceContext {
+ if tc == nil {
+ tc = TraceContext{
+}
+
+}
+
+maps.Copy(tc, newTc)
+
+return tc
+}
+
+// MultiStorePersistentCache defines an interface which provides inter-block
+// (persistent)
+
+caching capabilities for multiple CommitKVStores based on StoreKeys.
+type MultiStorePersistentCache interface {
+ // Wrap and return the provided CommitKVStore with an inter-block (persistent)
+ // cache.
+ GetStoreCache(key StoreKey, store CommitKVStore)
+
+CommitKVStore
+
+ // Return the underlying CommitKVStore for a StoreKey.
+ Unwrap(key StoreKey)
+
+CommitKVStore
+
+ // Reset the entire set of internal caches.
+ Reset()
+}
+
+// StoreWithInitialVersion is a store that can have an arbitrary initial
+// version.
+type StoreWithInitialVersion interface {
+ // SetInitialVersion sets the initial version of the IAVL tree. It is used when
+ // starting a new chain at an arbitrary height.
+ SetInitialVersion(version int64)
+}
+
+// NewTransientStoreKeys constructs a new map of TransientStoreKey's
+// Must return pointers according to the ocap principle
+// The function will panic if there is a potential conflict in names
+// see `assertNoCommonPrefix` function for more details.
+func NewTransientStoreKeys(names ...string)
+
+map[string]*TransientStoreKey {
+ assertNoCommonPrefix(names)
+ keys := make(map[string]*TransientStoreKey)
+ for _, n := range names {
+ keys[n] = NewTransientStoreKey(n)
+}
+
+return keys
+}
+
+// NewMemoryStoreKeys constructs a new map matching store key names to their
+// respective MemoryStoreKey references.
+// The function will panic if there is a potential conflict in names (see `assertNoPrefix`
+// function for more details).
+func NewMemoryStoreKeys(names ...string)
+
+map[string]*MemoryStoreKey {
+ assertNoCommonPrefix(names)
+ keys := make(map[string]*MemoryStoreKey)
+ for _, n := range names {
+ keys[n] = NewMemoryStoreKey(n)
+}
+
+return keys
+}
+```
+
+The `Committer` is an interface that defines methods to persist changes to disk:
+
+```go expandable
+package types
+
+import (
+
+ "fmt"
+ "io"
+ "maps"
+ "slices"
+ "github.com/cometbft/cometbft/proto/tendermint/crypto"
+ dbm "github.com/cosmos/cosmos-db"
+ "cosmossdk.io/store/metrics"
+ pruningtypes "cosmossdk.io/store/pruning/types"
+ snapshottypes "cosmossdk.io/store/snapshots/types"
+)
+
+type Store interface {
+ GetStoreType()
+
+StoreType
+ CacheWrapper
+}
+
+// something that can persist to disk
+type Committer interface {
+ Commit()
+
+CommitID
+ LastCommitID()
+
+CommitID
+
+ // WorkingHash returns the hash of the KVStore's state before commit.
+ WorkingHash() []byte
+
+ SetPruning(pruningtypes.PruningOptions)
+
+GetPruning()
+
+pruningtypes.PruningOptions
+}
+
+// Stores of MultiStore must implement CommitStore.
+type CommitStore interface {
+ Committer
+ Store
+}
+
+// Queryable allows a Store to expose internal state to the abci.Query
+// interface. Multistore can route requests to the proper Store.
+//
+// This is an optional, but useful extension to any CommitStore
+type Queryable interface {
+ Query(*RequestQuery) (*ResponseQuery, error)
+}
+
+type RequestQuery struct {
+ Data []byte
+ Path string
+ Height int64
+ Prove bool
+}
+
+type ResponseQuery struct {
+ Code uint32
+ Log string
+ Info string
+ Index int64
+ Key []byte
+ Value []byte
+ ProofOps *crypto.ProofOps
+ Height int64
+ Codespace string
+}
+
+//----------------------------------------
+// MultiStore
+
+// StoreUpgrades defines a series of transformations to apply the multistore db upon load
+type StoreUpgrades struct {
+ Added []string `json:"added"`
+ Renamed []StoreRename `json:"renamed"`
+ Deleted []string `json:"deleted"`
+}
+
+// StoreRename defines a name change of a sub-store.
+// All data previously under a PrefixStore with OldKey will be copied
+// to a PrefixStore with NewKey, then deleted from OldKey store.
+type StoreRename struct {
+ OldKey string `json:"old_key"`
+ NewKey string `json:"new_key"`
+}
+
+// IsAdded returns true if the given key should be added
+func (s *StoreUpgrades)
+
+IsAdded(key string)
+
+bool {
+ if s == nil {
+ return false
+}
+
+return slices.Contains(s.Added, key)
+}
+
+// IsDeleted returns true if the given key should be deleted
+func (s *StoreUpgrades)
+
+IsDeleted(key string)
+
+bool {
+ if s == nil {
+ return false
+}
+
+return slices.Contains(s.Deleted, key)
+}
+
+// RenamedFrom returns the oldKey if it was renamed
+// Returns "" if it was not renamed
+func (s *StoreUpgrades)
+
+RenamedFrom(key string)
+
+string {
+ if s == nil {
+ return ""
+}
+ for _, re := range s.Renamed {
+ if re.NewKey == key {
+ return re.OldKey
+}
+
+}
+
+return ""
+}
+
+type MultiStore interface {
+ Store
+
+ // Branches MultiStore into a cached storage object.
+ // NOTE: Caller should probably not call .Write()
+
+on each, but
+ // call CacheMultiStore.Write().
+ CacheMultiStore()
+
+CacheMultiStore
+
+ // CacheMultiStoreWithVersion branches the underlying MultiStore where
+ // each stored is loaded at a specific version (height).
+ CacheMultiStoreWithVersion(version int64) (CacheMultiStore, error)
+
+ // Convenience for fetching substores.
+ // If the store does not exist, panics.
+ GetStore(StoreKey)
+
+Store
+ GetKVStore(StoreKey)
+
+KVStore
+
+ // TracingEnabled returns if tracing is enabled for the MultiStore.
+ TracingEnabled()
+
+bool
+
+ // SetTracer sets the tracer for the MultiStore that the underlying
+ // stores will utilize to trace operations. The modified MultiStore is
+ // returned.
+ SetTracer(w io.Writer)
+
+MultiStore
+
+ // SetTracingContext sets the tracing context for a MultiStore. It is
+ // implied that the caller should update the context when necessary between
+ // tracing operations. The modified MultiStore is returned.
+ SetTracingContext(TraceContext)
+
+MultiStore
+
+ // LatestVersion returns the latest version in the store
+ LatestVersion()
+
+int64
+}
+
+// From MultiStore.CacheMultiStore()....
+type CacheMultiStore interface {
+ MultiStore
+ Write() // Writes operations to underlying KVStore
+}
+
+// CommitMultiStore is an interface for a MultiStore without cache capabilities.
+type CommitMultiStore interface {
+ Committer
+ MultiStore
+ snapshottypes.Snapshotter
+
+ // Mount a store of type using the given db.
+ // If db == nil, the new store will use the CommitMultiStore db.
+ MountStoreWithDB(key StoreKey, typ StoreType, db dbm.DB)
+
+ // Panics on a nil key.
+ GetCommitStore(key StoreKey)
+
+CommitStore
+
+ // Panics on a nil key.
+ GetCommitKVStore(key StoreKey)
+
+CommitKVStore
+
+ // Load the latest persisted version. Called once after all calls to
+ // Mount*Store()
+
+are complete.
+ LoadLatestVersion()
+
+error
+
+ // LoadLatestVersionAndUpgrade will load the latest version, but also
+ // rename/delete/create sub-store keys, before registering all the keys
+ // in order to handle breaking formats in migrations
+ LoadLatestVersionAndUpgrade(upgrades *StoreUpgrades)
+
+error
+
+ // LoadVersionAndUpgrade will load the named version, but also
+ // rename/delete/create sub-store keys, before registering all the keys
+ // in order to handle breaking formats in migrations
+ LoadVersionAndUpgrade(ver int64, upgrades *StoreUpgrades)
+
+error
+
+ // Load a specific persisted version. When you load an old version, or when
+ // the last commit attempt didn't complete, the next commit after loading
+ // must be idempotent (return the same commit id). Otherwise the behavior is
+ // undefined.
+ LoadVersion(ver int64)
+
+error
+
+ // Set an inter-block (persistent)
+
+cache that maintains a mapping from
+ // StoreKeys to CommitKVStores.
+ SetInterBlockCache(MultiStorePersistentCache)
+
+ // SetInitialVersion sets the initial version of the IAVL tree. It is used when
+ // starting a new chain at an arbitrary height.
+ SetInitialVersion(version int64)
+
+error
+
+ // SetIAVLCacheSize sets the cache size of the IAVL tree.
+ SetIAVLCacheSize(size int)
+
+ // SetIAVLDisableFastNode enables/disables fastnode feature on iavl.
+ SetIAVLDisableFastNode(disable bool)
+
+ // SetIAVLSyncPruning set sync/async pruning on iavl.
+ // It is not recommended to use this option.
+ // It is here to enable the prune command to force this to true, allowing the command to wait
+ // for the pruning to finish before returning.
+ SetIAVLSyncPruning(sync bool)
+
+ // RollbackToVersion rollback the db to specific version(height).
+ RollbackToVersion(version int64)
+
+error
+
+ // ListeningEnabled returns if listening is enabled for the KVStore belonging the provided StoreKey
+ ListeningEnabled(key StoreKey)
+
+bool
+
+ // AddListeners adds a listener for the KVStore belonging to the provided StoreKey
+ AddListeners(keys []StoreKey)
+
+ // PopStateCache returns the accumulated state change messages from the CommitMultiStore
+ PopStateCache() []*StoreKVPair
+
+ // SetMetrics sets the metrics for the KVStore
+ SetMetrics(metrics metrics.StoreMetrics)
+}
+
+//---------subsp-------------------------------
+// KVStore
+
+// BasicKVStore is a simple interface to get/set data
+type BasicKVStore interface {
+ // Get returns nil if key doesn't exist. Panics on nil key.
+ Get(key []byte) []byte
+
+ // Has checks if a key exists. Panics on nil key.
+ Has(key []byte)
+
+bool
+
+ // Set sets the key. Panics on nil key or value.
+ Set(key, value []byte)
+
+ // Delete deletes the key. Panics on nil key.
+ Delete(key []byte)
+}
+
+// KVStore additionally provides iteration and deletion
+type KVStore interface {
+ Store
+ BasicKVStore
+
+ // Iterator over a domain of keys in ascending order. End is exclusive.
+ // Start must be less than end, or the Iterator is invalid.
+ // Iterator must be closed by caller.
+ // To iterate over entire domain, use store.Iterator(nil, nil)
+ // CONTRACT: No writes may happen within a domain while an iterator exists over it.
+ // Exceptionally allowed for cachekv.Store, safe to write in the modules.
+ Iterator(start, end []byte)
+
+Iterator
+
+ // Iterator over a domain of keys in descending order. End is exclusive.
+ // Start must be less than end, or the Iterator is invalid.
+ // Iterator must be closed by caller.
+ // CONTRACT: No writes may happen within a domain while an iterator exists over it.
+ // Exceptionally allowed for cachekv.Store, safe to write in the modules.
+ ReverseIterator(start, end []byte)
+
+Iterator
+}
+
+// Iterator is an alias db's Iterator for convenience.
+type Iterator = dbm.Iterator
+
+// CacheKVStore branches a KVStore and provides read cache functionality.
+// After calling .Write()
+
+on the CacheKVStore, all previously created
+// CacheKVStores on the object expire.
+type CacheKVStore interface {
+ KVStore
+
+ // Writes operations to underlying KVStore
+ Write()
+}
+
+// CommitKVStore is an interface for MultiStore.
+type CommitKVStore interface {
+ Committer
+ KVStore
+}
+
+//----------------------------------------
+// CacheWrap
+
+// CacheWrap is the most appropriate interface for store ephemeral branching and cache.
+// For example, IAVLStore.CacheWrap()
+
+returns a CacheKVStore. CacheWrap should not return
+// a Committer, since Commit ephemeral store make no sense. It can return KVStore,
+// HeapStore, SpaceStore, etc.
+type CacheWrap interface {
+ // Write syncs with the underlying store.
+ Write()
+
+ // CacheWrap recursively wraps again.
+ CacheWrap()
+
+CacheWrap
+
+ // CacheWrapWithTrace recursively wraps again with tracing enabled.
+ CacheWrapWithTrace(w io.Writer, tc TraceContext)
+
+CacheWrap
+}
+
+type CacheWrapper interface {
+ // CacheWrap branches a store.
+ CacheWrap()
+
+CacheWrap
+
+ // CacheWrapWithTrace branches a store with tracing enabled.
+ CacheWrapWithTrace(w io.Writer, tc TraceContext)
+
+CacheWrap
+}
+
+func (cid CommitID)
+
+IsZero()
+
+bool {
+ return cid.Version == 0 && len(cid.Hash) == 0
+}
+
+func (cid CommitID)
+
+String()
+
+string {
+ return fmt.Sprintf("CommitID{%v:%X
+}", cid.Hash, cid.Version)
+}
+
+//----------------------------------------
+// Store types
+
+// kind of store
+type StoreType int
+
+const (
+ StoreTypeMulti StoreType = iota
+ StoreTypeDB
+ StoreTypeIAVL
+ StoreTypeTransient
+ StoreTypeMemory
+ StoreTypeSMT
+ StoreTypePersistent
+)
+
+func (st StoreType)
+
+String()
+
+string {
+ switch st {
+ case StoreTypeMulti:
+ return "StoreTypeMulti"
+ case StoreTypeDB:
+ return "StoreTypeDB"
+ case StoreTypeIAVL:
+ return "StoreTypeIAVL"
+ case StoreTypeTransient:
+ return "StoreTypeTransient"
+ case StoreTypeMemory:
+ return "StoreTypeMemory"
+ case StoreTypeSMT:
+ return "StoreTypeSMT"
+ case StoreTypePersistent:
+ return "StoreTypePersistent"
+}
+
+return "unknown store type"
+}
+
+//----------------------------------------
+// Keys for accessing substores
+
+// StoreKey is a key used to index stores in a MultiStore.
+type StoreKey interface {
+ Name()
+
+string
+ String()
+
+string
+}
+
+// CapabilityKey represent the Cosmos SDK keys for object-capability
+// generation in the IBC protocol as defined in https://github.com/cosmos/ibc/tree/master/spec/core/ics-005-port-allocation#data-structures
+type CapabilityKey StoreKey
+
+// KVStoreKey is used for accessing substores.
+// Only the pointer value should ever be used - it functions as a capabilities key.
+type KVStoreKey struct {
+ name string
+}
+
+// NewKVStoreKey returns a new pointer to a KVStoreKey.
+// Use a pointer so keys don't collide.
+func NewKVStoreKey(name string) *KVStoreKey {
+ if name == "" {
+ panic("empty key name not allowed")
+}
+
+return &KVStoreKey{
+ name: name,
+}
+}
+
+// NewKVStoreKeys returns a map of new pointers to KVStoreKey's.
+// The function will panic if there is a potential conflict in names (see `assertNoPrefix`
+// function for more details).
+func NewKVStoreKeys(names ...string)
+
+map[string]*KVStoreKey {
+ assertNoCommonPrefix(names)
+ keys := make(map[string]*KVStoreKey, len(names))
+ for _, n := range names {
+ keys[n] = NewKVStoreKey(n)
+}
+
+return keys
+}
+
+func (key *KVStoreKey)
+
+Name()
+
+string {
+ return key.name
+}
+
+func (key *KVStoreKey)
+
+String()
+
+string {
+ return fmt.Sprintf("KVStoreKey{%p, %s
+}", key, key.name)
+}
+
+// TransientStoreKey is used for indexing transient stores in a MultiStore
+type TransientStoreKey struct {
+ name string
+}
+
+// Constructs new TransientStoreKey
+// Must return a pointer according to the ocap principle
+func NewTransientStoreKey(name string) *TransientStoreKey {
+ return &TransientStoreKey{
+ name: name,
+}
+}
+
+// Implements StoreKey
+func (key *TransientStoreKey)
+
+Name()
+
+string {
+ return key.name
+}
+
+// Implements StoreKey
+func (key *TransientStoreKey)
+
+String()
+
+string {
+ return fmt.Sprintf("TransientStoreKey{%p, %s
+}", key, key.name)
+}
+
+// MemoryStoreKey defines a typed key to be used with an in-memory KVStore.
+type MemoryStoreKey struct {
+ name string
+}
+
+func NewMemoryStoreKey(name string) *MemoryStoreKey {
+ return &MemoryStoreKey{
+ name: name
+}
+}
+
+// Name returns the name of the MemoryStoreKey.
+func (key *MemoryStoreKey)
+
+Name()
+
+string {
+ return key.name
+}
+
+// String returns a stringified representation of the MemoryStoreKey.
+func (key *MemoryStoreKey)
+
+String()
+
+string {
+ return fmt.Sprintf("MemoryStoreKey{%p, %s
+}", key, key.name)
+}
+
+//----------------------------------------
+
+// TraceContext contains TraceKVStore context data. It will be written with
+// every trace operation.
+type TraceContext map[string]interface{
+}
+
+// Clone clones tc into another instance of TraceContext.
+func (tc TraceContext)
+
+Clone()
+
+TraceContext {
+ ret := TraceContext{
+}
+
+maps.Copy(ret, tc)
+
+return ret
+}
+
+// Merge merges value of newTc into tc.
+func (tc TraceContext)
+
+Merge(newTc TraceContext)
+
+TraceContext {
+ if tc == nil {
+ tc = TraceContext{
+}
+
+}
+
+maps.Copy(tc, newTc)
+
+return tc
+}
+
+// MultiStorePersistentCache defines an interface which provides inter-block
+// (persistent)
+
+caching capabilities for multiple CommitKVStores based on StoreKeys.
+type MultiStorePersistentCache interface {
+ // Wrap and return the provided CommitKVStore with an inter-block (persistent)
+ // cache.
+ GetStoreCache(key StoreKey, store CommitKVStore)
+
+CommitKVStore
+
+ // Return the underlying CommitKVStore for a StoreKey.
+ Unwrap(key StoreKey)
+
+CommitKVStore
+
+ // Reset the entire set of internal caches.
+ Reset()
+}
+
+// StoreWithInitialVersion is a store that can have an arbitrary initial
+// version.
+type StoreWithInitialVersion interface {
+ // SetInitialVersion sets the initial version of the IAVL tree. It is used when
+ // starting a new chain at an arbitrary height.
+ SetInitialVersion(version int64)
+}
+
+// NewTransientStoreKeys constructs a new map of TransientStoreKey's
+// Must return pointers according to the ocap principle
+// The function will panic if there is a potential conflict in names
+// see `assertNoCommonPrefix` function for more details.
+func NewTransientStoreKeys(names ...string)
+
+map[string]*TransientStoreKey {
+ assertNoCommonPrefix(names)
+ keys := make(map[string]*TransientStoreKey)
+ for _, n := range names {
+ keys[n] = NewTransientStoreKey(n)
+}
+
+return keys
+}
+
+// NewMemoryStoreKeys constructs a new map matching store key names to their
+// respective MemoryStoreKey references.
+// The function will panic if there is a potential conflict in names (see `assertNoPrefix`
+// function for more details).
+func NewMemoryStoreKeys(names ...string)
+
+map[string]*MemoryStoreKey {
+ assertNoCommonPrefix(names)
+ keys := make(map[string]*MemoryStoreKey)
+ for _, n := range names {
+ keys[n] = NewMemoryStoreKey(n)
+}
+
+return keys
+}
+```
+
+The `CommitID` is a deterministic commit of the state tree. Its hash is returned to the underlying consensus engine and stored in the block header. Note that commit store interfaces exist for various purposes, one of which is to make sure not every object can commit the store. As part of the [object-capabilities model](/sdk/v0.53/learn/advanced/ocap) of the Cosmos SDK, only `baseapp` should have the ability to commit stores. For example, this is the reason why the `ctx.KVStore()` method by which modules typically access stores returns a `KVStore` and not a `CommitKVStore`.
+
+The Cosmos SDK comes with many types of stores, the most used being [`CommitMultiStore`](#multistore), [`KVStore`](#kvstore) and [`GasKv` store](#gaskv-store). [Other types of stores](#other-stores) include `Transient` and `TraceKV` stores.
+
+## Multistore
+
+### Multistore Interface
+
+Each Cosmos SDK application holds a multistore at its root to persist its state. The multistore is a store of `KVStores` that follows the `Multistore` interface:
+
+```go expandable
+package types
+
+import (
+
+ "fmt"
+ "io"
+ "maps"
+ "slices"
+ "github.com/cometbft/cometbft/proto/tendermint/crypto"
+ dbm "github.com/cosmos/cosmos-db"
+ "cosmossdk.io/store/metrics"
+ pruningtypes "cosmossdk.io/store/pruning/types"
+ snapshottypes "cosmossdk.io/store/snapshots/types"
+)
+
+type Store interface {
+ GetStoreType()
+
+StoreType
+ CacheWrapper
+}
+
+// something that can persist to disk
+type Committer interface {
+ Commit()
+
+CommitID
+ LastCommitID()
+
+CommitID
+
+ // WorkingHash returns the hash of the KVStore's state before commit.
+ WorkingHash() []byte
+
+ SetPruning(pruningtypes.PruningOptions)
+
+GetPruning()
+
+pruningtypes.PruningOptions
+}
+
+// Stores of MultiStore must implement CommitStore.
+type CommitStore interface {
+ Committer
+ Store
+}
+
+// Queryable allows a Store to expose internal state to the abci.Query
+// interface. Multistore can route requests to the proper Store.
+//
+// This is an optional, but useful extension to any CommitStore
+type Queryable interface {
+ Query(*RequestQuery) (*ResponseQuery, error)
+}
+
+type RequestQuery struct {
+ Data []byte
+ Path string
+ Height int64
+ Prove bool
+}
+
+type ResponseQuery struct {
+ Code uint32
+ Log string
+ Info string
+ Index int64
+ Key []byte
+ Value []byte
+ ProofOps *crypto.ProofOps
+ Height int64
+ Codespace string
+}
+
+//----------------------------------------
+// MultiStore
+
+// StoreUpgrades defines a series of transformations to apply the multistore db upon load
+type StoreUpgrades struct {
+ Added []string `json:"added"`
+ Renamed []StoreRename `json:"renamed"`
+ Deleted []string `json:"deleted"`
+}
+
+// StoreRename defines a name change of a sub-store.
+// All data previously under a PrefixStore with OldKey will be copied
+// to a PrefixStore with NewKey, then deleted from OldKey store.
+type StoreRename struct {
+ OldKey string `json:"old_key"`
+ NewKey string `json:"new_key"`
+}
+
+// IsAdded returns true if the given key should be added
+func (s *StoreUpgrades)
+
+IsAdded(key string)
+
+bool {
+ if s == nil {
+ return false
+}
+
+return slices.Contains(s.Added, key)
+}
+
+// IsDeleted returns true if the given key should be deleted
+func (s *StoreUpgrades)
+
+IsDeleted(key string)
+
+bool {
+ if s == nil {
+ return false
+}
+
+return slices.Contains(s.Deleted, key)
+}
+
+// RenamedFrom returns the oldKey if it was renamed
+// Returns "" if it was not renamed
+func (s *StoreUpgrades)
+
+RenamedFrom(key string)
+
+string {
+ if s == nil {
+ return ""
+}
+ for _, re := range s.Renamed {
+ if re.NewKey == key {
+ return re.OldKey
+}
+
+}
+
+return ""
+}
+
+type MultiStore interface {
+ Store
+
+ // Branches MultiStore into a cached storage object.
+ // NOTE: Caller should probably not call .Write()
+
+on each, but
+ // call CacheMultiStore.Write().
+ CacheMultiStore()
+
+CacheMultiStore
+
+ // CacheMultiStoreWithVersion branches the underlying MultiStore where
+ // each stored is loaded at a specific version (height).
+ CacheMultiStoreWithVersion(version int64) (CacheMultiStore, error)
+
+ // Convenience for fetching substores.
+ // If the store does not exist, panics.
+ GetStore(StoreKey)
+
+Store
+ GetKVStore(StoreKey)
+
+KVStore
+
+ // TracingEnabled returns if tracing is enabled for the MultiStore.
+ TracingEnabled()
+
+bool
+
+ // SetTracer sets the tracer for the MultiStore that the underlying
+ // stores will utilize to trace operations. The modified MultiStore is
+ // returned.
+ SetTracer(w io.Writer)
+
+MultiStore
+
+ // SetTracingContext sets the tracing context for a MultiStore. It is
+ // implied that the caller should update the context when necessary between
+ // tracing operations. The modified MultiStore is returned.
+ SetTracingContext(TraceContext)
+
+MultiStore
+
+ // LatestVersion returns the latest version in the store
+ LatestVersion()
+
+int64
+}
+
+// From MultiStore.CacheMultiStore()....
+type CacheMultiStore interface {
+ MultiStore
+ Write() // Writes operations to underlying KVStore
+}
+
+// CommitMultiStore is an interface for a MultiStore without cache capabilities.
+type CommitMultiStore interface {
+ Committer
+ MultiStore
+ snapshottypes.Snapshotter
+
+ // Mount a store of type using the given db.
+ // If db == nil, the new store will use the CommitMultiStore db.
+ MountStoreWithDB(key StoreKey, typ StoreType, db dbm.DB)
+
+ // Panics on a nil key.
+ GetCommitStore(key StoreKey)
+
+CommitStore
+
+ // Panics on a nil key.
+ GetCommitKVStore(key StoreKey)
+
+CommitKVStore
+
+ // Load the latest persisted version. Called once after all calls to
+ // Mount*Store()
+
+are complete.
+ LoadLatestVersion()
+
+error
+
+ // LoadLatestVersionAndUpgrade will load the latest version, but also
+ // rename/delete/create sub-store keys, before registering all the keys
+ // in order to handle breaking formats in migrations
+ LoadLatestVersionAndUpgrade(upgrades *StoreUpgrades)
+
+error
+
+ // LoadVersionAndUpgrade will load the named version, but also
+ // rename/delete/create sub-store keys, before registering all the keys
+ // in order to handle breaking formats in migrations
+ LoadVersionAndUpgrade(ver int64, upgrades *StoreUpgrades)
+
+error
+
+ // Load a specific persisted version. When you load an old version, or when
+ // the last commit attempt didn't complete, the next commit after loading
+ // must be idempotent (return the same commit id). Otherwise the behavior is
+ // undefined.
+ LoadVersion(ver int64)
+
+error
+
+ // Set an inter-block (persistent)
+
+cache that maintains a mapping from
+ // StoreKeys to CommitKVStores.
+ SetInterBlockCache(MultiStorePersistentCache)
+
+ // SetInitialVersion sets the initial version of the IAVL tree. It is used when
+ // starting a new chain at an arbitrary height.
+ SetInitialVersion(version int64)
+
+error
+
+ // SetIAVLCacheSize sets the cache size of the IAVL tree.
+ SetIAVLCacheSize(size int)
+
+ // SetIAVLDisableFastNode enables/disables fastnode feature on iavl.
+ SetIAVLDisableFastNode(disable bool)
+
+ // SetIAVLSyncPruning set sync/async pruning on iavl.
+ // It is not recommended to use this option.
+ // It is here to enable the prune command to force this to true, allowing the command to wait
+ // for the pruning to finish before returning.
+ SetIAVLSyncPruning(sync bool)
+
+ // RollbackToVersion rollback the db to specific version(height).
+ RollbackToVersion(version int64)
+
+error
+
+ // ListeningEnabled returns if listening is enabled for the KVStore belonging the provided StoreKey
+ ListeningEnabled(key StoreKey)
+
+bool
+
+ // AddListeners adds a listener for the KVStore belonging to the provided StoreKey
+ AddListeners(keys []StoreKey)
+
+ // PopStateCache returns the accumulated state change messages from the CommitMultiStore
+ PopStateCache() []*StoreKVPair
+
+ // SetMetrics sets the metrics for the KVStore
+ SetMetrics(metrics metrics.StoreMetrics)
+}
+
+//---------subsp-------------------------------
+// KVStore
+
+// BasicKVStore is a simple interface to get/set data
+type BasicKVStore interface {
+ // Get returns nil if key doesn't exist. Panics on nil key.
+ Get(key []byte) []byte
+
+ // Has checks if a key exists. Panics on nil key.
+ Has(key []byte)
+
+bool
+
+ // Set sets the key. Panics on nil key or value.
+ Set(key, value []byte)
+
+ // Delete deletes the key. Panics on nil key.
+ Delete(key []byte)
+}
+
+// KVStore additionally provides iteration and deletion
+type KVStore interface {
+ Store
+ BasicKVStore
+
+ // Iterator over a domain of keys in ascending order. End is exclusive.
+ // Start must be less than end, or the Iterator is invalid.
+ // Iterator must be closed by caller.
+ // To iterate over entire domain, use store.Iterator(nil, nil)
+ // CONTRACT: No writes may happen within a domain while an iterator exists over it.
+ // Exceptionally allowed for cachekv.Store, safe to write in the modules.
+ Iterator(start, end []byte)
+
+Iterator
+
+ // Iterator over a domain of keys in descending order. End is exclusive.
+ // Start must be less than end, or the Iterator is invalid.
+ // Iterator must be closed by caller.
+ // CONTRACT: No writes may happen within a domain while an iterator exists over it.
+ // Exceptionally allowed for cachekv.Store, safe to write in the modules.
+ ReverseIterator(start, end []byte)
+
+Iterator
+}
+
+// Iterator is an alias db's Iterator for convenience.
+type Iterator = dbm.Iterator
+
+// CacheKVStore branches a KVStore and provides read cache functionality.
+// After calling .Write()
+
+on the CacheKVStore, all previously created
+// CacheKVStores on the object expire.
+type CacheKVStore interface {
+ KVStore
+
+ // Writes operations to underlying KVStore
+ Write()
+}
+
+// CommitKVStore is an interface for MultiStore.
+type CommitKVStore interface {
+ Committer
+ KVStore
+}
+
+//----------------------------------------
+// CacheWrap
+
+// CacheWrap is the most appropriate interface for store ephemeral branching and cache.
+// For example, IAVLStore.CacheWrap()
+
+returns a CacheKVStore. CacheWrap should not return
+// a Committer, since Commit ephemeral store make no sense. It can return KVStore,
+// HeapStore, SpaceStore, etc.
+type CacheWrap interface {
+ // Write syncs with the underlying store.
+ Write()
+
+ // CacheWrap recursively wraps again.
+ CacheWrap()
+
+CacheWrap
+
+ // CacheWrapWithTrace recursively wraps again with tracing enabled.
+ CacheWrapWithTrace(w io.Writer, tc TraceContext)
+
+CacheWrap
+}
+
+type CacheWrapper interface {
+ // CacheWrap branches a store.
+ CacheWrap()
+
+CacheWrap
+
+ // CacheWrapWithTrace branches a store with tracing enabled.
+ CacheWrapWithTrace(w io.Writer, tc TraceContext)
+
+CacheWrap
+}
+
+func (cid CommitID)
+
+IsZero()
+
+bool {
+ return cid.Version == 0 && len(cid.Hash) == 0
+}
+
+func (cid CommitID)
+
+String()
+
+string {
+ return fmt.Sprintf("CommitID{%v:%X
+}", cid.Hash, cid.Version)
+}
+
+//----------------------------------------
+// Store types
+
+// kind of store
+type StoreType int
+
+const (
+ StoreTypeMulti StoreType = iota
+ StoreTypeDB
+ StoreTypeIAVL
+ StoreTypeTransient
+ StoreTypeMemory
+ StoreTypeSMT
+ StoreTypePersistent
+)
+
+func (st StoreType)
+
+String()
+
+string {
+ switch st {
+ case StoreTypeMulti:
+ return "StoreTypeMulti"
+ case StoreTypeDB:
+ return "StoreTypeDB"
+ case StoreTypeIAVL:
+ return "StoreTypeIAVL"
+ case StoreTypeTransient:
+ return "StoreTypeTransient"
+ case StoreTypeMemory:
+ return "StoreTypeMemory"
+ case StoreTypeSMT:
+ return "StoreTypeSMT"
+ case StoreTypePersistent:
+ return "StoreTypePersistent"
+}
+
+return "unknown store type"
+}
+
+//----------------------------------------
+// Keys for accessing substores
+
+// StoreKey is a key used to index stores in a MultiStore.
+type StoreKey interface {
+ Name()
+
+string
+ String()
+
+string
+}
+
+// CapabilityKey represent the Cosmos SDK keys for object-capability
+// generation in the IBC protocol as defined in https://github.com/cosmos/ibc/tree/master/spec/core/ics-005-port-allocation#data-structures
+type CapabilityKey StoreKey
+
+// KVStoreKey is used for accessing substores.
+// Only the pointer value should ever be used - it functions as a capabilities key.
+type KVStoreKey struct {
+ name string
+}
+
+// NewKVStoreKey returns a new pointer to a KVStoreKey.
+// Use a pointer so keys don't collide.
+func NewKVStoreKey(name string) *KVStoreKey {
+ if name == "" {
+ panic("empty key name not allowed")
+}
+
+return &KVStoreKey{
+ name: name,
+}
+}
+
+// NewKVStoreKeys returns a map of new pointers to KVStoreKey's.
+// The function will panic if there is a potential conflict in names (see `assertNoPrefix`
+// function for more details).
+func NewKVStoreKeys(names ...string)
+
+map[string]*KVStoreKey {
+ assertNoCommonPrefix(names)
+ keys := make(map[string]*KVStoreKey, len(names))
+ for _, n := range names {
+ keys[n] = NewKVStoreKey(n)
+}
+
+return keys
+}
+
+func (key *KVStoreKey)
+
+Name()
+
+string {
+ return key.name
+}
+
+func (key *KVStoreKey)
+
+String()
+
+string {
+ return fmt.Sprintf("KVStoreKey{%p, %s
+}", key, key.name)
+}
+
+// TransientStoreKey is used for indexing transient stores in a MultiStore
+type TransientStoreKey struct {
+ name string
+}
+
+// Constructs new TransientStoreKey
+// Must return a pointer according to the ocap principle
+func NewTransientStoreKey(name string) *TransientStoreKey {
+ return &TransientStoreKey{
+ name: name,
+}
+}
+
+// Implements StoreKey
+func (key *TransientStoreKey)
+
+Name()
+
+string {
+ return key.name
+}
+
+// Implements StoreKey
+func (key *TransientStoreKey)
+
+String()
+
+string {
+ return fmt.Sprintf("TransientStoreKey{%p, %s
+}", key, key.name)
+}
+
+// MemoryStoreKey defines a typed key to be used with an in-memory KVStore.
+type MemoryStoreKey struct {
+ name string
+}
+
+func NewMemoryStoreKey(name string) *MemoryStoreKey {
+ return &MemoryStoreKey{
+ name: name
+}
+}
+
+// Name returns the name of the MemoryStoreKey.
+func (key *MemoryStoreKey)
+
+Name()
+
+string {
+ return key.name
+}
+
+// String returns a stringified representation of the MemoryStoreKey.
+func (key *MemoryStoreKey)
+
+String()
+
+string {
+ return fmt.Sprintf("MemoryStoreKey{%p, %s
+}", key, key.name)
+}
+
+//----------------------------------------
+
+// TraceContext contains TraceKVStore context data. It will be written with
+// every trace operation.
+type TraceContext map[string]interface{
+}
+
+// Clone clones tc into another instance of TraceContext.
+func (tc TraceContext)
+
+Clone()
+
+TraceContext {
+ ret := TraceContext{
+}
+
+maps.Copy(ret, tc)
+
+return ret
+}
+
+// Merge merges value of newTc into tc.
+func (tc TraceContext)
+
+Merge(newTc TraceContext)
+
+TraceContext {
+ if tc == nil {
+ tc = TraceContext{
+}
+
+}
+
+maps.Copy(tc, newTc)
+
+return tc
+}
+
+// MultiStorePersistentCache defines an interface which provides inter-block
+// (persistent)
+
+caching capabilities for multiple CommitKVStores based on StoreKeys.
+type MultiStorePersistentCache interface {
+ // Wrap and return the provided CommitKVStore with an inter-block (persistent)
+ // cache.
+ GetStoreCache(key StoreKey, store CommitKVStore)
+
+CommitKVStore
+
+ // Return the underlying CommitKVStore for a StoreKey.
+ Unwrap(key StoreKey)
+
+CommitKVStore
+
+ // Reset the entire set of internal caches.
+ Reset()
+}
+
+// StoreWithInitialVersion is a store that can have an arbitrary initial
+// version.
+type StoreWithInitialVersion interface {
+ // SetInitialVersion sets the initial version of the IAVL tree. It is used when
+ // starting a new chain at an arbitrary height.
+ SetInitialVersion(version int64)
+}
+
+// NewTransientStoreKeys constructs a new map of TransientStoreKey's
+// Must return pointers according to the ocap principle
+// The function will panic if there is a potential conflict in names
+// see `assertNoCommonPrefix` function for more details.
+func NewTransientStoreKeys(names ...string)
+
+map[string]*TransientStoreKey {
+ assertNoCommonPrefix(names)
+ keys := make(map[string]*TransientStoreKey)
+ for _, n := range names {
+ keys[n] = NewTransientStoreKey(n)
+}
+
+return keys
+}
+
+// NewMemoryStoreKeys constructs a new map matching store key names to their
+// respective MemoryStoreKey references.
+// The function will panic if there is a potential conflict in names (see `assertNoPrefix`
+// function for more details).
+func NewMemoryStoreKeys(names ...string)
+
+map[string]*MemoryStoreKey {
+ assertNoCommonPrefix(names)
+ keys := make(map[string]*MemoryStoreKey)
+ for _, n := range names {
+ keys[n] = NewMemoryStoreKey(n)
+}
+
+return keys
+}
+```
+
If tracing is enabled, then branching the multistore will first wrap all the underlying `KVStores` in [`TraceKv.Store`](#tracekv-store).
+
+### CommitMultiStore
+
+The main type of `Multistore` used in the Cosmos SDK is `CommitMultiStore`, which is an extension of the `Multistore` interface:
+
+```go expandable
+package types
+
+import (
+
+ "fmt"
+ "io"
+ "maps"
+ "slices"
+ "github.com/cometbft/cometbft/proto/tendermint/crypto"
+ dbm "github.com/cosmos/cosmos-db"
+ "cosmossdk.io/store/metrics"
+ pruningtypes "cosmossdk.io/store/pruning/types"
+ snapshottypes "cosmossdk.io/store/snapshots/types"
+)
+
+type Store interface {
+ GetStoreType()
+
+StoreType
+ CacheWrapper
+}
+
+// something that can persist to disk
+type Committer interface {
+ Commit()
+
+CommitID
+ LastCommitID()
+
+CommitID
+
+ // WorkingHash returns the hash of the KVStore's state before commit.
+ WorkingHash() []byte
+
+ SetPruning(pruningtypes.PruningOptions)
+
+GetPruning()
+
+pruningtypes.PruningOptions
+}
+
+// Stores of MultiStore must implement CommitStore.
+type CommitStore interface {
+ Committer
+ Store
+}
+
+// Queryable allows a Store to expose internal state to the abci.Query
+// interface. Multistore can route requests to the proper Store.
+//
+// This is an optional, but useful extension to any CommitStore
+type Queryable interface {
+ Query(*RequestQuery) (*ResponseQuery, error)
+}
+
+type RequestQuery struct {
+ Data []byte
+ Path string
+ Height int64
+ Prove bool
+}
+
+type ResponseQuery struct {
+ Code uint32
+ Log string
+ Info string
+ Index int64
+ Key []byte
+ Value []byte
+ ProofOps *crypto.ProofOps
+ Height int64
+ Codespace string
+}
+
+//----------------------------------------
+// MultiStore
+
+// StoreUpgrades defines a series of transformations to apply the multistore db upon load
+type StoreUpgrades struct {
+ Added []string `json:"added"`
+ Renamed []StoreRename `json:"renamed"`
+ Deleted []string `json:"deleted"`
+}
+
+// StoreRename defines a name change of a sub-store.
+// All data previously under a PrefixStore with OldKey will be copied
+// to a PrefixStore with NewKey, then deleted from OldKey store.
+type StoreRename struct {
+ OldKey string `json:"old_key"`
+ NewKey string `json:"new_key"`
+}
+
+// IsAdded returns true if the given key should be added
+func (s *StoreUpgrades)
+
+IsAdded(key string)
+
+bool {
+ if s == nil {
+ return false
+}
+
+return slices.Contains(s.Added, key)
+}
+
+// IsDeleted returns true if the given key should be deleted
+func (s *StoreUpgrades)
+
+IsDeleted(key string)
+
+bool {
+ if s == nil {
+ return false
+}
+
+return slices.Contains(s.Deleted, key)
+}
+
+// RenamedFrom returns the oldKey if it was renamed
+// Returns "" if it was not renamed
+func (s *StoreUpgrades)
+
+RenamedFrom(key string)
+
+string {
+ if s == nil {
+ return ""
+}
+ for _, re := range s.Renamed {
+ if re.NewKey == key {
+ return re.OldKey
+}
+
+}
+
+return ""
+}
+
+type MultiStore interface {
+ Store
+
+ // Branches MultiStore into a cached storage object.
+ // NOTE: Caller should probably not call .Write()
+
+on each, but
+ // call CacheMultiStore.Write().
+ CacheMultiStore()
+
+CacheMultiStore
+
+ // CacheMultiStoreWithVersion branches the underlying MultiStore where
+ // each stored is loaded at a specific version (height).
+ CacheMultiStoreWithVersion(version int64) (CacheMultiStore, error)
+
+ // Convenience for fetching substores.
+ // If the store does not exist, panics.
+ GetStore(StoreKey)
+
+Store
+ GetKVStore(StoreKey)
+
+KVStore
+
+ // TracingEnabled returns if tracing is enabled for the MultiStore.
+ TracingEnabled()
+
+bool
+
+ // SetTracer sets the tracer for the MultiStore that the underlying
+ // stores will utilize to trace operations. The modified MultiStore is
+ // returned.
+ SetTracer(w io.Writer)
+
+MultiStore
+
+ // SetTracingContext sets the tracing context for a MultiStore. It is
+ // implied that the caller should update the context when necessary between
+ // tracing operations. The modified MultiStore is returned.
+ SetTracingContext(TraceContext)
+
+MultiStore
+
+ // LatestVersion returns the latest version in the store
+ LatestVersion()
+
+int64
+}
+
+// From MultiStore.CacheMultiStore()....
+type CacheMultiStore interface {
+ MultiStore
+ Write() // Writes operations to underlying KVStore
+}
+
+// CommitMultiStore is an interface for a MultiStore without cache capabilities.
+type CommitMultiStore interface {
+ Committer
+ MultiStore
+ snapshottypes.Snapshotter
+
+ // Mount a store of type using the given db.
+ // If db == nil, the new store will use the CommitMultiStore db.
+ MountStoreWithDB(key StoreKey, typ StoreType, db dbm.DB)
+
+ // Panics on a nil key.
+ GetCommitStore(key StoreKey)
+
+CommitStore
+
+ // Panics on a nil key.
+ GetCommitKVStore(key StoreKey)
+
+CommitKVStore
+
+ // Load the latest persisted version. Called once after all calls to
+ // Mount*Store()
+
+are complete.
+ LoadLatestVersion()
+
+error
+
+ // LoadLatestVersionAndUpgrade will load the latest version, but also
+ // rename/delete/create sub-store keys, before registering all the keys
+ // in order to handle breaking formats in migrations
+ LoadLatestVersionAndUpgrade(upgrades *StoreUpgrades)
+
+error
+
+ // LoadVersionAndUpgrade will load the named version, but also
+ // rename/delete/create sub-store keys, before registering all the keys
+ // in order to handle breaking formats in migrations
+ LoadVersionAndUpgrade(ver int64, upgrades *StoreUpgrades)
+
+error
+
+ // Load a specific persisted version. When you load an old version, or when
+ // the last commit attempt didn't complete, the next commit after loading
+ // must be idempotent (return the same commit id). Otherwise the behavior is
+ // undefined.
+ LoadVersion(ver int64)
+
+error
+
+ // Set an inter-block (persistent)
+
+cache that maintains a mapping from
+ // StoreKeys to CommitKVStores.
+ SetInterBlockCache(MultiStorePersistentCache)
+
+ // SetInitialVersion sets the initial version of the IAVL tree. It is used when
+ // starting a new chain at an arbitrary height.
+ SetInitialVersion(version int64)
+
+error
+
+ // SetIAVLCacheSize sets the cache size of the IAVL tree.
+ SetIAVLCacheSize(size int)
+
+ // SetIAVLDisableFastNode enables/disables fastnode feature on iavl.
+ SetIAVLDisableFastNode(disable bool)
+
+ // SetIAVLSyncPruning set sync/async pruning on iavl.
+ // It is not recommended to use this option.
+ // It is here to enable the prune command to force this to true, allowing the command to wait
+ // for the pruning to finish before returning.
+ SetIAVLSyncPruning(sync bool)
+
+ // RollbackToVersion rollback the db to specific version(height).
+ RollbackToVersion(version int64)
+
+error
+
+ // ListeningEnabled returns if listening is enabled for the KVStore belonging the provided StoreKey
+ ListeningEnabled(key StoreKey)
+
+bool
+
+ // AddListeners adds a listener for the KVStore belonging to the provided StoreKey
+ AddListeners(keys []StoreKey)
+
+ // PopStateCache returns the accumulated state change messages from the CommitMultiStore
+ PopStateCache() []*StoreKVPair
+
+ // SetMetrics sets the metrics for the KVStore
+ SetMetrics(metrics metrics.StoreMetrics)
+}
+
+//---------subsp-------------------------------
+// KVStore
+
+// BasicKVStore is a simple interface to get/set data
+type BasicKVStore interface {
+ // Get returns nil if key doesn't exist. Panics on nil key.
+ Get(key []byte) []byte
+
+ // Has checks if a key exists. Panics on nil key.
+ Has(key []byte)
+
+bool
+
+ // Set sets the key. Panics on nil key or value.
+ Set(key, value []byte)
+
+ // Delete deletes the key. Panics on nil key.
+ Delete(key []byte)
+}
+
+// KVStore additionally provides iteration and deletion
+type KVStore interface {
+ Store
+ BasicKVStore
+
+ // Iterator over a domain of keys in ascending order. End is exclusive.
+ // Start must be less than end, or the Iterator is invalid.
+ // Iterator must be closed by caller.
+ // To iterate over entire domain, use store.Iterator(nil, nil)
+ // CONTRACT: No writes may happen within a domain while an iterator exists over it.
+ // Exceptionally allowed for cachekv.Store, safe to write in the modules.
+ Iterator(start, end []byte)
+
+Iterator
+
+ // Iterator over a domain of keys in descending order. End is exclusive.
+ // Start must be less than end, or the Iterator is invalid.
+ // Iterator must be closed by caller.
+ // CONTRACT: No writes may happen within a domain while an iterator exists over it.
+ // Exceptionally allowed for cachekv.Store, safe to write in the modules.
+ ReverseIterator(start, end []byte)
+
+Iterator
+}
+
+// Iterator is an alias db's Iterator for convenience.
+type Iterator = dbm.Iterator
+
+// CacheKVStore branches a KVStore and provides read cache functionality.
+// After calling .Write()
+
+on the CacheKVStore, all previously created
+// CacheKVStores on the object expire.
+type CacheKVStore interface {
+ KVStore
+
+ // Writes operations to underlying KVStore
+ Write()
+}
+
+// CommitKVStore is an interface for MultiStore.
+type CommitKVStore interface {
+ Committer
+ KVStore
+}
+
+//----------------------------------------
+// CacheWrap
+
+// CacheWrap is the most appropriate interface for store ephemeral branching and cache.
+// For example, IAVLStore.CacheWrap()
+
+returns a CacheKVStore. CacheWrap should not return
+// a Committer, since Commit ephemeral store make no sense. It can return KVStore,
+// HeapStore, SpaceStore, etc.
+type CacheWrap interface {
+ // Write syncs with the underlying store.
+ Write()
+
+ // CacheWrap recursively wraps again.
+ CacheWrap()
+
+CacheWrap
+
+ // CacheWrapWithTrace recursively wraps again with tracing enabled.
+ CacheWrapWithTrace(w io.Writer, tc TraceContext)
+
+CacheWrap
+}
+
+type CacheWrapper interface {
+ // CacheWrap branches a store.
+ CacheWrap()
+
+CacheWrap
+
+ // CacheWrapWithTrace branches a store with tracing enabled.
+ CacheWrapWithTrace(w io.Writer, tc TraceContext)
+
+CacheWrap
+}
+
+func (cid CommitID)
+
+IsZero()
+
+bool {
+ return cid.Version == 0 && len(cid.Hash) == 0
+}
+
+func (cid CommitID)
+
+String()
+
+string {
+ return fmt.Sprintf("CommitID{%v:%X
+}", cid.Hash, cid.Version)
+}
+
//----------------------------------------
// Store types

// StoreType identifies the kind of store.
type StoreType int

const (
	StoreTypeMulti StoreType = iota
	StoreTypeDB
	StoreTypeIAVL
	StoreTypeTransient
	StoreTypeMemory
	StoreTypeSMT
	StoreTypePersistent
)

// String returns a human-readable name for the StoreType.
func (st StoreType) String() string {
	switch st {
	case StoreTypeMulti:
		return "StoreTypeMulti"
	case StoreTypeDB:
		return "StoreTypeDB"
	case StoreTypeIAVL:
		return "StoreTypeIAVL"
	case StoreTypeTransient:
		return "StoreTypeTransient"
	case StoreTypeMemory:
		return "StoreTypeMemory"
	case StoreTypeSMT:
		return "StoreTypeSMT"
	case StoreTypePersistent:
		return "StoreTypePersistent"
	}
	return "unknown store type"
}
+
+//----------------------------------------
+// Keys for accessing substores
+
+// StoreKey is a key used to index stores in a MultiStore.
+type StoreKey interface {
+ Name()
+
+string
+ String()
+
+string
+}
+
+// CapabilityKey represent the Cosmos SDK keys for object-capability
+// generation in the IBC protocol as defined in https://github.com/cosmos/ibc/tree/master/spec/core/ics-005-port-allocation#data-structures
+type CapabilityKey StoreKey
+
+// KVStoreKey is used for accessing substores.
+// Only the pointer value should ever be used - it functions as a capabilities key.
+type KVStoreKey struct {
+ name string
+}
+
+// NewKVStoreKey returns a new pointer to a KVStoreKey.
+// Use a pointer so keys don't collide.
+func NewKVStoreKey(name string) *KVStoreKey {
+ if name == "" {
+ panic("empty key name not allowed")
+}
+
+return &KVStoreKey{
+ name: name,
+}
+}
+
+// NewKVStoreKeys returns a map of new pointers to KVStoreKey's.
+// The function will panic if there is a potential conflict in names (see `assertNoPrefix`
+// function for more details).
+func NewKVStoreKeys(names ...string)
+
+map[string]*KVStoreKey {
+ assertNoCommonPrefix(names)
+ keys := make(map[string]*KVStoreKey, len(names))
+ for _, n := range names {
+ keys[n] = NewKVStoreKey(n)
+}
+
+return keys
+}
+
+func (key *KVStoreKey)
+
+Name()
+
+string {
+ return key.name
+}
+
+func (key *KVStoreKey)
+
+String()
+
+string {
+ return fmt.Sprintf("KVStoreKey{%p, %s
+}", key, key.name)
+}
+
// TransientStoreKey is used for indexing transient stores in a MultiStore.
type TransientStoreKey struct {
	name string
}

// NewTransientStoreKey constructs a new TransientStoreKey.
// Must return a pointer according to the ocap principle.
func NewTransientStoreKey(name string) *TransientStoreKey {
	return &TransientStoreKey{
		name: name,
	}
}

// Name implements StoreKey.
func (key *TransientStoreKey) Name() string {
	return key.name
}

// String implements StoreKey.
func (key *TransientStoreKey) String() string {
	return fmt.Sprintf("TransientStoreKey{%p, %s}", key, key.name)
}

// MemoryStoreKey defines a typed key to be used with an in-memory KVStore.
type MemoryStoreKey struct {
	name string
}

// NewMemoryStoreKey constructs a new MemoryStoreKey.
func NewMemoryStoreKey(name string) *MemoryStoreKey {
	return &MemoryStoreKey{
		name: name,
	}
}

// Name returns the name of the MemoryStoreKey.
func (key *MemoryStoreKey) Name() string {
	return key.name
}

// String returns a stringified representation of the MemoryStoreKey.
func (key *MemoryStoreKey) String() string {
	return fmt.Sprintf("MemoryStoreKey{%p, %s}", key, key.name)
}
+
//----------------------------------------

// TraceContext contains TraceKVStore context data. It will be written with
// every trace operation.
type TraceContext map[string]interface{}

// Clone clones tc into another instance of TraceContext.
func (tc TraceContext) Clone() TraceContext {
	ret := TraceContext{}
	maps.Copy(ret, tc)
	return ret
}

// Merge merges the values of newTc into tc.
func (tc TraceContext) Merge(newTc TraceContext) TraceContext {
	if tc == nil {
		tc = TraceContext{}
	}
	maps.Copy(tc, newTc)
	return tc
}
+
+// MultiStorePersistentCache defines an interface which provides inter-block
+// (persistent)
+
+caching capabilities for multiple CommitKVStores based on StoreKeys.
+type MultiStorePersistentCache interface {
+ // Wrap and return the provided CommitKVStore with an inter-block (persistent)
+ // cache.
+ GetStoreCache(key StoreKey, store CommitKVStore)
+
+CommitKVStore
+
+ // Return the underlying CommitKVStore for a StoreKey.
+ Unwrap(key StoreKey)
+
+CommitKVStore
+
+ // Reset the entire set of internal caches.
+ Reset()
+}
+
+// StoreWithInitialVersion is a store that can have an arbitrary initial
+// version.
+type StoreWithInitialVersion interface {
+ // SetInitialVersion sets the initial version of the IAVL tree. It is used when
+ // starting a new chain at an arbitrary height.
+ SetInitialVersion(version int64)
+}
+
+// NewTransientStoreKeys constructs a new map of TransientStoreKey's
+// Must return pointers according to the ocap principle
+// The function will panic if there is a potential conflict in names
+// see `assertNoCommonPrefix` function for more details.
+func NewTransientStoreKeys(names ...string)
+
+map[string]*TransientStoreKey {
+ assertNoCommonPrefix(names)
+ keys := make(map[string]*TransientStoreKey)
+ for _, n := range names {
+ keys[n] = NewTransientStoreKey(n)
+}
+
+return keys
+}
+
+// NewMemoryStoreKeys constructs a new map matching store key names to their
+// respective MemoryStoreKey references.
+// The function will panic if there is a potential conflict in names (see `assertNoPrefix`
+// function for more details).
+func NewMemoryStoreKeys(names ...string)
+
+map[string]*MemoryStoreKey {
+ assertNoCommonPrefix(names)
+ keys := make(map[string]*MemoryStoreKey)
+ for _, n := range names {
+ keys[n] = NewMemoryStoreKey(n)
+}
+
+return keys
+}
+```
+
As for a concrete implementation, `rootMulti.Store` is the go-to implementation of the `CommitMultiStore` interface.
+
+```go expandable
+package rootmulti
+
+import (
+
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "io"
+ "maps"
+ "math"
+ "sort"
+ "strings"
+ "sync"
+
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ dbm "github.com/cosmos/cosmos-db"
+ protoio "github.com/cosmos/gogoproto/io"
+ gogotypes "github.com/cosmos/gogoproto/types"
+ iavltree "github.com/cosmos/iavl"
+
+ errorsmod "cosmossdk.io/errors"
+ "cosmossdk.io/log"
+ "cosmossdk.io/store/cachemulti"
+ "cosmossdk.io/store/dbadapter"
+ "cosmossdk.io/store/iavl"
+ "cosmossdk.io/store/listenkv"
+ "cosmossdk.io/store/mem"
+ "cosmossdk.io/store/metrics"
+ "cosmossdk.io/store/pruning"
+ pruningtypes "cosmossdk.io/store/pruning/types"
+ snapshottypes "cosmossdk.io/store/snapshots/types"
+ "cosmossdk.io/store/tracekv"
+ "cosmossdk.io/store/transient"
+ "cosmossdk.io/store/types"
+)
+
+const (
+ latestVersionKey = "s/latest"
+ commitInfoKeyFmt = "s/%d" // s/
+)
+
+const iavlDisablefastNodeDefault = false
+
+// keysFromStoreKeyMap returns a slice of keys for the provided map lexically sorted by StoreKey.Name()
+
+func keysFromStoreKeyMap[V any](m map[types.StoreKey]V) []types.StoreKey {
+ keys := make([]types.StoreKey, 0, len(m))
+ for key := range m {
+ keys = append(keys, key)
+}
+
+sort.Slice(keys, func(i, j int)
+
+bool {
+ ki, kj := keys[i], keys[j]
+ return ki.Name() < kj.Name()
+})
+
+return keys
+}
+
+// Store is composed of many CommitStores. Name contrasts with
+// cacheMultiStore which is used for branching other MultiStores. It implements
+// the CommitMultiStore interface.
+type Store struct {
+ db dbm.DB
+ logger log.Logger
+ lastCommitInfo *types.CommitInfo
+ pruningManager *pruning.Manager
+ iavlCacheSize int
+ iavlDisableFastNode bool
+ // iavlSyncPruning should rarely be set to true.
+ // The Prune command will automatically set this to true.
+ // This allows the prune command to wait for the pruning to finish before returning.
+ iavlSyncPruning bool
+ storesParams map[types.StoreKey]storeParams
+ stores map[types.StoreKey]types.CommitKVStore
+ keysByName map[string]types.StoreKey
+ initialVersion int64
+ removalMap map[types.StoreKey]bool
+ traceWriter io.Writer
+ traceContext types.TraceContext
+ traceContextMutex sync.Mutex
+ interBlockCache types.MultiStorePersistentCache
+ listeners map[types.StoreKey]*types.MemoryListener
+ metrics metrics.StoreMetrics
+ commitHeader cmtproto.Header
+}
+
+var (
+ _ types.CommitMultiStore = (*Store)(nil)
+ _ types.Queryable = (*Store)(nil)
+)
+
+// NewStore returns a reference to a new Store object with the provided DB. The
+// store will be created with a PruneNothing pruning strategy by default. After
+// a store is created, KVStores must be mounted and finally LoadLatestVersion or
+// LoadVersion must be called.
+func NewStore(db dbm.DB, logger log.Logger, metricGatherer metrics.StoreMetrics) *Store {
+ return &Store{
+ db: db,
+ logger: logger,
+ iavlCacheSize: iavl.DefaultIAVLCacheSize,
+ iavlDisableFastNode: iavlDisablefastNodeDefault,
+ storesParams: make(map[types.StoreKey]storeParams),
+ stores: make(map[types.StoreKey]types.CommitKVStore),
+ keysByName: make(map[string]types.StoreKey),
+ listeners: make(map[types.StoreKey]*types.MemoryListener),
+ removalMap: make(map[types.StoreKey]bool),
+ pruningManager: pruning.NewManager(db, logger),
+ metrics: metricGatherer,
+}
+}
+
+// GetPruning fetches the pruning strategy from the root store.
+func (rs *Store)
+
+GetPruning()
+
+pruningtypes.PruningOptions {
+ return rs.pruningManager.GetOptions()
+}
+
+// SetPruning sets the pruning strategy on the root store and all the sub-stores.
+// Note, calling SetPruning on the root store prior to LoadVersion or
+// LoadLatestVersion performs a no-op as the stores aren't mounted yet.
+func (rs *Store)
+
+SetPruning(pruningOpts pruningtypes.PruningOptions) {
+ rs.pruningManager.SetOptions(pruningOpts)
+}
+
+// SetMetrics sets the metrics gatherer for the store package
+func (rs *Store)
+
+SetMetrics(metrics metrics.StoreMetrics) {
+ rs.metrics = metrics
+}
+
+// SetSnapshotInterval sets the interval at which the snapshots are taken.
+// It is used by the store to determine which heights to retain until after the snapshot is complete.
+func (rs *Store)
+
+SetSnapshotInterval(snapshotInterval uint64) {
+ rs.pruningManager.SetSnapshotInterval(snapshotInterval)
+}
+
+func (rs *Store)
+
+SetIAVLCacheSize(cacheSize int) {
+ rs.iavlCacheSize = cacheSize
+}
+
+func (rs *Store)
+
+SetIAVLDisableFastNode(disableFastNode bool) {
+ rs.iavlDisableFastNode = disableFastNode
+}
+
+func (rs *Store)
+
+SetIAVLSyncPruning(syncPruning bool) {
+ rs.iavlSyncPruning = syncPruning
+}
+
+// GetStoreType implements Store.
+func (rs *Store)
+
+GetStoreType()
+
+types.StoreType {
+ return types.StoreTypeMulti
+}
+
+// MountStoreWithDB implements CommitMultiStore.
+func (rs *Store)
+
+MountStoreWithDB(key types.StoreKey, typ types.StoreType, db dbm.DB) {
+ if key == nil {
+ panic("MountIAVLStore()
+
+key cannot be nil")
+}
+ if _, ok := rs.storesParams[key]; ok {
+ panic(fmt.Sprintf("store duplicate store key %v", key))
+}
+ if _, ok := rs.keysByName[key.Name()]; ok {
+ panic(fmt.Sprintf("store duplicate store key name %v", key))
+}
+
+rs.storesParams[key] = newStoreParams(key, db, typ, 0)
+
+rs.keysByName[key.Name()] = key
+}
+
+// GetCommitStore returns a mounted CommitStore for a given StoreKey. If the
+// store is wrapped in an inter-block cache, it will be unwrapped before returning.
+func (rs *Store)
+
+GetCommitStore(key types.StoreKey)
+
+types.CommitStore {
+ return rs.GetCommitKVStore(key)
+}
+
+// GetCommitKVStore returns a mounted CommitKVStore for a given StoreKey. If the
+// store is wrapped in an inter-block cache, it will be unwrapped before returning.
+func (rs *Store)
+
+GetCommitKVStore(key types.StoreKey)
+
+types.CommitKVStore {
+ // If the Store has an inter-block cache, first attempt to lookup and unwrap
+ // the underlying CommitKVStore by StoreKey. If it does not exist, fallback to
+ // the main mapping of CommitKVStores.
+ if rs.interBlockCache != nil {
+ if store := rs.interBlockCache.Unwrap(key); store != nil {
+ return store
+}
+
+}
+
+return rs.stores[key]
+}
+
+// StoreKeysByName returns mapping storeNames -> StoreKeys
+func (rs *Store)
+
+StoreKeysByName()
+
+map[string]types.StoreKey {
+ return rs.keysByName
+}
+
+// LoadLatestVersionAndUpgrade implements CommitMultiStore
+func (rs *Store)
+
+LoadLatestVersionAndUpgrade(upgrades *types.StoreUpgrades)
+
+error {
+ ver := GetLatestVersion(rs.db)
+
+return rs.loadVersion(ver, upgrades)
+}
+
+// LoadVersionAndUpgrade allows us to rename substores while loading an older version
+func (rs *Store)
+
+LoadVersionAndUpgrade(ver int64, upgrades *types.StoreUpgrades)
+
+error {
+ return rs.loadVersion(ver, upgrades)
+}
+
+// LoadLatestVersion implements CommitMultiStore.
+func (rs *Store)
+
+LoadLatestVersion()
+
+error {
+ ver := GetLatestVersion(rs.db)
+
+return rs.loadVersion(ver, nil)
+}
+
+// LoadVersion implements CommitMultiStore.
+func (rs *Store)
+
+LoadVersion(ver int64)
+
+error {
+ return rs.loadVersion(ver, nil)
+}
+
+func (rs *Store)
+
+loadVersion(ver int64, upgrades *types.StoreUpgrades)
+
+error {
+ infos := make(map[string]types.StoreInfo)
+
+rs.logger.Debug("loadVersion", "ver", ver)
+ cInfo := &types.CommitInfo{
+}
+
+ // load old data if we are not version 0
+ if ver != 0 {
+ var err error
+ cInfo, err = rs.GetCommitInfo(ver)
+ if err != nil {
+ return err
+}
+
+ // convert StoreInfos slice to map
+ for _, storeInfo := range cInfo.StoreInfos {
+ infos[storeInfo.Name] = storeInfo
+}
+
+}
+
+ // load each Store (note this doesn't panic on unmounted keys now)
+ newStores := make(map[types.StoreKey]types.CommitKVStore)
+ storesKeys := make([]types.StoreKey, 0, len(rs.storesParams))
+ for key := range rs.storesParams {
+ storesKeys = append(storesKeys, key)
+}
+ if upgrades != nil {
+ // deterministic iteration order for upgrades
+ // (as the underlying store may change and
+ // upgrades make store changes where the execution order may matter)
+
+sort.Slice(storesKeys, func(i, j int)
+
+bool {
+ return storesKeys[i].Name() < storesKeys[j].Name()
+})
+}
+ for _, key := range storesKeys {
+ storeParams := rs.storesParams[key]
+ commitID := rs.getCommitID(infos, key.Name())
+
+rs.logger.Debug("loadVersion commitID", "key", key, "ver", ver, "hash", fmt.Sprintf("%x", commitID.Hash))
+
+ // If it has been added, set the initial version
+ if upgrades.IsAdded(key.Name()) || upgrades.RenamedFrom(key.Name()) != "" {
+ storeParams.initialVersion = uint64(ver) + 1
+}
+
+else if commitID.Version != ver && storeParams.typ == types.StoreTypeIAVL {
+ return fmt.Errorf("version of store %s mismatch root store's version; expected %d got %d; new stores should be added using StoreUpgrades", key.Name(), ver, commitID.Version)
+}
+
+store, err := rs.loadCommitStoreFromParams(key, commitID, storeParams)
+ if err != nil {
+ return errorsmod.Wrap(err, "failed to load store")
+}
+
+newStores[key] = store
+
+ // If it was deleted, remove all data
+ if upgrades.IsDeleted(key.Name()) {
+ if err := deleteKVStore(store.(types.KVStore)); err != nil {
+ return errorsmod.Wrapf(err, "failed to delete store %s", key.Name())
+}
+
+rs.removalMap[key] = true
+}
+
+else if oldName := upgrades.RenamedFrom(key.Name()); oldName != "" {
+ // handle renames specially
+ // make an unregistered key to satisfy loadCommitStore params
+ oldKey := types.NewKVStoreKey(oldName)
+ oldParams := newStoreParams(oldKey, storeParams.db, storeParams.typ, 0)
+
+ // load from the old name
+ oldStore, err := rs.loadCommitStoreFromParams(oldKey, rs.getCommitID(infos, oldName), oldParams)
+ if err != nil {
+ return errorsmod.Wrapf(err, "failed to load old store %s", oldName)
+}
+
+ // move all data
+ if err := moveKVStoreData(oldStore.(types.KVStore), store.(types.KVStore)); err != nil {
+ return errorsmod.Wrapf(err, "failed to move store %s -> %s", oldName, key.Name())
+}
+
+ // add the old key so its deletion is committed
+ newStores[oldKey] = oldStore
+ // this will ensure it's not perpetually stored in commitInfo
+ rs.removalMap[oldKey] = true
+}
+
+}
+
+rs.lastCommitInfo = cInfo
+ rs.stores = newStores
+
+ // load any snapshot heights we missed from disk to be pruned on the next run
+ if err := rs.pruningManager.LoadSnapshotHeights(rs.db); err != nil {
+ return err
+}
+
+return nil
+}
+
+func (rs *Store)
+
+getCommitID(infos map[string]types.StoreInfo, name string)
+
+types.CommitID {
+ info, ok := infos[name]
+ if !ok {
+ return types.CommitID{
+}
+
+}
+
+return info.CommitId
+}
+
+func deleteKVStore(kv types.KVStore)
+
+error {
+ // Note that we cannot write while iterating, so load all keys here, delete below
+ var keys [][]byte
+ itr := kv.Iterator(nil, nil)
+ for itr.Valid() {
+ keys = append(keys, itr.Key())
+
+itr.Next()
+}
+ if err := itr.Close(); err != nil {
+ return err
+}
+ for _, k := range keys {
+ kv.Delete(k)
+}
+
+return nil
+}
+
+// we simulate move by a copy and delete
+func moveKVStoreData(oldDB, newDB types.KVStore)
+
+error {
+ // we read from one and write to another
+ itr := oldDB.Iterator(nil, nil)
+ for itr.Valid() {
+ newDB.Set(itr.Key(), itr.Value())
+
+itr.Next()
+}
+ if err := itr.Close(); err != nil {
+ return err
+}
+
+ // then delete the old store
+ return deleteKVStore(oldDB)
+}
+
+// PruneSnapshotHeight prunes the given height according to the prune strategy.
+// If the strategy is PruneNothing, this is a no-op.
+// For other strategies, this height is persisted until the snapshot is operated.
+func (rs *Store)
+
+PruneSnapshotHeight(height int64) {
+ rs.pruningManager.HandleSnapshotHeight(height)
+}
+
+// SetInterBlockCache sets the Store's internal inter-block (persistent)
+
+cache.
+// When this is defined, all CommitKVStores will be wrapped with their respective
+// inter-block cache.
+func (rs *Store)
+
+SetInterBlockCache(c types.MultiStorePersistentCache) {
+ rs.interBlockCache = c
+}
+
+// SetTracer sets the tracer for the MultiStore that the underlying
+// stores will utilize to trace operations. A MultiStore is returned.
+func (rs *Store)
+
+SetTracer(w io.Writer)
+
+types.MultiStore {
+ rs.traceWriter = w
+ return rs
+}
+
+// SetTracingContext updates the tracing context for the MultiStore by merging
+// the given context with the existing context by key. Any existing keys will
+// be overwritten. It is implied that the caller should update the context when
+// necessary between tracing operations. It returns a modified MultiStore.
+func (rs *Store)
+
+SetTracingContext(tc types.TraceContext)
+
+types.MultiStore {
+ rs.traceContextMutex.Lock()
+
+defer rs.traceContextMutex.Unlock()
+
+rs.traceContext = rs.traceContext.Merge(tc)
+
+return rs
+}
+
+func (rs *Store)
+
+getTracingContext()
+
+types.TraceContext {
+ rs.traceContextMutex.Lock()
+
+defer rs.traceContextMutex.Unlock()
+ if rs.traceContext == nil {
+ return nil
+}
+ ctx := types.TraceContext{
+}
+
+maps.Copy(ctx, rs.traceContext)
+
+return ctx
+}
+
+// TracingEnabled returns if tracing is enabled for the MultiStore.
+func (rs *Store)
+
+TracingEnabled()
+
+bool {
+ return rs.traceWriter != nil
+}
+
+// AddListeners adds a listener for the KVStore belonging to the provided StoreKey
+func (rs *Store)
+
+AddListeners(keys []types.StoreKey) {
+ for i := range keys {
+ listener := rs.listeners[keys[i]]
+ if listener == nil {
+ rs.listeners[keys[i]] = types.NewMemoryListener()
+}
+
+}
+}
+
+// ListeningEnabled returns if listening is enabled for a specific KVStore
+func (rs *Store)
+
+ListeningEnabled(key types.StoreKey)
+
+bool {
+ if ls, ok := rs.listeners[key]; ok {
+ return ls != nil
+}
+
+return false
+}
+
+// PopStateCache returns the accumulated state change messages from the CommitMultiStore
+// Calling PopStateCache destroys only the currently accumulated state in each listener
+// not the state in the store itself. This is a mutating and destructive operation.
+// This method has been synchronized.
+func (rs *Store)
+
+PopStateCache() []*types.StoreKVPair {
+ var cache []*types.StoreKVPair
+ for key := range rs.listeners {
+ ls := rs.listeners[key]
+ if ls != nil {
+ cache = append(cache, ls.PopStateCache()...)
+}
+
+}
+
+sort.SliceStable(cache, func(i, j int)
+
+bool {
+ return cache[i].StoreKey < cache[j].StoreKey
+})
+
+return cache
+}
+
+// LatestVersion returns the latest version in the store
+func (rs *Store)
+
+LatestVersion()
+
+int64 {
+ return rs.LastCommitID().Version
+}
+
+// LastCommitID implements Committer/CommitStore.
+func (rs *Store)
+
+LastCommitID()
+
+types.CommitID {
+ if rs.lastCommitInfo == nil {
+ emptyHash := sha256.Sum256([]byte{
+})
+ appHash := emptyHash[:]
+ return types.CommitID{
+ Version: GetLatestVersion(rs.db),
+ Hash: appHash, // set empty apphash to sha256([]byte{
+})
+ if info is nil
+}
+
+}
+ if len(rs.lastCommitInfo.CommitID().Hash) == 0 {
+ emptyHash := sha256.Sum256([]byte{
+})
+ appHash := emptyHash[:]
+ return types.CommitID{
+ Version: rs.lastCommitInfo.Version,
+ Hash: appHash, // set empty apphash to sha256([]byte{
+})
+ if hash is nil
+}
+
+}
+
+return rs.lastCommitInfo.CommitID()
+}
+
+// Commit implements Committer/CommitStore.
+func (rs *Store)
+
+Commit()
+
+types.CommitID {
+ var previousHeight, version int64
+ if rs.lastCommitInfo.GetVersion() == 0 && rs.initialVersion > 1 {
+ // This case means that no commit has been made in the store, we
+ // start from initialVersion.
+ version = rs.initialVersion
+}
+
+else {
+ // This case can means two things:
+ // - either there was already a previous commit in the store, in which
+ // case we increment the version from there,
+ // - or there was no previous commit, and initial version was not set,
+ // in which case we start at version 1.
+ previousHeight = rs.lastCommitInfo.GetVersion()
+
+version = previousHeight + 1
+}
+ if rs.commitHeader.Height != version {
+ rs.logger.Debug("commit header and version mismatch", "header_height", rs.commitHeader.Height, "version", version)
+}
+
+rs.lastCommitInfo = commitStores(version, rs.stores, rs.removalMap)
+
+rs.lastCommitInfo.Timestamp = rs.commitHeader.Time
+ defer rs.flushMetadata(rs.db, version, rs.lastCommitInfo)
+
+ // remove remnants of removed stores
+ for sk := range rs.removalMap {
+ if _, ok := rs.stores[sk]; ok {
+ delete(rs.stores, sk)
+
+delete(rs.storesParams, sk)
+
+delete(rs.keysByName, sk.Name())
+}
+
+}
+
+ // reset the removalMap
+ rs.removalMap = make(map[types.StoreKey]bool)
+ if err := rs.handlePruning(version); err != nil {
+ rs.logger.Error(
+ "failed to prune store, please check your pruning configuration",
+ "err", err,
+ )
+}
+
+return types.CommitID{
+ Version: version,
+ Hash: rs.lastCommitInfo.Hash(),
+}
+}
+
+// WorkingHash returns the current hash of the store.
+// it will be used to get the current app hash before commit.
+func (rs *Store)
+
+WorkingHash() []byte {
+ storeInfos := make([]types.StoreInfo, 0, len(rs.stores))
+ storeKeys := keysFromStoreKeyMap(rs.stores)
+ for _, key := range storeKeys {
+ store := rs.stores[key]
+ if store.GetStoreType() != types.StoreTypeIAVL {
+ continue
+}
+ if !rs.removalMap[key] {
+ si := types.StoreInfo{
+ Name: key.Name(),
+ CommitId: types.CommitID{
+ Hash: store.WorkingHash(),
+},
+}
+
+storeInfos = append(storeInfos, si)
+}
+
+}
+
+sort.SliceStable(storeInfos, func(i, j int)
+
+bool {
+ return storeInfos[i].Name < storeInfos[j].Name
+})
+
+return types.CommitInfo{
+ StoreInfos: storeInfos
+}.Hash()
+}
+
+// CacheWrap implements CacheWrapper/Store/CommitStore.
+func (rs *Store)
+
+CacheWrap()
+
+types.CacheWrap {
+ return rs.CacheMultiStore().(types.CacheWrap)
+}
+
+// CacheWrapWithTrace implements the CacheWrapper interface.
+func (rs *Store)
+
+CacheWrapWithTrace(_ io.Writer, _ types.TraceContext)
+
+types.CacheWrap {
+ return rs.CacheWrap()
+}
+
+// CacheMultiStore creates ephemeral branch of the multi-store and returns a CacheMultiStore.
+// It implements the MultiStore interface.
+func (rs *Store)
+
+CacheMultiStore()
+
+types.CacheMultiStore {
+ stores := make(map[types.StoreKey]types.CacheWrapper)
+ for k, v := range rs.stores {
+ store := types.KVStore(v)
+ // Wire the listenkv.Store to allow listeners to observe the writes from the cache store,
+ // set same listeners on cache store will observe duplicated writes.
+ if rs.ListeningEnabled(k) {
+ store = listenkv.NewStore(store, k, rs.listeners[k])
+}
+
+stores[k] = store
+}
+
+return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.getTracingContext())
+}
+
+// CacheMultiStoreWithVersion is analogous to CacheMultiStore except that it
+// attempts to load stores at a given version (height). An error is returned if
+// any store cannot be loaded. This should only be used for querying and
+// iterating at past heights.
+func (rs *Store)
+
+CacheMultiStoreWithVersion(version int64) (types.CacheMultiStore, error) {
+ cachedStores := make(map[types.StoreKey]types.CacheWrapper)
+
+var commitInfo *types.CommitInfo
+ storeInfos := map[string]bool{
+}
+ for key, store := range rs.stores {
+ var cacheStore types.KVStore
+ switch store.GetStoreType() {
+ case types.StoreTypeIAVL:
+ // If the store is wrapped with an inter-block cache, we must first unwrap
+ // it to get the underlying IAVL store.
+ store = rs.GetCommitKVStore(key)
+
+ // Attempt to lazy-load an already saved IAVL store version. If the
+ // version does not exist or is pruned, an error should be returned.
+ var err error
+ cacheStore, err = store.(*iavl.Store).GetImmutable(version)
+ // if we got error from loading a module store
+ // we fetch commit info of this version
+ // we use commit info to check if the store existed at this version or not
+ if err != nil {
+ if commitInfo == nil {
+ var errCommitInfo error
+ commitInfo, errCommitInfo = rs.GetCommitInfo(version)
+ if errCommitInfo != nil {
+ return nil, errCommitInfo
+}
+ for _, storeInfo := range commitInfo.StoreInfos {
+ storeInfos[storeInfo.Name] = true
+}
+
+}
+
+ // If the store existed at this version, it means there's actually an error
+ // getting the root store at this version.
+ if storeInfos[key.Name()] {
+ return nil, err
+}
+
+}
+
+default:
+ cacheStore = store
+}
+
+ // Wire the listenkv.Store to allow listeners to observe the writes from the cache store,
+ // set same listeners on cache store will observe duplicated writes.
+ if rs.ListeningEnabled(key) {
+ cacheStore = listenkv.NewStore(cacheStore, key, rs.listeners[key])
+}
+
+cachedStores[key] = cacheStore
+}
+
+return cachemulti.NewStore(rs.db, cachedStores, rs.keysByName, rs.traceWriter, rs.getTracingContext()), nil
+}
+
+// GetStore returns a mounted Store for a given StoreKey. If the StoreKey does
+// not exist, it will panic. If the Store is wrapped in an inter-block cache, it
+// will be unwrapped prior to being returned.
+//
+// TODO: This isn't used directly upstream. Consider returning the Store as-is
+// instead of unwrapping.
+func (rs *Store)
+
+GetStore(key types.StoreKey)
+
+types.Store {
+ store := rs.GetCommitKVStore(key)
+ if store == nil {
+ panic(fmt.Sprintf("store does not exist for key: %s", key.Name()))
+}
+
+return store
+}
+
+// GetKVStore returns a mounted KVStore for a given StoreKey. If tracing is
+// enabled on the KVStore, a wrapped TraceKVStore will be returned with the root
+// store's tracer, otherwise, the original KVStore will be returned.
+//
+// NOTE: The returned KVStore may be wrapped in an inter-block cache if it is
+// set on the root store.
+func (rs *Store)
+
+GetKVStore(key types.StoreKey)
+
+types.KVStore {
+ s := rs.stores[key]
+ if s == nil {
+ panic(fmt.Sprintf("store does not exist for key: %s", key.Name()))
+}
+ store := types.KVStore(s)
+ if rs.TracingEnabled() {
+ store = tracekv.NewStore(store, rs.traceWriter, rs.getTracingContext())
+}
+ if rs.ListeningEnabled(key) {
+ store = listenkv.NewStore(store, key, rs.listeners[key])
+}
+
+return store
+}
+
+func (rs *Store)
+
+handlePruning(version int64)
+
+error {
+ pruneHeight := rs.pruningManager.GetPruningHeight(version)
+
+rs.logger.Debug("prune start", "height", version)
+
+defer rs.logger.Debug("prune end", "height", version)
+
+return rs.PruneStores(pruneHeight)
+}
+
+// PruneStores prunes all history up to the specific height of the multi store.
+func (rs *Store)
+
+PruneStores(pruningHeight int64) (err error) {
+ if pruningHeight <= 0 {
+ rs.logger.Debug("pruning skipped, height is less than or equal to 0")
+
+return nil
+}
+
+rs.logger.Debug("pruning store", "heights", pruningHeight)
+ for key, store := range rs.stores {
+ rs.logger.Debug("pruning store", "key", key) // Also log store.name (a private variable)?
+
+ // If the store is wrapped with an inter-block cache, we must first unwrap
+ // it to get the underlying IAVL store.
+ if store.GetStoreType() != types.StoreTypeIAVL {
+ continue
+}
+
+store = rs.GetCommitKVStore(key)
+ err := store.(*iavl.Store).DeleteVersionsTo(pruningHeight)
+ if err == nil {
+ continue
+}
+ if errors.Is(err, iavltree.ErrVersionDoesNotExist) {
+ return err
+}
+
+rs.logger.Error("failed to prune store", "key", key, "err", err)
+}
+
+return nil
+}
+
+// GetStoreByName performs a lookup of a StoreKey given a store name typically
+// provided in a path. The StoreKey is then used to perform a lookup and return
+// a Store. If the Store is wrapped in an inter-block cache, it will be unwrapped
+// prior to being returned. If the StoreKey does not exist, nil is returned.
+func (rs *Store)
+
+GetStoreByName(name string)
+
+types.Store {
+ key := rs.keysByName[name]
+ if key == nil {
+ return nil
+}
+
+return rs.GetCommitKVStore(key)
+}
+
+// Query calls substore.Query with the same `req` where `req.Path` is
+// modified to remove the substore prefix.
+// Ie. `req.Path` here is `/<storeName>/<subpath>`, and trimmed to `/<subpath>` for the substore.
+// TODO: add proof for `multistore -> substore`.
+func (rs *Store)
+
+Query(req *types.RequestQuery) (*types.ResponseQuery, error) {
+ path := req.Path
+ storeName, subpath, err := parsePath(path)
+ if err != nil {
+ return &types.ResponseQuery{
+}, err
+}
+ store := rs.GetStoreByName(storeName)
+ if store == nil {
+ return &types.ResponseQuery{
+}, errorsmod.Wrapf(types.ErrUnknownRequest, "no such store: %s", storeName)
+}
+
+queryable, ok := store.(types.Queryable)
+ if !ok {
+ return &types.ResponseQuery{
+}, errorsmod.Wrapf(types.ErrUnknownRequest, "store %s (type %T)
+
+doesn't support queries", storeName, store)
+}
+
+ // trim the path and make the query
+ req.Path = subpath
+ res, err := queryable.Query(req)
+ if !req.Prove || !RequireProof(subpath) {
+ return res, err
+}
+ if res.ProofOps == nil || len(res.ProofOps.Ops) == 0 {
+ return &types.ResponseQuery{
+}, errorsmod.Wrap(types.ErrInvalidRequest, "proof is unexpectedly empty; ensure height has not been pruned")
+}
+
+ // If the request's height is the latest height we've committed, then utilize
+ // the store's lastCommitInfo as this commit info may not be flushed to disk.
+ // Otherwise, we query for the commit info from disk.
+ var commitInfo *types.CommitInfo
+ if res.Height == rs.lastCommitInfo.Version {
+ commitInfo = rs.lastCommitInfo
+}
+
+else {
+ commitInfo, err = rs.GetCommitInfo(res.Height)
+ if err != nil {
+ return &types.ResponseQuery{
+}, err
+}
+
+}
+
+ // Restore origin path and append proof op.
+ res.ProofOps.Ops = append(res.ProofOps.Ops, commitInfo.ProofOp(storeName))
+
+return res, nil
+}
+
+// SetInitialVersion sets the initial version of the IAVL tree. It is used when
+// starting a new chain at an arbitrary height.
+func (rs *Store)
+
+SetInitialVersion(version int64)
+
+error {
+ rs.initialVersion = version
+
+ // Loop through all the stores, if it's an IAVL store, then set initial
+ // version on it.
+ for key, store := range rs.stores {
+ if store.GetStoreType() == types.StoreTypeIAVL {
+ // If the store is wrapped with an inter-block cache, we must first unwrap
+ // it to get the underlying IAVL store.
+ store = rs.GetCommitKVStore(key)
+
+store.(types.StoreWithInitialVersion).SetInitialVersion(version)
+}
+
+}
+
+return nil
+}
+
+// parsePath expects a format like /<storeName>[/<subpath>]
+// Must start with /, subpath may be empty
+// Returns error if it doesn't start with /
+func parsePath(path string) (storeName, subpath string, err error) {
+ if !strings.HasPrefix(path, "/") {
+ return storeName, subpath, errorsmod.Wrapf(types.ErrUnknownRequest, "invalid path: %s", path)
+}
+
+storeName, subpath, found := strings.Cut(path[1:], "/")
+ if !found {
+ return storeName, subpath, nil
+}
+
+return storeName, "/" + subpath, nil
+}
+
+//---------------------- Snapshotting ------------------
+
+// Snapshot implements snapshottypes.Snapshotter. The snapshot output for a given format must be
+// identical across nodes such that chunks from different sources fit together. If the output for a
+// given format changes (at the byte level), the snapshot format must be bumped - see
+// TestMultistoreSnapshot_Checksum test.
+func (rs *Store)
+
+Snapshot(height uint64, protoWriter protoio.Writer)
+
+error {
+ if height == 0 {
+ return errorsmod.Wrap(types.ErrLogic, "cannot snapshot height 0")
+}
+ if height > uint64(GetLatestVersion(rs.db)) {
+ return errorsmod.Wrapf(types.ErrLogic, "cannot snapshot future height %v", height)
+}
+
+ // Collect stores to snapshot (only IAVL stores are supported)
+
+type namedStore struct {
+ *iavl.Store
+ name string
+}
+ stores := []namedStore{
+}
+ keys := keysFromStoreKeyMap(rs.stores)
+ for _, key := range keys {
+ switch store := rs.GetCommitKVStore(key).(type) {
+ case *iavl.Store:
+ stores = append(stores, namedStore{
+ name: key.Name(),
+ Store: store
+})
+ case *transient.Store, *mem.Store:
+ // Non-persisted stores shouldn't be snapshotted
+ continue
+ default:
+ return errorsmod.Wrapf(types.ErrLogic,
+ "don't know how to snapshot store %q of type %T", key.Name(), store)
+}
+
+}
+
+sort.Slice(stores, func(i, j int)
+
+bool {
+ return strings.Compare(stores[i].name, stores[j].name) == -1
+})
+
+ // Export each IAVL store. Stores are serialized as a stream of SnapshotItem Protobuf
+ // messages. The first item contains a SnapshotStore with store metadata (i.e. name),
+ // and the following messages contain a SnapshotNode (i.e. an ExportNode). Store changes
+ // are demarcated by new SnapshotStore items.
+ for _, store := range stores {
+ rs.logger.Debug("starting snapshot", "store", store.name, "height", height)
+
+exporter, err := store.Export(int64(height))
+ if err != nil {
+ rs.logger.Error("snapshot failed; exporter error", "store", store.name, "err", err)
+
+return err
+}
+
+err = func()
+
+error {
+ defer exporter.Close()
+ err := protoWriter.WriteMsg(&snapshottypes.SnapshotItem{
+ Item: &snapshottypes.SnapshotItem_Store{
+ Store: &snapshottypes.SnapshotStoreItem{
+ Name: store.name,
+},
+},
+})
+ if err != nil {
+ rs.logger.Error("snapshot failed; item store write failed", "store", store.name, "err", err)
+
+return err
+}
+ nodeCount := 0
+ for {
+ node, err := exporter.Next()
+ if errors.Is(err, iavltree.ErrorExportDone) {
+ rs.logger.Debug("snapshot Done", "store", store.name, "nodeCount", nodeCount)
+
+break
+}
+
+else if err != nil {
+ return err
+}
+
+err = protoWriter.WriteMsg(&snapshottypes.SnapshotItem{
+ Item: &snapshottypes.SnapshotItem_IAVL{
+ IAVL: &snapshottypes.SnapshotIAVLItem{
+ Key: node.Key,
+ Value: node.Value,
+ Height: int32(node.Height),
+ Version: node.Version,
+},
+},
+})
+ if err != nil {
+ return err
+}
+
+nodeCount++
+}
+
+return nil
+}()
+ if err != nil {
+ return err
+}
+
+}
+
+return nil
+}
+
+// Restore implements snapshottypes.Snapshotter.
+// returns next snapshot item and error.
+func (rs *Store)
+
+Restore(
+ height uint64, format uint32, protoReader protoio.Reader,
+) (snapshottypes.SnapshotItem, error) {
+ // Import nodes into stores. The first item is expected to be a SnapshotItem containing
+ // a SnapshotStoreItem, telling us which store to import into. The following items will contain
+ // SnapshotNodeItem (i.e. ExportNode)
+
+until we reach the next SnapshotStoreItem or EOF.
+ var importer *iavltree.Importer
+ var snapshotItem snapshottypes.SnapshotItem
+loop:
+ for {
+ snapshotItem = snapshottypes.SnapshotItem{
+}
+ err := protoReader.ReadMsg(&snapshotItem)
+ if errors.Is(err, io.EOF) {
+ break
+}
+
+else if err != nil {
+ return snapshottypes.SnapshotItem{
+}, errorsmod.Wrap(err, "invalid protobuf message")
+}
+ switch item := snapshotItem.Item.(type) {
+ case *snapshottypes.SnapshotItem_Store:
+ if importer != nil {
+ err = importer.Commit()
+ if err != nil {
+ return snapshottypes.SnapshotItem{
+}, errorsmod.Wrap(err, "IAVL commit failed")
+}
+
+importer.Close()
+}
+
+store, ok := rs.GetStoreByName(item.Store.Name).(*iavl.Store)
+ if !ok || store == nil {
+ return snapshottypes.SnapshotItem{
+}, errorsmod.Wrapf(types.ErrLogic, "cannot import into non-IAVL store %q", item.Store.Name)
+}
+
+importer, err = store.Import(int64(height))
+ if err != nil {
+ return snapshottypes.SnapshotItem{
+}, errorsmod.Wrap(err, "import failed")
+}
+
+defer importer.Close()
+ // Importer height must reflect the node height (which usually matches the block height, but not always)
+
+rs.logger.Debug("restoring snapshot", "store", item.Store.Name)
+ case *snapshottypes.SnapshotItem_IAVL:
+ if importer == nil {
+ rs.logger.Error("failed to restore; received IAVL node item before store item")
+
+return snapshottypes.SnapshotItem{
+}, errorsmod.Wrap(types.ErrLogic, "received IAVL node item before store item")
+}
+ if item.IAVL.Height > math.MaxInt8 {
+ return snapshottypes.SnapshotItem{
+}, errorsmod.Wrapf(types.ErrLogic, "node height %v cannot exceed %v",
+ item.IAVL.Height, math.MaxInt8)
+}
+ node := &iavltree.ExportNode{
+ Key: item.IAVL.Key,
+ Value: item.IAVL.Value,
+ Height: int8(item.IAVL.Height),
+ Version: item.IAVL.Version,
+}
+ // Protobuf does not differentiate between []byte{
+}
+
+and nil, but fortunately IAVL does
+ // not allow nil keys nor nil values for leaf nodes, so we can always set them to empty.
+ if node.Key == nil {
+ node.Key = []byte{
+}
+
+}
+ if node.Height == 0 && node.Value == nil {
+ node.Value = []byte{
+}
+
+}
+ err := importer.Add(node)
+ if err != nil {
+ return snapshottypes.SnapshotItem{
+}, errorsmod.Wrap(err, "IAVL node import failed")
+}
+
+default:
+ break loop
+}
+
+}
+ if importer != nil {
+ err := importer.Commit()
+ if err != nil {
+ return snapshottypes.SnapshotItem{
+}, errorsmod.Wrap(err, "IAVL commit failed")
+}
+
+importer.Close()
+}
+
+rs.flushMetadata(rs.db, int64(height), rs.buildCommitInfo(int64(height)))
+
+return snapshotItem, rs.LoadLatestVersion()
+}
+
+func (rs *Store)
+
+loadCommitStoreFromParams(key types.StoreKey, id types.CommitID, params storeParams) (types.CommitKVStore, error) {
+ var db dbm.DB
+ if params.db != nil {
+ db = dbm.NewPrefixDB(params.db, []byte("s/_/"))
+}
+
+else {
+ prefix := "s/k:" + params.key.Name() + "/"
+ db = dbm.NewPrefixDB(rs.db, []byte(prefix))
+}
+ switch params.typ {
+ case types.StoreTypeMulti:
+ panic("recursive MultiStores not yet supported")
+ case types.StoreTypeIAVL:
+ store, err := iavl.LoadStoreWithOpts(db, rs.logger, key, id, params.initialVersion, rs.iavlCacheSize, rs.iavlDisableFastNode, rs.metrics, iavltree.AsyncPruningOption(!rs.iavlSyncPruning))
+ if err != nil {
+ return nil, err
+}
+ if rs.interBlockCache != nil {
+ // Wrap and get a CommitKVStore with inter-block caching. Note, this should
+ // only wrap the primary CommitKVStore, not any store that is already
+ // branched as that will create unexpected behavior.
+ store = rs.interBlockCache.GetStoreCache(key, store)
+}
+
+return store, err
+ case types.StoreTypeDB:
+ return commitDBStoreAdapter{
+ Store: dbadapter.Store{
+ DB: db
+}}, nil
+ case types.StoreTypeTransient:
+ _, ok := key.(*types.TransientStoreKey)
+ if !ok {
+ return nil, fmt.Errorf("invalid StoreKey for StoreTypeTransient: %s", key.String())
+}
+
+return transient.NewStore(), nil
+ case types.StoreTypeMemory:
+ if _, ok := key.(*types.MemoryStoreKey); !ok {
+ return nil, fmt.Errorf("unexpected key type for a MemoryStoreKey; got: %s", key.String())
+}
+
+return mem.NewStore(), nil
+
+ default:
+ panic(fmt.Sprintf("unrecognized store type %v", params.typ))
+}
+}
+
+func (rs *Store)
+
+buildCommitInfo(version int64) *types.CommitInfo {
+ keys := keysFromStoreKeyMap(rs.stores)
+ storeInfos := []types.StoreInfo{
+}
+ for _, key := range keys {
+ store := rs.stores[key]
+ storeType := store.GetStoreType()
+ if storeType == types.StoreTypeTransient || storeType == types.StoreTypeMemory {
+ continue
+}
+
+storeInfos = append(storeInfos, types.StoreInfo{
+ Name: key.Name(),
+ CommitId: store.LastCommitID(),
+})
+}
+
+return &types.CommitInfo{
+ Version: version,
+ StoreInfos: storeInfos,
+}
+}
+
+// RollbackToVersion deletes the versions after `target` and updates the latest version.
+func (rs *Store)
+
+RollbackToVersion(target int64)
+
+error {
+ if target <= 0 {
+ return fmt.Errorf("invalid rollback height target: %d", target)
+}
+ for key, store := range rs.stores {
+ if store.GetStoreType() == types.StoreTypeIAVL {
+ // If the store is wrapped with an inter-block cache, we must first unwrap
+ // it to get the underlying IAVL store.
+ store = rs.GetCommitKVStore(key)
+ err := store.(*iavl.Store).LoadVersionForOverwriting(target)
+ if err != nil {
+ return err
+}
+
+}
+
+}
+
+rs.flushMetadata(rs.db, target, rs.buildCommitInfo(target))
+
+return rs.LoadLatestVersion()
+}
+
+// SetCommitHeader sets the commit block header of the store.
+func (rs *Store)
+
+SetCommitHeader(h cmtproto.Header) {
+ rs.commitHeader = h
+}
+
+// GetCommitInfo attempts to retrieve CommitInfo for a given version/height. It
+// will return an error if no CommitInfo exists, we fail to unmarshal the record
+// or if we cannot retrieve the object from the DB.
+func (rs *Store)
+
+GetCommitInfo(ver int64) (*types.CommitInfo, error) {
+ cInfoKey := fmt.Sprintf(commitInfoKeyFmt, ver)
+
+bz, err := rs.db.Get([]byte(cInfoKey))
+ if err != nil {
+ return nil, errorsmod.Wrap(err, "failed to get commit info")
+}
+
+else if bz == nil {
+ return nil, errors.New("no commit info found")
+}
+ cInfo := &types.CommitInfo{
+}
+ if err = cInfo.Unmarshal(bz); err != nil {
+ return nil, errorsmod.Wrap(err, "failed unmarshal commit info")
+}
+
+return cInfo, nil
+}
+
+func (rs *Store)
+
+flushMetadata(db dbm.DB, version int64, cInfo *types.CommitInfo) {
+ rs.logger.Debug("flushing metadata", "height", version)
+ batch := db.NewBatch()
+
+defer func() {
+ _ = batch.Close()
+}()
+ if cInfo != nil {
+ flushCommitInfo(batch, version, cInfo)
+}
+
+else {
+ rs.logger.Debug("commitInfo is nil, not flushed", "height", version)
+}
+
+flushLatestVersion(batch, version)
+ if err := batch.WriteSync(); err != nil {
+ panic(fmt.Errorf("error on batch write %w", err))
+}
+
+rs.logger.Debug("flushing metadata finished", "height", version)
+}
+
+type storeParams struct {
+ key types.StoreKey
+ db dbm.DB
+ typ types.StoreType
+ initialVersion uint64
+}
+
+func newStoreParams(key types.StoreKey, db dbm.DB, typ types.StoreType, initialVersion uint64)
+
+storeParams {
+ return storeParams{
+ key: key,
+ db: db,
+ typ: typ,
+ initialVersion: initialVersion,
+}
+}
+
+func GetLatestVersion(db dbm.DB)
+
+int64 {
+ bz, err := db.Get([]byte(latestVersionKey))
+ if err != nil {
+ panic(err)
+}
+
+else if bz == nil {
+ return 0
+}
+
+var latestVersion int64
+ if err := gogotypes.StdInt64Unmarshal(&latestVersion, bz); err != nil {
+ panic(err)
+}
+
+return latestVersion
+}
+
+// Commits each store and returns a new commitInfo.
+func commitStores(version int64, storeMap map[types.StoreKey]types.CommitKVStore, removalMap map[types.StoreKey]bool) *types.CommitInfo {
+ storeInfos := make([]types.StoreInfo, 0, len(storeMap))
+ storeKeys := keysFromStoreKeyMap(storeMap)
+ for _, key := range storeKeys {
+ store := storeMap[key]
+ last := store.LastCommitID()
+
+ // If a commit event execution is interrupted, a new iavl store's version
+ // will be larger than the RMS's metadata, when the block is replayed, we
+ // should avoid committing that iavl store again.
+ var commitID types.CommitID
+ if last.Version >= version {
+ last.Version = version
+ commitID = last
+}
+
+else {
+ commitID = store.Commit()
+}
+ storeType := store.GetStoreType()
+ if storeType == types.StoreTypeTransient || storeType == types.StoreTypeMemory {
+ continue
+}
+ if !removalMap[key] {
+ si := types.StoreInfo{
+}
+
+si.Name = key.Name()
+
+si.CommitId = commitID
+ storeInfos = append(storeInfos, si)
+}
+
+}
+
+sort.SliceStable(storeInfos, func(i, j int)
+
+bool {
+ return strings.Compare(storeInfos[i].Name, storeInfos[j].Name) < 0
+})
+
+return &types.CommitInfo{
+ Version: version,
+ StoreInfos: storeInfos,
+}
+}
+
+func flushCommitInfo(batch dbm.Batch, version int64, cInfo *types.CommitInfo) {
+ bz, err := cInfo.Marshal()
+ if err != nil {
+ panic(err)
+}
+ cInfoKey := fmt.Sprintf(commitInfoKeyFmt, version)
+
+err = batch.Set([]byte(cInfoKey), bz)
+ if err != nil {
+ panic(err)
+}
+}
+
+func flushLatestVersion(batch dbm.Batch, version int64) {
+ bz, err := gogotypes.StdInt64Marshal(version)
+ if err != nil {
+ panic(err)
+}
+
+err = batch.Set([]byte(latestVersionKey), bz)
+ if err != nil {
+ panic(err)
+}
+}
+```
+
+The `rootMulti.Store` is a base-layer multistore built around a `db` on top of which multiple `KVStores` can be mounted, and is the default multistore store used in [`baseapp`](/sdk/v0.53/learn/advanced/baseapp).
+
+### CacheMultiStore
+
+Whenever the `rootMulti.Store` needs to be branched, a [`cachemulti.Store`](https://github.com/cosmos/cosmos-sdk/blob/v0.53.0/store/cachemulti/store.go) is used.
+
+```go expandable
+package cachemulti
+
+import (
+
+ "fmt"
+ "io"
+ "maps"
+
+ dbm "github.com/cosmos/cosmos-db"
+ "cosmossdk.io/store/cachekv"
+ "cosmossdk.io/store/dbadapter"
+ "cosmossdk.io/store/tracekv"
+ "cosmossdk.io/store/types"
+)
+
+// storeNameCtxKey is the TraceContext metadata key that identifies
+// the store which emitted a given trace.
+const storeNameCtxKey = "store_name"
+
+//----------------------------------------
+// Store
+
+// Store holds many branched stores.
+// Implements MultiStore.
+// NOTE: a Store (and MultiStores in general)
+
+should never expose the
+// keys for the substores.
+type Store struct {
+ db types.CacheKVStore
+ stores map[types.StoreKey]types.CacheWrap
+ keys map[string]types.StoreKey
+
+ traceWriter io.Writer
+ traceContext types.TraceContext
+}
+
+var _ types.CacheMultiStore = Store{
+}
+
+// NewFromKVStore creates a new Store object from a mapping of store keys to
+// CacheWrapper objects and a KVStore as the database. Each CacheWrapper store
+// is a branched store.
+func NewFromKVStore(
+ store types.KVStore, stores map[types.StoreKey]types.CacheWrapper,
+ keys map[string]types.StoreKey, traceWriter io.Writer, traceContext types.TraceContext,
+)
+
+Store {
+ cms := Store{
+ db: cachekv.NewStore(store),
+ stores: make(map[types.StoreKey]types.CacheWrap, len(stores)),
+ keys: keys,
+ traceWriter: traceWriter,
+ traceContext: traceContext,
+}
+ for key, store := range stores {
+ if cms.TracingEnabled() {
+ tctx := cms.traceContext.Clone().Merge(types.TraceContext{
+ storeNameCtxKey: key.Name(),
+})
+
+store = tracekv.NewStore(store.(types.KVStore), cms.traceWriter, tctx)
+}
+
+cms.stores[key] = cachekv.NewStore(store.(types.KVStore))
+}
+
+return cms
+}
+
+// NewStore creates a new Store object from a mapping of store keys to
+// CacheWrapper objects. Each CacheWrapper store is a branched store.
+func NewStore(
+ db dbm.DB, stores map[types.StoreKey]types.CacheWrapper, keys map[string]types.StoreKey,
+ traceWriter io.Writer, traceContext types.TraceContext,
+)
+
+Store {
+ return NewFromKVStore(dbadapter.Store{
+ DB: db
+}, stores, keys, traceWriter, traceContext)
+}
+
+func newCacheMultiStoreFromCMS(cms Store)
+
+Store {
+ stores := make(map[types.StoreKey]types.CacheWrapper)
+ for k, v := range cms.stores {
+ stores[k] = v
+}
+
+return NewFromKVStore(cms.db, stores, nil, cms.traceWriter, cms.traceContext)
+}
+
+// SetTracer sets the tracer for the MultiStore that the underlying
+// stores will utilize to trace operations. A MultiStore is returned.
+func (cms Store)
+
+SetTracer(w io.Writer)
+
+types.MultiStore {
+ cms.traceWriter = w
+ return cms
+}
+
+// SetTracingContext updates the tracing context for the MultiStore by merging
+// the given context with the existing context by key. Any existing keys will
+// be overwritten. It is implied that the caller should update the context when
+// necessary between tracing operations. It returns a modified MultiStore.
+func (cms Store)
+
+SetTracingContext(tc types.TraceContext)
+
+types.MultiStore {
+ if cms.traceContext != nil {
+ maps.Copy(cms.traceContext, tc)
+}
+
+else {
+ cms.traceContext = tc
+}
+
+return cms
+}
+
+// TracingEnabled returns if tracing is enabled for the MultiStore.
+func (cms Store)
+
+TracingEnabled()
+
+bool {
+ return cms.traceWriter != nil
+}
+
+// LatestVersion returns the branch version of the store
+func (cms Store)
+
+LatestVersion()
+
+int64 {
+ panic("cannot get latest version from branch cached multi-store")
+}
+
+// GetStoreType returns the type of the store.
+func (cms Store)
+
+GetStoreType()
+
+types.StoreType {
+ return types.StoreTypeMulti
+}
+
+// Write calls Write on each underlying store.
+func (cms Store)
+
+Write() {
+ cms.db.Write()
+ for _, store := range cms.stores {
+ store.Write()
+}
+}
+
+// Implements CacheWrapper.
+func (cms Store)
+
+CacheWrap()
+
+types.CacheWrap {
+ return cms.CacheMultiStore().(types.CacheWrap)
+}
+
+// CacheWrapWithTrace implements the CacheWrapper interface.
+func (cms Store)
+
+CacheWrapWithTrace(_ io.Writer, _ types.TraceContext)
+
+types.CacheWrap {
+ return cms.CacheWrap()
+}
+
+// Implements MultiStore.
+func (cms Store)
+
+CacheMultiStore()
+
+types.CacheMultiStore {
+ return newCacheMultiStoreFromCMS(cms)
+}
+
+// CacheMultiStoreWithVersion implements the MultiStore interface. It will panic
+// as an already cached multi-store cannot load previous versions.
+//
+// TODO: The store implementation can possibly be modified to support this as it
+// seems safe to load previous versions (heights).
+func (cms Store)
+
+CacheMultiStoreWithVersion(_ int64) (types.CacheMultiStore, error) {
+ panic("cannot branch cached multi-store with a version")
+}
+
+// GetStore returns an underlying Store by key.
+func (cms Store)
+
+GetStore(key types.StoreKey)
+
+types.Store {
+ s := cms.stores[key]
+ if key == nil || s == nil {
+ panic(fmt.Sprintf("kv store with key %v has not been registered in stores", key))
+}
+
+return s.(types.Store)
+}
+
+// GetKVStore returns an underlying KVStore by key.
+func (cms Store)
+
+GetKVStore(key types.StoreKey)
+
+types.KVStore {
+ store := cms.stores[key]
+ if key == nil || store == nil {
+ panic(fmt.Sprintf("kv store with key %v has not been registered in stores", key))
+}
+
+return store.(types.KVStore)
+}
+```
+
+`cachemulti.Store` branches all substores (creates a virtual store for each substore) in its constructor and holds them in `Store.stores`. Moreover, it caches all read queries. `Store.GetKVStore()` returns the store from `Store.stores`, and `Store.Write()` recursively calls `CacheWrap.Write()` on all the substores.
+
+## Base-layer KVStores
+
+### `KVStore` and `CommitKVStore` Interfaces
+
+A `KVStore` is a simple key-value store used to store and retrieve data. A `CommitKVStore` is a `KVStore` that also implements a `Committer`. By default, stores mounted in `baseapp`'s main `CommitMultiStore` are `CommitKVStore`s. The `KVStore` interface is primarily used to restrict modules from accessing the committer.
+
+Individual `KVStore`s are used by modules to manage a subset of the global state. `KVStores` can be accessed by objects that hold a specific key. This `key` should only be exposed to the [`keeper`](/sdk/v0.53/build/building-modules/keeper) of the module that defines the store.
+
+`CommitKVStore`s are declared by proxy of their respective `key` and mounted on the application's [multistore](#multistore) in the [main application file](/sdk/v0.53/learn/beginner/app-anatomy#core-application-file). In the same file, the `key` is also passed to the module's `keeper` that is responsible for managing the store.
+
+```go expandable
+package types
+
+import (
+
+ "fmt"
+ "io"
+ "maps"
+ "slices"
+ "github.com/cometbft/cometbft/proto/tendermint/crypto"
+ dbm "github.com/cosmos/cosmos-db"
+ "cosmossdk.io/store/metrics"
+ pruningtypes "cosmossdk.io/store/pruning/types"
+ snapshottypes "cosmossdk.io/store/snapshots/types"
+)
+
+type Store interface {
+ GetStoreType()
+
+StoreType
+ CacheWrapper
+}
+
+// something that can persist to disk
+type Committer interface {
+ Commit()
+
+CommitID
+ LastCommitID()
+
+CommitID
+
+ // WorkingHash returns the hash of the KVStore's state before commit.
+ WorkingHash() []byte
+
+ SetPruning(pruningtypes.PruningOptions)
+
+GetPruning()
+
+pruningtypes.PruningOptions
+}
+
+// Stores of MultiStore must implement CommitStore.
+type CommitStore interface {
+ Committer
+ Store
+}
+
+// Queryable allows a Store to expose internal state to the abci.Query
+// interface. Multistore can route requests to the proper Store.
+//
+// This is an optional, but useful extension to any CommitStore
+type Queryable interface {
+ Query(*RequestQuery) (*ResponseQuery, error)
+}
+
+type RequestQuery struct {
+ Data []byte
+ Path string
+ Height int64
+ Prove bool
+}
+
+type ResponseQuery struct {
+ Code uint32
+ Log string
+ Info string
+ Index int64
+ Key []byte
+ Value []byte
+ ProofOps *crypto.ProofOps
+ Height int64
+ Codespace string
+}
+
+//----------------------------------------
+// MultiStore
+
+// StoreUpgrades defines a series of transformations to apply the multistore db upon load
+type StoreUpgrades struct {
+ Added []string `json:"added"`
+ Renamed []StoreRename `json:"renamed"`
+ Deleted []string `json:"deleted"`
+}
+
+// StoreRename defines a name change of a sub-store.
+// All data previously under a PrefixStore with OldKey will be copied
+// to a PrefixStore with NewKey, then deleted from OldKey store.
+type StoreRename struct {
+ OldKey string `json:"old_key"`
+ NewKey string `json:"new_key"`
+}
+
+// IsAdded returns true if the given key should be added
+func (s *StoreUpgrades)
+
+IsAdded(key string)
+
+bool {
+ if s == nil {
+ return false
+}
+
+return slices.Contains(s.Added, key)
+}
+
+// IsDeleted returns true if the given key should be deleted
+func (s *StoreUpgrades)
+
+IsDeleted(key string)
+
+bool {
+ if s == nil {
+ return false
+}
+
+return slices.Contains(s.Deleted, key)
+}
+
+// RenamedFrom returns the oldKey if it was renamed
+// Returns "" if it was not renamed
+func (s *StoreUpgrades)
+
+RenamedFrom(key string)
+
+string {
+ if s == nil {
+ return ""
+}
+ for _, re := range s.Renamed {
+ if re.NewKey == key {
+ return re.OldKey
+}
+
+}
+
+return ""
+}
+
+type MultiStore interface {
+ Store
+
+ // Branches MultiStore into a cached storage object.
+ // NOTE: Caller should probably not call .Write()
+
+on each, but
+ // call CacheMultiStore.Write().
+ CacheMultiStore()
+
+CacheMultiStore
+
+ // CacheMultiStoreWithVersion branches the underlying MultiStore where
+ // each stored is loaded at a specific version (height).
+ CacheMultiStoreWithVersion(version int64) (CacheMultiStore, error)
+
+ // Convenience for fetching substores.
+ // If the store does not exist, panics.
+ GetStore(StoreKey)
+
+Store
+ GetKVStore(StoreKey)
+
+KVStore
+
+ // TracingEnabled returns if tracing is enabled for the MultiStore.
+ TracingEnabled()
+
+bool
+
+ // SetTracer sets the tracer for the MultiStore that the underlying
+ // stores will utilize to trace operations. The modified MultiStore is
+ // returned.
+ SetTracer(w io.Writer)
+
+MultiStore
+
+ // SetTracingContext sets the tracing context for a MultiStore. It is
+ // implied that the caller should update the context when necessary between
+ // tracing operations. The modified MultiStore is returned.
+ SetTracingContext(TraceContext)
+
+MultiStore
+
+ // LatestVersion returns the latest version in the store
+ LatestVersion()
+
+int64
+}
+
+// From MultiStore.CacheMultiStore()....
+type CacheMultiStore interface {
+ MultiStore
+ Write() // Writes operations to underlying KVStore
+}
+
+// CommitMultiStore is an interface for a MultiStore without cache capabilities.
+type CommitMultiStore interface {
+ Committer
+ MultiStore
+ snapshottypes.Snapshotter
+
+ // Mount a store of type using the given db.
+ // If db == nil, the new store will use the CommitMultiStore db.
+ MountStoreWithDB(key StoreKey, typ StoreType, db dbm.DB)
+
+ // Panics on a nil key.
+ GetCommitStore(key StoreKey)
+
+CommitStore
+
+ // Panics on a nil key.
+ GetCommitKVStore(key StoreKey)
+
+CommitKVStore
+
+ // Load the latest persisted version. Called once after all calls to
+ // Mount*Store()
+
+are complete.
+ LoadLatestVersion()
+
+error
+
+ // LoadLatestVersionAndUpgrade will load the latest version, but also
+ // rename/delete/create sub-store keys, before registering all the keys
+ // in order to handle breaking formats in migrations
+ LoadLatestVersionAndUpgrade(upgrades *StoreUpgrades)
+
+error
+
+ // LoadVersionAndUpgrade will load the named version, but also
+ // rename/delete/create sub-store keys, before registering all the keys
+ // in order to handle breaking formats in migrations
+ LoadVersionAndUpgrade(ver int64, upgrades *StoreUpgrades) error
+
+
+
+ // Load a specific persisted version. When you load an old version, or when
+ // the last commit attempt didn't complete, the next commit after loading
+ // must be idempotent (return the same commit id). Otherwise the behavior is
+ // undefined.
+ LoadVersion(ver int64) error
+
+
+
+
+
+ // Set an inter-block (persistent) cache that maintains a mapping from
+ // StoreKeys to CommitKVStores.
+ SetInterBlockCache(MultiStorePersistentCache)
+
+ // SetInitialVersion sets the initial version of the IAVL tree. It is used when
+ // starting a new chain at an arbitrary height.
+ SetInitialVersion(version int64) error
+
+
+
+ // SetIAVLCacheSize sets the cache size of the IAVL tree.
+ SetIAVLCacheSize(size int)
+
+ // SetIAVLDisableFastNode enables/disables fastnode feature on iavl.
+ SetIAVLDisableFastNode(disable bool)
+
+ // SetIAVLSyncPruning set sync/async pruning on iavl.
+ // It is not recommended to use this option.
+ // It is here to enable the prune command to force this to true, allowing the command to wait
+ // for the pruning to finish before returning.
+ SetIAVLSyncPruning(sync bool)
+
+ // RollbackToVersion rollback the db to specific version(height).
+ RollbackToVersion(version int64) error
+
+
+
+ // ListeningEnabled returns if listening is enabled for the KVStore belonging to the provided StoreKey
+ ListeningEnabled(key StoreKey) bool
+
+
+
+ // AddListeners adds a listener for the KVStore belonging to the provided StoreKey
+ AddListeners(keys []StoreKey)
+
+ // PopStateCache returns the accumulated state change messages from the CommitMultiStore
+ PopStateCache() []*StoreKVPair
+
+ // SetMetrics sets the metrics for the KVStore
+ SetMetrics(metrics metrics.StoreMetrics)
+}
+
+//---------subsp-------------------------------
+// KVStore
+
+// BasicKVStore is a simple interface to get/set data
+type BasicKVStore interface {
+ // Get returns nil if key doesn't exist. Panics on nil key.
+ Get(key []byte) []byte
+
+ // Has checks if a key exists. Panics on nil key.
+ Has(key []byte) bool
+
+
+
+ // Set sets the key. Panics on nil key or value.
+ Set(key, value []byte)
+
+ // Delete deletes the key. Panics on nil key.
+ Delete(key []byte)
+}
+
+// KVStore additionally provides iteration and deletion
+type KVStore interface {
+ Store
+ BasicKVStore
+
+ // Iterator over a domain of keys in ascending order. End is exclusive.
+ // Start must be less than end, or the Iterator is invalid.
+ // Iterator must be closed by caller.
+ // To iterate over entire domain, use store.Iterator(nil, nil)
+ // CONTRACT: No writes may happen within a domain while an iterator exists over it.
+ // Exceptionally allowed for cachekv.Store, safe to write in the modules.
+ Iterator(start, end []byte) Iterator
+
+
+
+ // Iterator over a domain of keys in descending order. End is exclusive.
+ // Start must be less than end, or the Iterator is invalid.
+ // Iterator must be closed by caller.
+ // CONTRACT: No writes may happen within a domain while an iterator exists over it.
+ // Exceptionally allowed for cachekv.Store, safe to write in the modules.
+ ReverseIterator(start, end []byte) Iterator
+
+
+}
+
+// Iterator is an alias db's Iterator for convenience.
+type Iterator = dbm.Iterator
+
+
+
+// CacheKVStore branches a KVStore and provides read cache functionality.
+// After calling .Write() on the CacheKVStore, all previously created
+// CacheKVStores on the object expire.
+type CacheKVStore interface {
+ KVStore
+
+ // Writes operations to underlying KVStore
+ Write()
+}
+
+// CommitKVStore is an interface for MultiStore.
+type CommitKVStore interface {
+ Committer
+ KVStore
+}
+
+//----------------------------------------
+// CacheWrap
+
+
+
+// CacheWrap is the most appropriate interface for store ephemeral branching and cache.
+// For example, IAVLStore.CacheWrap() returns a CacheKVStore. CacheWrap should not return
+// a Committer, since Commit ephemeral store make no sense. It can return KVStore,
+// HeapStore, SpaceStore, etc.
+type CacheWrap interface {
+ // Write syncs with the underlying store.
+ Write()
+
+ // CacheWrap recursively wraps again.
+ CacheWrap() CacheWrap
+
+
+
+ // CacheWrapWithTrace recursively wraps again with tracing enabled.
+ CacheWrapWithTrace(w io.Writer, tc TraceContext) CacheWrap
+
+
+}
+
+type CacheWrapper interface {
+ // CacheWrap branches a store.
+ CacheWrap() CacheWrap
+
+
+
+ // CacheWrapWithTrace branches a store with tracing enabled.
+ CacheWrapWithTrace(w io.Writer, tc TraceContext) CacheWrap
+
+
+}
+
+func (cid CommitID) IsZero() bool {
+ return cid.Version == 0 && len(cid.Hash) == 0
+}
+
+
+
+
+
+func (cid CommitID) String() string {
+ return fmt.Sprintf("CommitID{%v:%X}", cid.Hash, cid.Version)
+}
+
+
+
+
+
+
+//----------------------------------------
+// Store types
+
+// kind of store
+type StoreType int
+
+const (
+ StoreTypeMulti StoreType = iota
+ StoreTypeDB
+ StoreTypeIAVL
+ StoreTypeTransient
+ StoreTypeMemory
+ StoreTypeSMT
+ StoreTypePersistent
+)
+
+
+
+
+
+func (st StoreType) String() string {
+ switch st {
+ case StoreTypeMulti:
+ return "StoreTypeMulti"
+ case StoreTypeDB:
+ return "StoreTypeDB"
+ case StoreTypeIAVL:
+ return "StoreTypeIAVL"
+ case StoreTypeTransient:
+ return "StoreTypeTransient"
+ case StoreTypeMemory:
+ return "StoreTypeMemory"
+ case StoreTypeSMT:
+ return "StoreTypeSMT"
+ case StoreTypePersistent:
+ return "StoreTypePersistent"
+}
+
+return "unknown store type"
+}
+
+//----------------------------------------
+// Keys for accessing substores
+
+// StoreKey is a key used to index stores in a MultiStore.
+type StoreKey interface {
+ Name() string
+ String() string
+
+
+
+
+}
+
+// CapabilityKey represent the Cosmos SDK keys for object-capability
+// generation in the IBC protocol as defined in https://github.com/cosmos/ibc/tree/master/spec/core/ics-005-port-allocation#data-structures
+type CapabilityKey StoreKey
+
+// KVStoreKey is used for accessing substores.
+// Only the pointer value should ever be used - it functions as a capabilities key.
+type KVStoreKey struct {
+ name string
+}
+
+// NewKVStoreKey returns a new pointer to a KVStoreKey.
+// Use a pointer so keys don't collide.
+func NewKVStoreKey(name string) *KVStoreKey {
+ if name == "" {
+ panic("empty key name not allowed")
+}
+
+return &KVStoreKey{
+ name: name,
+}
+}
+
+// NewKVStoreKeys returns a map of new pointers to KVStoreKey's.
+// The function will panic if there is a potential conflict in names (see `assertNoPrefix`
+// function for more details).
+func NewKVStoreKeys(names ...string)
+
+map[string]*KVStoreKey {
+ assertNoCommonPrefix(names)
+ keys := make(map[string]*KVStoreKey, len(names))
+ for _, n := range names {
+ keys[n] = NewKVStoreKey(n)
+}
+
+return keys
+}
+
+func (key *KVStoreKey) Name() string {
+ return key.name
+}
+
+
+
+
+
+func (key *KVStoreKey) String() string {
+ return fmt.Sprintf("KVStoreKey{%p, %s}", key, key.name)
+}
+
+
+
+
+
+
+// TransientStoreKey is used for indexing transient stores in a MultiStore
+type TransientStoreKey struct {
+ name string
+}
+
+// Constructs new TransientStoreKey
+// Must return a pointer according to the ocap principle
+func NewTransientStoreKey(name string) *TransientStoreKey {
+ return &TransientStoreKey{
+ name: name,
+}
+}
+
+// Implements StoreKey
+func (key *TransientStoreKey) Name() string {
+ return key.name
+}
+
+
+
+
+
+// Implements StoreKey
+func (key *TransientStoreKey) String() string {
+ return fmt.Sprintf("TransientStoreKey{%p, %s}", key, key.name)
+}
+
+
+
+
+
+
+// MemoryStoreKey defines a typed key to be used with an in-memory KVStore.
+type MemoryStoreKey struct {
+ name string
+}
+
+func NewMemoryStoreKey(name string) *MemoryStoreKey {
+ return &MemoryStoreKey{
+ name: name,
+ }
+}
+
+
+
+// Name returns the name of the MemoryStoreKey.
+func (key *MemoryStoreKey) Name() string {
+ return key.name
+}
+
+
+
+
+// String returns a stringified representation of the MemoryStoreKey.
+func (key *MemoryStoreKey) String() string {
+ return fmt.Sprintf("MemoryStoreKey{%p, %s}", key, key.name)
+}
+
+
+
+
+
+//----------------------------------------
+
+// TraceContext contains TraceKVStore context data. It will be written with
+// every trace operation.
+type TraceContext map[string]interface{
+}
+
+// Clone clones tc into another instance of TraceContext.
+func (tc TraceContext)
+
+Clone()
+
+TraceContext {
+ ret := TraceContext{
+}
+
+maps.Copy(ret, tc)
+
+return ret
+}
+
+// Merge merges value of newTc into tc.
+func (tc TraceContext)
+
+Merge(newTc TraceContext)
+
+TraceContext {
+ if tc == nil {
+ tc = TraceContext{
+}
+
+}
+
+maps.Copy(tc, newTc)
+
+return tc
+}
+
+// MultiStorePersistentCache defines an interface which provides inter-block
+// (persistent)
+
+caching capabilities for multiple CommitKVStores based on StoreKeys.
+type MultiStorePersistentCache interface {
+ // Wrap and return the provided CommitKVStore with an inter-block (persistent)
+ // cache.
+ GetStoreCache(key StoreKey, store CommitKVStore)
+
+CommitKVStore
+
+ // Return the underlying CommitKVStore for a StoreKey.
+ Unwrap(key StoreKey)
+
+CommitKVStore
+
+ // Reset the entire set of internal caches.
+ Reset()
+}
+
+// StoreWithInitialVersion is a store that can have an arbitrary initial
+// version.
+type StoreWithInitialVersion interface {
+ // SetInitialVersion sets the initial version of the IAVL tree. It is used when
+ // starting a new chain at an arbitrary height.
+ SetInitialVersion(version int64)
+}
+
+// NewTransientStoreKeys constructs a new map of TransientStoreKey's
+// Must return pointers according to the ocap principle
+// The function will panic if there is a potential conflict in names
+// see `assertNoCommonPrefix` function for more details.
+func NewTransientStoreKeys(names ...string)
+
+map[string]*TransientStoreKey {
+ assertNoCommonPrefix(names)
+ keys := make(map[string]*TransientStoreKey)
+ for _, n := range names {
+ keys[n] = NewTransientStoreKey(n)
+}
+
+return keys
+}
+
+// NewMemoryStoreKeys constructs a new map matching store key names to their
+// respective MemoryStoreKey references.
+// The function will panic if there is a potential conflict in names (see `assertNoPrefix`
+// function for more details).
+func NewMemoryStoreKeys(names ...string)
+
+map[string]*MemoryStoreKey {
+ assertNoCommonPrefix(names)
+ keys := make(map[string]*MemoryStoreKey)
+ for _, n := range names {
+ keys[n] = NewMemoryStoreKey(n)
+}
+
+return keys
+}
+```
+
+Apart from the traditional `Get` and `Set` methods, that a `KVStore` must implement via the `BasicKVStore` interface; a `KVStore` must provide an `Iterator(start, end)` method which returns an `Iterator` object. It is used to iterate over a range of keys, typically keys that share a common prefix. Below is an example from the bank's module keeper, used to iterate over all account balances:
+
+```go expandable
+package keeper
+
+import (
+
+ "context"
+ "fmt"
+ "cosmossdk.io/collections"
+ "cosmossdk.io/collections/indexes"
+ "cosmossdk.io/core/store"
+ errorsmod "cosmossdk.io/errors"
+ "cosmossdk.io/log"
+ "cosmossdk.io/math"
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/x/bank/types"
+)
+
+var _ ViewKeeper = (*BaseViewKeeper)(nil)
+
+// ViewKeeper defines a module interface that facilitates read only access to
+// account balances.
+type ViewKeeper interface {
+ ValidateBalance(ctx context.Context, addr sdk.AccAddress) error
+ HasBalance(ctx context.Context, addr sdk.AccAddress, amt sdk.Coin) bool
+
+ GetAllBalances(ctx context.Context, addr sdk.AccAddress) sdk.Coins
+ GetAccountsBalances(ctx context.Context) []types.Balance
+ GetBalance(ctx context.Context, addr sdk.AccAddress, denom string) sdk.Coin
+ LockedCoins(ctx context.Context, addr sdk.AccAddress) sdk.Coins
+ SpendableCoins(ctx context.Context, addr sdk.AccAddress) sdk.Coins
+ SpendableCoin(ctx context.Context, addr sdk.AccAddress, denom string) sdk.Coin
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ IterateAccountBalances(ctx context.Context, addr sdk.AccAddress, cb func(coin sdk.Coin) (stop bool))
+
+IterateAllBalances(ctx context.Context, cb func(address sdk.AccAddress, coin sdk.Coin) (stop bool))
+}
+
+func newBalancesIndexes(sb *collections.SchemaBuilder)
+
+BalancesIndexes {
+ return BalancesIndexes{
+ Denom: indexes.NewReversePair[math.Int](
+ sb, types.DenomAddressPrefix, "address_by_denom_index",
+ collections.PairKeyCodec(sdk.LengthPrefixedAddressKey(sdk.AccAddressKey), collections.StringKey), // nolint:staticcheck // Note: refer to the LengthPrefixedAddressKey docs to understand why we do this.
+ indexes.WithReversePairUncheckedValue(), // denom to address indexes were stored as Key: Join(denom, address)
+
+Value: []byte{0
+}, this will migrate the value to []byte{
+}
+
+in a lazy way.
+ ),
+}
+}
+
+type BalancesIndexes struct {
+ Denom *indexes.ReversePair[sdk.AccAddress, string, math.Int]
+}
+
+func (b BalancesIndexes)
+
+IndexesList() []collections.Index[collections.Pair[sdk.AccAddress, string], math.Int] {
+ return []collections.Index[collections.Pair[sdk.AccAddress, string], math.Int]{
+ b.Denom
+}
+}
+
+// BaseViewKeeper implements a read only keeper implementation of ViewKeeper.
+type BaseViewKeeper struct {
+ cdc codec.BinaryCodec
+ storeService store.KVStoreService
+ ak types.AccountKeeper
+ logger log.Logger
+
+ Schema collections.Schema
+ Supply collections.Map[string, math.Int]
+ DenomMetadata collections.Map[string, types.Metadata]
+ SendEnabled collections.Map[string, bool]
+ Balances *collections.IndexedMap[collections.Pair[sdk.AccAddress, string], math.Int, BalancesIndexes]
+ Params collections.Item[types.Params]
+}
+
+// NewBaseViewKeeper returns a new BaseViewKeeper.
+func NewBaseViewKeeper(cdc codec.BinaryCodec, storeService store.KVStoreService, ak types.AccountKeeper, logger log.Logger)
+
+BaseViewKeeper {
+ sb := collections.NewSchemaBuilder(storeService)
+ k := BaseViewKeeper{
+ cdc: cdc,
+ storeService: storeService,
+ ak: ak,
+ logger: logger,
+ Supply: collections.NewMap(sb, types.SupplyKey, "supply", collections.StringKey, sdk.IntValue),
+ DenomMetadata: collections.NewMap(sb, types.DenomMetadataPrefix, "denom_metadata", collections.StringKey, codec.CollValue[types.Metadata](cdc)),
+ SendEnabled: collections.NewMap(sb, types.SendEnabledPrefix, "send_enabled", collections.StringKey, codec.BoolValue), // NOTE: we use a bool value which uses protobuf to retain state backwards compat
+ Balances: collections.NewIndexedMap(sb, types.BalancesPrefix, "balances", collections.PairKeyCodec(sdk.AccAddressKey, collections.StringKey), types.BalanceValueCodec, newBalancesIndexes(sb)),
+ Params: collections.NewItem(sb, types.ParamsKey, "params", codec.CollValue[types.Params](cdc)),
+}
+
+schema, err := sb.Build()
+ if err != nil {
+ panic(err)
+}
+
+k.Schema = schema
+ return k
+}
+
+// HasBalance returns whether or not an account has at least amt balance.
+func (k BaseViewKeeper)
+
+HasBalance(ctx context.Context, addr sdk.AccAddress, amt sdk.Coin)
+
+bool {
+ return k.GetBalance(ctx, addr, amt.Denom).IsGTE(amt)
+}
+
+// Logger returns a module-specific logger.
+func (k BaseViewKeeper)
+
+Logger()
+
+log.Logger {
+ return k.logger
+}
+
+// GetAllBalances returns all the account balances for the given account address.
+func (k BaseViewKeeper)
+
+GetAllBalances(ctx context.Context, addr sdk.AccAddress)
+
+sdk.Coins {
+ balances := sdk.NewCoins()
+
+k.IterateAccountBalances(ctx, addr, func(balance sdk.Coin)
+
+bool {
+ balances = balances.Add(balance)
+
+return false
+})
+
+return balances.Sort()
+}
+
+// GetAccountsBalances returns all the accounts balances from the store.
+func (k BaseViewKeeper)
+
+GetAccountsBalances(ctx context.Context) []types.Balance {
+ balances := make([]types.Balance, 0)
+ mapAddressToBalancesIdx := make(map[string]int)
+
+k.IterateAllBalances(ctx, func(addr sdk.AccAddress, balance sdk.Coin)
+
+bool {
+ idx, ok := mapAddressToBalancesIdx[addr.String()]
+ if ok {
+ // address is already on the set of accounts balances
+ balances[idx].Coins = balances[idx].Coins.Add(balance)
+
+balances[idx].Coins.Sort()
+
+return false
+}
+ accountBalance := types.Balance{
+ Address: addr.String(),
+ Coins: sdk.NewCoins(balance),
+}
+
+balances = append(balances, accountBalance)
+
+mapAddressToBalancesIdx[addr.String()] = len(balances) - 1
+ return false
+})
+
+return balances
+}
+
+// GetBalance returns the balance of a specific denomination for a given account
+// by address.
+func (k BaseViewKeeper)
+
+GetBalance(ctx context.Context, addr sdk.AccAddress, denom string)
+
+sdk.Coin {
+ amt, err := k.Balances.Get(ctx, collections.Join(addr, denom))
+ if err != nil {
+ return sdk.NewCoin(denom, math.ZeroInt())
+}
+
+return sdk.NewCoin(denom, amt)
+}
+
+// IterateAccountBalances iterates over the balances of a single account and
+// provides the token balance to a callback. If true is returned from the
+// callback, iteration is halted.
+func (k BaseViewKeeper)
+
+IterateAccountBalances(ctx context.Context, addr sdk.AccAddress, cb func(sdk.Coin)
+
+bool) {
+ err := k.Balances.Walk(ctx, collections.NewPrefixedPairRange[sdk.AccAddress, string](addr), func(key collections.Pair[sdk.AccAddress, string], value math.Int) (stop bool, err error) {
+ return cb(sdk.NewCoin(key.K2(), value)), nil
+})
+ if err != nil {
+ panic(err)
+}
+}
+
+// IterateAllBalances iterates over all the balances of all accounts and
+// denominations that are provided to a callback. If true is returned from the
+// callback, iteration is halted.
+func (k BaseViewKeeper)
+
+IterateAllBalances(ctx context.Context, cb func(sdk.AccAddress, sdk.Coin)
+
+bool) {
+ err := k.Balances.Walk(ctx, nil, func(key collections.Pair[sdk.AccAddress, string], value math.Int) (stop bool, err error) {
+ return cb(key.K1(), sdk.NewCoin(key.K2(), value)), nil
+})
+ if err != nil {
+ panic(err)
+}
+}
+
+// LockedCoins returns all the coins that are not spendable (i.e. locked)
+// for an
+// account by address. For standard accounts, the result will always be no coins.
+// For vesting accounts, LockedCoins is delegated to the concrete vesting account
+// type.
+func (k BaseViewKeeper)
+
+LockedCoins(ctx context.Context, addr sdk.AccAddress)
+
+sdk.Coins {
+ acc := k.ak.GetAccount(ctx, addr)
+ if acc != nil {
+ vacc, ok := acc.(types.VestingAccount)
+ if ok {
+ sdkCtx := sdk.UnwrapSDKContext(ctx)
+
+return vacc.LockedCoins(sdkCtx.BlockTime())
+}
+
+}
+
+return sdk.NewCoins()
+}
+
+// SpendableCoins returns the total balances of spendable coins for an account
+// by address. If the account has no spendable coins, an empty Coins slice is
+// returned.
+func (k BaseViewKeeper)
+
+SpendableCoins(ctx context.Context, addr sdk.AccAddress)
+
+sdk.Coins {
+ spendable, _ := k.spendableCoins(ctx, addr)
+
+return spendable
+}
+
+// SpendableCoin returns the balance of specific denomination of spendable coins
+// for an account by address. If the account has no spendable coin, a zero Coin
+// is returned.
+func (k BaseViewKeeper)
+
+SpendableCoin(ctx context.Context, addr sdk.AccAddress, denom string)
+
+sdk.Coin {
+ balance := k.GetBalance(ctx, addr, denom)
+ locked := k.LockedCoins(ctx, addr)
+
+return balance.SubAmount(locked.AmountOf(denom))
+}
+
+// spendableCoins returns the coins the given address can spend alongside the total amount of coins it holds.
+// It exists for gas efficiency, in order to avoid to have to get balance multiple times.
+func (k BaseViewKeeper)
+
+spendableCoins(ctx context.Context, addr sdk.AccAddress) (spendable, total sdk.Coins) {
+ total = k.GetAllBalances(ctx, addr)
+ locked := k.LockedCoins(ctx, addr)
+
+spendable, hasNeg := total.SafeSub(locked...)
+ if hasNeg {
+ spendable = sdk.NewCoins()
+
+return
+}
+
+return
+}
+
+// ValidateBalance validates all balances for a given account address returning
+// an error if any balance is invalid. It will check for vesting account types
+// and validate the balances against the original vesting balances.
+//
+// CONTRACT: ValidateBalance should only be called upon genesis state. In the
+// case of vesting accounts, balances may change in a valid manner that would
+// otherwise yield an error from this call.
+func (k BaseViewKeeper)
+
+ValidateBalance(ctx context.Context, addr sdk.AccAddress)
+
+error {
+ acc := k.ak.GetAccount(ctx, addr)
+ if acc == nil {
+ return errorsmod.Wrapf(sdkerrors.ErrUnknownAddress, "account %s does not exist", addr)
+}
+ balances := k.GetAllBalances(ctx, addr)
+ if !balances.IsValid() {
+ return fmt.Errorf("account balance of %s is invalid", balances)
+}
+
+vacc, ok := acc.(types.VestingAccount)
+ if ok {
+ ogv := vacc.GetOriginalVesting()
+ if ogv.IsAnyGT(balances) {
+ return fmt.Errorf("vesting amount %s cannot be greater than total amount %s", ogv, balances)
+}
+
+}
+
+return nil
+}
+```
+
+### `IAVL` Store
+
+The default implementation of `KVStore` and `CommitKVStore` used in `baseapp` is the `iavl.Store`.
+
+```go expandable
+package iavl
+
+import (
+
+ "errors"
+ "fmt"
+ "io"
+
+ cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto"
+ dbm "github.com/cosmos/cosmos-db"
+ "github.com/cosmos/iavl"
+ ics23 "github.com/cosmos/ics23/go"
+
+ errorsmod "cosmossdk.io/errors"
+ "cosmossdk.io/log"
+ "cosmossdk.io/store/cachekv"
+ "cosmossdk.io/store/internal/kv"
+ "cosmossdk.io/store/metrics"
+ pruningtypes "cosmossdk.io/store/pruning/types"
+ "cosmossdk.io/store/tracekv"
+ "cosmossdk.io/store/types"
+ "cosmossdk.io/store/wrapper"
+)
+
+const (
+ DefaultIAVLCacheSize = 500000
+)
+
+var (
+ _ types.KVStore = (*Store)(nil)
+ _ types.CommitStore = (*Store)(nil)
+ _ types.CommitKVStore = (*Store)(nil)
+ _ types.Queryable = (*Store)(nil)
+ _ types.StoreWithInitialVersion = (*Store)(nil)
+)
+
+// Store Implements types.KVStore and CommitKVStore.
+type Store struct {
+ tree Tree
+ logger log.Logger
+ metrics metrics.StoreMetrics
+}
+
+// LoadStore returns an IAVL Store as a CommitKVStore. Internally, it will load the
+// store's version (id)
+//
+// from the provided DB. An error is returned if the version
+// fails to load, or if called with a positive version on an empty tree.
+func LoadStore(db dbm.DB, logger log.Logger, key types.StoreKey, id types.CommitID, cacheSize int, disableFastNode bool, metrics metrics.StoreMetrics) (types.CommitKVStore, error) {
+ return LoadStoreWithInitialVersion(db, logger, key, id, 0, cacheSize, disableFastNode, metrics)
+}
+
+// LoadStoreWithInitialVersion returns an IAVL Store as a CommitKVStore setting its initialVersion
+// to the one given. Internally, it will load the store's version (id)
+//
+// from the
+// provided DB. An error is returned if the version fails to load, or if called with a positive
+// version on an empty tree.
+func LoadStoreWithInitialVersion(db dbm.DB, logger log.Logger, key types.StoreKey, id types.CommitID, initialVersion uint64, cacheSize int, disableFastNode bool, metrics metrics.StoreMetrics) (types.CommitKVStore, error) {
+ return LoadStoreWithOpts(db, logger, key, id, initialVersion, cacheSize, disableFastNode, metrics, iavl.AsyncPruningOption(true))
+}
+
+func LoadStoreWithOpts(db dbm.DB, logger log.Logger, key types.StoreKey, id types.CommitID, initialVersion uint64, cacheSize int, disableFastNode bool, metrics metrics.StoreMetrics, opts ...iavl.Option) (types.CommitKVStore, error) {
+ // store/v1 and app/v1 flows never require an initial version of 0
+ if initialVersion == 0 {
+ initialVersion = 1
+}
+
+opts = append(opts, iavl.InitialVersionOption(initialVersion))
+ tree := iavl.NewMutableTree(wrapper.NewDBWrapper(db), cacheSize, disableFastNode, logger, opts...)
+
+isUpgradeable, err := tree.IsUpgradeable()
+ if err != nil {
+ return nil, err
+}
+ if isUpgradeable && logger != nil {
+ logger.Info(
+ "Upgrading IAVL storage for faster queries + execution on live state. This may take a while",
+ "store_key", key.String(),
+ "version", initialVersion,
+ "commit", fmt.Sprintf("%X", id),
+ )
+}
+
+ _, err = tree.LoadVersion(id.Version)
+ if err != nil {
+ return nil, err
+}
+ if logger != nil {
+ logger.Debug("Finished loading IAVL tree")
+}
+
+return &Store{
+ tree: tree,
+ logger: logger,
+ metrics: metrics,
+}, nil
+}
+
+// UnsafeNewStore returns a reference to a new IAVL Store with a given mutable
+// IAVL tree reference. It should only be used for testing purposes.
+//
+// CONTRACT: The IAVL tree should be fully loaded.
+// CONTRACT: PruningOptions passed in as argument must be the same as pruning options
+// passed into iavl.MutableTree
+func UnsafeNewStore(tree *iavl.MutableTree) *Store {
+ return &Store{
+ tree: tree,
+ metrics: metrics.NewNoOpMetrics(),
+}
+}
+
+// GetImmutable returns a reference to a new store backed by an immutable IAVL
+// tree at a specific version (height)
+//
+// without any pruning options. This should
+// be used for querying and iteration only. If the version does not exist or has
+// been pruned, an empty immutable IAVL tree will be used.
+// Any mutable operations executed will result in a panic.
+func (st *Store)
+
+GetImmutable(version int64) (*Store, error) {
+ if !st.VersionExists(version) {
+ return nil, errors.New("version mismatch on immutable IAVL tree; version does not exist. Version has either been pruned, or is for a future block height")
+}
+
+iTree, err := st.tree.GetImmutable(version)
+ if err != nil {
+ return nil, err
+}
+
+return &Store{
+ tree: &immutableTree{
+ iTree
+},
+ metrics: st.metrics,
+}, nil
+}
+
+// Commit commits the current store state and returns a CommitID with the new
+// version and hash.
+func (st *Store)
+
+Commit()
+
+types.CommitID {
+ defer st.metrics.MeasureSince("store", "iavl", "commit")
+
+hash, version, err := st.tree.SaveVersion()
+ if err != nil {
+ panic(err)
+}
+
+return types.CommitID{
+ Version: version,
+ Hash: hash,
+}
+}
+
+// WorkingHash returns the hash of the current working tree.
+func (st *Store)
+
+WorkingHash() []byte {
+ return st.tree.WorkingHash()
+}
+
+// LastCommitID implements Committer.
+func (st *Store)
+
+LastCommitID()
+
+types.CommitID {
+ return types.CommitID{
+ Version: st.tree.Version(),
+ Hash: st.tree.Hash(),
+}
+}
+
+// SetPruning panics as pruning options should be provided at initialization
+// since IAVl accepts pruning options directly.
+func (st *Store)
+
+SetPruning(_ pruningtypes.PruningOptions) {
+ panic("cannot set pruning options on an initialized IAVL store")
+}
+
+// SetPruning panics as pruning options should be provided at initialization
+// since IAVl accepts pruning options directly.
+func (st *Store)
+
+GetPruning()
+
+pruningtypes.PruningOptions {
+ panic("cannot get pruning options on an initialized IAVL store")
+}
+
+// VersionExists returns whether or not a given version is stored.
+func (st *Store)
+
+VersionExists(version int64)
+
+bool {
+ return st.tree.VersionExists(version)
+}
+
+// GetAllVersions returns all versions in the iavl tree
+func (st *Store)
+
+GetAllVersions() []int {
+ return st.tree.AvailableVersions()
+}
+
+// Implements Store.
+func (st *Store)
+
+GetStoreType()
+
+types.StoreType {
+ return types.StoreTypeIAVL
+}
+
+// Implements Store.
+func (st *Store)
+
+CacheWrap()
+
+types.CacheWrap {
+ return cachekv.NewStore(st)
+}
+
+// CacheWrapWithTrace implements the Store interface.
+func (st *Store)
+
+CacheWrapWithTrace(w io.Writer, tc types.TraceContext)
+
+types.CacheWrap {
+ return cachekv.NewStore(tracekv.NewStore(st, w, tc))
+}
+
+// Implements types.KVStore.
+func (st *Store)
+
+Set(key, value []byte) {
+ types.AssertValidKey(key)
+
+types.AssertValidValue(value)
+ _, err := st.tree.Set(key, value)
+ if err != nil && st.logger != nil {
+ st.logger.Error("iavl set error", "error", err.Error())
+}
+}
+
+// Implements types.KVStore.
+func (st *Store)
+
+Get(key []byte) []byte {
+ defer st.metrics.MeasureSince("store", "iavl", "get")
+
+value, err := st.tree.Get(key)
+ if err != nil {
+ panic(err)
+}
+
+return value
+}
+
+// Implements types.KVStore.
+func (st *Store)
+
+Has(key []byte) (exists bool) {
+ defer st.metrics.MeasureSince("store", "iavl", "has")
+
+has, err := st.tree.Has(key)
+ if err != nil {
+ panic(err)
+}
+
+return has
+}
+
+// Implements types.KVStore.
+func (st *Store)
+
+Delete(key []byte) {
+ defer st.metrics.MeasureSince("store", "iavl", "delete")
+ _, _, err := st.tree.Remove(key)
+ if err != nil {
+ panic(err)
+}
+}
+
+// DeleteVersionsTo deletes versions upto the given version from the MutableTree. An error
+// is returned if any single version is invalid or the delete fails. All writes
+// happen in a single batch with a single commit.
+func (st *Store)
+
+DeleteVersionsTo(version int64)
+
+error {
+ return st.tree.DeleteVersionsTo(version)
+}
+
+// LoadVersionForOverwriting attempts to load a tree at a previously committed
+// version. Any versions greater than targetVersion will be deleted.
+func (st *Store)
+
+LoadVersionForOverwriting(targetVersion int64)
+
+error {
+ return st.tree.LoadVersionForOverwriting(targetVersion)
+}
+
+// Implements types.KVStore.
+func (st *Store)
+
+Iterator(start, end []byte)
+
+types.Iterator {
+ iterator, err := st.tree.Iterator(start, end, true)
+ if err != nil {
+ panic(err)
+}
+
+return iterator
+}
+
+// Implements types.KVStore.
+func (st *Store)
+
+ReverseIterator(start, end []byte)
+
+types.Iterator {
+ iterator, err := st.tree.Iterator(start, end, false)
+ if err != nil {
+ panic(err)
+}
+
+return iterator
+}
+
+// SetInitialVersion sets the initial version of the IAVL tree. It is used when
+// starting a new chain at an arbitrary height.
+func (st *Store)
+
+SetInitialVersion(version int64) {
+ st.tree.SetInitialVersion(uint64(version))
+}
+
+// Exports the IAVL store at the given version, returning an iavl.Exporter for the tree.
+func (st *Store)
+
+Export(version int64) (*iavl.Exporter, error) {
+ istore, err := st.GetImmutable(version)
+ if err != nil {
+ return nil, errorsmod.Wrapf(err, "iavl export failed for version %v", version)
+}
+
+tree, ok := istore.tree.(*immutableTree)
+ if !ok || tree == nil {
+ return nil, fmt.Errorf("iavl export failed: unable to fetch tree for version %v", version)
+}
+
+return tree.Export()
+}
+
+// Import imports an IAVL tree at the given version, returning an iavl.Importer for importing.
+func (st *Store)
+
+Import(version int64) (*iavl.Importer, error) {
+ tree, ok := st.tree.(*iavl.MutableTree)
+ if !ok {
+ return nil, errors.New("iavl import failed: unable to find mutable tree")
+}
+
+return tree.Import(version)
+}
+
+// Handle gatest the latest height, if height is 0
+func getHeight(tree Tree, req *types.RequestQuery)
+
+int64 {
+ height := req.Height
+ if height == 0 {
+ latest := tree.Version()
+ if tree.VersionExists(latest - 1) {
+ height = latest - 1
+ } else {
+ height = latest
+ }
+
+
+
+}
+
+return height
+}
+
+// Query implements ABCI interface, allows queries.
+//
+// By default we will return from (latest height -1),
+// as we will have merkle proofs immediately (header height = data height + 1).
+// If latest-1 is not present, use latest (which must be present).
+// If you care to have the latest data to see a tx result, you must
+// explicitly set the height you want to see.
+func (st *Store) Query(req *types.RequestQuery) (res *types.ResponseQuery, err error) {
+    defer st.metrics.MeasureSince("store", "iavl", "query")
+
+    if len(req.Data) == 0 {
+        return &types.ResponseQuery{}, errorsmod.Wrap(types.ErrTxDecode, "query cannot be zero length")
+    }
+
+    tree := st.tree
+
+    // store the height we chose in the response, with 0 being changed to the
+    // latest height
+    res = &types.ResponseQuery{
+        Height: getHeight(tree, req),
+    }
+
+    switch req.Path {
+    case "/key": // get by key
+        key := req.Data // data holds the key bytes
+        res.Key = key
+        if !st.VersionExists(res.Height) {
+            res.Log = iavl.ErrVersionDoesNotExist.Error()
+            break
+        }
+
+        value, err := tree.GetVersioned(key, res.Height)
+        if err != nil {
+            panic(err)
+        }
+        res.Value = value
+
+        if !req.Prove {
+            break
+        }
+
+        // Continue to prove existence/absence of value
+        // Must convert store.Tree to iavl.MutableTree with given version to use in CreateProof
+        iTree, err := tree.GetImmutable(res.Height)
+        if err != nil {
+            // sanity check: If value for given version was retrieved, immutable tree must also be retrievable
+            panic(fmt.Sprintf("version exists in store but could not retrieve corresponding versioned tree in store, %s", err.Error()))
+        }
+        mtree := &iavl.MutableTree{
+            ImmutableTree: iTree,
+        }
+
+        // get proof from tree and convert to merkle.Proof before adding to result
+        res.ProofOps = getProofFromTree(mtree, req.Data, res.Value != nil)
+
+    case "/subspace":
+        pairs := kv.Pairs{
+            Pairs: make([]kv.Pair, 0),
+        }
+        subspace := req.Data
+        res.Key = subspace
+
+        iterator := types.KVStorePrefixIterator(st, subspace)
+        for ; iterator.Valid(); iterator.Next() {
+            pairs.Pairs = append(pairs.Pairs, kv.Pair{Key: iterator.Key(), Value: iterator.Value()})
+        }
+        if err := iterator.Close(); err != nil {
+            panic(fmt.Errorf("failed to close iterator: %w", err))
+        }
+
+        bz, err := pairs.Marshal()
+        if err != nil {
+            panic(fmt.Errorf("failed to marshal KV pairs: %w", err))
+        }
+        res.Value = bz
+
+    default:
+        return &types.ResponseQuery{}, errorsmod.Wrapf(types.ErrUnknownRequest, "unexpected query path: %v", req.Path)
+    }
+
+    return res, err
+}
+
+// TraverseStateChanges traverses the state changes between two versions and calls the given function
+// for each version's change set.
+func (st *Store) TraverseStateChanges(startVersion, endVersion int64, fn func(version int64, changeSet *iavl.ChangeSet) error) error {
+    return st.tree.TraverseStateChanges(startVersion, endVersion, fn)
+}
+
+// getProofFromTree takes a MutableTree, a key, and a flag for creating an existence or absence
+// proof and returns the appropriate merkle.Proof. Since this must be called after querying for
+// the value, this function should never error; thus, it panics on error rather than returning it.
+func getProofFromTree(tree *iavl.MutableTree, key []byte, exists bool) *cmtprotocrypto.ProofOps {
+    var (
+        commitmentProof *ics23.CommitmentProof
+        err             error
+    )
+
+    if exists {
+        // value was found
+        commitmentProof, err = tree.GetMembershipProof(key)
+        if err != nil {
+            // sanity check: If value was found, membership proof must be creatable
+            panic(fmt.Sprintf("unexpected value for empty proof: %s", err.Error()))
+        }
+    } else {
+        // value wasn't found
+        commitmentProof, err = tree.GetNonMembershipProof(key)
+        if err != nil {
+            // sanity check: If value wasn't found, nonmembership proof must be creatable
+            panic(fmt.Sprintf("unexpected error for nonexistence proof: %s", err.Error()))
+        }
+    }
+
+    op := types.NewIavlCommitmentOp(key, commitmentProof)
+    return &cmtprotocrypto.ProofOps{
+        Ops: []cmtprotocrypto.ProofOp{op.ProofOp()},
+    }
+}
+```
+
+`iavl` stores are based around an [IAVL Tree](https://github.com/cosmos/iavl), a self-balancing binary tree which guarantees that:
+
+* `Get` and `Set` operations are O(log n), where n is the number of elements in the tree.
+* Iteration efficiently returns the sorted elements within the range.
+* Each tree version is immutable and can be retrieved even after a commit (depending on the pruning settings).
+
+The documentation on the IAVL Tree is located [here](https://github.com/cosmos/iavl/blob/master/docs/overview.md).
+
+### `DbAdapter` Store
+
+`dbadapter.Store` is an adapter for `dbm.DB` that makes it fulfill the `KVStore` interface.
+
+```go expandable
+package dbadapter
+
+import (
+    "io"
+
+    dbm "github.com/cosmos/cosmos-db"
+
+    "cosmossdk.io/store/cachekv"
+    "cosmossdk.io/store/tracekv"
+    "cosmossdk.io/store/types"
+)
+
+// Store is a wrapper type for dbm.DB with an implementation of KVStore.
+type Store struct {
+    dbm.DB
+}
+
+// Get wraps the underlying DB's Get method, panicking on error.
+func (dsa Store) Get(key []byte) []byte {
+    v, err := dsa.DB.Get(key)
+    if err != nil {
+        panic(err)
+    }
+    return v
+}
+
+// Has wraps the underlying DB's Has method, panicking on error.
+func (dsa Store) Has(key []byte) bool {
+    ok, err := dsa.DB.Has(key)
+    if err != nil {
+        panic(err)
+    }
+    return ok
+}
+
+// Set wraps the underlying DB's Set method, panicking on error.
+// The key and value are validated before being written.
+func (dsa Store) Set(key, value []byte) {
+    types.AssertValidKey(key)
+    types.AssertValidValue(value)
+    if err := dsa.DB.Set(key, value); err != nil {
+        panic(err)
+    }
+}
+
+// Delete wraps the underlying DB's Delete method, panicking on error.
+func (dsa Store) Delete(key []byte) {
+    if err := dsa.DB.Delete(key); err != nil {
+        panic(err)
+    }
+}
+
+// Iterator wraps the underlying DB's Iterator method, panicking on error.
+func (dsa Store) Iterator(start, end []byte) types.Iterator {
+    iter, err := dsa.DB.Iterator(start, end)
+    if err != nil {
+        panic(err)
+    }
+    return iter
+}
+
+// ReverseIterator wraps the underlying DB's ReverseIterator method, panicking on error.
+func (dsa Store) ReverseIterator(start, end []byte) types.Iterator {
+    iter, err := dsa.DB.ReverseIterator(start, end)
+    if err != nil {
+        panic(err)
+    }
+    return iter
+}
+
+// GetStoreType returns the type of the store.
+func (Store) GetStoreType() types.StoreType {
+    return types.StoreTypeDB
+}
+
+// CacheWrap branches the underlying store.
+func (dsa Store) CacheWrap() types.CacheWrap {
+    return cachekv.NewStore(dsa)
+}
+
+// CacheWrapWithTrace implements KVStore.
+func (dsa Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
+    return cachekv.NewStore(tracekv.NewStore(dsa, w, tc))
+}
+
+// dbm.DB implements KVStore so we can CacheKVStore it.
+var _ types.KVStore = Store{}
+```
+
+`dbadapter.Store` embeds `dbm.DB`, meaning most of the `KVStore` interface functions are implemented. The other functions (mostly miscellaneous) are manually implemented. This store is primarily used within [Transient Stores](#transient-store).
+
+### `Transient` Store
+
+`Transient.Store` is a base-layer `KVStore` which is automatically discarded at the end of the block.
+
+```go expandable
+package transient
+
+import (
+    dbm "github.com/cosmos/cosmos-db"
+
+    "cosmossdk.io/store/dbadapter"
+    pruningtypes "cosmossdk.io/store/pruning/types"
+    "cosmossdk.io/store/types"
+)
+
+var (
+    _ types.Committer = (*Store)(nil)
+    _ types.KVStore   = (*Store)(nil)
+)
+
+// Store is a wrapper for a MemDB with a Committer implementation.
+type Store struct {
+    dbadapter.Store
+}
+
+// NewStore constructs a new MemDB adapter.
+func NewStore() *Store {
+    return &Store{Store: dbadapter.Store{DB: dbm.NewMemDB()}}
+}
+
+// Commit implements CommitStore. It cleans up the Store by replacing the
+// underlying MemDB with a fresh one, discarding everything written during the block.
+func (ts *Store) Commit() (id types.CommitID) {
+    ts.Store = dbadapter.Store{DB: dbm.NewMemDB()}
+    return
+}
+
+// SetPruning is a no-op as pruning options cannot be directly set on this store.
+func (ts *Store) SetPruning(_ pruningtypes.PruningOptions) {}
+
+// GetPruning is a no-op as pruning options cannot be directly set on this store.
+// They must be set on the root commit multi-store.
+func (ts *Store) GetPruning() pruningtypes.PruningOptions {
+    return pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined)
+}
+
+// LastCommitID implements CommitStore. The transient store has no commit ID.
+func (ts *Store) LastCommitID() types.CommitID {
+    return types.CommitID{}
+}
+
+// WorkingHash returns an empty hash since the store is discarded every block.
+func (ts *Store) WorkingHash() []byte {
+    return []byte{}
+}
+
+// GetStoreType implements Store.
+func (ts *Store) GetStoreType() types.StoreType {
+    return types.StoreTypeTransient
+}
+```
+
+`Transient.Store` is a `dbadapter.Store` with a `dbm.NewMemDB()`. All `KVStore` methods are reused. When `Store.Commit()` is called, a new `dbadapter.Store` is assigned, discarding the previous reference and leaving it to be garbage collected.
+
+This type of store is useful to persist information that is only relevant per-block. One example would be to store parameter changes (i.e. a bool set to `true` if a parameter changed in a block).
+
+```go expandable
+package types
+
+import (
+
+ "fmt"
+ "maps"
+ "reflect"
+ "cosmossdk.io/store/prefix"
+ storetypes "cosmossdk.io/store/types"
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+const (
+ // StoreKey is the string store key for the param store
+ StoreKey = "params"
+
+ // TStoreKey is the string store key for the param transient store
+ TStoreKey = "transient_params"
+)
+
+// Individual parameter store for each keeper
+// Transient store persists for a block, so we use it for
+// recording whether the parameter has been changed or not
+type Subspace struct {
+ cdc codec.BinaryCodec
+ legacyAmino *codec.LegacyAmino
+ key storetypes.StoreKey // []byte -> []byte, stores parameter
+ tkey storetypes.StoreKey // []byte -> bool, stores parameter change
+ name []byte
+ table KeyTable
+}
+
+// NewSubspace constructs a store with namestore
+func NewSubspace(cdc codec.BinaryCodec, legacyAmino *codec.LegacyAmino, key, tkey storetypes.StoreKey, name string)
+
+Subspace {
+ return Subspace{
+ cdc: cdc,
+ legacyAmino: legacyAmino,
+ key: key,
+ tkey: tkey,
+ name: []byte(name),
+ table: NewKeyTable(),
+}
+}
+
+// HasKeyTable returns if the Subspace has a KeyTable registered.
+func (s Subspace)
+
+HasKeyTable()
+
+bool {
+ return len(s.table.m) > 0
+}
+
+// WithKeyTable initializes KeyTable and returns modified Subspace
+func (s Subspace)
+
+WithKeyTable(table KeyTable)
+
+Subspace {
+ if table.m == nil {
+ panic("WithKeyTable()
+
+called with nil KeyTable")
+}
+ if len(s.table.m) != 0 {
+ panic("WithKeyTable()
+
+called on already initialized Subspace")
+}
+
+maps.Copy(s.table.m, table.m)
+
+ // Allocate additional capacity for Subspace.name
+ // So we don't have to allocate extra space each time appending to the key
+ name := s.name
+ s.name = make([]byte, len(name), len(name)+table.maxKeyLength())
+
+copy(s.name, name)
+
+return s
+}
+
+// kvStore returns a KVStore identical with ctx.KVStore(s.key).Prefix().
+func (s Subspace) kvStore(ctx sdk.Context) storetypes.KVStore {
+    // append here is safe, appends within a function won't cause
+    // weird side effects when its singlethreaded
+    return prefix.NewStore(ctx.KVStore(s.key), append(s.name, '/'))
+}
+
+// Returns a transient store for modification
+func (s Subspace)
+
+transientStore(ctx sdk.Context)
+
+storetypes.KVStore {
+ // append here is safe, appends within a function won't cause
+ // weird side effects when its singlethreaded
+ return prefix.NewStore(ctx.TransientStore(s.tkey), append(s.name, '/'))
+}
+
+// Validate attempts to validate a parameter value by its key. If the key is not
+// registered or if the validation of the value fails, an error is returned.
+func (s Subspace)
+
+Validate(ctx sdk.Context, key []byte, value any)
+
+error {
+ attr, ok := s.table.m[string(key)]
+ if !ok {
+ return fmt.Errorf("parameter %s not registered", key)
+}
+ if err := attr.vfn(value); err != nil {
+ return fmt.Errorf("invalid parameter value: %w", err)
+}
+
+return nil
+}
+
+// Get queries for a parameter by key from the Subspace's KVStore and sets the
+// value to the provided pointer. If the value does not exist, it will panic.
+func (s Subspace)
+
+Get(ctx sdk.Context, key []byte, ptr any) {
+ s.checkType(key, ptr)
+ store := s.kvStore(ctx)
+ bz := store.Get(key)
+ if err := s.legacyAmino.UnmarshalJSON(bz, ptr); err != nil {
+ panic(err)
+}
+}
+
+// GetIfExists queries for a parameter by key from the Subspace's KVStore and
+// sets the value to the provided pointer. If the value does not exist, it will
+// perform a no-op.
+func (s Subspace)
+
+GetIfExists(ctx sdk.Context, key []byte, ptr any) {
+ store := s.kvStore(ctx)
+ bz := store.Get(key)
+ if bz == nil {
+ return
+}
+
+s.checkType(key, ptr)
+ if err := s.legacyAmino.UnmarshalJSON(bz, ptr); err != nil {
+ panic(err)
+}
+}
+
+// IterateKeys iterates over all the keys in the subspace and executes the
+// provided callback. If the callback returns true for a given key, iteration
+// will halt.
+func (s Subspace)
+
+IterateKeys(ctx sdk.Context, cb func(key []byte)
+
+bool) {
+ store := s.kvStore(ctx)
+ iter := storetypes.KVStorePrefixIterator(store, nil)
+
+defer iter.Close()
+ for ; iter.Valid(); iter.Next() {
+ if cb(iter.Key()) {
+ break
+}
+
+}
+}
+
+// GetRaw queries for the raw values bytes for a parameter by key.
+func (s Subspace)
+
+GetRaw(ctx sdk.Context, key []byte) []byte {
+ store := s.kvStore(ctx)
+
+return store.Get(key)
+}
+
+// Has returns if a parameter key exists or not in the Subspace's KVStore.
+func (s Subspace)
+
+Has(ctx sdk.Context, key []byte)
+
+bool {
+ store := s.kvStore(ctx)
+
+return store.Has(key)
+}
+
+// Modified returns true if the parameter key is set in the Subspace's transient
+// KVStore.
+func (s Subspace)
+
+Modified(ctx sdk.Context, key []byte)
+
+bool {
+ tstore := s.transientStore(ctx)
+
+return tstore.Has(key)
+}
+
+// checkType verifies that the provided key and value are compatible and registered.
+func (s Subspace) checkType(key []byte, value any) {
+    attr, ok := s.table.m[string(key)]
+    if !ok {
+        panic(fmt.Sprintf("parameter %s not registered", key))
+    }
+
+    ty := attr.ty
+    pty := reflect.TypeOf(value)
+    if pty.Kind() == reflect.Ptr {
+        pty = pty.Elem()
+    }
+    if pty != ty {
+        panic("type mismatch with registered table")
+    }
+}
+
+// Set stores a value for given a parameter key assuming the parameter type has
+// been registered. It will panic if the parameter type has not been registered
+// or if the value cannot be encoded. A change record is also set in the Subspace's
+// transient KVStore to mark the parameter as modified.
+func (s Subspace)
+
+Set(ctx sdk.Context, key []byte, value any) {
+ s.checkType(key, value)
+ store := s.kvStore(ctx)
+
+bz, err := s.legacyAmino.MarshalJSON(value)
+ if err != nil {
+ panic(err)
+}
+
+store.Set(key, bz)
+ tstore := s.transientStore(ctx)
+
+tstore.Set(key, []byte{
+})
+}
+
+// Update stores an updated raw value for a given parameter key assuming the
+// parameter type has been registered. It will panic if the parameter type has
+// not been registered or if the value cannot be encoded. An error is returned
+// if the raw value is not compatible with the registered type for the parameter
+// key or if the new value is invalid as determined by the registered type's
+// validation function.
+func (s Subspace)
+
+Update(ctx sdk.Context, key, value []byte)
+
+error {
+ attr, ok := s.table.m[string(key)]
+ if !ok {
+ panic(fmt.Sprintf("parameter %s not registered", key))
+}
+ ty := attr.ty
+ dest := reflect.New(ty).Interface()
+
+s.GetIfExists(ctx, key, dest)
+ if err := s.legacyAmino.UnmarshalJSON(value, dest); err != nil {
+ return err
+}
+
+ // destValue contains the dereferenced value of dest so validation function do
+ // not have to operate on pointers.
+ destValue := reflect.Indirect(reflect.ValueOf(dest)).Interface()
+ if err := s.Validate(ctx, key, destValue); err != nil {
+ return err
+}
+
+s.Set(ctx, key, dest)
+
+return nil
+}
+
+// GetParamSet iterates through each ParamSetPair where for each pair, it will
+// retrieve the value and set it to the corresponding value pointer provided
+// in the ParamSetPair by calling Subspace#Get.
+func (s Subspace)
+
+GetParamSet(ctx sdk.Context, ps ParamSet) {
+ for _, pair := range ps.ParamSetPairs() {
+ s.Get(ctx, pair.Key, pair.Value)
+}
+}
+
+// GetParamSetIfExists iterates through each ParamSetPair where for each pair, it will
+// retrieve the value and set it to the corresponding value pointer provided
+// in the ParamSetPair by calling Subspace#GetIfExists.
+func (s Subspace)
+
+GetParamSetIfExists(ctx sdk.Context, ps ParamSet) {
+ for _, pair := range ps.ParamSetPairs() {
+ s.GetIfExists(ctx, pair.Key, pair.Value)
+}
+}
+
+// SetParamSet iterates through each ParamSetPair and sets the value with the
+// corresponding parameter key in the Subspace's KVStore.
+func (s Subspace)
+
+SetParamSet(ctx sdk.Context, ps ParamSet) {
+ for _, pair := range ps.ParamSetPairs() {
+ // pair.Field is a pointer to the field, so indirecting the ptr.
+ // go-amino automatically handles it but just for sure,
+ // since SetStruct is meant to be used in InitGenesis
+ // so this method will not be called frequently
+ v := reflect.Indirect(reflect.ValueOf(pair.Value)).Interface()
+ if err := pair.ValidatorFn(v); err != nil {
+ panic(fmt.Sprintf("value from ParamSetPair is invalid: %s", err))
+}
+
+s.Set(ctx, pair.Key, v)
+}
+}
+
+// Name returns the name of the Subspace.
+func (s Subspace)
+
+Name()
+
+string {
+ return string(s.name)
+}
+
+// Wrapper of Subspace, provides immutable functions only
+type ReadOnlySubspace struct {
+ s Subspace
+}
+
+// Get delegates a read-only Get call to the Subspace.
+func (ros ReadOnlySubspace)
+
+Get(ctx sdk.Context, key []byte, ptr any) {
+ ros.s.Get(ctx, key, ptr)
+}
+
+// GetRaw delegates a read-only GetRaw call to the Subspace.
+func (ros ReadOnlySubspace)
+
+GetRaw(ctx sdk.Context, key []byte) []byte {
+ return ros.s.GetRaw(ctx, key)
+}
+
+// Has delegates a read-only Has call to the Subspace.
+func (ros ReadOnlySubspace)
+
+Has(ctx sdk.Context, key []byte)
+
+bool {
+ return ros.s.Has(ctx, key)
+}
+
+// Modified delegates a read-only Modified call to the Subspace.
+func (ros ReadOnlySubspace)
+
+Modified(ctx sdk.Context, key []byte)
+
+bool {
+ return ros.s.Modified(ctx, key)
+}
+
+// Name delegates a read-only Name call to the Subspace.
+func (ros ReadOnlySubspace)
+
+Name()
+
+string {
+ return ros.s.Name()
+}
+```
+
+Transient stores are typically accessed via the [`context`](/sdk/v0.53/learn/advanced/context) via the `TransientStore()` method:
+
+```go expandable
+package types
+
+import (
+
+ "context"
+ "time"
+
+ abci "github.com/cometbft/cometbft/abci/types"
+ cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+ "cosmossdk.io/core/comet"
+ "cosmossdk.io/core/header"
+ "cosmossdk.io/log"
+ "cosmossdk.io/store/gaskv"
+ storetypes "cosmossdk.io/store/types"
+)
+
+// ExecMode defines the execution mode which can be set on a Context.
+type ExecMode uint8
+
+// All possible execution modes.
+const (
+ ExecModeCheck ExecMode = iota
+ ExecModeReCheck
+ ExecModeSimulate
+ ExecModePrepareProposal
+ ExecModeProcessProposal
+ ExecModeVoteExtension
+ ExecModeVerifyVoteExtension
+ ExecModeFinalize
+)
+
+/*
+Context is an immutable object contains all information needed to
+process a request.
+
+It contains a context.Context object inside if you want to use that,
+but please do not over-use it. We try to keep all data structured
+and standard additions here would be better just to add to the Context struct
+*/
+type Context struct {
+ baseCtx context.Context
+ ms storetypes.MultiStore
+ // Deprecated: Use HeaderService for height, time, and chainID and CometService for the rest
+ header cmtproto.Header
+ // Deprecated: Use HeaderService for hash
+ headerHash []byte
+ // Deprecated: Use HeaderService for chainID and CometService for the rest
+ chainID string
+ txBytes []byte
+ logger log.Logger
+ voteInfo []abci.VoteInfo
+ gasMeter storetypes.GasMeter
+ blockGasMeter storetypes.GasMeter
+ checkTx bool
+ recheckTx bool // if recheckTx == true, then checkTx must also be true
+ sigverifyTx bool // when run simulation, because the private key corresponding to the account in the genesis.json randomly generated, we must skip the sigverify.
+ execMode ExecMode
+ minGasPrice DecCoins
+ consParams cmtproto.ConsensusParams
+ eventManager EventManagerI
+ priority int64 // The tx priority, only relevant in CheckTx
+ kvGasConfig storetypes.GasConfig
+ transientKVGasConfig storetypes.GasConfig
+ streamingManager storetypes.StreamingManager
+ cometInfo comet.BlockInfo
+ headerInfo header.Info
+}
+
+// Proposed rename, not done to avoid API breakage
+type Request = Context
+
+// Read-only accessors
+func (c Context)
+
+Context()
+
+context.Context {
+ return c.baseCtx
+}
+
+func (c Context)
+
+MultiStore()
+
+storetypes.MultiStore {
+ return c.ms
+}
+
+func (c Context)
+
+BlockHeight()
+
+int64 {
+ return c.header.Height
+}
+
+func (c Context)
+
+BlockTime()
+
+time.Time {
+ return c.header.Time
+}
+
+func (c Context)
+
+ChainID()
+
+string {
+ return c.chainID
+}
+
+func (c Context)
+
+TxBytes() []byte {
+ return c.txBytes
+}
+
+func (c Context)
+
+Logger()
+
+log.Logger {
+ return c.logger
+}
+
+func (c Context)
+
+VoteInfos() []abci.VoteInfo {
+ return c.voteInfo
+}
+
+func (c Context)
+
+GasMeter()
+
+storetypes.GasMeter {
+ return c.gasMeter
+}
+
+func (c Context)
+
+BlockGasMeter()
+
+storetypes.GasMeter {
+ return c.blockGasMeter
+}
+
+func (c Context)
+
+IsCheckTx()
+
+bool {
+ return c.checkTx
+}
+
+func (c Context)
+
+IsReCheckTx()
+
+bool {
+ return c.recheckTx
+}
+
+func (c Context)
+
+IsSigverifyTx()
+
+bool {
+ return c.sigverifyTx
+}
+
+func (c Context)
+
+ExecMode()
+
+ExecMode {
+ return c.execMode
+}
+
+func (c Context)
+
+MinGasPrices()
+
+DecCoins {
+ return c.minGasPrice
+}
+
+func (c Context)
+
+EventManager()
+
+EventManagerI {
+ return c.eventManager
+}
+
+func (c Context)
+
+Priority()
+
+int64 {
+ return c.priority
+}
+
+func (c Context)
+
+KVGasConfig()
+
+storetypes.GasConfig {
+ return c.kvGasConfig
+}
+
+func (c Context)
+
+TransientKVGasConfig()
+
+storetypes.GasConfig {
+ return c.transientKVGasConfig
+}
+
+func (c Context)
+
+StreamingManager()
+
+storetypes.StreamingManager {
+ return c.streamingManager
+}
+
+func (c Context)
+
+CometInfo()
+
+comet.BlockInfo {
+ return c.cometInfo
+}
+
+func (c Context)
+
+HeaderInfo()
+
+header.Info {
+ return c.headerInfo
+}
+
+// BlockHeader returns the header by value.
+func (c Context)
+
+BlockHeader()
+
+cmtproto.Header {
+ return c.header
+}
+
+// HeaderHash returns a copy of the header hash obtained during abci.RequestBeginBlock
+func (c Context)
+
+HeaderHash() []byte {
+ hash := make([]byte, len(c.headerHash))
+
+copy(hash, c.headerHash)
+
+return hash
+}
+
+func (c Context)
+
+ConsensusParams()
+
+cmtproto.ConsensusParams {
+ return c.consParams
+}
+
+func (c Context)
+
+Deadline() (deadline time.Time, ok bool) {
+ return c.baseCtx.Deadline()
+}
+
+func (c Context)
+
+Done() <-chan struct{
+} {
+ return c.baseCtx.Done()
+}
+
+func (c Context)
+
+Err()
+
+error {
+ return c.baseCtx.Err()
+}
+
+// create a new context
+func NewContext(ms storetypes.MultiStore, header cmtproto.Header, isCheckTx bool, logger log.Logger)
+
+Context {
+ // https://github.com/gogo/protobuf/issues/519
+ header.Time = header.Time.UTC()
+
+return Context{
+ baseCtx: context.Background(),
+ ms: ms,
+ header: header,
+ chainID: header.ChainID,
+ checkTx: isCheckTx,
+ sigverifyTx: true,
+ logger: logger,
+ gasMeter: storetypes.NewInfiniteGasMeter(),
+ minGasPrice: DecCoins{
+},
+ eventManager: NewEventManager(),
+ kvGasConfig: storetypes.KVGasConfig(),
+ transientKVGasConfig: storetypes.TransientGasConfig(),
+}
+}
+
+// WithContext returns a Context with an updated context.Context.
+func (c Context)
+
+WithContext(ctx context.Context)
+
+Context {
+ c.baseCtx = ctx
+ return c
+}
+
+// WithMultiStore returns a Context with an updated MultiStore.
+func (c Context)
+
+WithMultiStore(ms storetypes.MultiStore)
+
+Context {
+ c.ms = ms
+ return c
+}
+
+// WithBlockHeader returns a Context with an updated CometBFT block header in UTC time.
+func (c Context)
+
+WithBlockHeader(header cmtproto.Header)
+
+Context {
+ // https://github.com/gogo/protobuf/issues/519
+ header.Time = header.Time.UTC()
+
+c.header = header
+ return c
+}
+
+// WithHeaderHash returns a Context with an updated CometBFT block header hash.
+func (c Context)
+
+WithHeaderHash(hash []byte)
+
+Context {
+ temp := make([]byte, len(hash))
+
+copy(temp, hash)
+
+c.headerHash = temp
+ return c
+}
+
+// WithBlockTime returns a Context with an updated CometBFT block header time in UTC with no monotonic component.
+// Stripping the monotonic component is for time equality.
+func (c Context)
+
+WithBlockTime(newTime time.Time)
+
+Context {
+ newHeader := c.BlockHeader()
+ // https://github.com/gogo/protobuf/issues/519
+ newHeader.Time = newTime.Round(0).UTC()
+
+return c.WithBlockHeader(newHeader)
+}
+
+// WithProposer returns a Context with an updated proposer consensus address.
+func (c Context)
+
+WithProposer(addr ConsAddress)
+
+Context {
+ newHeader := c.BlockHeader()
+
+newHeader.ProposerAddress = addr.Bytes()
+
+return c.WithBlockHeader(newHeader)
+}
+
+// WithBlockHeight returns a Context with an updated block height.
+func (c Context)
+
+WithBlockHeight(height int64)
+
+Context {
+ newHeader := c.BlockHeader()
+
+newHeader.Height = height
+ return c.WithBlockHeader(newHeader)
+}
+
+// WithChainID returns a Context with an updated chain identifier.
+func (c Context)
+
+WithChainID(chainID string)
+
+Context {
+ c.chainID = chainID
+ return c
+}
+
+// WithTxBytes returns a Context with an updated txBytes.
+func (c Context)
+
+WithTxBytes(txBytes []byte)
+
+Context {
+ c.txBytes = txBytes
+ return c
+}
+
+// WithLogger returns a Context with an updated logger.
+func (c Context)
+
+WithLogger(logger log.Logger)
+
+Context {
+ c.logger = logger
+ return c
+}
+
+// WithVoteInfos returns a Context with an updated consensus VoteInfo.
+func (c Context)
+
+WithVoteInfos(voteInfo []abci.VoteInfo)
+
+Context {
+ c.voteInfo = voteInfo
+ return c
+}
+
+// WithGasMeter returns a Context with an updated transaction GasMeter.
+func (c Context)
+
+WithGasMeter(meter storetypes.GasMeter)
+
+Context {
+ c.gasMeter = meter
+ return c
+}
+
+// WithBlockGasMeter returns a Context with an updated block GasMeter
+func (c Context)
+
+WithBlockGasMeter(meter storetypes.GasMeter)
+
+Context {
+ c.blockGasMeter = meter
+ return c
+}
+
+// WithKVGasConfig returns a Context with an updated gas configuration for
+// the KVStore
+func (c Context)
+
+WithKVGasConfig(gasConfig storetypes.GasConfig)
+
+Context {
+ c.kvGasConfig = gasConfig
+ return c
+}
+
+// WithTransientKVGasConfig returns a Context with an updated gas configuration for
+// the transient KVStore
+func (c Context)
+
+WithTransientKVGasConfig(gasConfig storetypes.GasConfig)
+
+Context {
+ c.transientKVGasConfig = gasConfig
+ return c
+}
+
+// WithIsCheckTx enables or disables CheckTx value for verifying transactions and returns an updated Context
+func (c Context)
+
+WithIsCheckTx(isCheckTx bool)
+
+Context {
+ c.checkTx = isCheckTx
+ c.execMode = ExecModeCheck
+ return c
+}
+
+// WithIsRecheckTx called with true will also set true on checkTx in order to
+// enforce the invariant that if recheckTx = true then checkTx = true as well.
+func (c Context)
+
+WithIsReCheckTx(isRecheckTx bool)
+
+Context {
+ if isRecheckTx {
+ c.checkTx = true
+}
+
+c.recheckTx = isRecheckTx
+ c.execMode = ExecModeReCheck
+ return c
+}
+
+// WithIsSigverifyTx called with true will sigverify in auth module
+func (c Context)
+
+WithIsSigverifyTx(isSigverifyTx bool)
+
+Context {
+ c.sigverifyTx = isSigverifyTx
+ return c
+}
+
+// WithExecMode returns a Context with an updated ExecMode.
+func (c Context)
+
+WithExecMode(m ExecMode)
+
+Context {
+ c.execMode = m
+ return c
+}
+
+// WithMinGasPrices returns a Context with an updated minimum gas price value
+func (c Context)
+
+WithMinGasPrices(gasPrices DecCoins)
+
+Context {
+ c.minGasPrice = gasPrices
+ return c
+}
+
+// WithConsensusParams returns a Context with an updated consensus params
+func (c Context)
+
+WithConsensusParams(params cmtproto.ConsensusParams)
+
+Context {
+ c.consParams = params
+ return c
+}
+
+// WithEventManager returns a Context with an updated event manager
+func (c Context)
+
+WithEventManager(em EventManagerI)
+
+Context {
+ c.eventManager = em
+ return c
+}
+
+// WithPriority returns a Context with an updated tx priority
+func (c Context)
+
+WithPriority(p int64)
+
+Context {
+ c.priority = p
+ return c
+}
+
+// WithStreamingManager returns a Context with an updated streaming manager
+func (c Context)
+
+WithStreamingManager(sm storetypes.StreamingManager)
+
+Context {
+ c.streamingManager = sm
+ return c
+}
+
+// WithCometInfo returns a Context with an updated comet info
+func (c Context)
+
+WithCometInfo(cometInfo comet.BlockInfo)
+
+Context {
+ c.cometInfo = cometInfo
+ return c
+}
+
+// WithHeaderInfo returns a Context with an updated header info.
+func (c Context) WithHeaderInfo(headerInfo header.Info) Context {
+    // Set time to UTC
+    headerInfo.Time = headerInfo.Time.UTC()
+    c.headerInfo = headerInfo
+    return c
+}
+
+// TODO: remove???
+func (c Context)
+
+IsZero()
+
+bool {
+ return c.ms == nil
+}
+
+func (c Context)
+
+WithValue(key, value any)
+
+Context {
+ c.baseCtx = context.WithValue(c.baseCtx, key, value)
+
+return c
+}
+
+func (c Context)
+
+Value(key any)
+
+any {
+ if key == SdkContextKey {
+ return c
+}
+
+return c.baseCtx.Value(key)
+}
+
+// ----------------------------------------------------------------------------
+// Store / Caching
+// ----------------------------------------------------------------------------
+
+// KVStore fetches a KVStore from the MultiStore.
+func (c Context)
+
+KVStore(key storetypes.StoreKey)
+
+storetypes.KVStore {
+ return gaskv.NewStore(c.ms.GetKVStore(key), c.gasMeter, c.kvGasConfig)
+}
+
+// TransientStore fetches a TransientStore from the MultiStore.
+func (c Context)
+
+TransientStore(key storetypes.StoreKey)
+
+storetypes.KVStore {
+ return gaskv.NewStore(c.ms.GetKVStore(key), c.gasMeter, c.transientKVGasConfig)
+}
+
+// CacheContext returns a new Context with the multi-store cached and a new
+// EventManager. The cached context is written to the context when writeCache
+// is called. Note, events are automatically emitted on the parent context's
+// EventManager when the caller executes the write.
+func (c Context)
+
+CacheContext() (cc Context, writeCache func()) {
+ cms := c.ms.CacheMultiStore()
+
+cc = c.WithMultiStore(cms).WithEventManager(NewEventManager())
+
+writeCache = func() {
+ c.EventManager().EmitEvents(cc.EventManager().Events())
+
+cms.Write()
+}
+
+return cc, writeCache
+}
+
+var (
+ _ context.Context = Context{
+}
+ _ storetypes.Context = Context{
+}
+)
+
+// ContextKey defines a type alias for a stdlib Context key.
+type ContextKey string
+
+// SdkContextKey is the key in the context.Context which holds the sdk.Context.
+const SdkContextKey ContextKey = "sdk-context"
+
+// WrapSDKContext returns a stdlib context.Context with the provided sdk.Context's internal
+// context as a value. It is useful for passing an sdk.Context through methods that take a
+// stdlib context.Context parameter such as generated gRPC methods. To get the original
+// sdk.Context back, call UnwrapSDKContext.
+//
+// Deprecated: there is no need to wrap anymore as the Cosmos SDK context implements context.Context.
+func WrapSDKContext(ctx Context)
+
+context.Context {
+ return ctx
+}
+
+// UnwrapSDKContext retrieves a Context from a context.Context instance
+// attached with WrapSDKContext. It panics if a Context was not properly
+// attached
+func UnwrapSDKContext(ctx context.Context)
+
+Context {
+ if sdkCtx, ok := ctx.(Context); ok {
+ return sdkCtx
+}
+
+return ctx.Value(SdkContextKey).(Context)
+}
+```
+
+## KVStore Wrappers
+
+### CacheKVStore
+
+`cachekv.Store` is a wrapper `KVStore` which provides buffered writing / cached reading functionalities over the underlying `KVStore`.
+
+```go expandable
+package cachekv
+
+import (
+
+ "bytes"
+ "io"
+ "sort"
+ "sync"
+
+ dbm "github.com/cosmos/cosmos-db"
+ "cosmossdk.io/math"
+ "cosmossdk.io/store/cachekv/internal"
+ "cosmossdk.io/store/internal/conv"
+ "cosmossdk.io/store/internal/kv"
+ "cosmossdk.io/store/tracekv"
+ "cosmossdk.io/store/types"
+)
+
+// cValue represents a cached value.
+// If dirty is true, it indicates the cached value is different from the underlying value.
+type cValue struct {
+ value []byte
+ dirty bool
+}
+
+// Store wraps an in-memory cache around an underlying types.KVStore.
+type Store struct {
+ mtx sync.Mutex
+ cache map[string]*cValue
+ unsortedCache map[string]struct{
+}
+
+sortedCache internal.BTree // always ascending sorted
+ parent types.KVStore
+}
+
+var _ types.CacheKVStore = (*Store)(nil)
+
+// NewStore creates a new Store object
+func NewStore(parent types.KVStore) *Store {
+ return &Store{
+ cache: make(map[string]*cValue),
+ unsortedCache: make(map[string]struct{
+}),
+ sortedCache: internal.NewBTree(),
+ parent: parent,
+}
+}
+
+// GetStoreType implements Store.
+func (store *Store)
+
+GetStoreType()
+
+types.StoreType {
+ return store.parent.GetStoreType()
+}
+
+// Get implements types.KVStore.
+func (store *Store)
+
+Get(key []byte) (value []byte) {
+ store.mtx.Lock()
+
+defer store.mtx.Unlock()
+
+types.AssertValidKey(key)
+
+cacheValue, ok := store.cache[conv.UnsafeBytesToStr(key)]
+ if !ok {
+ value = store.parent.Get(key)
+
+store.setCacheValue(key, value, false)
+}
+
+else {
+ value = cacheValue.value
+}
+
+return value
+}
+
+// Set implements types.KVStore.
+func (store *Store)
+
+Set(key, value []byte) {
+ types.AssertValidKey(key)
+
+types.AssertValidValue(value)
+
+store.mtx.Lock()
+
+defer store.mtx.Unlock()
+
+store.setCacheValue(key, value, true)
+}
+
+// Has implements types.KVStore.
+func (store *Store)
+
+Has(key []byte)
+
+bool {
+ value := store.Get(key)
+
+return value != nil
+}
+
+// Delete implements types.KVStore.
+func (store *Store)
+
+Delete(key []byte) {
+ types.AssertValidKey(key)
+
+store.mtx.Lock()
+
+defer store.mtx.Unlock()
+
+store.setCacheValue(key, nil, true)
+}
+
+func (store *Store)
+
+resetCaches() {
+ if len(store.cache) > 100_000 {
+ // Cache is too large. We likely did something linear time
+ // (e.g. Epoch block, Genesis block, etc). Free the old caches from memory, and let them get re-allocated.
+ // TODO: In a future CacheKV redesign, such linear workloads should get into a different cache instantiation.
+ // 100_000 is arbitrarily chosen as it solved Osmosis' InitGenesis RAM problem.
+ store.cache = make(map[string]*cValue)
+
+store.unsortedCache = make(map[string]struct{
+})
+}
+
+else {
+ // Clear the cache using the map clearing idiom
+ // and not allocating fresh objects.
+ // Please see https://bencher.orijtech.com/perfclinic/mapclearing/
+ for key := range store.cache {
+ delete(store.cache, key)
+}
+ for key := range store.unsortedCache {
+ delete(store.unsortedCache, key)
+}
+
+}
+
+store.sortedCache = internal.NewBTree()
+}
+
+// Implements Cachetypes.KVStore.
+func (store *Store)
+
+Write() {
+ store.mtx.Lock()
+
+defer store.mtx.Unlock()
+ if len(store.cache) == 0 && len(store.unsortedCache) == 0 {
+ store.sortedCache = internal.NewBTree()
+
+return
+}
+
+type cEntry struct {
+ key string
+ val *cValue
+}
+
+ // We need a copy of all of the keys.
+ // Not the best. To reduce RAM pressure, we copy the values as well
+ // and clear out the old caches right after the copy.
+ sortedCache := make([]cEntry, 0, len(store.cache))
+ for key, dbValue := range store.cache {
+ if dbValue.dirty {
+ sortedCache = append(sortedCache, cEntry{
+ key, dbValue
+})
+}
+
+}
+
+store.resetCaches()
+
+sort.Slice(sortedCache, func(i, j int)
+
+bool {
+ return sortedCache[i].key < sortedCache[j].key
+})
+
+ // TODO: Consider allowing usage of Batch, which would allow the write to
+ // at least happen atomically.
+ for _, obj := range sortedCache {
+ // We use []byte(key)
+
+instead of conv.UnsafeStrToBytes because we cannot
+ // be sure if the underlying store might do a save with the byteslice or
+ // not. Once we get confirmation that .Delete is guaranteed not to
+ // save the byteslice, then we can assume only a read-only copy is sufficient.
+ if obj.val.value != nil {
+ // It already exists in the parent, hence update it.
+ store.parent.Set([]byte(obj.key), obj.val.value)
+}
+
+else {
+ store.parent.Delete([]byte(obj.key))
+}
+
+}
+}
+
+// CacheWrap implements CacheWrapper.
+func (store *Store)
+
+CacheWrap()
+
+types.CacheWrap {
+ return NewStore(store)
+}
+
+// CacheWrapWithTrace implements the CacheWrapper interface.
+func (store *Store)
+
+CacheWrapWithTrace(w io.Writer, tc types.TraceContext)
+
+types.CacheWrap {
+ return NewStore(tracekv.NewStore(store, w, tc))
+}
+
+//----------------------------------------
+// Iteration
+
+// Iterator implements types.KVStore.
+func (store *Store)
+
+Iterator(start, end []byte)
+
+types.Iterator {
+ return store.iterator(start, end, true)
+}
+
+// ReverseIterator implements types.KVStore.
+func (store *Store)
+
+ReverseIterator(start, end []byte)
+
+types.Iterator {
+ return store.iterator(start, end, false)
+}
+
+func (store *Store)
+
+iterator(start, end []byte, ascending bool)
+
+types.Iterator {
+ store.mtx.Lock()
+
+defer store.mtx.Unlock()
+
+store.dirtyItems(start, end)
+ isoSortedCache := store.sortedCache.Copy()
+
+var (
+ err error
+ parent, cache types.Iterator
+ )
+ if ascending {
+ parent = store.parent.Iterator(start, end)
+
+cache, err = isoSortedCache.Iterator(start, end)
+}
+
+else {
+ parent = store.parent.ReverseIterator(start, end)
+
+cache, err = isoSortedCache.ReverseIterator(start, end)
+}
+ if err != nil {
+ panic(err)
+}
+
+return internal.NewCacheMergeIterator(parent, cache, ascending)
+}
+
// findStartIndex returns the index of the first element in strL that is
// >= startQ, or -1 if no such element exists. strL must be sorted ascending.
func findStartIndex(strL []string, startQ string) int {
	// Modified binary search to find the very first element >= startQ.
	if len(strL) == 0 {
		return -1
	}

	var left, right, mid int
	right = len(strL) - 1
	for left <= right {
		mid = (left + right) >> 1
		midStr := strL[mid]
		if midStr == startQ {
			// Handle condition where there might be multiple values equal to startQ.
			// We are looking for the very last value < midStr, so that i+1 will be
			// the first element >= midStr.
			for i := mid - 1; i >= 0; i-- {
				if strL[i] != midStr {
					return i + 1
				}
			}
			return 0
		}
		if midStr < startQ {
			left = mid + 1
		} else { // midStr > startQ
			right = mid - 1
		}
	}
	if left >= 0 && left < len(strL) && strL[left] >= startQ {
		return left
	}

	return -1
}
+
+func findEndIndex(strL []string, endQ string)
+
+int {
+ if len(strL) == 0 {
+ return -1
+}
+
+ // Modified binary search to find the very first element