diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml index f0c2fbd652e..447ecc90784 100644 --- a/.github/workflows/govulncheck.yml +++ b/.github/workflows/govulncheck.yml @@ -16,7 +16,7 @@ jobs: steps: - uses: actions/setup-go@v5 with: - go-version: "1.22" + go-version: "1.23" check-latest: true - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 diff --git a/.golangci.yml b/.golangci.yml index 7585abcf7b2..12e2d8ad6cd 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -77,6 +77,13 @@ linters-settings: - github.com/stretchr/testify/require - github.com/syndtr/goleveldb - github.com/decred/dcrd/dcrec/secp256k1/v4 + - github.com/aws/aws-sdk-go-v2/aws + - github.com/aws/aws-sdk-go-v2/config + - github.com/aws/aws-sdk-go-v2/credentials + - github.com/grafana/pyroscope-go + - github.com/aws/aws-sdk-go-v2/service/s3 + - github.com/grafana/otel-profiling-go + - github.com/celestiaorg/nmt test: files: - "$test" diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 5b9860d6b7e..33f295fc342 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -915,8 +915,9 @@ var xxx_messageInfo_RequestListSnapshots proto.InternalMessageInfo // offers a snapshot to the application type RequestOfferSnapshot struct { - Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"` - AppHash []byte `protobuf:"bytes,2,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` + Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"` + AppHash []byte `protobuf:"bytes,2,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` + AppVersion uint64 `protobuf:"varint,3,opt,name=app_version,json=appVersion,proto3" json:"app_version,omitempty"` } func (m *RequestOfferSnapshot) Reset() { *m = RequestOfferSnapshot{} } @@ -966,6 +967,13 @@ func (m *RequestOfferSnapshot) GetAppHash() []byte { return nil } +func (m *RequestOfferSnapshot) GetAppVersion() 
uint64 { + if m != nil { + return m.AppVersion + } + return 0 +} + // loads a snapshot chunk type RequestLoadSnapshotChunk struct { Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` @@ -1203,6 +1211,8 @@ type RequestProcessProposal struct { NextValidatorsHash []byte `protobuf:"bytes,7,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` // address of the public key of the original proposer of the block. ProposerAddress []byte `protobuf:"bytes,8,opt,name=proposer_address,json=proposerAddress,proto3" json:"proposer_address,omitempty"` + SquareSize uint64 `protobuf:"varint,9,opt,name=square_size,json=squareSize,proto3" json:"square_size,omitempty"` + DataRootHash []byte `protobuf:"bytes,10,opt,name=data_root_hash,json=dataRootHash,proto3" json:"data_root_hash,omitempty"` } func (m *RequestProcessProposal) Reset() { *m = RequestProcessProposal{} } @@ -1294,6 +1304,20 @@ func (m *RequestProcessProposal) GetProposerAddress() []byte { return nil } +func (m *RequestProcessProposal) GetSquareSize() uint64 { + if m != nil { + return m.SquareSize + } + return 0 +} + +func (m *RequestProcessProposal) GetDataRootHash() []byte { + if m != nil { + return m.DataRootHash + } + return nil +} + // Extends a vote with application-injected data type RequestExtendVote struct { // the hash of the block that this vote may be referring to @@ -2560,7 +2584,9 @@ func (m *ResponseApplySnapshotChunk) GetRejectSenders() []string { } type ResponsePrepareProposal struct { - Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` + Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` + SquareSize uint64 `protobuf:"varint,2,opt,name=square_size,json=squareSize,proto3" json:"square_size,omitempty"` + DataRootHash []byte `protobuf:"bytes,3,opt,name=data_root_hash,json=dataRootHash,proto3" json:"data_root_hash,omitempty"` } func (m *ResponsePrepareProposal) Reset() { *m = 
ResponsePrepareProposal{} } @@ -2603,6 +2629,20 @@ func (m *ResponsePrepareProposal) GetTxs() [][]byte { return nil } +func (m *ResponsePrepareProposal) GetSquareSize() uint64 { + if m != nil { + return m.SquareSize + } + return 0 +} + +func (m *ResponsePrepareProposal) GetDataRootHash() []byte { + if m != nil { + return m.DataRootHash + } + return nil +} + type ResponseProcessProposal struct { Status ResponseProcessProposal_ProposalStatus `protobuf:"varint,1,opt,name=status,proto3,enum=tendermint.abci.ResponseProcessProposal_ProposalStatus" json:"status,omitempty"` } @@ -3664,205 +3704,209 @@ func init() { func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) } var fileDescriptor_252557cfdd89a31a = []byte{ - // 3167 bytes of a gzipped FileDescriptorProto + // 3225 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcb, 0x73, 0x23, 0xd5, - 0xd5, 0x57, 0xeb, 0xad, 0xa3, 0x87, 0xdb, 0xd7, 0x9e, 0x41, 0x23, 0x06, 0xdb, 0x34, 0x05, 0x0c, - 0x03, 0xd8, 0x7c, 0x9e, 0x6f, 0x78, 0xd4, 0xc0, 0x57, 0x25, 0x6b, 0x34, 0x9f, 0xec, 0x31, 0xb6, + 0xd5, 0x57, 0xeb, 0xad, 0xa3, 0x87, 0xdb, 0xd7, 0x9e, 0x41, 0x23, 0x06, 0x8f, 0x69, 0x3e, 0x60, + 0x18, 0xc0, 0xe6, 0xf3, 0x7c, 0xc3, 0xa3, 0x06, 0xbe, 0x2a, 0x59, 0xa3, 0x89, 0xec, 0x31, 0xb6, 0x69, 0xcb, 0x43, 0x91, 0x07, 0x4d, 0x5b, 0xba, 0xb2, 0x9a, 0x91, 0xd4, 0x4d, 0xf7, 0x95, 0x91, - 0x59, 0xa5, 0x42, 0x52, 0x95, 0x62, 0x45, 0x55, 0xb2, 0x60, 0x11, 0x16, 0x59, 0x64, 0x93, 0xbf, - 0x20, 0xab, 0x64, 0x93, 0x05, 0x8b, 0x2c, 0x58, 0x66, 0x45, 0x52, 0xb0, 0x63, 0x9b, 0x45, 0xb6, - 0xa9, 0xfb, 0xe8, 0x97, 0xa4, 0xb6, 0xa4, 0x81, 0x2c, 0x52, 0xc9, 0xae, 0xef, 0xe9, 0x73, 0xce, - 0xed, 0x7b, 0xee, 0xb9, 0xe7, 0xf1, 0xeb, 0x0b, 0x8f, 0x13, 0x3c, 0x68, 0x63, 0xbb, 0x6f, 0x0c, - 0xc8, 0x96, 0x7e, 0xda, 0x32, 0xb6, 0xc8, 0x85, 0x85, 0x9d, 0x4d, 0xcb, 0x36, 0x89, 0x89, 0x96, - 0xfc, 0x97, 0x9b, 0xf4, 0x65, 0xe5, 0x89, 0x00, 0x77, 0xcb, 0xbe, 
0xb0, 0x88, 0xb9, 0x65, 0xd9, - 0xa6, 0xd9, 0xe1, 0xfc, 0x95, 0xeb, 0x93, 0xaf, 0x1f, 0xe2, 0x0b, 0xa1, 0x2d, 0x24, 0xcc, 0x66, - 0xd9, 0xb2, 0x74, 0x5b, 0xef, 0xbb, 0xaf, 0x37, 0x26, 0x5e, 0x9f, 0xeb, 0x3d, 0xa3, 0xad, 0x13, - 0xd3, 0x16, 0x1c, 0xeb, 0x67, 0xa6, 0x79, 0xd6, 0xc3, 0x5b, 0x6c, 0x74, 0x3a, 0xec, 0x6c, 0x11, - 0xa3, 0x8f, 0x1d, 0xa2, 0xf7, 0x2d, 0xc1, 0xb0, 0x7a, 0x66, 0x9e, 0x99, 0xec, 0x71, 0x8b, 0x3e, - 0x71, 0xaa, 0xf2, 0xc7, 0x1c, 0x64, 0x54, 0xfc, 0xc1, 0x10, 0x3b, 0x04, 0x6d, 0x43, 0x12, 0xb7, - 0xba, 0x66, 0x59, 0xda, 0x90, 0x6e, 0xe4, 0xb7, 0xaf, 0x6f, 0x8e, 0x2d, 0x70, 0x53, 0xf0, 0xd5, - 0x5b, 0x5d, 0xb3, 0x11, 0x53, 0x19, 0x2f, 0xba, 0x0d, 0xa9, 0x4e, 0x6f, 0xe8, 0x74, 0xcb, 0x71, - 0x26, 0xf4, 0x44, 0x94, 0xd0, 0x3d, 0xca, 0xd4, 0x88, 0xa9, 0x9c, 0x9b, 0x4e, 0x65, 0x0c, 0x3a, - 0x66, 0x39, 0x71, 0xf9, 0x54, 0xbb, 0x83, 0x0e, 0x9b, 0x8a, 0xf2, 0xa2, 0x1d, 0x00, 0x63, 0x60, - 0x10, 0xad, 0xd5, 0xd5, 0x8d, 0x41, 0x39, 0xc5, 0x24, 0x9f, 0x8c, 0x96, 0x34, 0x48, 0x8d, 0x32, - 0x36, 0x62, 0x6a, 0xce, 0x70, 0x07, 0xf4, 0x73, 0x3f, 0x18, 0x62, 0xfb, 0xa2, 0x9c, 0xbe, 0xfc, - 0x73, 0xdf, 0xa2, 0x4c, 0xf4, 0x73, 0x19, 0x37, 0x7a, 0x1d, 0xb2, 0xad, 0x2e, 0x6e, 0x3d, 0xd4, - 0xc8, 0xa8, 0x9c, 0x65, 0x92, 0xeb, 0x51, 0x92, 0x35, 0xca, 0xd7, 0x1c, 0x35, 0x62, 0x6a, 0xa6, - 0xc5, 0x1f, 0xd1, 0xab, 0x90, 0x6e, 0x99, 0xfd, 0xbe, 0x41, 0xca, 0x79, 0x26, 0xbb, 0x16, 0x29, - 0xcb, 0xb8, 0x1a, 0x31, 0x55, 0xf0, 0xa3, 0x03, 0x28, 0xf5, 0x0c, 0x87, 0x68, 0xce, 0x40, 0xb7, - 0x9c, 0xae, 0x49, 0x9c, 0x72, 0x81, 0x69, 0x78, 0x3a, 0x4a, 0xc3, 0xbe, 0xe1, 0x90, 0x63, 0x97, - 0xb9, 0x11, 0x53, 0x8b, 0xbd, 0x20, 0x81, 0xea, 0x33, 0x3b, 0x1d, 0x6c, 0x7b, 0x0a, 0xcb, 0xc5, - 0xcb, 0xf5, 0x1d, 0x52, 0x6e, 0x57, 0x9e, 0xea, 0x33, 0x83, 0x04, 0xf4, 0x43, 0x58, 0xe9, 0x99, - 0x7a, 0xdb, 0x53, 0xa7, 0xb5, 0xba, 0xc3, 0xc1, 0xc3, 0x72, 0x89, 0x29, 0x7d, 0x2e, 0xf2, 0x23, - 0x4d, 0xbd, 0xed, 0xaa, 0xa8, 0x51, 0x81, 0x46, 0x4c, 0x5d, 0xee, 0x8d, 0x13, 0xd1, 0xbb, 0xb0, - 0xaa, 
0x5b, 0x56, 0xef, 0x62, 0x5c, 0xfb, 0x12, 0xd3, 0x7e, 0x33, 0x4a, 0x7b, 0x95, 0xca, 0x8c, - 0xab, 0x47, 0xfa, 0x04, 0x15, 0x35, 0x41, 0xb6, 0x6c, 0x6c, 0xe9, 0x36, 0xd6, 0x2c, 0xdb, 0xb4, - 0x4c, 0x47, 0xef, 0x95, 0x65, 0xa6, 0xfb, 0xd9, 0x28, 0xdd, 0x47, 0x9c, 0xff, 0x48, 0xb0, 0x37, - 0x62, 0xea, 0x92, 0x15, 0x26, 0x71, 0xad, 0x66, 0x0b, 0x3b, 0x8e, 0xaf, 0x75, 0x79, 0x96, 0x56, - 0xc6, 0x1f, 0xd6, 0x1a, 0x22, 0xa1, 0x3a, 0xe4, 0xf1, 0x88, 0x8a, 0x6b, 0xe7, 0x26, 0xc1, 0x65, - 0xc4, 0x14, 0x2a, 0x91, 0x27, 0x94, 0xb1, 0x3e, 0x30, 0x09, 0x6e, 0xc4, 0x54, 0xc0, 0xde, 0x08, - 0xe9, 0x70, 0xe5, 0x1c, 0xdb, 0x46, 0xe7, 0x82, 0xa9, 0xd1, 0xd8, 0x1b, 0xc7, 0x30, 0x07, 0xe5, - 0x15, 0xa6, 0xf0, 0xf9, 0x28, 0x85, 0x0f, 0x98, 0x10, 0x55, 0x51, 0x77, 0x45, 0x1a, 0x31, 0x75, - 0xe5, 0x7c, 0x92, 0x4c, 0x5d, 0xac, 0x63, 0x0c, 0xf4, 0x9e, 0xf1, 0x11, 0xd6, 0x4e, 0x7b, 0x66, - 0xeb, 0x61, 0x79, 0xf5, 0x72, 0x17, 0xbb, 0x27, 0xb8, 0x77, 0x28, 0x33, 0x75, 0xb1, 0x4e, 0x90, - 0xb0, 0x93, 0x81, 0xd4, 0xb9, 0xde, 0x1b, 0xe2, 0xbd, 0x64, 0x36, 0x29, 0xa7, 0xf6, 0x92, 0xd9, - 0x8c, 0x9c, 0xdd, 0x4b, 0x66, 0x73, 0x32, 0xec, 0x25, 0xb3, 0x20, 0xe7, 0x95, 0x67, 0x21, 0x1f, - 0x08, 0x4c, 0xa8, 0x0c, 0x99, 0x3e, 0x76, 0x1c, 0xfd, 0x0c, 0xb3, 0x38, 0x96, 0x53, 0xdd, 0xa1, - 0x52, 0x82, 0x42, 0x30, 0x18, 0x29, 0x9f, 0x4a, 0x9e, 0x24, 0x8d, 0x33, 0x54, 0xf2, 0x1c, 0xdb, - 0xcc, 0x1c, 0x42, 0x52, 0x0c, 0xd1, 0x53, 0x50, 0x64, 0x4b, 0xd1, 0xdc, 0xf7, 0x34, 0xd8, 0x25, - 0xd5, 0x02, 0x23, 0x3e, 0x10, 0x4c, 0xeb, 0x90, 0xb7, 0xb6, 0x2d, 0x8f, 0x25, 0xc1, 0x58, 0xc0, - 0xda, 0xb6, 0x5c, 0x86, 0x27, 0xa1, 0x40, 0xd7, 0xed, 0x71, 0x24, 0xd9, 0x24, 0x79, 0x4a, 0x13, - 0x2c, 0xca, 0x9f, 0xe3, 0x20, 0x8f, 0x07, 0x30, 0xf4, 0x2a, 0x24, 0x69, 0x2c, 0x17, 0x61, 0xb9, - 0xb2, 0xc9, 0x03, 0xfd, 0xa6, 0x1b, 0xe8, 0x37, 0x9b, 0x6e, 0xa0, 0xdf, 0xc9, 0x7e, 0xf1, 0xd5, - 0x7a, 0xec, 0xd3, 0xbf, 0xae, 0x4b, 0x2a, 0x93, 0x40, 0xd7, 0x68, 0xd8, 0xd2, 0x8d, 0x81, 0x66, - 0xb4, 0xd9, 0x27, 0xe7, 0x68, 0x4c, 0xd2, 
0x8d, 0xc1, 0x6e, 0x1b, 0xed, 0x83, 0xdc, 0x32, 0x07, - 0x0e, 0x1e, 0x38, 0x43, 0x47, 0xe3, 0xa9, 0x46, 0x04, 0xe3, 0x50, 0x48, 0xe5, 0x09, 0xaf, 0xe6, - 0x72, 0x1e, 0x31, 0x46, 0x75, 0xa9, 0x15, 0x26, 0xa0, 0x7b, 0x00, 0x5e, 0x3e, 0x72, 0xca, 0xc9, - 0x8d, 0xc4, 0x8d, 0xfc, 0xf6, 0xc6, 0xc4, 0x86, 0x3f, 0x70, 0x59, 0x4e, 0xac, 0xb6, 0x4e, 0xf0, - 0x4e, 0x92, 0x7e, 0xae, 0x1a, 0x90, 0x44, 0xcf, 0xc0, 0x92, 0x6e, 0x59, 0x9a, 0x43, 0x74, 0x82, - 0xb5, 0xd3, 0x0b, 0x82, 0x1d, 0x16, 0xe7, 0x0b, 0x6a, 0x51, 0xb7, 0xac, 0x63, 0x4a, 0xdd, 0xa1, - 0x44, 0xf4, 0x34, 0x94, 0x68, 0x4c, 0x37, 0xf4, 0x9e, 0xd6, 0xc5, 0xc6, 0x59, 0x97, 0xb0, 0x78, - 0x9e, 0x50, 0x8b, 0x82, 0xda, 0x60, 0x44, 0xa5, 0xed, 0xed, 0x38, 0x8b, 0xe7, 0x08, 0x41, 0xb2, - 0xad, 0x13, 0x9d, 0x59, 0xb2, 0xa0, 0xb2, 0x67, 0x4a, 0xb3, 0x74, 0xd2, 0x15, 0xf6, 0x61, 0xcf, - 0xe8, 0x2a, 0xa4, 0x85, 0xda, 0x04, 0x53, 0x2b, 0x46, 0x68, 0x15, 0x52, 0x96, 0x6d, 0x9e, 0x63, - 0xb6, 0x75, 0x59, 0x95, 0x0f, 0x14, 0x15, 0x4a, 0xe1, 0xd8, 0x8f, 0x4a, 0x10, 0x27, 0x23, 0x31, - 0x4b, 0x9c, 0x8c, 0xd0, 0x4b, 0x90, 0xa4, 0x86, 0x64, 0x73, 0x94, 0xa6, 0x64, 0x3b, 0x21, 0xd7, - 0xbc, 0xb0, 0xb0, 0xca, 0x38, 0x95, 0x25, 0x28, 0x86, 0x72, 0x82, 0x72, 0x15, 0x56, 0xa7, 0x85, - 0x78, 0xa5, 0xeb, 0xd1, 0x43, 0xa1, 0x1a, 0xdd, 0x86, 0xac, 0x17, 0xe3, 0xb9, 0xe3, 0x5c, 0x9b, - 0x98, 0xd6, 0x65, 0x56, 0x3d, 0x56, 0xea, 0x31, 0x74, 0x03, 0xba, 0xba, 0xc8, 0xe8, 0x05, 0x35, - 0xa3, 0x5b, 0x56, 0x43, 0x77, 0xba, 0xca, 0x7b, 0x50, 0x8e, 0x8a, 0xdf, 0x01, 0x83, 0x49, 0xcc, - 0xed, 0x5d, 0x83, 0x5d, 0x85, 0x74, 0xc7, 0xb4, 0xfb, 0x3a, 0x61, 0xca, 0x8a, 0xaa, 0x18, 0x51, - 0x43, 0xf2, 0x58, 0x9e, 0x60, 0x64, 0x3e, 0x50, 0x34, 0xb8, 0x16, 0x19, 0xc3, 0xa9, 0x88, 0x31, - 0x68, 0x63, 0x6e, 0xd6, 0xa2, 0xca, 0x07, 0xbe, 0x22, 0xfe, 0xb1, 0x7c, 0x40, 0xa7, 0x75, 0xd8, - 0x5a, 0x99, 0xfe, 0x9c, 0x2a, 0x46, 0xca, 0x67, 0x09, 0xb8, 0x3a, 0x3d, 0x92, 0xa3, 0x0d, 0x28, - 0xf4, 0xf5, 0x91, 0x46, 0x46, 0xc2, 0xed, 0x24, 0xb6, 0xf1, 0xd0, 0xd7, 0x47, 
0xcd, 0x11, 0xf7, - 0x39, 0x19, 0x12, 0x64, 0xe4, 0x94, 0xe3, 0x1b, 0x89, 0x1b, 0x05, 0x95, 0x3e, 0xa2, 0x13, 0x58, - 0xee, 0x99, 0x2d, 0xbd, 0xa7, 0xf5, 0x74, 0x87, 0x68, 0x22, 0xc5, 0xf3, 0x43, 0xf4, 0xd4, 0x84, - 0xb1, 0x79, 0x4c, 0xc6, 0x6d, 0xbe, 0x9f, 0x34, 0xe0, 0x08, 0xff, 0x5f, 0x62, 0x3a, 0xf6, 0x75, - 0x77, 0xab, 0xd1, 0x5d, 0xc8, 0xf7, 0x0d, 0xe7, 0x14, 0x77, 0xf5, 0x73, 0xc3, 0xb4, 0xc5, 0x69, - 0x9a, 0x74, 0x9a, 0x37, 0x7d, 0x1e, 0xa1, 0x29, 0x28, 0x16, 0xd8, 0x92, 0x54, 0xc8, 0x87, 0xdd, - 0x68, 0x92, 0x5e, 0x38, 0x9a, 0xbc, 0x04, 0xab, 0x03, 0x3c, 0x22, 0x9a, 0x7f, 0x5e, 0xb9, 0x9f, - 0x64, 0x98, 0xe9, 0x11, 0x7d, 0xe7, 0x9d, 0x70, 0x87, 0xba, 0x0c, 0x7a, 0x8e, 0xe5, 0x42, 0xcb, - 0x74, 0xb0, 0xad, 0xe9, 0xed, 0xb6, 0x8d, 0x1d, 0x87, 0x95, 0x4f, 0x05, 0x96, 0xe0, 0x18, 0xbd, - 0xca, 0xc9, 0xca, 0x2f, 0x82, 0x5b, 0x13, 0xce, 0x7d, 0xc2, 0xf0, 0x92, 0x6f, 0xf8, 0x63, 0x58, - 0x15, 0xf2, 0xed, 0x90, 0xed, 0x79, 0x0d, 0xfa, 0xf8, 0xe4, 0xf9, 0x1a, 0xb7, 0x39, 0x72, 0xc5, - 0xa3, 0xcd, 0x9e, 0x78, 0x34, 0xb3, 0x23, 0x48, 0x32, 0xa3, 0x24, 0x79, 0x88, 0xa1, 0xcf, 0xff, - 0x6e, 0x5b, 0xf1, 0x71, 0x02, 0x96, 0x27, 0x0a, 0x09, 0x6f, 0x61, 0xd2, 0xd4, 0x85, 0xc5, 0xa7, - 0x2e, 0x2c, 0xb1, 0xf0, 0xc2, 0xc4, 0x5e, 0x27, 0x67, 0xef, 0x75, 0xea, 0x7b, 0xdc, 0xeb, 0xf4, - 0xa3, 0xed, 0xf5, 0xbf, 0x74, 0x17, 0x7e, 0x2d, 0x41, 0x25, 0xba, 0xfa, 0x9a, 0xba, 0x1d, 0xcf, - 0xc3, 0xb2, 0xf7, 0x29, 0x9e, 0x7a, 0x1e, 0x18, 0x65, 0xef, 0x85, 0xd0, 0x1f, 0x99, 0xe3, 0x9e, - 0x86, 0xd2, 0x58, 0x6d, 0xc8, 0x5d, 0xb9, 0x78, 0x1e, 0x9c, 0x5f, 0xf9, 0x59, 0xc2, 0x4b, 0x3c, - 0xa1, 0x02, 0x6e, 0xca, 0x69, 0x7d, 0x0b, 0x56, 0xda, 0xb8, 0x65, 0xb4, 0x1f, 0xf5, 0xb0, 0x2e, - 0x0b, 0xe9, 0xff, 0x9e, 0xd5, 0x49, 0x2f, 0xf9, 0x15, 0x40, 0x56, 0xc5, 0x8e, 0x45, 0xeb, 0x31, - 0xb4, 0x03, 0x39, 0x3c, 0x6a, 0x61, 0x8b, 0xb8, 0x25, 0xec, 0xf4, 0x16, 0x81, 0x73, 0xd7, 0x5d, - 0x4e, 0xda, 0x20, 0x7b, 0x62, 0xe8, 0x96, 0xc0, 0x00, 0xa2, 0xdb, 0x79, 0x21, 0x1e, 0x04, 0x01, - 0x5e, 0x76, 0x41, 
0x80, 0x44, 0x64, 0x7f, 0xcb, 0xa5, 0xc6, 0x50, 0x80, 0x5b, 0x02, 0x05, 0x48, - 0xce, 0x98, 0x2c, 0x04, 0x03, 0xd4, 0x42, 0x30, 0x40, 0x7a, 0xc6, 0x32, 0x23, 0x70, 0x80, 0x97, - 0x5d, 0x1c, 0x20, 0x33, 0xe3, 0x8b, 0xc7, 0x80, 0x80, 0x37, 0x02, 0x40, 0x40, 0x8e, 0x89, 0x6e, - 0x44, 0x8a, 0x4e, 0x41, 0x02, 0x5e, 0xf3, 0x90, 0x80, 0x42, 0x24, 0x8a, 0x20, 0x84, 0xc7, 0xa1, - 0x80, 0xc3, 0x09, 0x28, 0x80, 0xb7, 0xee, 0xcf, 0x44, 0xaa, 0x98, 0x81, 0x05, 0x1c, 0x4e, 0x60, - 0x01, 0xa5, 0x19, 0x0a, 0x67, 0x80, 0x01, 0x3f, 0x9a, 0x0e, 0x06, 0x44, 0xb7, 0xeb, 0xe2, 0x33, - 0xe7, 0x43, 0x03, 0xb4, 0x08, 0x34, 0x40, 0x8e, 0xec, 0x5c, 0xb9, 0xfa, 0xb9, 0xe1, 0x80, 0x93, - 0x29, 0x70, 0x00, 0x6f, 0xdc, 0x6f, 0x44, 0x2a, 0x9f, 0x03, 0x0f, 0x38, 0x99, 0x82, 0x07, 0xa0, - 0x99, 0x6a, 0x67, 0x02, 0x02, 0xf7, 0xc2, 0x80, 0xc0, 0x4a, 0x44, 0xd5, 0xe9, 0x9f, 0xf6, 0x08, - 0x44, 0xe0, 0x34, 0x0a, 0x11, 0xe0, 0x5d, 0xfb, 0x0b, 0x91, 0x1a, 0x17, 0x80, 0x04, 0x0e, 0x27, - 0x20, 0x81, 0x2b, 0x33, 0x3c, 0x6d, 0x7e, 0x4c, 0x20, 0x25, 0xa7, 0xf7, 0x92, 0xd9, 0xac, 0x9c, - 0xe3, 0x68, 0xc0, 0x5e, 0x32, 0x9b, 0x97, 0x0b, 0xca, 0x73, 0xb4, 0x82, 0x19, 0x8b, 0x73, 0xb4, - 0x57, 0xc0, 0xb6, 0x6d, 0xda, 0xa2, 0xbb, 0xe7, 0x03, 0xe5, 0x06, 0xed, 0x11, 0xfd, 0x98, 0x76, - 0x09, 0x7e, 0xc0, 0x7a, 0xb2, 0x40, 0x1c, 0x53, 0x7e, 0x2f, 0xf9, 0xb2, 0x0c, 0x41, 0x08, 0xf6, - 0x97, 0x39, 0xd1, 0x5f, 0x06, 0x50, 0x85, 0x78, 0x18, 0x55, 0x58, 0x87, 0x3c, 0xed, 0xb5, 0xc6, - 0x00, 0x03, 0xdd, 0xf2, 0x00, 0x83, 0x9b, 0xb0, 0xcc, 0x12, 0x26, 0xc7, 0x1e, 0x44, 0x5a, 0x4a, - 0xb2, 0xb4, 0xb4, 0x44, 0x5f, 0x70, 0xeb, 0xf0, 0xfc, 0xf4, 0x22, 0xac, 0x04, 0x78, 0xbd, 0x1e, - 0x8e, 0x77, 0xcf, 0xb2, 0xc7, 0x5d, 0x15, 0xcd, 0xdc, 0x9f, 0x24, 0xdf, 0x42, 0x3e, 0xd2, 0x30, - 0x0d, 0x14, 0x90, 0xbe, 0x27, 0x50, 0x20, 0xfe, 0xc8, 0xa0, 0x40, 0xb0, 0x27, 0x4d, 0x84, 0x7b, - 0xd2, 0x7f, 0x48, 0xfe, 0x9e, 0x78, 0x2d, 0x7e, 0xcb, 0x6c, 0x63, 0xd1, 0x25, 0xb2, 0x67, 0x5a, - 0x92, 0xf4, 0xcc, 0x33, 0xd1, 0x0b, 0xd2, 0x47, 0xca, 
0xe5, 0x25, 0x9e, 0x9c, 0xc8, 0x2b, 0x5e, - 0x83, 0xc9, 0x13, 0xbf, 0x68, 0x30, 0x65, 0x48, 0x3c, 0xc4, 0x1c, 0x2e, 0x2e, 0xa8, 0xf4, 0x91, - 0xf2, 0x31, 0xe7, 0x13, 0x09, 0x9c, 0x0f, 0xd0, 0xab, 0x90, 0x63, 0x60, 0xbf, 0x66, 0x5a, 0x8e, - 0x80, 0x88, 0x43, 0xa5, 0x0d, 0x47, 0xfc, 0x37, 0x8f, 0x28, 0xcf, 0xa1, 0xe5, 0xa8, 0x59, 0x4b, - 0x3c, 0x05, 0x2a, 0x8e, 0x5c, 0xa8, 0xe2, 0xb8, 0x0e, 0x39, 0xfa, 0xf5, 0x8e, 0xa5, 0xb7, 0x70, - 0x19, 0xd8, 0x87, 0xfa, 0x04, 0xe5, 0x77, 0x71, 0x58, 0x1a, 0x4b, 0x34, 0x53, 0xd7, 0xee, 0xba, - 0x64, 0x3c, 0x00, 0x79, 0xcc, 0x67, 0x8f, 0x35, 0x80, 0x33, 0xdd, 0xd1, 0x3e, 0xd4, 0x07, 0x04, - 0xb7, 0x85, 0x51, 0x02, 0x14, 0x54, 0x81, 0x2c, 0x1d, 0x0d, 0x1d, 0xdc, 0x16, 0xe8, 0x8b, 0x37, - 0x46, 0x0d, 0x48, 0xe3, 0x73, 0x3c, 0x20, 0x4e, 0x39, 0xc3, 0xb6, 0xfd, 0xea, 0x64, 0x3b, 0x4c, - 0x5f, 0xef, 0x94, 0xe9, 0x66, 0x7f, 0xfb, 0xd5, 0xba, 0xcc, 0xb9, 0x5f, 0x30, 0xfb, 0x06, 0xc1, - 0x7d, 0x8b, 0x5c, 0xa8, 0x42, 0x3e, 0x6c, 0x85, 0xec, 0x98, 0x15, 0x18, 0x0e, 0x58, 0x70, 0xdb, - 0x7b, 0x6a, 0x53, 0xc3, 0xb4, 0x0d, 0x72, 0xa1, 0x16, 0xfb, 0xb8, 0x6f, 0x99, 0x66, 0x4f, 0xe3, - 0x67, 0xbc, 0x0a, 0xa5, 0x70, 0x5e, 0x45, 0x4f, 0x41, 0xd1, 0xc6, 0x44, 0x37, 0x06, 0x5a, 0xa8, - 0x08, 0x2e, 0x70, 0x22, 0x3f, 0x53, 0x7b, 0xc9, 0xac, 0x24, 0xc7, 0xf7, 0x92, 0xd9, 0xb8, 0x9c, - 0x50, 0x8e, 0xe0, 0xca, 0xd4, 0xbc, 0x8a, 0x5e, 0x81, 0x9c, 0x9f, 0x92, 0x25, 0xb6, 0xda, 0x4b, - 0x90, 0x16, 0x9f, 0x57, 0xf9, 0x83, 0xe4, 0xab, 0x0c, 0x63, 0x37, 0x75, 0x48, 0xdb, 0xd8, 0x19, - 0xf6, 0x38, 0x9a, 0x52, 0xda, 0x7e, 0x71, 0xbe, 0x8c, 0x4c, 0xa9, 0xc3, 0x1e, 0x51, 0x85, 0xb0, - 0xf2, 0x2e, 0xa4, 0x39, 0x05, 0xe5, 0x21, 0x73, 0x72, 0x70, 0xff, 0xe0, 0xf0, 0xed, 0x03, 0x39, - 0x86, 0x00, 0xd2, 0xd5, 0x5a, 0xad, 0x7e, 0xd4, 0x94, 0x25, 0x94, 0x83, 0x54, 0x75, 0xe7, 0x50, - 0x6d, 0xca, 0x71, 0x4a, 0x56, 0xeb, 0x7b, 0xf5, 0x5a, 0x53, 0x4e, 0xa0, 0x65, 0x28, 0xf2, 0x67, - 0xed, 0xde, 0xa1, 0xfa, 0x66, 0xb5, 0x29, 0x27, 0x03, 0xa4, 0xe3, 0xfa, 0xc1, 0xdd, 0xba, 
0x2a, - 0xa7, 0x94, 0xff, 0x81, 0x6b, 0x91, 0x39, 0xdc, 0x07, 0x66, 0xa4, 0x00, 0x30, 0xa3, 0x7c, 0x16, - 0xa7, 0x4d, 0x4d, 0x54, 0x62, 0x46, 0x7b, 0x63, 0x0b, 0xdf, 0x5e, 0x20, 0xab, 0x8f, 0xad, 0x9e, - 0xf6, 0x31, 0x36, 0xee, 0x60, 0xd2, 0xea, 0xf2, 0x42, 0x81, 0x47, 0xa0, 0xa2, 0x5a, 0x14, 0x54, - 0x26, 0xe4, 0x70, 0xb6, 0xf7, 0x71, 0x8b, 0x68, 0xdc, 0x89, 0x1c, 0xd6, 0x4c, 0xe4, 0x28, 0x1b, - 0xa5, 0x1e, 0x73, 0xa2, 0xf2, 0xde, 0x42, 0xb6, 0xcc, 0x41, 0x4a, 0xad, 0x37, 0xd5, 0x77, 0xe4, - 0x04, 0x42, 0x50, 0x62, 0x8f, 0xda, 0xf1, 0x41, 0xf5, 0xe8, 0xb8, 0x71, 0x48, 0x6d, 0xb9, 0x02, - 0x4b, 0xae, 0x2d, 0x5d, 0x62, 0x4a, 0x79, 0x1e, 0x1e, 0x8b, 0xa8, 0x2a, 0x26, 0x5b, 0x2a, 0xe5, - 0x37, 0x52, 0x90, 0x3b, 0x5c, 0x19, 0x1c, 0x42, 0xda, 0x21, 0x3a, 0x19, 0x3a, 0xc2, 0x88, 0xaf, - 0xcc, 0x5b, 0x66, 0x6c, 0xba, 0x0f, 0xc7, 0x4c, 0x5c, 0x15, 0x6a, 0x94, 0xdb, 0x50, 0x0a, 0xbf, - 0x89, 0xb6, 0x81, 0xef, 0x44, 0x71, 0xe5, 0x0e, 0xa0, 0xc9, 0xea, 0x63, 0x4a, 0x7b, 0x29, 0x4d, - 0x6b, 0x2f, 0x7f, 0x2b, 0xc1, 0xe3, 0x97, 0x54, 0x1a, 0xe8, 0xad, 0xb1, 0x45, 0xbe, 0xb6, 0x48, - 0x9d, 0xb2, 0xc9, 0x69, 0x63, 0xcb, 0xbc, 0x05, 0x85, 0x20, 0x7d, 0xbe, 0x45, 0x7e, 0x1b, 0xf7, - 0x0f, 0x71, 0xb8, 0x0f, 0xf6, 0x43, 0xa0, 0xf4, 0x1d, 0x43, 0xe0, 0xeb, 0x00, 0x64, 0xa4, 0x71, - 0xb7, 0x76, 0xf3, 0xe8, 0x13, 0x53, 0xf0, 0x45, 0xdc, 0x6a, 0x8e, 0xc4, 0x21, 0xc8, 0x11, 0xf1, - 0xe4, 0xa0, 0xe3, 0x20, 0x28, 0x30, 0x64, 0x39, 0xd6, 0x11, 0x0d, 0xf3, 0xbc, 0xc9, 0xd8, 0x07, - 0x0f, 0x38, 0xd9, 0x41, 0xef, 0xc0, 0x63, 0x63, 0x85, 0x82, 0xa7, 0x3a, 0x39, 0x6f, 0xbd, 0x70, - 0x25, 0x5c, 0x2f, 0xb8, 0xaa, 0x83, 0xd9, 0x3e, 0x15, 0xce, 0xf6, 0xef, 0x00, 0xf8, 0xe0, 0x00, - 0x8d, 0x30, 0xb6, 0x39, 0x1c, 0xb4, 0x99, 0x07, 0xa4, 0x54, 0x3e, 0x40, 0xb7, 0x21, 0x45, 0x3d, - 0xc9, 0xb5, 0xd3, 0x64, 0x28, 0xa6, 0x9e, 0x10, 0x00, 0x17, 0x38, 0xb7, 0x62, 0x00, 0x9a, 0x04, - 0x68, 0x23, 0xa6, 0x78, 0x23, 0x3c, 0xc5, 0x93, 0x91, 0x50, 0xef, 0xf4, 0xa9, 0x3e, 0x82, 0x14, - 0xdb, 0x79, 0x9a, 0x74, 0xd9, 
0x5f, 0x01, 0x51, 0x2d, 0xd2, 0x67, 0xf4, 0x63, 0x00, 0x9d, 0x10, - 0xdb, 0x38, 0x1d, 0xfa, 0x13, 0xac, 0x4f, 0xf7, 0x9c, 0xaa, 0xcb, 0xb7, 0x73, 0x5d, 0xb8, 0xd0, - 0xaa, 0x2f, 0x1a, 0x70, 0xa3, 0x80, 0x42, 0xe5, 0x00, 0x4a, 0x61, 0x59, 0xb7, 0xbe, 0xe1, 0xdf, - 0x10, 0xae, 0x6f, 0x78, 0xb9, 0x2a, 0xea, 0x1b, 0xaf, 0x3a, 0x4a, 0xf0, 0x5f, 0x1f, 0x6c, 0xa0, - 0xfc, 0x24, 0x0e, 0x85, 0xa0, 0xe3, 0xfd, 0xe7, 0x95, 0x20, 0xca, 0xcf, 0x25, 0xc8, 0x7a, 0xcb, - 0x0f, 0xff, 0x07, 0x09, 0xfd, 0x38, 0xe2, 0xd6, 0x8b, 0x07, 0x7f, 0x5e, 0xf0, 0xdf, 0x44, 0x09, - 0xef, 0x37, 0xd1, 0x1d, 0x2f, 0xfd, 0x45, 0x01, 0x22, 0x41, 0x5b, 0x0b, 0xaf, 0x72, 0xb3, 0xfd, - 0x1d, 0xc8, 0x79, 0xa7, 0x97, 0x36, 0x1d, 0x2e, 0x70, 0x24, 0x89, 0x33, 0x24, 0x60, 0xbf, 0x55, - 0x48, 0x59, 0xe6, 0x87, 0xe2, 0xcf, 0x48, 0x42, 0xe5, 0x03, 0xa5, 0x0d, 0x4b, 0x63, 0x47, 0x1f, - 0xdd, 0x81, 0x8c, 0x35, 0x3c, 0xd5, 0x5c, 0xe7, 0x18, 0x83, 0xd7, 0xdc, 0x72, 0x76, 0x78, 0xda, - 0x33, 0x5a, 0xf7, 0xf1, 0x85, 0xfb, 0x31, 0xd6, 0xf0, 0xf4, 0x3e, 0xf7, 0x21, 0x3e, 0x4b, 0x3c, - 0x38, 0xcb, 0x2f, 0x25, 0xc8, 0xba, 0x67, 0x02, 0xfd, 0x1f, 0xe4, 0xbc, 0xb0, 0xe2, 0xfd, 0xda, - 0x8c, 0x8c, 0x47, 0x42, 0xbf, 0x2f, 0x82, 0xaa, 0xee, 0x3f, 0x59, 0xa3, 0xad, 0x75, 0x7a, 0x3a, - 0xf7, 0xa5, 0x52, 0xd8, 0x66, 0x3c, 0xf0, 0xb0, 0x78, 0xbc, 0x7b, 0xf7, 0x5e, 0x4f, 0x3f, 0x53, - 0xf3, 0x4c, 0x66, 0xb7, 0x4d, 0x07, 0xa2, 0xb2, 0xfb, 0xbb, 0x04, 0xf2, 0xf8, 0x89, 0xfd, 0xce, - 0x5f, 0x37, 0x99, 0xe6, 0x12, 0x53, 0xd2, 0x1c, 0xda, 0x82, 0x15, 0x8f, 0x43, 0x73, 0x8c, 0xb3, - 0x81, 0x4e, 0x86, 0x36, 0x16, 0x80, 0x24, 0xf2, 0x5e, 0x1d, 0xbb, 0x6f, 0x26, 0x57, 0x9d, 0x7a, - 0xc4, 0x55, 0x7f, 0x1c, 0x87, 0x7c, 0x00, 0x1e, 0x45, 0xff, 0x1b, 0x08, 0x46, 0xa5, 0x29, 0x99, - 0x21, 0xc0, 0xeb, 0xff, 0xa6, 0x0c, 0x9b, 0x29, 0xbe, 0xb8, 0x99, 0xa2, 0x40, 0x68, 0x17, 0x6d, - 0x4d, 0x2e, 0x8c, 0xb6, 0xbe, 0x00, 0x88, 0x98, 0x44, 0xef, 0x69, 0xe7, 0x26, 0x31, 0x06, 0x67, - 0x1a, 0x77, 0x43, 0x1e, 0x3a, 0x64, 0xf6, 0xe6, 0x01, 0x7b, 0x71, 
0xc4, 0x3c, 0xf2, 0xa7, 0x12, - 0x64, 0xbd, 0xb2, 0x7b, 0xd1, 0x9f, 0x98, 0x57, 0x21, 0x2d, 0x2a, 0x4b, 0xfe, 0x17, 0x53, 0x8c, - 0xa6, 0xc2, 0xca, 0x15, 0xc8, 0xf6, 0x31, 0xd1, 0x59, 0x1c, 0xe4, 0x59, 0xcd, 0x1b, 0xdf, 0x7c, - 0x0d, 0xf2, 0x81, 0x1f, 0xc0, 0x34, 0x34, 0x1e, 0xd4, 0xdf, 0x96, 0x63, 0x95, 0xcc, 0x27, 0x9f, - 0x6f, 0x24, 0x0e, 0xf0, 0x87, 0xf4, 0x34, 0xab, 0xf5, 0x5a, 0xa3, 0x5e, 0xbb, 0x2f, 0x4b, 0x95, - 0xfc, 0x27, 0x9f, 0x6f, 0x64, 0x54, 0xcc, 0x10, 0xc5, 0x9b, 0xf7, 0x61, 0x69, 0x6c, 0x63, 0xc2, - 0x65, 0x0b, 0x82, 0xd2, 0xdd, 0x93, 0xa3, 0xfd, 0xdd, 0x5a, 0xb5, 0x59, 0xd7, 0x1e, 0x1c, 0x36, - 0xeb, 0xb2, 0x84, 0x1e, 0x83, 0x95, 0xfd, 0xdd, 0xff, 0x6f, 0x34, 0xb5, 0xda, 0xfe, 0x6e, 0xfd, - 0xa0, 0xa9, 0x55, 0x9b, 0xcd, 0x6a, 0xed, 0xbe, 0x1c, 0xdf, 0xfe, 0x3c, 0x0f, 0xc9, 0xea, 0x4e, - 0x6d, 0x17, 0xd5, 0x20, 0xc9, 0xa0, 0x90, 0x4b, 0x6f, 0x80, 0x55, 0x2e, 0xc7, 0x86, 0xd1, 0x3d, - 0x48, 0x31, 0x94, 0x04, 0x5d, 0x7e, 0x25, 0xac, 0x32, 0x03, 0x2c, 0xa6, 0x1f, 0xc3, 0x4e, 0xe4, - 0xa5, 0x77, 0xc4, 0x2a, 0x97, 0x63, 0xc7, 0x68, 0x1f, 0x32, 0x6e, 0x93, 0x3c, 0xeb, 0xe2, 0x56, - 0x65, 0x26, 0xa0, 0x4b, 0x97, 0xc6, 0xc1, 0x86, 0xcb, 0xaf, 0x8f, 0x55, 0x66, 0xa0, 0xca, 0x68, - 0x17, 0xd2, 0xa2, 0x1d, 0x9d, 0x71, 0x23, 0xac, 0x32, 0x0b, 0x27, 0x46, 0x2a, 0xe4, 0x7c, 0x18, - 0x67, 0xf6, 0xa5, 0xb8, 0xca, 0x1c, 0x80, 0x39, 0x7a, 0x17, 0x8a, 0xe1, 0x56, 0x77, 0xbe, 0x5b, - 0x67, 0x95, 0x39, 0x11, 0x69, 0xaa, 0x3f, 0xdc, 0xf7, 0xce, 0x77, 0x0b, 0xad, 0x32, 0x27, 0x40, - 0x8d, 0xde, 0x87, 0xe5, 0xc9, 0xbe, 0x74, 0xfe, 0x4b, 0x69, 0x95, 0x05, 0x20, 0x6b, 0xd4, 0x07, - 0x34, 0xa5, 0x9f, 0x5d, 0xe0, 0x8e, 0x5a, 0x65, 0x11, 0x04, 0x1b, 0xb5, 0x61, 0x69, 0xbc, 0x49, - 0x9c, 0xf7, 0xce, 0x5a, 0x65, 0x6e, 0x34, 0x9b, 0xcf, 0x12, 0x6e, 0x2e, 0xe7, 0xbd, 0xc3, 0x56, - 0x99, 0x1b, 0xdc, 0x46, 0x27, 0x00, 0x81, 0xfe, 0x70, 0x8e, 0x3b, 0x6d, 0x95, 0x79, 0x60, 0x6e, - 0x64, 0xc1, 0xca, 0xb4, 0xc6, 0x71, 0x91, 0x2b, 0x6e, 0x95, 0x85, 0xd0, 0x6f, 0xea, 0xcf, 0xe1, - 0x16, 
0x70, 0xbe, 0x2b, 0x6f, 0x95, 0x39, 0x61, 0xf0, 0x9d, 0xea, 0x17, 0x5f, 0xaf, 0x49, 0x5f, - 0x7e, 0xbd, 0x26, 0xfd, 0xed, 0xeb, 0x35, 0xe9, 0xd3, 0x6f, 0xd6, 0x62, 0x5f, 0x7e, 0xb3, 0x16, - 0xfb, 0xcb, 0x37, 0x6b, 0xb1, 0x1f, 0x3c, 0x7b, 0x66, 0x90, 0xee, 0xf0, 0x74, 0xb3, 0x65, 0xf6, - 0xb7, 0x5a, 0x66, 0x1f, 0x93, 0xd3, 0x0e, 0xf1, 0x1f, 0xfc, 0x9b, 0xcb, 0xa7, 0x69, 0x96, 0x41, - 0x6f, 0xfd, 0x33, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd0, 0x90, 0x6e, 0xd9, 0x2c, 0x00, 0x00, + 0x67, 0x95, 0x0a, 0xc9, 0x22, 0xac, 0xa8, 0x4a, 0x16, 0x2c, 0xc2, 0x22, 0x8b, 0x6c, 0xf2, 0x17, + 0x64, 0x95, 0x6c, 0xb2, 0x60, 0x91, 0xaa, 0xb0, 0xcc, 0x8a, 0xa4, 0x60, 0xc7, 0x36, 0x8b, 0x6c, + 0x53, 0xf7, 0xd1, 0x2f, 0x49, 0x6d, 0x49, 0x03, 0x59, 0xa4, 0x92, 0xdd, 0xbd, 0xe7, 0x9e, 0x73, + 0x6e, 0xf7, 0xe9, 0x73, 0xcf, 0xe3, 0xd7, 0x17, 0x1e, 0x27, 0x78, 0xd0, 0xc6, 0x76, 0xdf, 0x18, + 0x90, 0x4d, 0xfd, 0xa4, 0x65, 0x6c, 0x92, 0x73, 0x0b, 0x3b, 0x1b, 0x96, 0x6d, 0x12, 0x13, 0x2d, + 0xf9, 0x8b, 0x1b, 0x74, 0xb1, 0xf2, 0x44, 0x80, 0xbb, 0x65, 0x9f, 0x5b, 0xc4, 0xdc, 0xb4, 0x6c, + 0xd3, 0xec, 0x70, 0xfe, 0xca, 0xd5, 0xc9, 0xe5, 0x07, 0xf8, 0x5c, 0x68, 0x0b, 0x09, 0xb3, 0x5d, + 0x36, 0x2d, 0xdd, 0xd6, 0xfb, 0xee, 0xf2, 0xfa, 0xc4, 0xf2, 0x99, 0xde, 0x33, 0xda, 0x3a, 0x31, + 0x6d, 0xc1, 0x71, 0xed, 0xd4, 0x34, 0x4f, 0x7b, 0x78, 0x93, 0xcd, 0x4e, 0x86, 0x9d, 0x4d, 0x62, + 0xf4, 0xb1, 0x43, 0xf4, 0xbe, 0x25, 0x18, 0x56, 0x4f, 0xcd, 0x53, 0x93, 0x0d, 0x37, 0xe9, 0x88, + 0x53, 0x95, 0x3f, 0xe4, 0x20, 0xa3, 0xe2, 0x0f, 0x86, 0xd8, 0x21, 0x68, 0x0b, 0x92, 0xb8, 0xd5, + 0x35, 0xcb, 0xd2, 0xba, 0x74, 0x3d, 0xbf, 0x75, 0x75, 0x63, 0xec, 0x05, 0x37, 0x04, 0x5f, 0xbd, + 0xd5, 0x35, 0x1b, 0x31, 0x95, 0xf1, 0xa2, 0x5b, 0x90, 0xea, 0xf4, 0x86, 0x4e, 0xb7, 0x1c, 0x67, + 0x42, 0x4f, 0x44, 0x09, 0xdd, 0xa5, 0x4c, 0x8d, 0x98, 0xca, 0xb9, 0xe9, 0x56, 0xc6, 0xa0, 0x63, + 0x96, 0x13, 0x17, 0x6f, 0xb5, 0x33, 0xe8, 0xb0, 0xad, 0x28, 0x2f, 0xda, 0x06, 0x30, 0x06, 0x06, + 0xd1, 0x5a, 0x5d, 0xdd, 0x18, 0x94, 0x53, 0x4c, 
0xf2, 0xc9, 0x68, 0x49, 0x83, 0xd4, 0x28, 0x63, + 0x23, 0xa6, 0xe6, 0x0c, 0x77, 0x42, 0x1f, 0xf7, 0x83, 0x21, 0xb6, 0xcf, 0xcb, 0xe9, 0x8b, 0x1f, + 0xf7, 0x2d, 0xca, 0x44, 0x1f, 0x97, 0x71, 0xa3, 0xd7, 0x21, 0xdb, 0xea, 0xe2, 0xd6, 0x03, 0x8d, + 0x8c, 0xca, 0x59, 0x26, 0x79, 0x2d, 0x4a, 0xb2, 0x46, 0xf9, 0x9a, 0xa3, 0x46, 0x4c, 0xcd, 0xb4, + 0xf8, 0x10, 0xbd, 0x0a, 0xe9, 0x96, 0xd9, 0xef, 0x1b, 0xa4, 0x9c, 0x67, 0xb2, 0x6b, 0x91, 0xb2, + 0x8c, 0xab, 0x11, 0x53, 0x05, 0x3f, 0xda, 0x87, 0x52, 0xcf, 0x70, 0x88, 0xe6, 0x0c, 0x74, 0xcb, + 0xe9, 0x9a, 0xc4, 0x29, 0x17, 0x98, 0x86, 0xa7, 0xa3, 0x34, 0xec, 0x19, 0x0e, 0x39, 0x72, 0x99, + 0x1b, 0x31, 0xb5, 0xd8, 0x0b, 0x12, 0xa8, 0x3e, 0xb3, 0xd3, 0xc1, 0xb6, 0xa7, 0xb0, 0x5c, 0xbc, + 0x58, 0xdf, 0x01, 0xe5, 0x76, 0xe5, 0xa9, 0x3e, 0x33, 0x48, 0x40, 0x3f, 0x80, 0x95, 0x9e, 0xa9, + 0xb7, 0x3d, 0x75, 0x5a, 0xab, 0x3b, 0x1c, 0x3c, 0x28, 0x97, 0x98, 0xd2, 0xe7, 0x22, 0x1f, 0xd2, + 0xd4, 0xdb, 0xae, 0x8a, 0x1a, 0x15, 0x68, 0xc4, 0xd4, 0xe5, 0xde, 0x38, 0x11, 0xbd, 0x0b, 0xab, + 0xba, 0x65, 0xf5, 0xce, 0xc7, 0xb5, 0x2f, 0x31, 0xed, 0x37, 0xa2, 0xb4, 0x57, 0xa9, 0xcc, 0xb8, + 0x7a, 0xa4, 0x4f, 0x50, 0x51, 0x13, 0x64, 0xcb, 0xc6, 0x96, 0x6e, 0x63, 0xcd, 0xb2, 0x4d, 0xcb, + 0x74, 0xf4, 0x5e, 0x59, 0x66, 0xba, 0x9f, 0x8d, 0xd2, 0x7d, 0xc8, 0xf9, 0x0f, 0x05, 0x7b, 0x23, + 0xa6, 0x2e, 0x59, 0x61, 0x12, 0xd7, 0x6a, 0xb6, 0xb0, 0xe3, 0xf8, 0x5a, 0x97, 0x67, 0x69, 0x65, + 0xfc, 0x61, 0xad, 0x21, 0x12, 0xaa, 0x43, 0x1e, 0x8f, 0xa8, 0xb8, 0x76, 0x66, 0x12, 0x5c, 0x46, + 0x4c, 0xa1, 0x12, 0x79, 0x42, 0x19, 0xeb, 0x7d, 0x93, 0xe0, 0x46, 0x4c, 0x05, 0xec, 0xcd, 0x90, + 0x0e, 0x97, 0xce, 0xb0, 0x6d, 0x74, 0xce, 0x99, 0x1a, 0x8d, 0xad, 0x38, 0x86, 0x39, 0x28, 0xaf, + 0x30, 0x85, 0xcf, 0x47, 0x29, 0xbc, 0xcf, 0x84, 0xa8, 0x8a, 0xba, 0x2b, 0xd2, 0x88, 0xa9, 0x2b, + 0x67, 0x93, 0x64, 0xea, 0x62, 0x1d, 0x63, 0xa0, 0xf7, 0x8c, 0x87, 0x58, 0x3b, 0xe9, 0x99, 0xad, + 0x07, 0xe5, 0xd5, 0x8b, 0x5d, 0xec, 0xae, 0xe0, 0xde, 0xa6, 0xcc, 0xd4, 0xc5, 0x3a, 
0x41, 0xc2, + 0x76, 0x06, 0x52, 0x67, 0x7a, 0x6f, 0x88, 0x77, 0x93, 0xd9, 0xa4, 0x9c, 0xda, 0x4d, 0x66, 0x33, + 0x72, 0x76, 0x37, 0x99, 0xcd, 0xc9, 0xb0, 0x9b, 0xcc, 0x82, 0x9c, 0x57, 0x9e, 0x85, 0x7c, 0x20, + 0x30, 0xa1, 0x32, 0x64, 0xfa, 0xd8, 0x71, 0xf4, 0x53, 0xcc, 0xe2, 0x58, 0x4e, 0x75, 0xa7, 0x4a, + 0x09, 0x0a, 0xc1, 0x60, 0xa4, 0x7c, 0x22, 0x79, 0x92, 0x34, 0xce, 0x50, 0xc9, 0x33, 0x6c, 0x33, + 0x73, 0x08, 0x49, 0x31, 0x45, 0x4f, 0x41, 0x91, 0xbd, 0x8a, 0xe6, 0xae, 0xd3, 0x60, 0x97, 0x54, + 0x0b, 0x8c, 0x78, 0x5f, 0x30, 0x5d, 0x83, 0xbc, 0xb5, 0x65, 0x79, 0x2c, 0x09, 0xc6, 0x02, 0xd6, + 0x96, 0xe5, 0x32, 0x3c, 0x09, 0x05, 0xfa, 0xde, 0x1e, 0x47, 0x92, 0x6d, 0x92, 0xa7, 0x34, 0xc1, + 0xa2, 0xfc, 0x29, 0x0e, 0xf2, 0x78, 0x00, 0x43, 0xaf, 0x42, 0x92, 0xc6, 0x72, 0x11, 0x96, 0x2b, + 0x1b, 0x3c, 0xd0, 0x6f, 0xb8, 0x81, 0x7e, 0xa3, 0xe9, 0x06, 0xfa, 0xed, 0xec, 0xe7, 0x5f, 0x5e, + 0x8b, 0x7d, 0xf2, 0xd7, 0x6b, 0x92, 0xca, 0x24, 0xd0, 0x15, 0x1a, 0xb6, 0x74, 0x63, 0xa0, 0x19, + 0x6d, 0xf6, 0xc8, 0x39, 0x1a, 0x93, 0x74, 0x63, 0xb0, 0xd3, 0x46, 0x7b, 0x20, 0xb7, 0xcc, 0x81, + 0x83, 0x07, 0xce, 0xd0, 0xd1, 0x78, 0xaa, 0x11, 0xc1, 0x38, 0x14, 0x52, 0x79, 0xc2, 0xab, 0xb9, + 0x9c, 0x87, 0x8c, 0x51, 0x5d, 0x6a, 0x85, 0x09, 0xe8, 0x2e, 0x80, 0x97, 0x8f, 0x9c, 0x72, 0x72, + 0x3d, 0x71, 0x3d, 0xbf, 0xb5, 0x3e, 0xf1, 0xc1, 0xef, 0xbb, 0x2c, 0xc7, 0x56, 0x5b, 0x27, 0x78, + 0x3b, 0x49, 0x1f, 0x57, 0x0d, 0x48, 0xa2, 0x67, 0x60, 0x49, 0xb7, 0x2c, 0xcd, 0x21, 0x3a, 0xc1, + 0xda, 0xc9, 0x39, 0xc1, 0x0e, 0x8b, 0xf3, 0x05, 0xb5, 0xa8, 0x5b, 0xd6, 0x11, 0xa5, 0x6e, 0x53, + 0x22, 0x7a, 0x1a, 0x4a, 0x34, 0xa6, 0x1b, 0x7a, 0x4f, 0xeb, 0x62, 0xe3, 0xb4, 0x4b, 0x58, 0x3c, + 0x4f, 0xa8, 0x45, 0x41, 0x6d, 0x30, 0xa2, 0xd2, 0xf6, 0xbe, 0x38, 0x8b, 0xe7, 0x08, 0x41, 0xb2, + 0xad, 0x13, 0x9d, 0x59, 0xb2, 0xa0, 0xb2, 0x31, 0xa5, 0x59, 0x3a, 0xe9, 0x0a, 0xfb, 0xb0, 0x31, + 0xba, 0x0c, 0x69, 0xa1, 0x36, 0xc1, 0xd4, 0x8a, 0x19, 0x5a, 0x85, 0x94, 0x65, 0x9b, 0x67, 0x98, + 0x7d, 0xba, 0xac, 0xca, 
0x27, 0x8a, 0x0a, 0xa5, 0x70, 0xec, 0x47, 0x25, 0x88, 0x93, 0x91, 0xd8, + 0x25, 0x4e, 0x46, 0xe8, 0x25, 0x48, 0x52, 0x43, 0xb2, 0x3d, 0x4a, 0x53, 0xb2, 0x9d, 0x90, 0x6b, + 0x9e, 0x5b, 0x58, 0x65, 0x9c, 0xca, 0x12, 0x14, 0x43, 0x39, 0x41, 0xb9, 0x0c, 0xab, 0xd3, 0x42, + 0xbc, 0xf2, 0x73, 0xc9, 0x5b, 0x08, 0xc5, 0x6a, 0x74, 0x0b, 0xb2, 0x5e, 0x90, 0xe7, 0x9e, 0x73, + 0x65, 0x62, 0x5f, 0x97, 0x59, 0xf5, 0x58, 0xa9, 0xcb, 0xd0, 0x2f, 0xd0, 0xd5, 0x45, 0x4a, 0x2f, + 0xa8, 0x19, 0xdd, 0xb2, 0x1a, 0xba, 0xd3, 0xa5, 0x0e, 0x4e, 0x97, 0xc6, 0x1c, 0x5c, 0xb7, 0x5c, + 0x07, 0x57, 0xde, 0x83, 0x72, 0x54, 0x84, 0x0f, 0x98, 0x54, 0x62, 0x72, 0xae, 0x49, 0x2f, 0x43, + 0xba, 0x63, 0xda, 0x7d, 0x9d, 0xb0, 0xdd, 0x8a, 0xaa, 0x98, 0x51, 0x53, 0xf3, 0x68, 0x9f, 0x60, + 0x64, 0x3e, 0x51, 0x34, 0xb8, 0x12, 0x19, 0xe5, 0xa9, 0x88, 0x31, 0x68, 0x63, 0x6e, 0xf8, 0xa2, + 0xca, 0x27, 0xbe, 0x22, 0xfe, 0x36, 0x7c, 0x42, 0xb7, 0x75, 0x98, 0x31, 0x98, 0xfe, 0x9c, 0x2a, + 0x66, 0xca, 0xa7, 0x09, 0xb8, 0x3c, 0x3d, 0xd6, 0xa3, 0x75, 0x28, 0xf4, 0xf5, 0x91, 0x46, 0x46, + 0xc2, 0x31, 0x25, 0xe6, 0x1a, 0xd0, 0xd7, 0x47, 0xcd, 0x11, 0xf7, 0x4a, 0x19, 0x12, 0x64, 0xe4, + 0x94, 0xe3, 0xeb, 0x89, 0xeb, 0x05, 0x95, 0x0e, 0xd1, 0x31, 0x2c, 0xf7, 0xcc, 0x96, 0xde, 0xd3, + 0x7a, 0xba, 0x43, 0x34, 0x51, 0x04, 0xf0, 0x63, 0xf6, 0xd4, 0xc4, 0xd7, 0xe0, 0x51, 0x1b, 0xb7, + 0xf9, 0x17, 0xa7, 0x21, 0x49, 0x9c, 0x90, 0x25, 0xa6, 0x63, 0x4f, 0x77, 0x9d, 0x01, 0xdd, 0x81, + 0x7c, 0xdf, 0x70, 0x4e, 0x70, 0x57, 0x3f, 0x33, 0x4c, 0x5b, 0x9c, 0xb7, 0x49, 0xb7, 0x7a, 0xd3, + 0xe7, 0x11, 0x9a, 0x82, 0x62, 0x81, 0x4f, 0x92, 0x0a, 0x79, 0xb9, 0x1b, 0x6f, 0xd2, 0x0b, 0xc7, + 0x9b, 0x97, 0x60, 0x75, 0x80, 0x47, 0x44, 0xf3, 0x4f, 0x34, 0x77, 0xa4, 0x0c, 0x33, 0x3d, 0xa2, + 0x6b, 0x5e, 0x0c, 0x70, 0x98, 0x4f, 0x3d, 0xc7, 0xb2, 0xa5, 0x65, 0x3a, 0xd8, 0xd6, 0xf4, 0x76, + 0xdb, 0xc6, 0x8e, 0xc3, 0x0a, 0xac, 0x02, 0x4b, 0x81, 0x8c, 0x5e, 0xe5, 0x64, 0xe5, 0xcf, 0xc1, + 0x4f, 0x13, 0xce, 0x8e, 0xc2, 0xf0, 0x92, 0x6f, 0xf8, 0x23, 
0x58, 0x15, 0xf2, 0xed, 0x90, 0xed, + 0x79, 0x95, 0xfa, 0xf8, 0xe4, 0x09, 0x1c, 0xb7, 0x39, 0x72, 0xc5, 0xa3, 0xcd, 0x9e, 0x78, 0x34, + 0xb3, 0x23, 0x48, 0x32, 0xa3, 0x24, 0x79, 0x10, 0xa2, 0xe3, 0x7f, 0xb3, 0x4f, 0x41, 0x23, 0x81, + 0xf3, 0xc1, 0x90, 0x16, 0x4e, 0x8e, 0xf1, 0x10, 0x97, 0x73, 0x3c, 0x12, 0x70, 0xd2, 0x91, 0xf1, + 0x10, 0xa3, 0xff, 0x81, 0x12, 0x0d, 0xae, 0x9a, 0x6d, 0x9a, 0x84, 0xef, 0x0b, 0x4c, 0x53, 0x81, + 0x52, 0x55, 0xd3, 0x24, 0x74, 0x47, 0xe5, 0xa3, 0x04, 0x2c, 0x4f, 0x54, 0x2c, 0x9e, 0x7d, 0xa4, + 0xa9, 0xf6, 0x89, 0x4f, 0xb5, 0x4f, 0x62, 0x61, 0xfb, 0x08, 0x97, 0x49, 0xce, 0x76, 0x99, 0xd4, + 0x77, 0xe8, 0x32, 0xe9, 0x47, 0x73, 0x99, 0x7f, 0xe9, 0xb9, 0xfa, 0x95, 0x04, 0x95, 0xe8, 0x32, + 0x6f, 0xea, 0xe7, 0x78, 0x1e, 0x96, 0xbd, 0x47, 0xf1, 0xd4, 0xf3, 0xf8, 0x2a, 0x7b, 0x0b, 0xae, + 0xb3, 0x44, 0x25, 0xd3, 0xa7, 0xa1, 0x34, 0x56, 0x84, 0xf2, 0x13, 0x51, 0x3c, 0x0b, 0xee, 0xaf, + 0xfc, 0x34, 0xe1, 0x25, 0xb8, 0x50, 0xa5, 0x38, 0xe5, 0xd0, 0xbf, 0x05, 0x2b, 0x6d, 0xdc, 0x32, + 0xda, 0x8f, 0x7a, 0xe6, 0x97, 0x85, 0xf4, 0x7f, 0x8f, 0xfc, 0xa4, 0x97, 0xfc, 0x12, 0x20, 0xab, + 0x62, 0xc7, 0xa2, 0x85, 0x1f, 0xda, 0x86, 0x1c, 0x1e, 0xb5, 0xb0, 0x45, 0xdc, 0x5a, 0x79, 0x7a, + 0x2f, 0xc2, 0xb9, 0xeb, 0x2e, 0x27, 0xed, 0xc4, 0x3d, 0x31, 0x74, 0x53, 0x80, 0x0d, 0xd1, 0xb8, + 0x81, 0x10, 0x0f, 0xa2, 0x0d, 0x2f, 0xbb, 0x68, 0x43, 0x22, 0xb2, 0x91, 0xe6, 0x52, 0x63, 0x70, + 0xc3, 0x4d, 0x01, 0x37, 0x24, 0x67, 0x6c, 0x16, 0xc2, 0x1b, 0x6a, 0x21, 0xbc, 0x21, 0x3d, 0xe3, + 0x35, 0x23, 0x00, 0x87, 0x97, 0x5d, 0xc0, 0x21, 0x33, 0xe3, 0x89, 0xc7, 0x10, 0x87, 0x37, 0x02, + 0x88, 0x43, 0x8e, 0x89, 0xae, 0x47, 0x8a, 0x4e, 0x81, 0x1c, 0x5e, 0xf3, 0x20, 0x87, 0x42, 0x24, + 0x5c, 0x21, 0x84, 0xc7, 0x31, 0x87, 0x83, 0x09, 0xcc, 0x81, 0x63, 0x04, 0xcf, 0x44, 0xaa, 0x98, + 0x01, 0x3a, 0x1c, 0x4c, 0x80, 0x0e, 0xa5, 0x19, 0x0a, 0x67, 0xa0, 0x0e, 0x3f, 0x9c, 0x8e, 0x3a, + 0x44, 0xe3, 0x02, 0xe2, 0x31, 0xe7, 0x83, 0x1d, 0xb4, 0x08, 0xd8, 0x41, 0x8e, 0x6c, 0x91, 0xb9, + 
0xfa, 0xb9, 0x71, 0x87, 0xe3, 0x29, 0xb8, 0x03, 0x47, 0x08, 0xae, 0x47, 0x2a, 0x9f, 0x03, 0x78, + 0x38, 0x9e, 0x02, 0x3c, 0xa0, 0x99, 0x6a, 0x67, 0x22, 0x0f, 0x77, 0xc3, 0xc8, 0xc3, 0x4a, 0x44, + 0xf1, 0xea, 0x9f, 0xf6, 0x08, 0xe8, 0xe1, 0x24, 0x0a, 0x7a, 0xe0, 0xf0, 0xc0, 0x0b, 0x91, 0x1a, + 0x17, 0xc0, 0x1e, 0x0e, 0x26, 0xb0, 0x87, 0x4b, 0x33, 0x3c, 0x6d, 0x7e, 0xf0, 0x21, 0x25, 0xa7, + 0x77, 0x93, 0xd9, 0xac, 0x9c, 0xe3, 0xb0, 0xc3, 0x6e, 0x32, 0x9b, 0x97, 0x0b, 0xca, 0x73, 0xb4, + 0x82, 0x19, 0x8b, 0x73, 0xb4, 0xe5, 0xc0, 0xb6, 0x6d, 0xda, 0x02, 0x46, 0xe0, 0x13, 0xe5, 0x3a, + 0x6d, 0x46, 0xfd, 0x98, 0x76, 0x01, 0x50, 0xc1, 0x9a, 0xbf, 0x40, 0x1c, 0x53, 0x7e, 0x27, 0xf9, + 0xb2, 0x0c, 0xaa, 0x08, 0x36, 0xb2, 0x39, 0xd1, 0xc8, 0x06, 0xe0, 0x8b, 0x78, 0x18, 0xbe, 0x98, + 0xd5, 0xb8, 0xa1, 0x1b, 0xb0, 0xcc, 0x12, 0x26, 0x07, 0x39, 0x44, 0x5a, 0x4a, 0xb2, 0xb4, 0xb4, + 0x44, 0x17, 0xb8, 0x75, 0x78, 0x7e, 0x7a, 0x11, 0x56, 0x02, 0xbc, 0x5e, 0xaf, 0xc8, 0xdb, 0x74, + 0xd9, 0xe3, 0xae, 0xf2, 0xa6, 0x51, 0xf9, 0xa3, 0xe4, 0x5b, 0xc8, 0x87, 0x34, 0xa6, 0xa1, 0x0f, + 0xd2, 0x77, 0x84, 0x3e, 0xc4, 0x1f, 0x19, 0x7d, 0x08, 0xf6, 0xbe, 0x89, 0x50, 0xef, 0xab, 0xfc, + 0x43, 0xf2, 0xbf, 0x89, 0x87, 0x25, 0xb4, 0xcc, 0x36, 0x16, 0xcd, 0x26, 0x1b, 0xd3, 0x92, 0xa4, + 0x67, 0x9e, 0x8a, 0x96, 0x92, 0x0e, 0x29, 0x97, 0x97, 0x78, 0x72, 0x22, 0xaf, 0x78, 0x7d, 0x2a, + 0x4f, 0xfc, 0xa2, 0x4f, 0x95, 0x21, 0xf1, 0x00, 0x73, 0x5c, 0xba, 0xa0, 0xd2, 0x21, 0xe5, 0x63, + 0xce, 0x27, 0x12, 0x38, 0x9f, 0xa0, 0x57, 0x21, 0xc7, 0xfe, 0x2a, 0x68, 0xa6, 0xe5, 0x08, 0x2c, + 0x3a, 0x54, 0xda, 0xf0, 0x5f, 0x0b, 0x1b, 0x87, 0x94, 0xe7, 0xc0, 0x72, 0xd4, 0xac, 0x25, 0x46, + 0x81, 0x8a, 0x23, 0x17, 0xaa, 0x38, 0xae, 0x42, 0x8e, 0x3e, 0xbd, 0x63, 0xe9, 0x2d, 0xcc, 0xea, + 0xf4, 0x9c, 0xea, 0x13, 0x94, 0xdf, 0xc6, 0x61, 0x69, 0x2c, 0xd1, 0x4c, 0x7d, 0x77, 0xd7, 0x25, + 0xe3, 0x01, 0x6c, 0x65, 0x3e, 0x7b, 0xac, 0x01, 0x9c, 0xea, 0x8e, 0xf6, 0xa1, 0x3e, 0x20, 0xb8, + 0x2d, 0x8c, 0x12, 0xa0, 0xa0, 0x0a, 
0x64, 0xe9, 0x6c, 0xe8, 0xe0, 0xb6, 0x80, 0x79, 0xbc, 0x39, + 0x6a, 0x40, 0x1a, 0x9f, 0xe1, 0x01, 0x71, 0xca, 0x19, 0xf6, 0xd9, 0x2f, 0x4f, 0x76, 0xd5, 0x74, + 0x79, 0xbb, 0x4c, 0x3f, 0xf6, 0x37, 0x5f, 0x5e, 0x93, 0x39, 0xf7, 0x0b, 0x66, 0xdf, 0x20, 0xb8, + 0x6f, 0x91, 0x73, 0x55, 0xc8, 0x87, 0xad, 0x90, 0x1d, 0xb3, 0x02, 0x03, 0x1c, 0x0b, 0x2e, 0x4a, + 0x40, 0x6d, 0x6a, 0x98, 0xb6, 0x41, 0xce, 0xd5, 0x62, 0x1f, 0xf7, 0x2d, 0xd3, 0xec, 0x69, 0xfc, + 0x8c, 0x57, 0xa1, 0x14, 0xce, 0xab, 0xe8, 0x29, 0x28, 0xda, 0x98, 0xe8, 0xc6, 0x40, 0x0b, 0x15, + 0xc1, 0x05, 0x4e, 0xe4, 0x67, 0x6a, 0x37, 0x99, 0x95, 0xe4, 0xf8, 0x6e, 0x32, 0x1b, 0x97, 0x13, + 0xca, 0x21, 0x5c, 0x9a, 0x9a, 0x57, 0xd1, 0x2b, 0x90, 0xf3, 0x53, 0xb2, 0xc4, 0xde, 0xf6, 0x02, + 0x44, 0xc7, 0xe7, 0x55, 0x7e, 0x2f, 0xf9, 0x2a, 0xc3, 0x18, 0x51, 0x1d, 0xd2, 0x36, 0x76, 0x86, + 0x3d, 0x0e, 0xca, 0x94, 0xb6, 0x5e, 0x9c, 0x2f, 0x23, 0x53, 0xea, 0xb0, 0x47, 0x54, 0x21, 0xac, + 0xbc, 0x0b, 0x69, 0x4e, 0x41, 0x79, 0xc8, 0x1c, 0xef, 0xdf, 0xdb, 0x3f, 0x78, 0x7b, 0x5f, 0x8e, + 0x21, 0x80, 0x74, 0xb5, 0x56, 0xab, 0x1f, 0x36, 0x65, 0x09, 0xe5, 0x20, 0x55, 0xdd, 0x3e, 0x50, + 0x9b, 0x72, 0x9c, 0x92, 0xd5, 0xfa, 0x6e, 0xbd, 0xd6, 0x94, 0x13, 0x68, 0x19, 0x8a, 0x7c, 0xac, + 0xdd, 0x3d, 0x50, 0xdf, 0xac, 0x36, 0xe5, 0x64, 0x80, 0x74, 0x54, 0xdf, 0xbf, 0x53, 0x57, 0xe5, + 0x94, 0xf2, 0xbf, 0x70, 0x25, 0x32, 0x87, 0xfb, 0xf8, 0x8e, 0x14, 0xc0, 0x77, 0x94, 0x4f, 0xe3, + 0xb4, 0xa9, 0x89, 0x4a, 0xcc, 0x68, 0x77, 0xec, 0xc5, 0xb7, 0x16, 0xc8, 0xea, 0x63, 0x6f, 0x4f, + 0xfb, 0x18, 0x1b, 0x77, 0x30, 0x69, 0x75, 0x79, 0xa1, 0xc0, 0x23, 0x50, 0x51, 0x2d, 0x0a, 0x2a, + 0x13, 0x72, 0x38, 0xdb, 0xfb, 0xb8, 0x45, 0x34, 0xee, 0x44, 0x0e, 0x6b, 0x26, 0x72, 0x94, 0x8d, + 0x52, 0x8f, 0x38, 0x51, 0x79, 0x6f, 0x21, 0x5b, 0xe6, 0x20, 0xa5, 0xd6, 0x9b, 0xea, 0x3b, 0x72, + 0x02, 0x21, 0x28, 0xb1, 0xa1, 0x76, 0xb4, 0x5f, 0x3d, 0x3c, 0x6a, 0x1c, 0x50, 0x5b, 0xae, 0xc0, + 0x92, 0x6b, 0x4b, 0x97, 0x98, 0x52, 0x6c, 0x78, 0x2c, 0xa2, 0xaa, 0x98, 
0xd2, 0x52, 0x8d, 0x75, + 0xfa, 0xf1, 0x39, 0x3a, 0xfd, 0xc4, 0x94, 0x4e, 0xff, 0xd7, 0x52, 0x70, 0xd3, 0x70, 0x81, 0x71, + 0x00, 0x69, 0x87, 0xe8, 0x64, 0xe8, 0x88, 0x6f, 0xf1, 0xca, 0xbc, 0xd5, 0xca, 0x86, 0x3b, 0x38, + 0x62, 0xe2, 0xaa, 0x50, 0xa3, 0xdc, 0x82, 0x52, 0x78, 0x25, 0xda, 0x94, 0xbe, 0x2f, 0xc6, 0x95, + 0xdb, 0x80, 0x26, 0x8b, 0x98, 0x29, 0x5d, 0xaa, 0x34, 0xad, 0x4b, 0xfd, 0x8d, 0x04, 0x8f, 0x5f, + 0x50, 0xb0, 0xa0, 0xb7, 0xc6, 0x5e, 0xf2, 0xb5, 0x45, 0xca, 0x9d, 0x0d, 0x4e, 0x1b, 0x7b, 0xcd, + 0x9b, 0x50, 0x08, 0xd2, 0xe7, 0x7b, 0xc9, 0x6f, 0xe2, 0x7e, 0x2c, 0x08, 0xb7, 0xd3, 0x7e, 0x24, + 0x95, 0xbe, 0x65, 0x24, 0x7d, 0x1d, 0x80, 0x8c, 0x34, 0x7e, 0x3a, 0xdc, 0x74, 0xfc, 0xc4, 0x14, + 0xb4, 0x13, 0xb7, 0x9a, 0x23, 0x71, 0x96, 0x72, 0x44, 0x8c, 0x1c, 0x74, 0x14, 0xc4, 0x16, 0x86, + 0x2c, 0x55, 0x3b, 0xa2, 0xef, 0x9e, 0x37, 0xa7, 0xfb, 0x18, 0x04, 0x27, 0x3b, 0xe8, 0x1d, 0x78, + 0x6c, 0xac, 0xde, 0xf0, 0x54, 0x27, 0xe7, 0x2d, 0x3b, 0x2e, 0x85, 0xcb, 0x0e, 0x57, 0x75, 0xb0, + 0x68, 0x48, 0x85, 0x8b, 0x86, 0x77, 0x00, 0x7c, 0x8c, 0x81, 0x06, 0x2a, 0xdb, 0x1c, 0x0e, 0xda, + 0xcc, 0x03, 0x52, 0x2a, 0x9f, 0xa0, 0x5b, 0x90, 0xa2, 0x9e, 0xe4, 0xda, 0x69, 0x32, 0xa2, 0x53, + 0x4f, 0x08, 0x60, 0x14, 0x9c, 0x5b, 0x31, 0x00, 0x4d, 0xc2, 0xc5, 0x11, 0x5b, 0xbc, 0x11, 0xde, + 0xe2, 0xc9, 0x48, 0xe0, 0x79, 0xfa, 0x56, 0x0f, 0x21, 0xc5, 0xbe, 0x3c, 0xcd, 0xdd, 0xec, 0x2f, + 0x86, 0x28, 0x3a, 0xe9, 0x18, 0xfd, 0x08, 0x40, 0x27, 0xc4, 0x36, 0x4e, 0x86, 0xfe, 0x06, 0xd7, + 0xa6, 0x7b, 0x4e, 0xd5, 0xe5, 0xdb, 0xbe, 0x2a, 0x5c, 0x68, 0xd5, 0x17, 0x0d, 0xb8, 0x51, 0x40, + 0xa1, 0xb2, 0x0f, 0xa5, 0xb0, 0xac, 0x5b, 0x26, 0xf1, 0x67, 0x08, 0x97, 0x49, 0xbc, 0xea, 0x15, + 0x65, 0x92, 0x57, 0x64, 0x25, 0xf8, 0xaf, 0x1a, 0x36, 0x51, 0x7e, 0x1c, 0x87, 0x42, 0xd0, 0xf1, + 0xfe, 0xf3, 0x2a, 0x19, 0xe5, 0x67, 0x12, 0x64, 0xbd, 0xd7, 0x0f, 0xff, 0x95, 0x09, 0xfd, 0xe8, + 0xe2, 0xd6, 0x8b, 0x07, 0x7f, 0xa5, 0xf0, 0xdf, 0x5a, 0x09, 0xef, 0xb7, 0xd6, 0x6d, 0x2f, 0x8b, + 0x46, 0xe1, 
0x2a, 0x41, 0x5b, 0x0b, 0xaf, 0x72, 0x8b, 0x86, 0xdb, 0x90, 0xf3, 0x4e, 0x2f, 0xed, + 0x5d, 0x5c, 0xfc, 0x49, 0x12, 0x67, 0x48, 0xa0, 0x87, 0xab, 0x90, 0xb2, 0xcc, 0x0f, 0xc5, 0x7f, + 0x9a, 0x84, 0xca, 0x27, 0x4a, 0x1b, 0x96, 0xc6, 0x8e, 0x3e, 0xba, 0x0d, 0x19, 0x6b, 0x78, 0xa2, + 0xb9, 0xce, 0x31, 0x86, 0xd2, 0xb9, 0x55, 0xf1, 0xf0, 0xa4, 0x67, 0xb4, 0xee, 0xe1, 0x73, 0xf7, + 0x61, 0xac, 0xe1, 0xc9, 0x3d, 0xee, 0x43, 0x7c, 0x97, 0x78, 0x70, 0x97, 0x5f, 0x48, 0x90, 0x75, + 0xcf, 0x04, 0xfa, 0x7f, 0xc8, 0x79, 0x61, 0xc5, 0xfb, 0x15, 0x1b, 0x19, 0x8f, 0x84, 0x7e, 0x5f, + 0x04, 0x55, 0xdd, 0x7f, 0xc8, 0x46, 0x5b, 0xeb, 0xf4, 0x74, 0xee, 0x4b, 0xa5, 0xb0, 0xcd, 0x78, + 0xe0, 0x61, 0xf1, 0x78, 0xe7, 0xce, 0xdd, 0x9e, 0x7e, 0xaa, 0xe6, 0x99, 0xcc, 0x4e, 0x9b, 0x4e, + 0x44, 0x81, 0xf8, 0x77, 0x09, 0xe4, 0xf1, 0x13, 0xfb, 0xad, 0x9f, 0x6e, 0x32, 0xcd, 0x25, 0xa6, + 0xa4, 0x39, 0xb4, 0x09, 0x2b, 0x1e, 0x87, 0xe6, 0x18, 0xa7, 0x03, 0x9d, 0x0c, 0x6d, 0x2c, 0x70, + 0x4d, 0xe4, 0x2d, 0x1d, 0xb9, 0x2b, 0x93, 0x6f, 0x9d, 0x7a, 0xc4, 0xb7, 0xfe, 0x28, 0x0e, 0xf9, + 0x00, 0xca, 0x8a, 0xfe, 0x2f, 0x10, 0x8c, 0x4a, 0x53, 0x32, 0x43, 0x80, 0xd7, 0xff, 0xad, 0x1a, + 0x36, 0x53, 0x7c, 0x71, 0x33, 0x45, 0x61, 0xd9, 0x2e, 0x68, 0x9b, 0x5c, 0x18, 0xb4, 0x7d, 0x01, + 0x10, 0x31, 0x89, 0xde, 0xd3, 0xce, 0x4c, 0x62, 0x0c, 0x4e, 0x35, 0xee, 0x86, 0x3c, 0x74, 0xc8, + 0x6c, 0xe5, 0x3e, 0x5b, 0x38, 0x64, 0x1e, 0xf9, 0x13, 0x09, 0xb2, 0x5e, 0xf5, 0xbe, 0xe8, 0x2f, + 0xd5, 0xcb, 0x90, 0x16, 0x05, 0x2a, 0xff, 0xa7, 0x2a, 0x66, 0x53, 0xd1, 0xe9, 0x0a, 0x64, 0xfb, + 0x98, 0xe8, 0x2c, 0x0e, 0xf2, 0xac, 0xe6, 0xcd, 0x6f, 0xbc, 0x06, 0xf9, 0xc0, 0x0f, 0x6b, 0x1a, + 0x1a, 0xf7, 0xeb, 0x6f, 0xcb, 0xb1, 0x4a, 0xe6, 0xe3, 0xcf, 0xd6, 0x13, 0xfb, 0xf8, 0x43, 0x7a, + 0x9a, 0xd5, 0x7a, 0xad, 0x51, 0xaf, 0xdd, 0x93, 0xa5, 0x4a, 0xfe, 0xe3, 0xcf, 0xd6, 0x33, 0x2a, + 0x66, 0xc0, 0xe4, 0x8d, 0x7b, 0xb0, 0x34, 0xf6, 0x61, 0xc2, 0x65, 0x0b, 0x82, 0xd2, 0x9d, 0xe3, + 0xc3, 0xbd, 0x9d, 0x5a, 0xb5, 0x59, 0xd7, 0xee, 
0x1f, 0x34, 0xeb, 0xb2, 0x84, 0x1e, 0x83, 0x95, + 0xbd, 0x9d, 0xef, 0x35, 0x9a, 0x5a, 0x6d, 0x6f, 0xa7, 0xbe, 0xdf, 0xd4, 0xaa, 0xcd, 0x66, 0xb5, + 0x76, 0x4f, 0x8e, 0x6f, 0x7d, 0x96, 0x87, 0x64, 0x75, 0xbb, 0xb6, 0x83, 0x6a, 0x90, 0x64, 0x88, + 0xca, 0x85, 0x37, 0xd6, 0x2a, 0x17, 0x43, 0xcc, 0xe8, 0x2e, 0xa4, 0x18, 0xd8, 0x82, 0x2e, 0xbe, + 0xc2, 0x56, 0x99, 0x81, 0x39, 0xd3, 0x87, 0x61, 0x27, 0xf2, 0xc2, 0x3b, 0x6d, 0x95, 0x8b, 0x21, + 0x68, 0xb4, 0x07, 0x19, 0xb7, 0xd7, 0x9e, 0x75, 0xd1, 0xac, 0x32, 0x13, 0x17, 0xa6, 0xaf, 0xc6, + 0x31, 0x8b, 0x8b, 0xaf, 0xbb, 0x55, 0x66, 0x80, 0xd3, 0x68, 0x07, 0xd2, 0xa2, 0xab, 0x9d, 0x71, + 0x83, 0xad, 0x32, 0x0b, 0x6e, 0x46, 0x2a, 0xe4, 0x7c, 0x34, 0x68, 0xf6, 0x25, 0xbe, 0xca, 0x1c, + 0xb8, 0x3b, 0x7a, 0x17, 0x8a, 0xe1, 0x8e, 0x79, 0xbe, 0x5b, 0x72, 0x95, 0x39, 0x81, 0x6d, 0xaa, + 0x3f, 0xdc, 0x3e, 0xcf, 0x77, 0x6b, 0xae, 0x32, 0x27, 0xce, 0x8d, 0xde, 0x87, 0xe5, 0xc9, 0xf6, + 0x76, 0xfe, 0x4b, 0x74, 0x95, 0x05, 0x90, 0x6f, 0xd4, 0x07, 0x34, 0xa5, 0x2d, 0x5e, 0xe0, 0x4e, + 0x5d, 0x65, 0x11, 0x20, 0x1c, 0xb5, 0x61, 0x69, 0xbc, 0xd7, 0x9c, 0xf7, 0x8e, 0x5d, 0x65, 0x6e, + 0x50, 0x9c, 0xef, 0x12, 0x6e, 0x2e, 0xe7, 0xbd, 0x73, 0x57, 0x99, 0x1b, 0x23, 0x47, 0xc7, 0x00, + 0x81, 0xfe, 0x70, 0x8e, 0x3b, 0x78, 0x95, 0x79, 0xd0, 0x72, 0x64, 0xc1, 0xca, 0xb4, 0xc6, 0x71, + 0x91, 0x2b, 0x79, 0x95, 0x85, 0x40, 0x74, 0xea, 0xcf, 0xe1, 0x16, 0x70, 0xbe, 0x2b, 0x7a, 0x95, + 0x39, 0xd1, 0xf4, 0xed, 0xea, 0xe7, 0x5f, 0xad, 0x49, 0x5f, 0x7c, 0xb5, 0x26, 0xfd, 0xed, 0xab, + 0x35, 0xe9, 0x93, 0xaf, 0xd7, 0x62, 0x5f, 0x7c, 0xbd, 0x16, 0xfb, 0xcb, 0xd7, 0x6b, 0xb1, 0xef, + 0x3f, 0x7b, 0x6a, 0x90, 0xee, 0xf0, 0x64, 0xa3, 0x65, 0xf6, 0x37, 0x5b, 0x66, 0x1f, 0x93, 0x93, + 0x0e, 0xf1, 0x07, 0xfe, 0x4d, 0xeb, 0x93, 0x34, 0xcb, 0xa0, 0x37, 0xff, 0x19, 0x00, 0x00, 0xff, + 0xff, 0xc8, 0x5b, 0x8f, 0x7e, 0x89, 0x2d, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -4412,7 +4456,6 @@ func _ABCI_FinalizeBlock_Handler(srv interface{}, ctx context.Context, dec func( return interceptor(ctx, in, info, handler) } -var ABCI_serviceDesc = _ABCI_serviceDesc var _ABCI_serviceDesc = grpc.ServiceDesc{ ServiceName: "tendermint.abci.ABCI", HandlerType: (*ABCIServer)(nil), @@ -5193,6 +5236,11 @@ func (m *RequestOfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.AppVersion != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.AppVersion)) + i-- + dAtA[i] = 0x18 + } if len(m.AppHash) > 0 { i -= len(m.AppHash) copy(dAtA[i:], m.AppHash) @@ -5403,6 +5451,18 @@ func (m *RequestProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + if len(m.DataRootHash) > 0 { + i -= len(m.DataRootHash) + copy(dAtA[i:], m.DataRootHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.DataRootHash))) + i-- + dAtA[i] = 0x52 + } + if m.SquareSize != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.SquareSize)) + i-- + dAtA[i] = 0x48 + } if len(m.ProposerAddress) > 0 { i -= len(m.ProposerAddress) copy(dAtA[i:], m.ProposerAddress) @@ -6659,6 +6719,18 @@ func (m *ResponsePrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + if len(m.DataRootHash) > 0 { + i -= len(m.DataRootHash) + copy(dAtA[i:], m.DataRootHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.DataRootHash))) + i-- + dAtA[i] = 0x1a + } + if m.SquareSize != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.SquareSize)) + i-- + dAtA[i] = 0x10 + } if len(m.Txs) > 0 { for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Txs[iNdEx]) @@ -7780,6 +7852,9 @@ func (m *RequestOfferSnapshot) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.AppVersion != 0 { + n += 1 + sovTypes(uint64(m.AppVersion)) + } return n } @@ -7897,6 +7972,13 @@ func (m *RequestProcessProposal) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.SquareSize != 0 { + n += 1 + 
sovTypes(uint64(m.SquareSize)) + } + l = len(m.DataRootHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } return n } @@ -8476,6 +8558,13 @@ func (m *ResponsePrepareProposal) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) } } + if m.SquareSize != 0 { + n += 1 + sovTypes(uint64(m.SquareSize)) + } + l = len(m.DataRootHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } return n } @@ -10396,6 +10485,25 @@ func (m *RequestOfferSnapshot) Unmarshal(dAtA []byte) error { m.AppHash = []byte{} } iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AppVersion", wireType) + } + m.AppVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AppVersion |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -11229,6 +11337,59 @@ func (m *RequestProcessProposal) Unmarshal(dAtA []byte) error { m.ProposerAddress = []byte{} } iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SquareSize", wireType) + } + m.SquareSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SquareSize |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataRootHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + 
} + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataRootHash = append(m.DataRootHash[:0], dAtA[iNdEx:postIndex]...) + if m.DataRootHash == nil { + m.DataRootHash = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -14348,6 +14509,59 @@ func (m *ResponsePrepareProposal) Unmarshal(dAtA []byte) error { m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SquareSize", wireType) + } + m.SquareSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SquareSize |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataRootHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataRootHash = append(m.DataRootHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.DataRootHash == nil { + m.DataRootHash = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/blocksync/msgs_test.go b/blocksync/msgs_test.go index 1100771e8a4..544c3d708ca 100644 --- a/blocksync/msgs_test.go +++ b/blocksync/msgs_test.go @@ -81,7 +81,7 @@ func TestBcStatusResponseMessageValidateBasic(t *testing.T) { //nolint:lll // ignore line length in tests func TestBlocksyncMessageVectors(t *testing.T) { - block := types.MakeBlock(int64(3), []types.Tx{types.Tx("Hello World")}, nil, nil) + block := types.MakeBlock(int64(3), types.MakeData([]types.Tx{types.Tx("Hello World")}), nil, nil) block.Version.Block = 11 // overwrite updated protocol version bpb, err := block.ToProto() diff --git a/blocksync/reactor.go b/blocksync/reactor.go index 745f227663d..0f0c48dedcd 100644 --- a/blocksync/reactor.go +++ b/blocksync/reactor.go @@ -29,6 +29,9 @@ const ( statusUpdateIntervalSeconds = 10 // check if we should switch to consensus reactor switchToConsensusIntervalSeconds = 1 + + // ReactorIncomingMessageQueueSize the size of the reactor's message queue. + ReactorIncomingMessageQueueSize = 10 ) type consensusReactor interface { @@ -500,6 +503,22 @@ FOR_LOOP: // validate the block before we persist it err = bcR.blockExec.ValidateBlock(state, first) } + + if err == nil { + var stateMachineValid bool + // Block sync doesn't check that the `Data` in a block is valid. + // Since celestia-core can't determine if the `Data` in a block + // is valid, the next line asks celestia-app to check if the + // block is valid via ProcessProposal. If this step wasn't + // performed, a malicious node could fabricate an alternative + // set of transactions that would cause a different app hash and + // thus cause this node to panic. 
+ stateMachineValid, err = bcR.blockExec.ProcessProposal(first, state) + if !stateMachineValid { + err = fmt.Errorf("application has rejected syncing block (%X) at height %d", first.Hash(), first.Height) + } + } + presentExtCommit := extCommit != nil extensionsEnabled := state.ConsensusParams.ABCI.VoteExtensionsEnabled(first.Height) if presentExtCommit != extensionsEnabled { @@ -546,7 +565,7 @@ FOR_LOOP: // TODO: same thing for app - but we would need a way to // get the hash without persisting the state - state, err = bcR.blockExec.ApplyVerifiedBlock(state, firstID, first) + state, err = bcR.blockExec.ApplyVerifiedBlock(state, firstID, first, second.LastCommit) if err != nil { // TODO This is bad, are we zombie? panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) diff --git a/blocksync/reactor_test.go b/blocksync/reactor_test.go index acfefff6ab8..83a705ec216 100644 --- a/blocksync/reactor_test.go +++ b/blocksync/reactor_test.go @@ -138,7 +138,7 @@ func newReactor( lastExtCommit := seenExtCommit.Clone() - thisBlock := state.MakeBlock(blockHeight, nil, lastExtCommit.ToCommit(), nil, state.Validators.Proposer.Address) + thisBlock := state.MakeBlock(blockHeight, types.MakeData([]types.Tx{}), lastExtCommit.ToCommit(), nil, state.Validators.Proposer.Address) thisParts, err := thisBlock.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) @@ -165,7 +165,7 @@ func newReactor( ExtendedSignatures: []types.ExtendedCommitSig{vote.ExtendedCommitSig()}, } - state, err = blockExec.ApplyBlock(state, blockID, thisBlock) + state, err = blockExec.ApplyBlock(state, blockID, thisBlock, lastExtCommit.ToCommit()) if err != nil { panic(fmt.Errorf("error apply block: %w", err)) } @@ -523,6 +523,7 @@ func (bcR *ByzantineReactor) respondToPeer(msg *bcproto.BlockRequest, src p2p.Pe // Receive implements Reactor by handling 4 types of messages (look below). // Copied unchanged from reactor.go so the correct respondToPeer is called. 
func (bcR *ByzantineReactor) Receive(e p2p.Envelope) { //nolint: dupl + fmt.Println("Receive", e.Message) if err := ValidateMsg(e.Message); err != nil { bcR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err) bcR.Switch.StopPeerForError(e.Src, err) diff --git a/cmd/cometbft/commands/run_node.go b/cmd/cometbft/commands/run_node.go index d4a94214070..dc44f2b33fc 100644 --- a/cmd/cometbft/commands/run_node.go +++ b/cmd/cometbft/commands/run_node.go @@ -11,6 +11,7 @@ import ( cfg "github.com/cometbft/cometbft/config" cmtos "github.com/cometbft/cometbft/libs/os" + "github.com/cometbft/cometbft/libs/trace" nm "github.com/cometbft/cometbft/node" ) @@ -90,6 +91,30 @@ func AddNodeFlags(cmd *cobra.Command) { "db_dir", config.DBPath, "database directory") + + cmd.PersistentFlags().String( + trace.FlagTracePushConfig, + config.Instrumentation.TracePushConfig, + trace.FlagTracePushConfigDescription, + ) + + cmd.PersistentFlags().String( + trace.FlagTracePullAddress, + config.Instrumentation.TracePullAddress, + trace.FlagTracePullAddressDescription, + ) + + cmd.PersistentFlags().String( + trace.FlagPyroscopeURL, + config.Instrumentation.PyroscopeURL, + trace.FlagPyroscopeURLDescription, + ) + + cmd.PersistentFlags().Bool( + trace.FlagPyroscopeTrace, + config.Instrumentation.PyroscopeTrace, + trace.FlagPyroscopeTraceDescription, + ) } // NewRunNodeCmd returns the command that allows the CLI to start a node. 
diff --git a/config/config.go b/config/config.go index f78f8088293..53006678453 100644 --- a/config/config.go +++ b/config/config.go @@ -42,6 +42,7 @@ const ( MempoolTypeFlood = "flood" MempoolTypeNop = "nop" + MempoolTypeCAT = "cat" ) // NOTE: Most of the structs & relevant comments + the @@ -64,6 +65,12 @@ var ( // taken from https://semver.org/ semverRegexp = regexp.MustCompile(`^(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)(?:-(?P(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`) + + // DefaultTracingTables is a list of tables that are used for storing traces. + // This global var is filled by an init function in the schema package. This + // allows for the schema package to contain all the relevant logic while + // avoiding import cycles. + DefaultTracingTables = "" ) // Config defines the top level configuration for a CometBFT node @@ -781,6 +788,12 @@ type MempoolConfig struct { // performance results using the default P2P configuration. ExperimentalMaxGossipConnectionsToPersistentPeers int `mapstructure:"experimental_max_gossip_connections_to_persistent_peers"` ExperimentalMaxGossipConnectionsToNonPersistentPeers int `mapstructure:"experimental_max_gossip_connections_to_non_persistent_peers"` + + // MaxGossipDelay is the maximum allotted time that the reactor expects a transaction to + // arrive before issuing a new request to a different peer + // Only applicable to the v2 / CAT mempool + // Default is 200ms + MaxGossipDelay time.Duration `mapstructure:"max-gossip-delay"` } // DefaultMempoolConfig returns a default configuration for the CometBFT mempool @@ -978,8 +991,13 @@ func (cfg *BlockSyncConfig) ValidateBasic() error { // including timeouts and details about the WAL and the block structure. 
type ConsensusConfig struct { RootDir string `mapstructure:"home"` - WalPath string `mapstructure:"wal_file"` - walFile string // overrides WalPath if set + // If set to true, only internal messages will be written + // to the WAL. External messages like votes, proposals + // block parts, will not be written + // Default: true + OnlyInternalWal bool `mapstructure:"only_internal_wal"` + WalPath string `mapstructure:"wal_file"` + walFile string // overrides WalPath if set // How long we wait for a proposal block before prevoting nil TimeoutPropose time.Duration `mapstructure:"timeout_propose"` @@ -1016,6 +1034,7 @@ type ConsensusConfig struct { // DefaultConsensusConfig returns a default configuration for the consensus service func DefaultConsensusConfig() *ConsensusConfig { return &ConsensusConfig{ + OnlyInternalWal: true, WalPath: filepath.Join(DefaultDataDir, "cs.wal", "wal"), TimeoutPropose: 3000 * time.Millisecond, TimeoutProposeDelta: 500 * time.Millisecond, @@ -1036,6 +1055,7 @@ func DefaultConsensusConfig() *ConsensusConfig { // TestConsensusConfig returns a configuration for testing the consensus service func TestConsensusConfig() *ConsensusConfig { cfg := DefaultConsensusConfig() + cfg.OnlyInternalWal = false cfg.TimeoutPropose = 40 * time.Millisecond cfg.TimeoutProposeDelta = 1 * time.Millisecond cfg.TimeoutPrevote = 10 * time.Millisecond @@ -1083,6 +1103,32 @@ func (cfg *ConsensusConfig) Commit(t time.Time) time.Time { return t.Add(cfg.TimeoutCommit) } +// ProposeWithCustomTimeout is identical to Propose. However, +// it calculates the amount of time to wait for a proposal using the supplied +// customTimeout. +// If customTimeout is 0, the TimeoutPropose from cfg is used. 
+func (cfg *ConsensusConfig) ProposeWithCustomTimeout(round int32, customTimeout time.Duration) time.Duration { + // this is to capture any unforeseen cases where the customTimeout is 0 + var timeoutPropose = customTimeout + if timeoutPropose == 0 { + // falling back to default timeout + timeoutPropose = cfg.TimeoutPropose + } + return time.Duration(timeoutPropose.Nanoseconds()+cfg.TimeoutProposeDelta.Nanoseconds()*int64(round)) * time.Nanosecond +} + +// CommitWithCustomTimeout is identical to Commit. However, it calculates the time for commit using the supplied customTimeout. +// If customTimeout is 0, the TimeoutCommit from cfg is used. +func (cfg *ConsensusConfig) CommitWithCustomTimeout(t time.Time, customTimeout time.Duration) time.Time { + // this is to capture any unforeseen cases where the customTimeout is 0 + var timeoutCommit = customTimeout + if timeoutCommit == 0 { + // falling back to default timeout + timeoutCommit = cfg.TimeoutCommit + } + return t.Add(timeoutCommit) +} + // WalFile returns the full path to the write-ahead log file func (cfg *ConsensusConfig) WalFile() string { if cfg.walFile != "" { @@ -1224,6 +1270,38 @@ type InstrumentationConfig struct { // Instrumentation namespace. Namespace string `mapstructure:"namespace"` + + // TracePushConfig is the relative path of the push config. This second + // config contains credentials for where and how often to. + TracePushConfig string `mapstructure:"trace_push_config"` + + // TracePullAddress is the address that the trace server will listen on for + // pulling data. + TracePullAddress string `mapstructure:"trace_pull_address"` + + // TraceType is the type of tracer used. Options are "local" and "noop". + TraceType string `mapstructure:"trace_type"` + + // TraceBufferSize is the number of traces to write in a single batch. + TraceBufferSize int `mapstructure:"trace_push_batch_size"` + + // TracingTables is the list of tables that will be traced. 
See the + // pkg/trace/schema for a complete list of tables. It is represented as a + // comma separate string. For example: "consensus_round_state,mempool_tx". + TracingTables string `mapstructure:"tracing_tables"` + + // PyroscopeURL is the pyroscope url used to establish a connection with a + // pyroscope continuous profiling server. + PyroscopeURL string `mapstructure:"pyroscope_url"` + + // PyroscopeProfile is a flag that enables tracing with pyroscope. + PyroscopeTrace bool `mapstructure:"pyroscope_trace"` + + // PyroscopeProfileTypes is a list of profile types to be traced with + // pyroscope. Available profile types are: cpu, alloc_objects, alloc_space, + // inuse_objects, inuse_space, goroutines, mutex_count, mutex_duration, + // block_count, block_duration. + PyroscopeProfileTypes []string `mapstructure:"pyroscope_profile_types"` } // DefaultInstrumentationConfig returns a default configuration for metrics @@ -1234,6 +1312,23 @@ func DefaultInstrumentationConfig() *InstrumentationConfig { PrometheusListenAddr: ":26660", MaxOpenConnections: 3, Namespace: "cometbft", + TracePushConfig: "", + TracePullAddress: "", + TraceType: "noop", + TraceBufferSize: 1000, + TracingTables: DefaultTracingTables, + PyroscopeURL: "", + PyroscopeTrace: false, + PyroscopeProfileTypes: []string{ + "cpu", + "alloc_objects", + "inuse_objects", + "goroutines", + "mutex_count", + "mutex_duration", + "block_count", + "block_duration", + }, } } @@ -1249,6 +1344,23 @@ func (cfg *InstrumentationConfig) ValidateBasic() error { if cfg.MaxOpenConnections < 0 { return errors.New("max_open_connections can't be negative") } + if cfg.PyroscopeTrace && cfg.PyroscopeURL == "" { + return errors.New("pyroscope_trace can't be enabled if profiling is disabled") + } + // if there is not TracePushConfig configured, then we do not need to validate the rest + // of the config because we are not connecting. 
+ if cfg.TracePushConfig == "" { + return nil + } + if cfg.TracePullAddress == "" { + return errors.New("token is required") + } + if cfg.TraceType == "" { + return errors.New("org is required") + } + if cfg.TraceBufferSize <= 0 { + return errors.New("batch size must be greater than 0") + } return nil } diff --git a/config/config_test.go b/config/config_test.go index 5092bc156fd..c390ee614d5 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -201,3 +201,31 @@ func TestInstrumentationConfigValidateBasic(t *testing.T) { cfg.MaxOpenConnections = -1 assert.Error(t, cfg.ValidateBasic()) } + +func TestProposeWithCustomTimeout(t *testing.T) { + cfg := config.DefaultConsensusConfig() + + // customTimeout is 0, should fallback to default timeout + round := int32(1) + expectedTimeout := time.Duration(cfg.TimeoutPropose.Nanoseconds()+cfg.TimeoutProposeDelta.Nanoseconds()*int64(round)) * time.Nanosecond + assert.Equal(t, expectedTimeout, cfg.ProposeWithCustomTimeout(round, time.Duration(0))) + + // customTimeout is not 0 + customTimeout := 2 * time.Second + expectedTimeout = time.Duration(customTimeout.Nanoseconds()+cfg.TimeoutProposeDelta.Nanoseconds()*int64(round)) * time.Nanosecond + assert.Equal(t, expectedTimeout, cfg.ProposeWithCustomTimeout(round, customTimeout)) +} + +func TestCommitWithCustomTimeout(t *testing.T) { + cfg := config.DefaultConsensusConfig() + + // customTimeout is 0, should fallback to default timeout + inputTime := time.Now() + expectedTime := inputTime.Add(cfg.TimeoutCommit) + assert.Equal(t, expectedTime, cfg.CommitWithCustomTimeout(inputTime, time.Duration(0))) + + // customTimeout is not 0 + customTimeout := 2 * time.Second + expectedTime = inputTime.Add(customTimeout) + assert.Equal(t, expectedTime, cfg.CommitWithCustomTimeout(inputTime, customTimeout)) +} diff --git a/config/toml.go b/config/toml.go index 0df9be29eed..c62beecb281 100644 --- a/config/toml.go +++ b/config/toml.go @@ -347,6 +347,7 @@ dial_timeout = "{{ 
.P2P.DialTimeout }}" # - "nop" : nop-mempool (short for no operation; the ABCI app is responsible # for storing, disseminating and proposing txs). "create_empty_blocks=false" is # not supported. +# - "cat" : content addressable mempool type = "flood" # Recheck (default: true) defines whether CometBFT should recheck the @@ -420,6 +421,12 @@ max_batch_bytes = {{ .Mempool.MaxBatchBytes }} experimental_max_gossip_connections_to_persistent_peers = {{ .Mempool.ExperimentalMaxGossipConnectionsToPersistentPeers }} experimental_max_gossip_connections_to_non_persistent_peers = {{ .Mempool.ExperimentalMaxGossipConnectionsToNonPersistentPeers }} +# max-gossip-delay is the maximum allotted time that the reactor expects a transaction to +# arrive before issuing a new request to a different peer +# Only applicable to the v2 / CAT mempool +# Default is 200ms +max-gossip-delay = "{{ .Mempool.MaxGossipDelay }}" + ####################################################### ### State Sync Configuration Options ### ####################################################### @@ -474,6 +481,11 @@ version = "{{ .BlockSync.Version }}" ####################################################### [consensus] +# If set to "true", only internal messages will be +# written to the WAL. External messages like votes, proposal, +# block parts, will not be written. +only_internal_wal = "{{ .Consensus.OnlyInternalWal }}" + wal_file = "{{ js .Consensus.WalPath }}" # How long we wait for a proposal block before prevoting nil @@ -564,4 +576,39 @@ max_open_connections = {{ .Instrumentation.MaxOpenConnections }} # Instrumentation namespace namespace = "{{ .Instrumentation.Namespace }}" + +# TracePushConfig is the relative path of the push config. +# This second config contains credentials for where and how often to +# push trace data to. For example, if the config is next to this config, +# it would be "push_config.json". 
+trace_push_config = "{{ .Instrumentation.TracePushConfig }}" + +# The tracer pull address specifies which address will be used for pull based +# event collection. If empty, the pull based server will not be started. +trace_pull_address = "{{ .Instrumentation.TracePullAddress }}" + +# The tracer to use for collecting trace data. +trace_type = "{{ .Instrumentation.TraceType }}" + +# The size of the batches that are sent to the database. +trace_push_batch_size = {{ .Instrumentation.TraceBufferSize }} + +# The list of tables that are updated when tracing. All available tables and +# their schema can be found in the pkg/trace/schema package. It is represented as a +# comma separate string. For example: "consensus_round_state,mempool_tx". +tracing_tables = "{{ .Instrumentation.TracingTables }}" + +# The URL of the pyroscope instance to use for continuous profiling. +# If empty, continuous profiling is disabled. +pyroscope_url = "{{ .Instrumentation.PyroscopeURL }}" + +# When true, tracing data is added to the continuous profiling +# performed by pyroscope. +pyroscope_trace = {{ .Instrumentation.PyroscopeTrace }} + +# pyroscope_profile_types is a list of profile types to be traced with +# pyroscope. Available profile types are: cpu, alloc_objects, alloc_space, +# inuse_objects, inuse_space, goroutines, mutex_count, mutex_duration, +# block_count, block_duration. +pyroscope_profile_types = [{{ range .Instrumentation.PyroscopeProfileTypes }}{{ printf "%q, " . 
}}{{end}}] ` diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index bb72e7932a9..b31b31dca91 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -594,3 +594,7 @@ func (br *ByzantineReactor) Receive(e p2p.Envelope) { br.reactor.Receive(e) } func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer } + +// func (br *ByzantineReactor) QueueUnprocessedEnvelope(e p2p.UnprocessedEnvelope) { +// br.reactor.QueueUnprocessedEnvelope(e) +// } diff --git a/consensus/metrics.gen.go b/consensus/metrics.gen.go index aea9322cde8..fc90befe252 100644 --- a/consensus/metrics.gen.go +++ b/consensus/metrics.gen.go @@ -198,40 +198,68 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "late_votes", Help: "LateVotes stores the number of votes that were received by this node that correspond to earlier heights and rounds than this node is currently in.", }, append(labels, "vote_type")).With(labelsAndValues...), + StartHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "start_height", + Help: "StartHeight is the height at which metrics began.", + }, labels).With(labelsAndValues...), + BlockTimeSeconds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_time_seconds", + Help: "BlockTimeSeconds is the duration between this block and the preceding one.", + }, labels).With(labelsAndValues...), + ApplicationRejectedProposals: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "application_rejected_proposals", + Help: "ApplicationRejectedProposals is the number of proposals rejected by the application.", + }, labels).With(labelsAndValues...), + TimedOutProposals: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: 
"timed_out_proposals", + Help: "TimedOutProposals is the number of proposals that failed to be received in time.", + }, labels).With(labelsAndValues...), } } func NopMetrics() *Metrics { return &Metrics{ - Height: discard.NewGauge(), - ValidatorLastSignedHeight: discard.NewGauge(), - Rounds: discard.NewGauge(), - RoundDurationSeconds: discard.NewHistogram(), - Validators: discard.NewGauge(), - ValidatorsPower: discard.NewGauge(), - ValidatorPower: discard.NewGauge(), - ValidatorMissedBlocks: discard.NewGauge(), - MissingValidators: discard.NewGauge(), - MissingValidatorsPower: discard.NewGauge(), - ByzantineValidators: discard.NewGauge(), - ByzantineValidatorsPower: discard.NewGauge(), - BlockIntervalSeconds: discard.NewHistogram(), - NumTxs: discard.NewGauge(), - BlockSizeBytes: discard.NewGauge(), - ChainSizeBytes: discard.NewCounter(), - TotalTxs: discard.NewGauge(), - CommittedHeight: discard.NewGauge(), - BlockParts: discard.NewCounter(), - DuplicateBlockPart: discard.NewCounter(), - DuplicateVote: discard.NewCounter(), - StepDurationSeconds: discard.NewHistogram(), - BlockGossipPartsReceived: discard.NewCounter(), - QuorumPrevoteDelay: discard.NewGauge(), - FullPrevoteDelay: discard.NewGauge(), - VoteExtensionReceiveCount: discard.NewCounter(), - ProposalReceiveCount: discard.NewCounter(), - ProposalCreateCount: discard.NewCounter(), - RoundVotingPowerPercent: discard.NewGauge(), - LateVotes: discard.NewCounter(), + Height: discard.NewGauge(), + ValidatorLastSignedHeight: discard.NewGauge(), + Rounds: discard.NewGauge(), + RoundDurationSeconds: discard.NewHistogram(), + Validators: discard.NewGauge(), + ValidatorsPower: discard.NewGauge(), + ValidatorPower: discard.NewGauge(), + ValidatorMissedBlocks: discard.NewGauge(), + MissingValidators: discard.NewGauge(), + MissingValidatorsPower: discard.NewGauge(), + ByzantineValidators: discard.NewGauge(), + ByzantineValidatorsPower: discard.NewGauge(), + BlockIntervalSeconds: discard.NewHistogram(), + NumTxs: 
discard.NewGauge(), + BlockSizeBytes: discard.NewGauge(), + ChainSizeBytes: discard.NewCounter(), + TotalTxs: discard.NewGauge(), + CommittedHeight: discard.NewGauge(), + BlockParts: discard.NewCounter(), + DuplicateBlockPart: discard.NewCounter(), + DuplicateVote: discard.NewCounter(), + StepDurationSeconds: discard.NewHistogram(), + BlockGossipPartsReceived: discard.NewCounter(), + QuorumPrevoteDelay: discard.NewGauge(), + FullPrevoteDelay: discard.NewGauge(), + VoteExtensionReceiveCount: discard.NewCounter(), + ProposalReceiveCount: discard.NewCounter(), + ProposalCreateCount: discard.NewCounter(), + RoundVotingPowerPercent: discard.NewGauge(), + LateVotes: discard.NewCounter(), + StartHeight: discard.NewGauge(), + BlockTimeSeconds: discard.NewGauge(), + ApplicationRejectedProposals: discard.NewCounter(), + TimedOutProposals: discard.NewCounter(), } } diff --git a/consensus/metrics.go b/consensus/metrics.go index 6e89d6b483f..b733399db63 100644 --- a/consensus/metrics.go +++ b/consensus/metrics.go @@ -125,6 +125,15 @@ type Metrics struct { // correspond to earlier heights and rounds than this node is currently // in. LateVotes metrics.Counter `metrics_labels:"vote_type"` + + // StartHeight is the height at which metrics began. + StartHeight metrics.Gauge + // BlockTimeSeconds is the duration between this block and the preceding one. + BlockTimeSeconds metrics.Gauge + // ApplicationRejectedProposals is the number of proposals rejected by the application. + ApplicationRejectedProposals metrics.Counter + // TimedOutProposals is the number of proposals that failed to be received in time. 
+ TimedOutProposals metrics.Counter } func (m *Metrics) MarkProposalProcessed(accepted bool) { diff --git a/consensus/reactor.go b/consensus/reactor.go index ee87b7ba637..8a95b6d0370 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "reflect" + "strconv" "sync" "time" @@ -13,6 +14,8 @@ import ( cmtjson "github.com/cometbft/cometbft/libs/json" "github.com/cometbft/cometbft/libs/log" cmtsync "github.com/cometbft/cometbft/libs/sync" + "github.com/cometbft/cometbft/libs/trace" + "github.com/cometbft/cometbft/libs/trace/schema" "github.com/cometbft/cometbft/p2p" cmtcons "github.com/cometbft/cometbft/proto/tendermint/consensus" cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" @@ -31,6 +34,9 @@ const ( blocksToContributeToBecomeGoodPeer = 10000 votesToContributeToBecomeGoodPeer = 10000 + + // ReactorIncomingMessageQueueSize the size of the reactor's message queue. + ReactorIncomingMessageQueueSize = 1000 ) //----------------------------------------------------------------------------- @@ -46,7 +52,8 @@ type Reactor struct { eventBus *types.EventBus rs *cstypes.RoundState - Metrics *Metrics + Metrics *Metrics + traceClient trace.Tracer } type ReactorOption func(*Reactor) @@ -55,12 +62,16 @@ type ReactorOption func(*Reactor) // consensusState. 
func NewReactor(consensusState *State, waitSync bool, options ...ReactorOption) *Reactor { conR := &Reactor{ - conS: consensusState, - waitSync: waitSync, - rs: consensusState.GetRoundState(), - Metrics: NopMetrics(), + conS: consensusState, + waitSync: waitSync, + rs: consensusState.GetRoundState(), + Metrics: NopMetrics(), + traceClient: trace.NoOpTracer(), } - conR.BaseReactor = *p2p.NewBaseReactor("Consensus", conR) + conR.BaseReactor = *p2p.NewBaseReactor( + "Consensus", + conR, + p2p.WithIncomingQueueSize(ReactorIncomingMessageQueueSize)) for _, option := range options { option(conR) @@ -261,6 +272,15 @@ func (conR *Reactor) Receive(e p2p.Envelope) { conR.conS.mtx.Lock() initialHeight := conR.conS.state.InitialHeight conR.conS.mtx.Unlock() + schema.WriteConsensusState( + conR.traceClient, + msg.Height, + msg.Round, + string(e.Src.ID()), + schema.ConsensusNewRoundStep, + schema.Download, + fmt.Sprintf("%d", msg.Step), + ) if err = msg.ValidateHeight(initialHeight); err != nil { conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", msg, "err", err) conR.Switch.StopPeerForError(e.Src, err) @@ -268,14 +288,39 @@ func (conR *Reactor) Receive(e p2p.Envelope) { } ps.ApplyNewRoundStepMessage(msg) case *NewValidBlockMessage: + schema.WriteConsensusState( + conR.traceClient, + msg.Height, + msg.Round, + string(e.Src.ID()), + schema.ConsensusNewValidBlock, + schema.Download, + ) ps.ApplyNewValidBlockMessage(msg) case *HasVoteMessage: + schema.WriteConsensusState( + conR.traceClient, + msg.Height, + msg.Round, + string(e.Src.ID()), + schema.ConsensusHasVote, + schema.Download, + msg.Type.String(), + ) ps.ApplyHasVoteMessage(msg) case *VoteSetMaj23Message: cs := conR.conS cs.mtx.Lock() height, votes := cs.Height, cs.Votes cs.mtx.Unlock() + schema.WriteConsensusState( + conR.traceClient, + msg.Height, + msg.Round, + string(e.Src.ID()), + schema.ConsensusVoteSet23Precommit, + schema.Download, + ) if height != msg.Height { return } @@ -305,10 +350,20 @@ func 
(conR *Reactor) Receive(e p2p.Envelope) { if votes := ourVotes.ToProto(); votes != nil { eMsg.Votes = *votes } - e.Src.TrySend(p2p.Envelope{ + if e.Src.TrySend(p2p.Envelope{ ChannelID: VoteSetBitsChannel, Message: eMsg, - }) + }) { + schema.WriteConsensusState( + conR.traceClient, + msg.Height, + msg.Round, + string(e.Src.ID()), + schema.ConsensusVoteSetBits, + schema.Upload, + msg.Type.String(), + ) + } default: conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } @@ -322,11 +377,27 @@ func (conR *Reactor) Receive(e p2p.Envelope) { case *ProposalMessage: ps.SetHasProposal(msg.Proposal) conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()} + schema.WriteProposal( + conR.traceClient, + msg.Proposal.Height, + msg.Proposal.Round, + string(e.Src.ID()), + schema.Download, + ) case *ProposalPOLMessage: ps.ApplyProposalPOLMessage(msg) + schema.WriteConsensusState( + conR.traceClient, + msg.Height, + msg.ProposalPOLRound, + string(e.Src.ID()), + schema.ConsensusPOL, + schema.Download, + ) case *BlockPartMessage: ps.SetHasProposalBlockPart(msg.Height, msg.Round, int(msg.Part.Index)) conR.Metrics.BlockParts.With("peer_id", string(e.Src.ID())).Add(1) + schema.WriteBlockPart(conR.traceClient, msg.Height, msg.Round, msg.Part.Index, false, string(e.Src.ID()), schema.Download) conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()} default: conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) @@ -443,6 +514,15 @@ func (conR *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) { ChannelID: StateChannel, Message: nrsMsg, }) + schema.WriteConsensusState( + conR.traceClient, + rs.Height, + rs.Round, + schema.Broadcast, + schema.ConsensusNewRoundStep, + schema.Upload, + strconv.FormatUint(uint64(nrsMsg.Step), 10), + ) } func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) { @@ -458,6 +538,14 @@ func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) { ChannelID: StateChannel, Message: 
csMsg, }) + schema.WriteConsensusState( + conR.traceClient, + rs.Height, + rs.Round, + schema.Broadcast, + schema.ConsensusNewValidBlock, + schema.Upload, + ) } // Broadcasts HasVoteMessage to peers that care. @@ -472,6 +560,15 @@ func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) { ChannelID: StateChannel, Message: msg, }) + schema.WriteConsensusState( + conR.traceClient, + vote.Height, + vote.Round, + schema.Broadcast, + schema.ConsensusHasVote, + schema.Upload, + vote.Type.String(), + ) /* // TODO: Make this broadcast more selective. for _, peer := range conR.Switch.Peers().List() { @@ -510,10 +607,20 @@ func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *cmtcons.NewRoundStep) func (conR *Reactor) sendNewRoundStepMessage(peer p2p.Peer) { rs := conR.getRoundState() nrsMsg := makeRoundStepMessage(rs) - peer.Send(p2p.Envelope{ + if peer.Send(p2p.Envelope{ ChannelID: StateChannel, Message: nrsMsg, - }) + }) { + schema.WriteConsensusState( + conR.traceClient, + rs.Height, + rs.Round, + string(peer.ID()), + schema.ConsensusNewRoundStep, + schema.Upload, + strconv.FormatUint(uint64(nrsMsg.Step), 10), + ) + } } func (conR *Reactor) updateRoundStateRoutine() { @@ -617,6 +724,13 @@ OUTER_LOOP: }) { // NOTE[ZM]: A peer might have received different proposal msg so this Proposal msg will be rejected! ps.SetHasProposal(rs.Proposal) + schema.WriteProposal( + conR.traceClient, + rs.Height, + rs.Round, + string(peer.ID()), + schema.Upload, + ) } } // ProposalPOL: lets peer know which POL votes we have so far. @@ -625,14 +739,23 @@ OUTER_LOOP: // so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound). 
if 0 <= rs.Proposal.POLRound { logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round) - peer.Send(p2p.Envelope{ + if peer.Send(p2p.Envelope{ ChannelID: DataChannel, Message: &cmtcons.ProposalPOL{ Height: rs.Height, ProposalPolRound: rs.Proposal.POLRound, ProposalPol: *rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray().ToProto(), }, - }) + }) { + schema.WriteConsensusState( + conR.traceClient, + rs.Height, + rs.Round, + string(peer.ID()), + schema.ConsensusPOL, + schema.Upload, + ) + } } continue OUTER_LOOP } @@ -684,6 +807,16 @@ func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundSt }, }) { ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) + schema.WriteBlockPart( + conR.traceClient, + prs.Height, + prs.Round, + //nolint:gosec + uint32(index), + true, + string(peer.ID()), + schema.Upload, + ) } else { logger.Debug("Sending block part for catchup failed") // sleep to avoid retrying too fast @@ -731,7 +864,7 @@ OUTER_LOOP: // Special catchup logic. // If peer is lagging by height 1, send LastCommit. if prs.Height != 0 && rs.Height == prs.Height+1 { - if ps.PickSendVote(rs.LastCommit) { + if conR.pickSendVoteAndTrace(rs.LastCommit, rs, ps) { logger.Debug("Picked rs.LastCommit to send", "height", prs.Height) continue OUTER_LOOP } @@ -762,7 +895,8 @@ OUTER_LOOP: if ec == nil { continue } - if ps.PickSendVote(ec) { + vote := ps.PickSendVote(ec) + if vote != nil { logger.Debug("Picked Catchup commit to send", "height", prs.Height) continue OUTER_LOOP } @@ -784,6 +918,19 @@ OUTER_LOOP: } } +// pickSendVoteAndTrace picks a vote to send and traces it. +// It returns true if a vote is sent. +// Note that it is a wrapper around PickSendVote with the addition of tracing the vote. 
+func (conR *Reactor) pickSendVoteAndTrace(votes types.VoteSetReader, rs *cstypes.RoundState, ps *PeerState) bool { + vote := ps.PickSendVote(votes) + if vote != nil { // if a vote is sent, trace it + schema.WriteVote(conR.traceClient, rs.Height, rs.Round, vote, + string(ps.peer.ID()), schema.Upload) + return true + } + return false +} + func (conR *Reactor) gossipVotesForHeight( logger log.Logger, rs *cstypes.RoundState, @@ -792,7 +939,7 @@ func (conR *Reactor) gossipVotesForHeight( ) bool { // If there are lastCommits to send... if prs.Step == cstypes.RoundStepNewHeight { - if ps.PickSendVote(rs.LastCommit) { + if conR.pickSendVoteAndTrace(rs.LastCommit, rs, ps) { logger.Debug("Picked rs.LastCommit to send") return true } @@ -800,7 +947,7 @@ func (conR *Reactor) gossipVotesForHeight( // If there are POL prevotes to send... if prs.Step <= cstypes.RoundStepPropose && prs.Round != -1 && prs.Round <= rs.Round && prs.ProposalPOLRound != -1 { if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil { - if ps.PickSendVote(polPrevotes) { + if conR.pickSendVoteAndTrace(polPrevotes, rs, ps) { logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send", "round", prs.ProposalPOLRound) return true @@ -809,21 +956,21 @@ func (conR *Reactor) gossipVotesForHeight( } // If there are prevotes to send... if prs.Step <= cstypes.RoundStepPrevoteWait && prs.Round != -1 && prs.Round <= rs.Round { - if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) { + if conR.pickSendVoteAndTrace(rs.Votes.Prevotes(prs.Round), rs, ps) { logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round) return true } } // If there are precommits to send... 
if prs.Step <= cstypes.RoundStepPrecommitWait && prs.Round != -1 && prs.Round <= rs.Round { - if ps.PickSendVote(rs.Votes.Precommits(prs.Round)) { + if conR.pickSendVoteAndTrace(rs.Votes.Precommits(prs.Round), rs, ps) { logger.Debug("Picked rs.Precommits(prs.Round) to send", "round", prs.Round) return true } } // If there are prevotes to send...Needed because of validBlock mechanism if prs.Round != -1 && prs.Round <= rs.Round { - if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) { + if conR.pickSendVoteAndTrace(rs.Votes.Prevotes(prs.Round), rs, ps) { logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round) return true } @@ -831,7 +978,7 @@ func (conR *Reactor) gossipVotesForHeight( // If there are POLPrevotes to send... if prs.ProposalPOLRound != -1 { if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil { - if ps.PickSendVote(polPrevotes) { + if conR.pickSendVoteAndTrace(polPrevotes, rs, ps) { logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send", "round", prs.ProposalPOLRound) return true @@ -859,7 +1006,7 @@ OUTER_LOOP: if rs.Height == prs.Height { if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok { - peer.TrySend(p2p.Envelope{ + if peer.TrySend(p2p.Envelope{ ChannelID: StateChannel, Message: &cmtcons.VoteSetMaj23{ Height: prs.Height, @@ -867,7 +1014,16 @@ OUTER_LOOP: Type: cmtproto.PrevoteType, BlockID: maj23.ToProto(), }, - }) + }) { + schema.WriteConsensusState( + conR.traceClient, + rs.Height, + rs.Round, + string(peer.ID()), + schema.ConsensusVoteSet23Prevote, + schema.Upload, + ) + } time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } @@ -879,7 +1035,7 @@ OUTER_LOOP: prs := ps.GetRoundState() if rs.Height == prs.Height { if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok { - peer.TrySend(p2p.Envelope{ + if peer.TrySend(p2p.Envelope{ ChannelID: StateChannel, Message: &cmtcons.VoteSetMaj23{ Height: prs.Height, @@ -887,7 +1043,16 @@ OUTER_LOOP: Type: 
cmtproto.PrecommitType, BlockID: maj23.ToProto(), }, - }) + }) { + schema.WriteConsensusState( + conR.traceClient, + rs.Height, + rs.Round, + string(peer.ID()), + schema.ConsensusVoteSet23Precommit, + schema.Upload, + ) + } time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } @@ -900,7 +1065,7 @@ OUTER_LOOP: if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 { if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok { - peer.TrySend(p2p.Envelope{ + if peer.TrySend(p2p.Envelope{ ChannelID: StateChannel, Message: &cmtcons.VoteSetMaj23{ Height: prs.Height, @@ -908,7 +1073,16 @@ Type: cmtproto.PrevoteType, BlockID: maj23.ToProto(), }, - }) + }) { + schema.WriteConsensusState( + conR.traceClient, + rs.Height, + rs.Round, + string(peer.ID()), + schema.ConsensusPOL, + schema.Upload, + ) + } time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } @@ -923,7 +1097,7 @@ OUTER_LOOP: if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= conR.conS.blockStore.Height() && prs.Height >= conR.conS.blockStore.Base() { if commit := conR.conS.LoadCommit(prs.Height); commit != nil { - peer.TrySend(p2p.Envelope{ + if peer.TrySend(p2p.Envelope{ ChannelID: StateChannel, Message: &cmtcons.VoteSetMaj23{ Height: prs.Height, @@ -931,7 +1105,16 @@ Type: cmtproto.PrecommitType, BlockID: commit.BlockID.ToProto(), }, - }) + }) { + schema.WriteConsensusState( + conR.traceClient, + prs.Height, + prs.Round, + string(peer.ID()), + schema.ConsensusVoteSet23Precommit, + schema.Upload, + ) + } time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } @@ -1011,6 +1194,10 @@ func ReactorMetrics(metrics *Metrics) ReactorOption { return func(conR *Reactor) { conR.Metrics = metrics } } +func ReactorTracing(traceClient trace.Tracer) ReactorOption { + return func(conR *Reactor) { conR.traceClient = traceClient } +} + //----------------------------------------------------------------------------- var ( @@ -1144,7 +1331,7 @@ func (ps
*PeerState) SetHasProposalBlockPart(height int64, round int32, index in // PickSendVote picks a vote and sends it to the peer. // Returns true if vote was sent. -func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool { +func (ps *PeerState) PickSendVote(votes types.VoteSetReader) *types.Vote { if vote, ok := ps.PickVoteToSend(votes); ok { ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote) if ps.peer.Send(p2p.Envelope{ @@ -1154,11 +1341,11 @@ func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool { }, }) { ps.SetHasVote(vote) - return true + return vote } - return false + return nil } - return false + return nil } // PickVoteToSend picks a vote to send to the peer. diff --git a/consensus/replay.go b/consensus/replay.go index b8e457fa518..f98e07a3898 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -239,41 +239,43 @@ func (h *Handshaker) NBlocks() int { } // TODO: retry the handshake/replay if it fails ? -func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { +func (h *Handshaker) Handshake(proxyApp proxy.AppConns) (string, error) { return h.HandshakeWithContext(context.TODO(), proxyApp) } // HandshakeWithContext is cancellable version of Handshake -func (h *Handshaker) HandshakeWithContext(ctx context.Context, proxyApp proxy.AppConns) error { +func (h *Handshaker) HandshakeWithContext(ctx context.Context, proxyApp proxy.AppConns) (string, error) { // Handshake is done via ABCI Info on the query conn. 
res, err := proxyApp.Query().Info(ctx, proxy.RequestInfo) if err != nil { - return fmt.Errorf("error calling Info: %v", err) + return "", fmt.Errorf("error calling Info: %v", err) } blockHeight := res.LastBlockHeight if blockHeight < 0 { - return fmt.Errorf("got a negative last block height (%d) from the app", blockHeight) + return "", fmt.Errorf("got a negative last block height (%d) from the app", blockHeight) } appHash := res.LastBlockAppHash - h.logger.Info("ABCI Handshake App Info", - "height", blockHeight, - "hash", log.NewLazySprintf("%X", appHash), - "software-version", res.Version, - "protocol-version", res.AppVersion, - ) + // h.logger.Info("ABCI Handshake App Info", + // "height", blockHeight, + // "hash", log.NewLazySprintf("%X", appHash), + // "software-version", res.Version, + // "protocol-version", res.AppVersion, + // ) // Only set the version if there is no existing state. - if h.initialState.LastBlockHeight == 0 { + appVersion := h.initialState.Version.Consensus.App + // set app version if it's not set via genesis + if h.initialState.LastBlockHeight == 0 && appVersion == 0 && res.AppVersion != 0 { h.initialState.Version.Consensus.App = res.AppVersion } // Replay blocks up to the latest in the blockstore. 
appHash, err = h.ReplayBlocksWithContext(ctx, h.initialState, appHash, blockHeight, proxyApp) if err != nil { - return fmt.Errorf("error on replay: %v", err) + return "", fmt.Errorf("error on replay: %v", err) } h.logger.Info("Completed ABCI Handshake - CometBFT and App are synced", @@ -281,7 +283,7 @@ func (h *Handshaker) HandshakeWithContext(ctx context.Context, proxyApp proxy.Ap // TODO: (on restart) replay mempool - return nil + return res.Version, nil } // ReplayBlocks replays all blocks since appBlockHeight and ensures the result @@ -524,6 +526,7 @@ func (h *Handshaker) replayBlocks( func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.AppConnConsensus) (sm.State, error) { block := h.store.LoadBlock(height) meta := h.store.LoadBlockMeta(height) + seenCommit := h.store.LoadSeenCommit(height) // Use stubs for both mempool and evidence pool since no transactions nor // evidence are needed here - block already exists. @@ -531,7 +534,7 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap blockExec.SetEventBus(h.eventBus) var err error - state, err = blockExec.ApplyBlock(state, meta.BlockID, block) + state, err = blockExec.ApplyBlock(state, meta.BlockID, block, seenCommit) if err != nil { return sm.State{}, err } diff --git a/consensus/replay_file.go b/consensus/replay_file.go index 269dcc0fd82..62fc3f39889 100644 --- a/consensus/replay_file.go +++ b/consensus/replay_file.go @@ -323,7 +323,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo handshaker := NewHandshaker(stateStore, state, blockStore, gdoc) handshaker.SetEventBus(eventBus) - err = handshaker.Handshake(proxyApp) + _, err = handshaker.Handshake(proxyApp) if err != nil { cmtos.Exit(fmt.Sprintf("Error on handshake: %v", err)) } diff --git a/consensus/replay_stubs.go b/consensus/replay_stubs.go index 0c55552f036..31b810a2b94 100644 --- a/consensus/replay_stubs.go +++ b/consensus/replay_stubs.go @@ -24,6 +24,9 @@ func 
(emptyMempool) CheckTx(types.Tx, func(*abci.ResponseCheckTx), mempl.TxInfo) return nil } +func (emptyMempool) GetTxByKey(types.TxKey) (types.Tx, bool) { return nil, false } +func (emptyMempool) WasRecentlyEvicted(types.TxKey) bool { return false } + func (txmp emptyMempool) RemoveTxByKey(types.TxKey) error { return nil } diff --git a/consensus/replay_test.go b/consensus/replay_test.go index bcf74387f36..70b2b552ef1 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -29,6 +29,7 @@ import ( cmtrand "github.com/cometbft/cometbft/libs/rand" "github.com/cometbft/cometbft/mempool" "github.com/cometbft/cometbft/privval" + cmtstore "github.com/cometbft/cometbft/proto/tendermint/store" cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" "github.com/cometbft/cometbft/proxy" sm "github.com/cometbft/cometbft/state" @@ -724,7 +725,7 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin }) // perform the replay protocol to sync Tendermint and the application - err = handshaker.Handshake(proxyApp) + _, err = handshaker.Handshake(proxyApp) if expectError { require.Error(t, err) // finish the test early @@ -772,7 +773,7 @@ func applyBlock(t *testing.T, stateStore sm.Store, mempool mempool.Mempool, evpo bps, err := blk.MakePartSet(testPartSize) require.NoError(t, err) blkID := types.BlockID{Hash: blk.Hash(), PartSetHeader: bps.Header()} - newState, err := blockExec.ApplyBlock(st, blkID, blk) + newState, err := blockExec.ApplyBlock(st, blkID, blk, nil) require.NoError(t, err) return newState } @@ -898,7 +899,7 @@ func makeBlocks(n int, state sm.State, privVals []types.PrivValidator) ([]*types if err != nil { return nil, err } - block := state.MakeBlock(height, test.MakeNTxs(height, 10), lastCommit, nil, state.LastValidators.Proposer.Address) + block := state.MakeBlock(height, types.MakeData(test.MakeNTxs(height, 10)), lastCommit, nil, state.LastValidators.Proposer.Address) blocks[i] = block state.LastBlockID = blockID 
state.LastBlockHeight = height @@ -953,7 +954,8 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { assert.Panics(t, func() { h := NewHandshaker(stateStore, state, store, genDoc) - if err = h.Handshake(proxyApp); err != nil { + _, err = h.Handshake(proxyApp) + if err != nil { t.Log(err) } }) @@ -977,7 +979,8 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { assert.Panics(t, func() { h := NewHandshaker(stateStore, state, store, genDoc) - if err = h.Handshake(proxyApp); err != nil { + _, err = h.Handshake(proxyApp) + if err != nil { t.Log(err) } }) @@ -1179,6 +1182,11 @@ func newMockBlockStore(t *testing.T, config *cfg.Config, params types.ConsensusP } } +func (*mockBlockStore) SaveTxInfo(block *types.Block, txResponseCodes []uint32, logs []string) error { + return nil +} +func (bs *mockBlockStore) LoadTxInfo(hash []byte) *cmtstore.TxInfo { return &cmtstore.TxInfo{} } + func (bs *mockBlockStore) Height() int64 { return int64(len(bs.chain)) } func (bs *mockBlockStore) Base() int64 { return bs.base } func (bs *mockBlockStore) Size() int64 { return bs.Height() - bs.Base() + 1 } @@ -1267,10 +1275,11 @@ func TestHandshakeUpdatesValidators(t *testing.T) { t.Error(err) } }) - if err := handshaker.Handshake(proxyApp); err != nil { + _, err := handshaker.Handshake(proxyApp) + if err != nil { t.Fatalf("Error on abci handshake: %v", err) } - var err error + // reload the state, check the validator set was updated state, err = stateStore.Load() require.NoError(t, err) diff --git a/consensus/state.go b/consensus/state.go index 41999690b33..75d43dd35c5 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -24,6 +24,8 @@ import ( cmtos "github.com/cometbft/cometbft/libs/os" "github.com/cometbft/cometbft/libs/service" cmtsync "github.com/cometbft/cometbft/libs/sync" + "github.com/cometbft/cometbft/libs/trace" + "github.com/cometbft/cometbft/libs/trace/schema" "github.com/cometbft/cometbft/p2p" cmtproto 
"github.com/cometbft/cometbft/proto/tendermint/types" sm "github.com/cometbft/cometbft/state" @@ -145,6 +147,9 @@ type State struct { // offline state sync height indicating to which height the node synced offline offlineStateSyncHeight int64 + + // traceClient is used to trace the state machine. + traceClient trace.Tracer } // StateOption sets an optional parameter on the State. @@ -175,6 +180,7 @@ func NewState( evpool: evpool, evsw: cmtevents.NewEventSwitch(), metrics: NopMetrics(), + traceClient: trace.NoOpTracer(), } for _, option := range options { option(cs) @@ -224,6 +230,11 @@ func StateMetrics(metrics *Metrics) StateOption { return func(cs *State) { cs.metrics = metrics } } +// SetTraceClient sets the remote event collector. +func SetTraceClient(ec trace.Tracer) StateOption { + return func(cs *State) { cs.traceClient = ec } +} + // OfflineStateSyncHeight indicates the height at which the node // statesync offline - before booting sets the metrics. func OfflineStateSyncHeight(height int64) StateOption { @@ -324,6 +335,8 @@ func (cs *State) OnStart() error { } } + cs.metrics.Height.Set(float64(cs.Height)) + // we need the timeoutRoutine for replay so // we don't block on the tick chan. 
// NOTE: we will get a build up of garbage go routines @@ -546,6 +559,7 @@ func (cs *State) updateRoundStep(round int32, step cstypes.RoundStepType) { } if cs.Step != step { cs.metrics.MarkStep(cs.Step) + schema.WriteRoundState(cs.traceClient, cs.Height, round, uint8(step)) } } cs.Round = round @@ -828,8 +842,10 @@ func (cs *State) receiveRoutine(maxSteps int) { cs.handleTxsAvailable() case mi = <-cs.peerMsgQueue: - if err := cs.wal.Write(mi); err != nil { - cs.Logger.Error("failed writing to WAL", "err", err) + if !cs.config.OnlyInternalWal { + if err := cs.wal.Write(mi); err != nil { + cs.Logger.Error("failed writing to WAL", "err", err) + } } // handles proposals, block parts, votes // may generate internal events (votes, complete proposals, 2/3 majorities) @@ -1358,6 +1374,8 @@ func (cs *State) defaultDoPrevote(height int64, round int32) { return } + schema.WriteABCI(cs.traceClient, schema.ProcessProposalStart, height, round) + // Validate proposal block, from consensus' perspective err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock) if err != nil { @@ -1378,6 +1396,8 @@ func (cs *State) defaultDoPrevote(height int64, round int32) { Please see `PrepareProosal`-`ProcessProposal` coherence and determinism properties in the ABCI++ specification. */ + schema.WriteABCI(cs.traceClient, schema.ProcessProposalEnd, height, round) + isAppValid, err := cs.blockExec.ProcessProposal(cs.ProposalBlock, cs.state) if err != nil { panic(fmt.Sprintf( @@ -1584,6 +1604,7 @@ func (cs *State) enterPrecommitWait(height int64, round int32) { defer func() { // Done enterPrecommitWait: cs.TriggeredTimeoutPrecommit = true + cs.updateRoundStep(round, cstypes.RoundStepPrecommitWait) cs.newStep() }() @@ -1724,10 +1745,12 @@ func (cs *State) finalizeCommit(height int64) { fail.Fail() // XXX // Save to blockStore. 
+ var seenCommit *types.Commit if cs.blockStore.Height() < block.Height { // NOTE: the seenCommit is local justification to commit this block, // but may differ from the LastCommit included in the next block seenExtendedCommit := cs.Votes.Precommits(cs.CommitRound).MakeExtendedCommit(cs.state.ConsensusParams.ABCI) + seenCommit = seenExtendedCommit.ToCommit() if cs.state.ConsensusParams.ABCI.VoteExtensionsEnabled(block.Height) { cs.blockStore.SaveBlockWithExtendedCommit(block, blockParts, seenExtendedCommit) } else { @@ -1766,6 +1789,8 @@ func (cs *State) finalizeCommit(height int64) { // Create a copy of the state for staging and an event cache for txs. stateCopy := cs.state.Copy() + schema.WriteABCI(cs.traceClient, schema.CommitStart, height, 0) + // Execute and commit the block, update and save the state, and update the mempool. // We use apply verified block here because we have verified the block in this function already. // NOTE The block.AppHash won't reflect these txs until the next block. 
@@ -1776,11 +1801,14 @@ func (cs *State) finalizeCommit(height int64) { PartSetHeader: blockParts.Header(), }, block, + seenCommit, ) if err != nil { panic(fmt.Sprintf("failed to apply block; error %v", err)) } + schema.WriteABCI(cs.traceClient, schema.CommitEnd, height, 0) + fail.Fail() // XXX // must be called before we update state @@ -1882,12 +1910,17 @@ func (cs *State) recordMetrics(height int64, block *types.Block) { if height > 1 { lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1) if lastBlockMeta != nil { - cs.metrics.BlockIntervalSeconds.Observe( - block.Time.Sub(lastBlockMeta.Header.Time).Seconds(), - ) + elapsedTime := block.Time.Sub(lastBlockMeta.Header.Time).Seconds() + cs.metrics.BlockIntervalSeconds.Observe(elapsedTime) + cs.metrics.BlockTimeSeconds.Set(elapsedTime) } } + blockSize := block.Size() + + // trace some metadata about the block + schema.WriteBlockSummary(cs.traceClient, block, blockSize) + cs.metrics.NumTxs.Set(float64(len(block.Data.Txs))) cs.metrics.TotalTxs.Add(float64(len(block.Data.Txs))) cs.metrics.BlockSizeBytes.Set(float64(block.Size())) diff --git a/consensus/state_test.go b/consensus/state_test.go index 867c45d71ae..91c8c7d781c 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -2598,7 +2598,7 @@ func findBlockSizeLimit(t *testing.T, height, maxBytes int64, cs *State, partSiz for i := softMaxDataBytes; i < softMaxDataBytes*2; i++ { propBlock := cs.state.MakeBlock( height, - []types.Tx{[]byte("a=" + strings.Repeat("o", i-2))}, + types.MakeData([]types.Tx{[]byte("a=" + strings.Repeat("o", i-2))}), &types.Commit{}, nil, cs.privValidatorPubKey.Address(), diff --git a/crypto/merkle/proof_op.go b/crypto/merkle/proof_op.go index 62820303503..189242f4a9d 100644 --- a/crypto/merkle/proof_op.go +++ b/crypto/merkle/proof_op.go @@ -68,6 +68,36 @@ func (poz ProofOperators) Verify(root []byte, keypath string, args [][]byte) (er return nil } +// VerifyFromKeys performs the same verification logic as the normal Verify 
+// method, except it does not perform any processing on the keypath. This is +// useful when using keys that have split or escape points as a part of the key. +func (poz ProofOperators) VerifyFromKeys(root []byte, keys [][]byte, args [][]byte) (err error) { + for i, op := range poz { + key := op.GetKey() + if len(key) != 0 { + if len(keys) == 0 { + return fmt.Errorf("key path has insufficient # of parts: expected no more keys but got %+v", string(key)) + } + lastKey := keys[len(keys)-1] + if !bytes.Equal(lastKey, key) { + return fmt.Errorf("key mismatch on operation #%d: expected %+v but got %+v", i, string(lastKey), string(key)) + } + keys = keys[:len(keys)-1] + } + args, err = op.Run(args) + if err != nil { + return err + } + } + if !bytes.Equal(root, args[0]) { + return fmt.Errorf("calculated root hash is invalid: expected %X but got %X", root, args[0]) + } + if len(keys) != 0 { + return fmt.Errorf("keypath not consumed all: %s", string(bytes.Join(keys, []byte("/")))) + } + return nil +} + //---------------------------------------- // ProofRuntime - main entrypoint @@ -115,6 +145,10 @@ func (prt *ProofRuntime) VerifyValue(proof *cmtcrypto.ProofOps, root []byte, key return prt.Verify(proof, root, keypath, [][]byte{value}) } +func (prt *ProofRuntime) VerifyValueFromKeys(proof *cmtcrypto.ProofOps, root []byte, keys [][]byte, value []byte) (err error) { + return prt.VerifyFromKeys(proof, root, keys, [][]byte{value}) +} + // TODO In the long run we'll need a method of classifcation of ops, // whether existence or absence or perhaps a third? func (prt *ProofRuntime) VerifyAbsence(proof *cmtcrypto.ProofOps, root []byte, keypath string) (err error) { @@ -137,3 +171,14 @@ func DefaultProofRuntime() (prt *ProofRuntime) { prt.RegisterOpDecoder(ProofOpValue, ValueOpDecoder) return } + +// VerifyFromKeys performs the same verification logic as the normal Verify +// method, except it does not perform any processing on the keypath. 
This is +// useful when using keys that have split or escape points as a part of the key. +func (prt *ProofRuntime) VerifyFromKeys(proof *cmtcrypto.ProofOps, root []byte, keys [][]byte, args [][]byte) (err error) { + poz, err := prt.DecodeProof(proof) + if err != nil { + return fmt.Errorf("decoding proof: %w", err) + } + return poz.VerifyFromKeys(root, keys, args) +} diff --git a/crypto/merkle/proof_test.go b/crypto/merkle/proof_test.go index f307380aad6..667c20b6876 100644 --- a/crypto/merkle/proof_test.go +++ b/crypto/merkle/proof_test.go @@ -223,3 +223,67 @@ func TestVsa2022_100(t *testing.T) { assert.NotNil(t, ProofOperators{op}.Verify(root, "/"+string(key), [][]byte{value})) } + +func TestProofOperatorsFromKeys(t *testing.T) { + var err error + + // ProofRuntime setup + // TODO test this somehow. + + // ProofOperators setup + op1 := NewDominoOp("KEY1", "INPUT1", "INPUT2") + op2 := NewDominoOp("KEY%2", "INPUT2", "INPUT3") + op3 := NewDominoOp("", "INPUT3", "INPUT4") + op4 := NewDominoOp("KEY/4", "INPUT4", "OUTPUT4") + + // add characters to the keys that would otherwise result in bad keypath if + // processed + keys1 := [][]byte{bz("KEY/4"), bz("KEY%2"), bz("KEY1")} + badkeys1 := [][]byte{bz("WrongKey"), bz("KEY%2"), bz("KEY1")} + keys2 := [][]byte{bz("KEY3"), bz("KEY%2"), bz("KEY1")} + keys3 := [][]byte{bz("KEY2"), bz("KEY1")} + + // Good + popz := ProofOperators([]ProofOperator{op1, op2, op3, op4}) + err = popz.VerifyFromKeys(bz("OUTPUT4"), keys1, [][]byte{bz("INPUT1")}) + assert.NoError(t, err) + + // BAD INPUT + err = popz.VerifyFromKeys(bz("OUTPUT4"), keys1, [][]byte{bz("INPUT1_WRONG")}) + assert.Error(t, err) + + // BAD KEY 1 + err = popz.VerifyFromKeys(bz("OUTPUT4"), keys2, [][]byte{bz("INPUT1")}) + assert.Error(t, err) + + // BAD KEY 2 + err = popz.VerifyFromKeys(bz("OUTPUT4"), badkeys1, [][]byte{bz("INPUT1")}) + assert.Error(t, err) + + // BAD KEY 5 + err = popz.VerifyFromKeys(bz("OUTPUT4"), keys3, [][]byte{bz("INPUT1")}) + assert.Error(t, err) + + // 
BAD OUTPUT 1 + err = popz.VerifyFromKeys(bz("OUTPUT4_WRONG"), keys1, [][]byte{bz("INPUT1")}) + assert.Error(t, err) + + // BAD OUTPUT 2 + err = popz.VerifyFromKeys(bz(""), keys1, [][]byte{bz("INPUT1")}) + assert.Error(t, err) + + // BAD POPZ 1 + popz = []ProofOperator{op1, op2, op4} + err = popz.VerifyFromKeys(bz("OUTPUT4"), keys1, [][]byte{bz("INPUT1")}) + assert.Error(t, err) + + // BAD POPZ 2 + popz = []ProofOperator{op4, op3, op2, op1} + err = popz.VerifyFromKeys(bz("OUTPUT4"), keys1, [][]byte{bz("INPUT1")}) + assert.Error(t, err) + + // BAD POPZ 3 + popz = []ProofOperator{} + err = popz.VerifyFromKeys(bz("OUTPUT4"), keys1, [][]byte{bz("INPUT1")}) + assert.Error(t, err) +} diff --git a/evidence/pool_test.go b/evidence/pool_test.go index 2b8a8e886ad..761351c2938 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -197,7 +197,7 @@ func TestEvidencePoolUpdate(t *testing.T) { val, evidenceChainID) require.NoError(t, err) lastExtCommit := makeExtCommit(height, val.PrivKey.PubKey().Address()) - block := types.MakeBlock(height+1, []types.Tx{}, lastExtCommit.ToCommit(), []types.Evidence{ev}) + block := types.MakeBlock(height+1, types.MakeData([]types.Tx{}), lastExtCommit.ToCommit(), []types.Evidence{ev}) // update state (partially) state.LastBlockHeight = height + 1 state.LastBlockTime = defaultEvidenceTime.Add(22 * time.Minute) @@ -413,7 +413,7 @@ func initializeBlockStore(db dbm.DB, state sm.State, valAddr []byte) (*store.Blo for i := int64(1); i <= state.LastBlockHeight; i++ { lastCommit := makeExtCommit(i-1, valAddr) - block := state.MakeBlock(i, test.MakeNTxs(i, 1), lastCommit.ToCommit(), nil, state.Validators.Proposer.Address) + block := state.MakeBlock(i, types.MakeData(test.MakeNTxs(i, 1)), lastCommit.ToCommit(), nil, state.Validators.Proposer.Address) block.Header.Time = defaultEvidenceTime.Add(time.Duration(i) * time.Minute) block.Header.Version = cmtversion.Consensus{Block: version.BlockProtocol, App: 1} partSet, err := 
block.MakePartSet(types.BlockPartSizeBytes) diff --git a/evidence/reactor.go b/evidence/reactor.go index 10d3e53111b..b86f37e46a7 100644 --- a/evidence/reactor.go +++ b/evidence/reactor.go @@ -25,6 +25,8 @@ const ( broadcastEvidenceIntervalS = 10 // If a message fails wait this much before sending it again peerRetryMessageIntervalMS = 100 + // ReactorIncomingMessageQueueSize the size of the reactor's message queue. + ReactorIncomingMessageQueueSize = 1 ) // Reactor handles evpool evidence broadcasting amongst peers. @@ -39,7 +41,7 @@ func NewReactor(evpool *Pool) *Reactor { evR := &Reactor{ evpool: evpool, } - evR.BaseReactor = *p2p.NewBaseReactor("Evidence", evR) + evR.BaseReactor = *p2p.NewBaseReactor("Evidence", evR, p2p.WithIncomingQueueSize(ReactorIncomingMessageQueueSize)) return evR } @@ -252,3 +254,7 @@ func evidenceListFromProto(m proto.Message) ([]types.Evidence, error) { return evis, nil } + +func (evR *Reactor) OnStop() { + evR.BaseReactor.OnStop() +} diff --git a/evidence/reactor_test.go b/evidence/reactor_test.go index 620c8fa7d5e..43dcf30e63c 100644 --- a/evidence/reactor_test.go +++ b/evidence/reactor_test.go @@ -224,6 +224,7 @@ func TestReactorBroadcastEvidenceMemoryLeak(t *testing.T) { r.AddPeer(p) _ = sendEvidence(t, pool, val, 2) + r.OnStop() } // evidenceLogger is a TestingLogger which uses a different diff --git a/go.mod b/go.mod index fb9382e5038..52c6fb889ee 100644 --- a/go.mod +++ b/go.mod @@ -1,23 +1,40 @@ module github.com/cometbft/cometbft -go 1.22.11 +go 1.23 + +toolchain go1.23.6 require ( github.com/BurntSushi/toml v1.4.0 + github.com/Masterminds/semver/v3 v3.3.1 github.com/adlio/schema v1.3.6 + github.com/aws/aws-sdk-go-v2 v1.36.1 + github.com/aws/aws-sdk-go-v2/config v1.29.6 + github.com/aws/aws-sdk-go-v2/credentials v1.17.59 + github.com/aws/aws-sdk-go-v2/service/s3 v1.76.1 github.com/btcsuite/btcd/btcutil v1.1.6 + github.com/celestiaorg/nmt v0.23.0 github.com/cenkalti/backoff v2.2.1+incompatible // indirect + 
github.com/cometbft/cometbft-db v0.14.1 + github.com/cosmos/gogoproto v1.7.0 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 github.com/fortytw2/leaktest v1.3.0 + github.com/go-git/go-git/v5 v5.13.2 github.com/go-kit/kit v0.13.0 github.com/go-kit/log v0.2.1 github.com/go-logfmt/logfmt v0.6.0 + github.com/gofrs/uuid v4.4.0+incompatible github.com/golang/protobuf v1.5.4 github.com/google/orderedcode v0.0.1 + github.com/google/uuid v1.6.0 github.com/gorilla/websocket v1.5.3 + github.com/grafana/otel-profiling-go v0.5.1 + github.com/grafana/pyroscope-go v1.2.0 + github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/informalsystems/tm-load-test v1.3.0 github.com/lib/pq v1.10.9 github.com/minio/highwayhash v1.0.3 + github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae github.com/ory/dockertest v3.3.5+incompatible github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.20.5 @@ -30,24 +47,15 @@ require ( github.com/spf13/cobra v1.8.1 github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.10.0 + github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 + go.opentelemetry.io/otel v1.34.0 + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.34.0 + go.opentelemetry.io/otel/sdk v1.34.0 golang.org/x/crypto v0.32.0 golang.org/x/net v0.34.0 - google.golang.org/grpc v1.70.0 -) - -require github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 - -require ( - github.com/Masterminds/semver/v3 v3.3.1 - github.com/cometbft/cometbft-db v0.14.1 - github.com/cosmos/gogoproto v1.7.0 - github.com/go-git/go-git/v5 v5.13.2 - github.com/gofrs/uuid v4.4.0+incompatible - github.com/google/uuid v1.6.0 - github.com/hashicorp/golang-lru/v2 v2.0.7 - github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae golang.org/x/sync v0.10.0 gonum.org/v1/gonum v0.15.1 + google.golang.org/grpc v1.70.0 google.golang.org/protobuf v1.36.4 ) @@ -58,6 +66,20 @@ require ( github.com/Microsoft/go-winio v0.6.1 // indirect 
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect github.com/ProtonMail/go-crypto v1.1.5 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.32 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.15 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.14 // indirect + github.com/aws/smithy-go v1.22.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudflare/circl v1.3.7 // indirect @@ -81,6 +103,8 @@ require ( github.com/getsentry/sentry-go v0.27.0 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.6.2 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.2.3 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -89,6 +113,7 @@ require ( github.com/google/flatbuffers v1.12.1 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect + github.com/grafana/pyroscope-go/godeltaprof v0.1.8 // indirect github.com/hashicorp/hcl v1.0.0 // indirect 
github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect @@ -110,7 +135,7 @@ require ( github.com/pjbgf/sha1cd v0.3.2 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/rogpeppe/go-internal v1.13.1 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/satori/go.uuid v1.2.0 // indirect @@ -126,6 +151,9 @@ require ( github.com/xanzy/ssh-agent v0.3.3 // indirect go.etcd.io/bbolt v1.4.0-alpha.0.0.20240404170359-43604f3112c5 // indirect go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/otel/metric v1.34.0 // indirect + go.opentelemetry.io/otel/trace v1.34.0 // indirect go.uber.org/multierr v1.10.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/mod v0.19.0 // indirect diff --git a/go.sum b/go.sum index cdfcdb5444d..072abc665c2 100644 --- a/go.sum +++ b/go.sum @@ -28,6 +28,42 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/aws/aws-sdk-go-v2 v1.36.1 h1:iTDl5U6oAhkNPba0e1t1hrwAo02ZMqbrGq4k5JBWM5E= +github.com/aws/aws-sdk-go-v2 v1.36.1/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod 
h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg= +github.com/aws/aws-sdk-go-v2/config v1.29.6 h1:fqgqEKK5HaZVWLQoLiC9Q+xDlSp+1LYidp6ybGE2OGg= +github.com/aws/aws-sdk-go-v2/config v1.29.6/go.mod h1:Ft+WLODzDQmCTHDvqAH1JfC2xxbZ0MxpZAcJqmE1LTQ= +github.com/aws/aws-sdk-go-v2/credentials v1.17.59 h1:9btwmrt//Q6JcSdgJOLI98sdr5p7tssS9yAsGe8aKP4= +github.com/aws/aws-sdk-go-v2/credentials v1.17.59/go.mod h1:NM8fM6ovI3zak23UISdWidyZuI1ghNe2xjzUZAyT+08= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28 h1:KwsodFKVQTlI5EyhRSugALzsV6mG/SGrdjlMXSZSdso= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28/go.mod h1:EY3APf9MzygVhKuPXAc5H+MkGb8k/DOSQjWS0LgkKqI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32 h1:BjUcr3X3K0wZPGFg2bxOWW3VPN8rkE3/61zhP+IHviA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32/go.mod h1:80+OGC/bgzzFFTUmcuwD0lb4YutwQeKLFpmt6hoWapU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 h1:m1GeXHVMJsRsUAqG6HjZWx9dj7F5TR+cF1bjyfYyBd4= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32/go.mod h1:IitoQxGfaKdVLNg0hD8/DXmAqNy0H4K2H2Sf91ti8sI= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.32 h1:OIHj/nAhVzIXGzbAE+4XmZ8FPvro3THr6NlqErJc3wY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.32/go.mod h1:LiBEsDo34OJXqdDlRGsilhlIiXR7DL+6Cx2f4p1EgzI= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.0 h1:kT2WeWcFySdYpPgyqJMSUE7781Qucjtn6wBvrgm9P+M= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.0/go.mod h1:WYH1ABybY7JK9TITPnk6ZlP7gQB8psI4c9qDmMsnLSA= 
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13 h1:SYVGSFQHlchIcy6e7x12bsrxClCXSP5et8cqVhL8cuw= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13/go.mod h1:kizuDaLX37bG5WZaoxGPQR/LNFXpxp0vsUnqfkWXfNE= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13 h1:OBsrtam3rk8NfBEq7OLOMm5HtQ9Yyw32X4UQMya/wjw= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13/go.mod h1:3U4gFA5pmoCOja7aq4nSaIAGbaOHv2Yl2ug018cmC+Q= +github.com/aws/aws-sdk-go-v2/service/s3 v1.76.1 h1:d4ZG8mELlLeUWFBMCqPtRfEP3J6aQgg/KTC9jLSlkMs= +github.com/aws/aws-sdk-go-v2/service/s3 v1.76.1/go.mod h1:uZoEIR6PzGOZEjgAZE4hfYfsqK2zOHhq68JLKEvvXj4= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.15 h1:/eE3DogBjYlvlbhd2ssWyeuovWunHLxfgw3s/OJa4GQ= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.15/go.mod h1:2PCJYpi7EKeA5SkStAmZlF6fi0uUABuhtF8ILHjGc3Y= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14 h1:M/zwXiL2iXUrHputuXgmO94TVNmcenPHxgLXLutodKE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14/go.mod h1:RVwIw3y/IqxC2YEXSIkAzRDdEU1iRabDPaYjpGCbCGQ= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.14 h1:TzeR06UCMUq+KA3bDkujxK1GVGy+G8qQN/QVYzGLkQE= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.14/go.mod h1:dspXf/oYWGWo6DEvj98wpaTeqt5+DMidZD0A9BYTizc= +github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= +github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= @@ -53,6 +89,8 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod 
h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/celestiaorg/nmt v0.23.0 h1:cfYy//hL1HeDSH0ub3CPlJuox5U5xzgg4JGZrw23I/I= +github.com/celestiaorg/nmt v0.23.0/go.mod h1:kYfIjRq5rmA2mJnv41GLWkxn5KyLNPlma3v5Q68rHdI= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= @@ -156,6 +194,8 @@ github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -203,6 +243,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= 
github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= @@ -215,6 +257,12 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= +github.com/grafana/otel-profiling-go v0.5.1 h1:stVPKAFZSa7eGiqbYuG25VcqYksR6iWvF3YH66t4qL8= +github.com/grafana/otel-profiling-go v0.5.1/go.mod h1:ftN/t5A/4gQI19/8MoWurBEtC6gFw8Dns1sJZ9W4Tls= +github.com/grafana/pyroscope-go v1.2.0 h1:aILLKjTj8CS8f/24OPMGPewQSYlhmdQMBmol1d3KGj8= +github.com/grafana/pyroscope-go v1.2.0/go.mod h1:2GHr28Nr05bg2pElS+dDsc98f3JTUh2f6Fz1hWXrqwk= +github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKtUuKQbJqgAIjlnicKg= +github.com/grafana/pyroscope-go/godeltaprof v0.1.8/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= @@ -316,8 +364,8 @@ github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoG github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= 
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -369,6 +417,12 @@ github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= @@ -383,16 +437,24 @@ go.etcd.io/bbolt v1.4.0-alpha.0.0.20240404170359-43604f3112c5 h1:qxen9oVGzDdIRP6 go.etcd.io/bbolt v1.4.0-alpha.0.0.20240404170359-43604f3112c5/go.mod h1:eW0HG9/oHQhvRCvb1/pIXW4cOvtDqeQK+XSi3TnwaXY= go.opencensus.io 
v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.34.0 h1:jBpDk4HAUsrnVO1FsfCfCOTEc/MkInJmvfCHYLFiT80= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.34.0/go.mod h1:H9LUIM1daaeZaz91vZcfeM0fejXPmgCYE8ZhzqfJuiU= +go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= 
go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -455,6 +517,7 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git a/libs/bytes/bytes.go b/libs/bytes/bytes.go index 621016f8513..c03c44f4d7b 100644 --- a/libs/bytes/bytes.go +++ b/libs/bytes/bytes.go @@ -1,6 +1,7 @@ package bytes import ( + "encoding/binary" "encoding/hex" "fmt" "strings" @@ -30,6 +31,14 @@ func (bz HexBytes) MarshalJSON() ([]byte, error) { return jbz, nil } +func (bz HexBytes) MarshalDelimited() ([]byte, error) { + lenBuf := make([]byte, binary.MaxVarintLen64) + 
length := uint64(len(bz)) + n := binary.PutUvarint(lenBuf, length) + + return append(lenBuf[:n], bz...), nil +} + // This is the point of Bytes. func (bz *HexBytes) UnmarshalJSON(data []byte) error { if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { @@ -63,3 +72,7 @@ func (bz HexBytes) Format(s fmt.State, verb rune) { s.Write([]byte(fmt.Sprintf("%X", []byte(bz)))) //nolint: errcheck } } + +func FromBytes(b []byte) []HexBytes { + return []HexBytes{b} +} diff --git a/libs/consts/consts.go b/libs/consts/consts.go new file mode 100644 index 00000000000..2f6996862d6 --- /dev/null +++ b/libs/consts/consts.go @@ -0,0 +1,47 @@ +package consts + +import ( + "crypto/sha256" +) + +const ( + // TxInclusionProofQueryPath is the path used to query the application for a + // tx inclusion proof via the ABCI Query method. The desired transaction + // index must be formatted into the path. + TxInclusionProofQueryPath = "custom/txInclusionProof/%d" + + // ShareInclusionProofQueryPath is the path used to query the application for the + // shares to data root inclusion proofs via the ABCI query method. + ShareInclusionProofQueryPath = "custom/shareInclusionProof/%d/%d" + + // ProtoBlobTxTypeID is included in each encoded BlobTx to help prevent + // decoding binaries that are not actually BlobTxs. + ProtoBlobTxTypeID = "BLOB" + + // ProtoIndexWrapperTypeID is included in each encoded IndexWrapper to help prevent + // decoding binaries that are not actually IndexWrappers. + ProtoIndexWrapperTypeID = "INDX" + + // NamespaceVersionSize is the size of a namespace version in bytes. + NamespaceVersionSize = 1 + + // NamespaceIDSize is the size of a namespace ID in bytes. + NamespaceIDSize = 28 + + // NamespaceSize is the size of a namespace in bytes. + NamespaceSize = NamespaceIDSize + NamespaceVersionSize +) + +var ( + // TxNamespaceID is the namespace ID reserved for transaction data. It does + // not contain a leading version byte.
+ TxNamespaceID = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1} + + // NewBaseHashFunc change accordingly if another hash.Hash should be used as a base hasher in the NMT. + NewBaseHashFunc = sha256.New + + // DataCommitmentBlocksLimit is the limit to the number of blocks we can generate a data commitment for. + // Deprecated: this is no longer used as we're moving towards Blobstream X. However, we're leaving it + // here for backwards compatibility purpose until it's removed in the next breaking release. + DataCommitmentBlocksLimit = 1000 +) diff --git a/libs/trace/buffered_file.go b/libs/trace/buffered_file.go new file mode 100644 index 00000000000..9b228e3f9e6 --- /dev/null +++ b/libs/trace/buffered_file.go @@ -0,0 +1,101 @@ +package trace + +import ( + "bufio" + "errors" + "io" + "os" + "sync" + "sync/atomic" +) + +// bufferedFile is a file that is being written to and read from. It is thread +// safe, however, when reading from the file, writes will be ignored. +type bufferedFile struct { + // reading protects the file from being written to while it is being read + // from. This is needed beyond in addition to the mutex so that writes can + // be ignored while reading. + reading atomic.Bool + + // mut protects the buffered writer. + mut *sync.Mutex + + // file is the file that is being written to. + file *os.File + + // writer is the buffered writer that is writing to the file. + wr *bufio.Writer +} + +// newbufferedFile creates a new buffered file that writes to the given file. +func newbufferedFile(file *os.File) *bufferedFile { + return &bufferedFile{ + file: file, + wr: bufio.NewWriter(file), + reading: atomic.Bool{}, + mut: &sync.Mutex{}, + } +} + +// Write writes the given bytes to the file. If the file is currently being read +// from, the write will be lost. 
+func (f *bufferedFile) Write(b []byte) (int, error) { + if f.reading.Load() { + return 0, nil + } + f.mut.Lock() + defer f.mut.Unlock() + return f.wr.Write(b) +} + +func (f *bufferedFile) startReading() error { + f.reading.Store(true) + f.mut.Lock() + defer f.mut.Unlock() + + err := f.wr.Flush() + if err != nil { + f.reading.Store(false) + return err + } + + _, err = f.file.Seek(0, io.SeekStart) + if err != nil { + f.reading.Store(false) + return err + } + + return nil +} + +func (f *bufferedFile) stopReading() error { + f.mut.Lock() + defer f.mut.Unlock() + _, err := f.file.Seek(0, io.SeekEnd) + f.reading.Store(false) + return err +} + +// File returns the underlying file with the seek point reset. The caller should +// not close the file. The caller must call the returned function when they are +// done reading from the file. This function resets the seek point to where it +// was being written to. +func (f *bufferedFile) File() (*os.File, func() error, error) { + if f.reading.Load() { + return nil, func() error { return nil }, errors.New("file is currently being read from") + } + err := f.startReading() + if err != nil { + return nil, func() error { return nil }, err + } + return f.file, f.stopReading, nil +} + +// Close closes the file. +func (f *bufferedFile) Close() error { + // set reading to true to prevent writes while closing the file. + f.mut.Lock() + defer f.mut.Unlock() + f.reading.Store(true) + return f.file.Close() +} diff --git a/libs/trace/decoder.go b/libs/trace/decoder.go new file mode 100644 index 00000000000..abf24f40061 --- /dev/null +++ b/libs/trace/decoder.go @@ -0,0 +1,34 @@ +package trace + +import ( + "bufio" + "encoding/json" + "io" + "os" +) + +// DecodeFile reads a file and decodes it into a slice of events via +// scanning. The table parameter is used to determine the type of the events. +// The file should be a jsonl file. The generic here are passed to the event +// type. 
+func DecodeFile[T any](f *os.File) ([]Event[T], error) { + var out []Event[T] + r := bufio.NewReader(f) + for { + line, err := r.ReadString('\n') + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + + var e Event[T] + if err := json.Unmarshal([]byte(line), &e); err != nil { + return nil, err + } + + out = append(out, e) + } + + return out, nil +} diff --git a/libs/trace/doc.go b/libs/trace/doc.go new file mode 100644 index 00000000000..27cf777c20b --- /dev/null +++ b/libs/trace/doc.go @@ -0,0 +1,2 @@ +/**/ +package trace diff --git a/libs/trace/fileserver.go b/libs/trace/fileserver.go new file mode 100644 index 00000000000..3e19dffe2b2 --- /dev/null +++ b/libs/trace/fileserver.go @@ -0,0 +1,346 @@ +package trace + +import ( + "bufio" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "math/rand" + "mime" + "mime/multipart" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/s3" +) + +const jsonL = ".jsonl" + +func (lt *LocalTracer) getTableHandler() http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // Parse the request to get the data + if err := r.ParseForm(); err != nil { + http.Error(w, "Failed to parse form", http.StatusBadRequest) + return + } + + inputString := r.FormValue("table") + if inputString == "" { + http.Error(w, "No data provided", http.StatusBadRequest) + return + } + + f, done, err := lt.readTable(inputString) + if err != nil { + http.Error(w, fmt.Sprintf("failed to read table: %v", err), http.StatusInternalServerError) + return + } + defer done() //nolint:errcheck + + // Use the pump function to continuously read from the file and write to + // the response writer + reader, writer := pump(inputString, bufio.NewReader(f)) + defer reader.Close() + + // Set the content type to the writer's form data 
content type + w.Header().Set("Content-Type", writer.FormDataContentType()) + + // Copy the data from the reader to the response writer + if _, err := io.Copy(w, reader); err != nil { + http.Error(w, "Failed to send data", http.StatusInternalServerError) + return + } + } +} + +// pump continuously reads from a bufio.Reader and writes to a multipart.Writer. +// It returns the reader end of the pipe and the writer for consumption by the +// server. +func pump(table string, br *bufio.Reader) (*io.PipeReader, *multipart.Writer) { + r, w := io.Pipe() + m := multipart.NewWriter(w) + + go func( + table string, + m *multipart.Writer, + w *io.PipeWriter, + br *bufio.Reader, + ) { + defer w.Close() + defer m.Close() + + part, err := m.CreateFormFile("filename", table+jsonL) + if err != nil { + return + } + + if _, err = io.Copy(part, br); err != nil { + return + } + + }(table, m, w, br) + + return r, m +} + +func (lt *LocalTracer) servePullData() { + mux := http.NewServeMux() + mux.HandleFunc("/get_table", lt.getTableHandler()) + err := http.ListenAndServe(lt.cfg.Instrumentation.TracePullAddress, mux) //nolint:gosec + if err != nil { + lt.logger.Error("trace pull server failure", "err", err) + } + lt.logger.Info("trace pull server started", "address", lt.cfg.Instrumentation.TracePullAddress) +} + +// GetTable downloads a table from the server and saves it to the given directory. It uses a multipart +// response to download the file. 
+func GetTable(serverURL, table, dirPath string) error { + data := url.Values{} + data.Set("table", table) + + serverURL += "/get_table" + + resp, err := http.PostForm(serverURL, data) //nolint:gosec + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("unexpected status code: %d", resp.StatusCode) + } + + _, params, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) + if err != nil { + return err + } + + boundary, ok := params["boundary"] + if !ok { + return errors.New("not a multipart response") + } + + err = os.MkdirAll(dirPath, 0o755) + if err != nil { + return err + } + + outputFile, err := os.Create(path.Join(dirPath, table+jsonL)) + if err != nil { + return err + } + defer outputFile.Close() + + reader := multipart.NewReader(resp.Body, boundary) + + for { + part, err := reader.NextPart() + if err == io.EOF { + break // End of multipart + } + if err != nil { + return err + } + + contentDisposition, params, err := mime.ParseMediaType(part.Header.Get("Content-Disposition")) + if err != nil { + return err + } + + if contentDisposition == "form-data" && params["filename"] != "" { + _, err = io.Copy(outputFile, part) + if err != nil { + return err + } + } + + part.Close() + } + + return nil +} + +// S3Config is a struct that holds the configuration for an S3 bucket. +type S3Config struct { + BucketName string `json:"bucket_name"` + Region string `json:"region"` + AccessKey string `json:"access_key"` + SecretKey string `json:"secret_key"` + // PushDelay is the time in seconds to wait before pushing the file to S3. + // If this is 0, the default is used. + PushDelay int64 `json:"push_delay"` +} + +// readS3Config reads an S3Config from a file in the given directory.
+func readS3Config(dir string) (S3Config, error) { + cfg := S3Config{} + f, err := os.Open(filepath.Join(dir, "s3.json")) + if errors.Is(err, os.ErrNotExist) { + return cfg, nil + } + if err != nil { + return cfg, err + } + defer f.Close() + err = json.NewDecoder(f).Decode(&cfg) + if cfg.PushDelay == 0 { + cfg.PushDelay = 60 + } + return cfg, err +} + +// PushS3 pushes a file to an S3 bucket using the given S3Config. It uses the +// chainID and the nodeID to organize the files in the bucket. The directory +// structure is chainID/nodeID/table.jsonl . +func PushS3(chainID, nodeID string, s3cfg S3Config, f *os.File) error { + cfg, err := config.LoadDefaultConfig(context.TODO(), + config.WithRegion(s3cfg.Region), + config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(s3cfg.AccessKey, s3cfg.SecretKey, "")), + config.WithHTTPClient(&http.Client{ + Timeout: time.Duration(15) * time.Second, + }), + ) + if err != nil { + return err + } + + s3Svc := s3.NewFromConfig(cfg) + + key := fmt.Sprintf("%s/%s/%s", chainID, nodeID, filepath.Base(f.Name())) + + _, err = s3Svc.PutObject(context.Background(), &s3.PutObjectInput{ + Bucket: aws.String(s3cfg.BucketName), + Key: aws.String(key), + Body: f, + }) + + return err +} + +func (lt *LocalTracer) pushLoop() { + for { + time.Sleep(time.Second * time.Duration(lt.s3Config.PushDelay)) + err := lt.PushAll() + if err != nil { + lt.logger.Error("failed to push tables", "error", err) + } + } +} + +func (lt *LocalTracer) PushAll() error { + for table := range lt.fileMap { + f, done, err := lt.readTable(table) + if err != nil { + return err + } + for i := 0; i < 3; i++ { + err = PushS3(lt.chainID, lt.nodeID, lt.s3Config, f) + if err == nil { + break + } + lt.logger.Error("failed to push table", "table", table, "error", err) + time.Sleep(time.Second * time.Duration(rand.Intn(3))) //nolint:gosec + } + err = done() + if err != nil { + return err + } + } + return nil +} + +// S3Download downloads files that match some prefix 
from an S3 bucket to a +// local directory dst. +// fileNames is a list of traced jsonl file names to download. If it is empty, all traces are downloaded. +// fileNames should not have .jsonl suffix. +func S3Download(dst, prefix string, cfg S3Config, fileNames ...string) error { + // Ensure local directory structure exists + err := os.MkdirAll(dst, os.ModePerm) + if err != nil { + return err + } + + awscfg, err2 := config.LoadDefaultConfig(context.TODO(), + config.WithRegion(cfg.Region), + config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(cfg.AccessKey, cfg.SecretKey, "")), + ) + if err2 != nil { + return err2 + } + + s3Svc := s3.NewFromConfig(awscfg) + input := &s3.ListObjectsV2Input{ + Bucket: aws.String(cfg.BucketName), + Prefix: aws.String(prefix), + Delimiter: aws.String(""), + } + + totalObjects := 0 + paginator := s3.NewListObjectsV2Paginator(s3Svc, input) + for paginator.HasMorePages() { + output, err := paginator.NextPage(context.TODO()) + if err != nil { + return err + } + for _, content := range output.Contents { + key := *content.Key + + // If no fileNames are specified, download all files + if len(fileNames) == 0 { + fileNames = append(fileNames, strings.TrimPrefix(key, prefix)) + } + + for _, filename := range fileNames { + // Add .jsonl suffix to the fileNames + fullFilename := filename + jsonL + if strings.HasSuffix(key, fullFilename) { + localFilePath := filepath.Join(dst, prefix, strings.TrimPrefix(key, prefix)) + fmt.Printf("Downloading %s to %s\n", key, localFilePath) + + // Create the directories in the path + if err := os.MkdirAll(filepath.Dir(localFilePath), os.ModePerm); err != nil { + return err + } + + // Create a file to write the S3 Object contents to.
+ f, err := os.Create(localFilePath) + if err != nil { + return err + } + + resp, err := s3Svc.GetObject(context.Background(), &s3.GetObjectInput{ + Bucket: aws.String(cfg.BucketName), + Key: aws.String(key), + }) + if err != nil { + f.Close() + continue + } + defer resp.Body.Close() + + // Copy the contents of the S3 object to the local file + if _, err := io.Copy(f, resp.Body); err != nil { + f.Close() + return err + } + + fmt.Printf("Successfully downloaded %s to %s\n", key, localFilePath) + f.Close() + } + } + } + totalObjects += len(output.Contents) + } + return nil +} diff --git a/libs/trace/flags.go b/libs/trace/flags.go new file mode 100644 index 00000000000..6f17eebd27e --- /dev/null +++ b/libs/trace/flags.go @@ -0,0 +1,13 @@ +package trace + +const ( + FlagTracePushConfig = "trace-push-url" + FlagTracePullAddress = "trace-pull-address" + FlagTracePushConfigDescription = "URL of the trace push server" + FlagTracePullAddressDescription = "address to listen on for pulling trace data" + + FlagPyroscopeURL = "pyroscope-url" + FlagPyroscopeURLDescription = "URL of the Pyroscope instance to use for continuous profiling.
If not specified, profiling will not be enabled" + FlagPyroscopeTrace = "pyroscope-trace" + FlagPyroscopeTraceDescription = "enable adding trace data to pyroscope profiling" +) diff --git a/libs/trace/local_tracer.go b/libs/trace/local_tracer.go new file mode 100644 index 00000000000..1d7c6f2bb48 --- /dev/null +++ b/libs/trace/local_tracer.go @@ -0,0 +1,241 @@ +package trace + +import ( + "encoding/json" + "fmt" + "os" + "path" + "strconv" + "strings" + "time" + + "github.com/cometbft/cometbft/config" + "github.com/cometbft/cometbft/libs/log" +) + +const ( + PushBucketName = "TRACE_PUSH_BUCKET_NAME" + PushRegion = "TRACE_PUSH_REGION" + PushAccessKey = "TRACE_PUSH_ACCESS_KEY" + PushKey = "TRACE_PUSH_SECRET_KEY" + PushDelay = "TRACE_PUSH_DELAY" +) + +// Event wraps some trace data with metadata that dictates the table and things +// like the chainID and nodeID. +type Event[T any] struct { + ChainID string `json:"chain_id"` + NodeID string `json:"node_id"` + Table string `json:"table"` + Timestamp time.Time `json:"timestamp"` + Msg T `json:"msg"` +} + +// NewEvent creates a new Event with the given chainID, nodeID, table, and msg. +// It adds the current time as the timestamp. +func NewEvent[T any](chainID, nodeID, table string, msg T) Event[T] { + return Event[T]{ + ChainID: chainID, + NodeID: nodeID, + Table: table, + Msg: msg, + Timestamp: time.Now(), + } +} + +// LocalTracer saves all of the events passed to the retuen channel to files +// based on their "type" (a string field in the event). Each type gets its own +// file. The internals are purposefully not *explicitly* thread safe to avoid the +// overhead of locking with each event save. Only pass events to the returned +// channel. Call CloseAll to close all open files. +type LocalTracer struct { + chainID, nodeID string + logger log.Logger + cfg *config.Config + s3Config S3Config + + // fileMap maps tables to their open files files are threadsafe, but the map + // is not. 
Therefore don't create new files after initialization to remain + // threadsafe. + fileMap map[string]*bufferedFile + // canal is a channel for all events that are being written. It acts as an + // extra buffer to avoid blocking the caller when writing to files. + canal chan Event[Entry] +} + +// NewLocalTracer creates a struct that will save all of the events passed to +// the retuen channel to files based on their "table" (a string field in the +// event). Each type gets its own file. The internal are purposefully not thread +// safe to avoid the overhead of locking with each event save. Only pass events +// to the returned channel. Call CloseAll to close all open files. Goroutine to +// save events is started in this function. +func NewLocalTracer(cfg *config.Config, logger log.Logger, chainID, nodeID string) (*LocalTracer, error) { + fm := make(map[string]*bufferedFile) + p := path.Join(cfg.RootDir, "data", "traces") + for _, table := range splitAndTrimEmpty(cfg.Instrumentation.TracingTables, ",", " ") { + fileName := fmt.Sprintf("%s/%s.jsonl", p, table) + err := os.MkdirAll(p, 0o700) + if err != nil { + return nil, fmt.Errorf("failed to create directory %s: %w", p, err) + } + file, err := os.OpenFile(fileName, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0o644) + if err != nil { + return nil, fmt.Errorf("failed to open or create file %s: %w", fileName, err) + } + fm[table] = newbufferedFile(file) + } + + lt := &LocalTracer{ + fileMap: fm, + cfg: cfg, + canal: make(chan Event[Entry], cfg.Instrumentation.TraceBufferSize), + chainID: chainID, + nodeID: nodeID, + logger: logger, + } + + go lt.drainCanal() + if cfg.Instrumentation.TracePullAddress != "" { + logger.Info("starting pull server", "address", cfg.Instrumentation.TracePullAddress) + go lt.servePullData() + } + + if cfg.Instrumentation.TracePushConfig != "" { + s3Config, err := readS3Config(path.Join(cfg.RootDir, "config", cfg.Instrumentation.TracePushConfig)) + if err != nil { + return nil, fmt.Errorf("failed to 
read s3 config: %w", err) + } + lt.s3Config = s3Config + go lt.pushLoop() + } else if s3Config, err := GetPushConfigFromEnv(); err == nil { + lt.s3Config = s3Config + go lt.pushLoop() + } + + return lt, nil +} + +// GetPushConfigFromEnv reads the required environment variables to push trace. +func GetPushConfigFromEnv() (S3Config, error) { + bucketName := os.Getenv(PushBucketName) + region := os.Getenv(PushRegion) + accessKey := os.Getenv(PushAccessKey) + secretKey := os.Getenv(PushKey) + pushDelay, err := strconv.ParseInt(os.Getenv(PushDelay), 10, 64) + if err != nil { + return S3Config{}, err + } + if bucketName == "" || region == "" || accessKey == "" || secretKey == "" { + return S3Config{}, fmt.Errorf("missing required environment variables") + } + s3Config := S3Config{ + BucketName: bucketName, + Region: region, + AccessKey: accessKey, + SecretKey: secretKey, + PushDelay: pushDelay, + } + return s3Config, nil +} + +func (lt *LocalTracer) Write(e Entry) { + if !lt.IsCollecting(e.Table()) { + return + } + lt.canal <- NewEvent(lt.chainID, lt.nodeID, e.Table(), e) +} + +// ReadTable returns a file for the given table. If the table is not being +// collected, an error is returned. The caller should not close the file. +func (lt *LocalTracer) readTable(table string) (*os.File, func() error, error) { + bf, has := lt.getFile(table) + if !has { + return nil, func() error { return nil }, fmt.Errorf("table %s not found", table) + } + + return bf.File() +} + +func (lt *LocalTracer) IsCollecting(table string) bool { + _, has := lt.getFile(table) + return has +} + +// getFile gets a file for the given type. This method is purposely +// not thread-safe to avoid the overhead of locking with each event save. +func (lt *LocalTracer) getFile(table string) (*bufferedFile, bool) { + f, has := lt.fileMap[table] + return f, has +} + +// saveEventToFile marshals an Event into JSON and appends it to a file named after the event's Type. 
+func (lt *LocalTracer) saveEventToFile(event Event[Entry]) error { + file, has := lt.getFile(event.Table) + if !has { + return fmt.Errorf("table %s not found", event.Table) + } + + eventJSON, err := json.Marshal(event) + if err != nil { + return fmt.Errorf("failed to marshal event: %v", err) + } + + if _, err := file.Write(append(eventJSON, '\n')); err != nil { + return fmt.Errorf("failed to write event to file: %v", err) + } + + return nil +} + +// draincanal takes a variadic number of channels of Event pointers and drains them into files. +func (lt *LocalTracer) drainCanal() { + // purposefully do not lock, and rely on the channel to provide sync + // actions, to avoid overhead of locking with each event save. + for ev := range lt.canal { + if err := lt.saveEventToFile(ev); err != nil { + lt.logger.Error("failed to save event to file", "error", err) + } + } +} + +// Stop optionally uploads and closes all open files. +func (lt *LocalTracer) Stop() { + if lt.s3Config.SecretKey != "" { + lt.logger.Info("pushing all tables before stopping") + err := lt.PushAll() + if err != nil { + lt.logger.Error("failed to push tables", "error", err) + } + } + + for _, file := range lt.fileMap { + err := file.Close() + if err != nil { + lt.logger.Error("failed to close file", "error", err) + } + } +} + +// splitAndTrimEmpty slices s into all subslices separated by sep and returns a +// slice of the string s with all leading and trailing Unicode code points +// contained in cutset removed. If sep is empty, SplitAndTrim splits after each +// UTF-8 sequence. First part is equivalent to strings.SplitN with a count of +// -1. also filter out empty strings, only return non-empty strings. +// +// NOTE: this is copy pasted from the config package to avoid a circular +// dependency. See the function of the same name for tests. 
+func splitAndTrimEmpty(s, sep, cutset string) []string { + if s == "" { + return []string{} + } + + spl := strings.Split(s, sep) + nonEmptyStrings := make([]string, 0, len(spl)) + for i := 0; i < len(spl); i++ { + element := strings.Trim(spl[i], cutset) + if element != "" { + nonEmptyStrings = append(nonEmptyStrings, element) + } + } + return nonEmptyStrings +} diff --git a/libs/trace/local_tracer_test.go b/libs/trace/local_tracer_test.go new file mode 100644 index 00000000000..4d9c6390436 --- /dev/null +++ b/libs/trace/local_tracer_test.go @@ -0,0 +1,185 @@ +package trace + +import ( + "fmt" + "io" + "net" + "os" + "path" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/cometbft/cometbft/config" + "github.com/cometbft/cometbft/libs/log" +) + +const ( + // testEventTable is the table name for the testEvent struct. + testEventTable = "testEvent" +) + +type testEvent struct { + City string `json:"city"` + Length int `json:"length"` +} + +func (testEvent) Table() string { + return testEventTable +} + +// TestLocalTracerReadWrite tests the local client by writing some events, +// reading them back and comparing them, writing at the same time as reading. +func TestLocalTracerReadWrite(t *testing.T) { + port, err := getFreePort() + require.NoError(t, err) + client := setupLocalTracer(t, port) + + annecy := testEvent{"Annecy", 420} + paris := testEvent{"Paris", 420} + client.Write(annecy) + client.Write(paris) + + time.Sleep(100 * time.Millisecond) + + f, done, err := client.readTable(testEventTable) + require.NoError(t, err) + + // write at the same time as reading to test thread safety this test will be + // flakey if this is not being handled correctly. Since we're reading from + // the file, we expect these write to be ignored.
+ migenees := testEvent{"Migennes", 620} + pontivy := testEvent{"Pontivy", 720} + client.Write(migenees) + client.Write(pontivy) + + // wait to ensure that the write have been processed (and ignored in this case) + time.Sleep(100 * time.Millisecond) + + events, err := DecodeFile[testEvent](f) + require.NoError(t, err) + err = done() + require.NoError(t, err) + + // even though we've written twice, we expect only the first two events to be + // be written to the file. When reading the file, all writes are ignored. + require.GreaterOrEqual(t, len(events), 2) + require.Equal(t, annecy, events[0].Msg) + require.Equal(t, paris, events[1].Msg) + + // write again to the file and read it back this time, we expect the writes + // to be written since we've called the done() function. + client.Write(migenees) + client.Write(pontivy) + + time.Sleep(100 * time.Millisecond) + + f, done, err = client.readTable(testEventTable) + require.NoError(t, err) + events, err = DecodeFile[testEvent](f) + require.NoError(t, err) + err = done() + require.NoError(t, err) + require.Len(t, events, 4) + require.Equal(t, migenees, events[2].Msg) + require.Equal(t, pontivy, events[3].Msg) +} + +// TestLocalTracerServerPull tests the pull portion of the server. +func TestLocalTracerServerPull(t *testing.T) { + port, err := getFreePort() + require.NoError(t, err) + client := setupLocalTracer(t, port) + + for i := 0; i < 5; i++ { + client.Write(testEvent{"Annecy", i}) + } + + // Wait for the server to start + time.Sleep(100 * time.Millisecond) + + // Test the server + newDir := t.TempDir() + + url := fmt.Sprintf("http://localhost:%d", port) + + // try to read a table that is not being collected. error expected. 
+ err = GetTable(url, "canal", newDir) + require.Error(t, err) + + err = GetTable(url, testEventTable, newDir) + require.NoError(t, err) + + originalFile, done, err := client.readTable(testEventTable) + require.NoError(t, err) + originalBz, err := io.ReadAll(originalFile) + require.NoError(t, err) + err = done() + require.NoError(t, err) + + path := path.Join(newDir, testEventTable+".jsonl") + downloadedFile, err := os.Open(path) + require.NoError(t, err) + defer downloadedFile.Close() + + downloadedBz, err := io.ReadAll(downloadedFile) + require.NoError(t, err) + require.Equal(t, originalBz, downloadedBz) + + _, err = downloadedFile.Seek(0, 0) // reset the seek on the file to read it again + require.NoError(t, err) + events, err := DecodeFile[testEvent](downloadedFile) + require.NoError(t, err) + require.Len(t, events, 5) + for i := 0; i < 5; i++ { + require.Equal(t, i, events[i].Msg.Length) + } +} + +// TestReadPushConfigFromEnvVars tests reading the push config from the environment variables.
+func TestReadPushConfigFromEnvVars(t *testing.T) { + t.Setenv(PushBucketName, "bucket") + t.Setenv(PushRegion, "region") + t.Setenv(PushAccessKey, "access") + t.Setenv(PushKey, "secret") + t.Setenv(PushDelay, "10") + + lt := setupLocalTracer(t, 0) + require.Equal(t, "bucket", lt.s3Config.BucketName) + require.Equal(t, "region", lt.s3Config.Region) + require.Equal(t, "access", lt.s3Config.AccessKey) + require.Equal(t, "secret", lt.s3Config.SecretKey) + require.Equal(t, int64(10), lt.s3Config.PushDelay) +} +func setupLocalTracer(t *testing.T, port int) *LocalTracer { + t.Helper() + logger := log.NewNopLogger() + cfg := config.DefaultConfig() + cfg.SetRoot(t.TempDir()) + cfg.Instrumentation.TraceBufferSize = 100 + cfg.Instrumentation.TracingTables = testEventTable + cfg.Instrumentation.TracePullAddress = fmt.Sprintf(":%d", port) + + client, err := NewLocalTracer(cfg, logger, "test_chain", "test_node") + if err != nil { + t.Fatalf("failed to create local client: %v", err) + } + + return client +} + +// getFreePort returns a free port and optionally an error. +func getFreePort() (int, error) { + a, err := net.ResolveTCPAddr("tcp", "localhost:0") + if err != nil { + return 0, err + } + + l, err := net.ListenTCP("tcp", a) + if err != nil { + return 0, err + } + defer l.Close() + return l.Addr().(*net.TCPAddr).Port, nil +} diff --git a/libs/trace/schema/consensus.go b/libs/trace/schema/consensus.go new file mode 100644 index 00000000000..fe2ad8e774d --- /dev/null +++ b/libs/trace/schema/consensus.go @@ -0,0 +1,250 @@ +package schema + +import ( + "github.com/cometbft/cometbft/libs/trace" + "github.com/cometbft/cometbft/types" +) + +// ConsensusTables returns the list of tables that are used for consensus +// tracing. +func ConsensusTables() []string { + return []string{ + RoundStateTable, + BlockPartsTable, + BlockTable, + VoteTable, + ConsensusStateTable, + ProposalTable, + } +} + +// Schema constants for the consensus round state tracing database. 
+const ( + // RoundStateTable is the name of the table that stores the consensus + // state traces. + RoundStateTable = "consensus_round_state" +) + +// RoundState describes schema for the "consensus_round_state" table. +type RoundState struct { + Height int64 `json:"height"` + Round int32 `json:"round"` + Step uint8 `json:"step"` +} + +// Table returns the table name for the RoundState struct. +func (RoundState) Table() string { + return RoundStateTable +} + +// WriteRoundState writes a tracing point for a tx using the predetermined +// schema for consensus state tracing. +func WriteRoundState(client trace.Tracer, height int64, round int32, step uint8) { + client.Write(RoundState{Height: height, Round: round, Step: step}) +} + +// Schema constants for the "consensus_block_parts" table. +const ( + // BlockPartsTable is the name of the table that stores the consensus block + // parts. + BlockPartsTable = "consensus_block_parts" +) + +// BlockPart describes schema for the "consensus_block_parts" table. +type BlockPart struct { + Height int64 `json:"height"` + Round int32 `json:"round"` + Index int32 `json:"index"` + Catchup bool `json:"catchup"` + Peer string `json:"peer"` + TransferType TransferType `json:"transfer_type"` +} + +// Table returns the table name for the BlockPart struct. +func (BlockPart) Table() string { + return BlockPartsTable +} + +// WriteBlockPart writes a tracing point for a BlockPart using the predetermined +// schema for consensus state tracing. +func WriteBlockPart( + client trace.Tracer, + height int64, + round int32, + index uint32, + catchup bool, + peer string, + transferType TransferType, +) { + // this check is redundant to what is checked during client.Write, although it + // is an optimization to avoid allocations from the map of fields. 
+ if !client.IsCollecting(BlockPartsTable) { + return + } + client.Write(BlockPart{ + Height: height, + Round: round, + //nolint:gosec + Index: int32(index), + Catchup: catchup, + Peer: peer, + TransferType: transferType, + }) +} + +// Schema constants for the consensus votes tracing database. +const ( + // VoteTable is the name of the table that stores the consensus + // voting traces. + VoteTable = "consensus_vote" +) + +// Vote describes schema for the "consensus_vote" table. +type Vote struct { + Height int64 `json:"height"` + Round int32 `json:"round"` + VoteType string `json:"vote_type"` + VoteHeight int64 `json:"vote_height"` + VoteRound int32 `json:"vote_round"` + VoteMillisecondTimestamp int64 `json:"vote_unix_millisecond_timestamp"` + ValidatorAddress string `json:"vote_validator_address"` + Peer string `json:"peer"` + TransferType TransferType `json:"transfer_type"` +} + +func (Vote) Table() string { + return VoteTable +} + +// WriteVote writes a tracing point for a vote using the predetermined +// schema for consensus vote tracing. +func WriteVote(client trace.Tracer, + height int64, // height of the current peer when it received/sent the vote + round int32, // round of the current peer when it received/sent the vote + vote *types.Vote, // vote received by the current peer + peer string, // the peer from which it received the vote or the peer to which it sent the vote + transferType TransferType, // download (received) or upload(sent) +) { + client.Write(Vote{ + Height: height, + Round: round, + VoteType: vote.Type.String(), + VoteHeight: vote.Height, + VoteRound: vote.Round, + VoteMillisecondTimestamp: vote.Timestamp.UnixMilli(), + ValidatorAddress: vote.ValidatorAddress.String(), + Peer: peer, + TransferType: transferType, + }) +} + +const ( + // BlockTable is the name of the table that stores metadata about consensus blocks. + BlockTable = "consensus_block" +) + +// BlockSummary describes schema for the "consensus_block" table. 
+type BlockSummary struct { + Height int64 `json:"height"` + UnixMillisecondTimestamp int64 `json:"unix_millisecond_timestamp"` + TxCount int `json:"tx_count"` + SquareSize uint64 `json:"square_size"` + BlockSize int `json:"block_size"` + Proposer string `json:"proposer"` + LastCommitRound int32 `json:"last_commit_round"` +} + +func (BlockSummary) Table() string { + return BlockTable +} + +// WriteBlockSummary writes a tracing point for a block using the predetermined. +func WriteBlockSummary(client trace.Tracer, block *types.Block, size int) { + client.Write(BlockSummary{ + Height: block.Height, + UnixMillisecondTimestamp: block.Time.UnixMilli(), + TxCount: len(block.Data.Txs), + SquareSize: block.SquareSize, + BlockSize: size, + Proposer: block.ProposerAddress.String(), + LastCommitRound: block.LastCommit.Round, + }) +} + +const ( + ConsensusStateTable = "consensus_state" +) + +type ConsensusStateUpdateType string + +const ( + ConsensusNewValidBlock ConsensusStateUpdateType = "new_valid_block" + ConsensusNewRoundStep ConsensusStateUpdateType = "new_round_step" + ConsensusVoteSetBits ConsensusStateUpdateType = "vote_set_bits" + ConsensusVoteSet23Prevote ConsensusStateUpdateType = "vote_set_23_prevote" + ConsensusVoteSet23Precommit ConsensusStateUpdateType = "vote_set_23_precommit" + ConsensusHasVote ConsensusStateUpdateType = "has_vote" + ConsensusPOL ConsensusStateUpdateType = "pol" +) + +type ConsensusState struct { + Height int64 `json:"height"` + Round int32 `json:"round"` + UpdateType string `json:"update_type"` + Peer string `json:"peer"` + TransferType TransferType `json:"transfer_type"` + Data []string `json:"data,omitempty"` +} + +func (ConsensusState) Table() string { + return ConsensusStateTable +} + +func WriteConsensusState( + client trace.Tracer, + height int64, + round int32, + peer string, + updateType ConsensusStateUpdateType, + transferType TransferType, + data ...string, +) { + client.Write(ConsensusState{ + Height: height, + Round: round, + 
Peer: peer, + UpdateType: string(updateType), + TransferType: transferType, + Data: data, + }) +} + +const ( + ProposalTable = "consensus_proposal" +) + +type Proposal struct { + Height int64 `json:"height"` + Round int32 `json:"round"` + PeerID string `json:"peer_id"` + TransferType TransferType `json:"transfer_type"` +} + +func (Proposal) Table() string { + return ProposalTable +} + +func WriteProposal( + client trace.Tracer, + height int64, + round int32, + peerID string, + transferType TransferType, +) { + client.Write(Proposal{ + Height: height, + Round: round, + PeerID: peerID, + TransferType: transferType, + }) +} diff --git a/libs/trace/schema/mempool.go b/libs/trace/schema/mempool.go new file mode 100644 index 00000000000..e7a8d74d0dd --- /dev/null +++ b/libs/trace/schema/mempool.go @@ -0,0 +1,100 @@ +package schema + +import ( + "github.com/cometbft/cometbft/libs/bytes" + "github.com/cometbft/cometbft/libs/trace" +) + +// MempoolTables returns the list of tables for mempool tracing. +func MempoolTables() []string { + return []string{ + MempoolTxTable, + MempoolPeerStateTable, + } +} + +// Schema constants for the mempool_tx table. +const ( + // MempoolTxTable is the tracing "measurement" (aka table) for the mempool + // that stores tracing data related to gossiping transactions. + MempoolTxTable = "mempool_tx" +) + +// MemPoolTx describes the schema for the "mempool_tx" table. +type MempoolTx struct { + TxHash string `json:"tx_hash"` + Peer string `json:"peer"` + Size int `json:"size"` + TransferType TransferType `json:"transfer_type"` +} + +// Table returns the table name for the MempoolTx struct. +func (MempoolTx) Table() string { + return MempoolTxTable +} + +// WriteMempoolTx writes a tracing point for a tx using the predetermined +// schema for mempool tracing. 
+func WriteMempoolTx(client trace.Tracer, peer string, txHash []byte, size int, transferType TransferType) { + // this check is redundant to what is checked during client.Write, although it + // is an optimization to avoid allocations from the map of fields. + if !client.IsCollecting(MempoolTxTable) { + return + } + client.Write(MempoolTx{ + TxHash: bytes.HexBytes(txHash).String(), + Peer: peer, + Size: size, + TransferType: transferType, + }) +} + +const ( + // MempoolPeerState is the tracing "measurement" (aka table) for the mempool + // that stores tracing data related to mempool state, specifically + // the gossipping of "SeenTx" and "WantTx". + MempoolPeerStateTable = "mempool_peer_state" +) + +type MempoolStateUpdateType string + +const ( + SeenTx MempoolStateUpdateType = "SeenTx" + WantTx MempoolStateUpdateType = "WantTx" + Unknown MempoolStateUpdateType = "Unknown" +) + +// MempoolPeerState describes the schema for the "mempool_peer_state" table. +type MempoolPeerState struct { + Peer string `json:"peer"` + StateUpdate MempoolStateUpdateType `json:"state_update"` + TxHash string `json:"tx_hash"` + TransferType TransferType `json:"transfer_type"` +} + +// Table returns the table name for the MempoolPeerState struct. +func (MempoolPeerState) Table() string { + return MempoolPeerStateTable +} + +// WriteMempoolPeerState writes a tracing point for the mempool state using +// the predetermined schema for mempool tracing. +func WriteMempoolPeerState( + client trace.Tracer, + peer string, + stateUpdate MempoolStateUpdateType, + txHash []byte, + transferType TransferType, +) { + // this check is redundant to what is checked during client.Write, although it + // is an optimization to avoid allocations from creating the map of fields. 
+ if !client.IsCollecting(MempoolPeerStateTable) { + return + } + client.Write(MempoolPeerState{ + Peer: peer, + StateUpdate: stateUpdate, + TransferType: transferType, + TxHash: bytes.HexBytes(txHash).String(), + }) +} diff --git a/libs/trace/schema/misc.go b/libs/trace/schema/misc.go new file mode 100644 index 00000000000..2102320e2e8 --- /dev/null +++ b/libs/trace/schema/misc.go @@ -0,0 +1,42 @@ +package schema + +import "github.com/cometbft/cometbft/libs/trace" + +const ( + ABCITable = "abci" +) + +// ABCIUpdate is an enum that represents the different types of ABCI +// trace data. +type ABCIUpdate string + +const ( + PrepareProposalStart ABCIUpdate = "prepare_proposal_start" + PrepareProposalEnd ABCIUpdate = "prepare_proposal_end" + ProcessProposalStart ABCIUpdate = "process_proposal_start" + ProcessProposalEnd ABCIUpdate = "process_proposal_end" + CommitStart ABCIUpdate = "commit_start" + CommitEnd ABCIUpdate = "commit_end" +) + +// ABCI describes schema for the "abci" table. +type ABCI struct { + TraceType string `json:"trace"` + Height int64 `json:"height"` + Round int32 `json:"round"` +} + +// Table returns the table name for the ABCI struct and fulfills the +// trace.Entry interface. +func (ABCI) Table() string { + return ABCITable +} + +// WriteABCI writes a trace for an ABCI method. +func WriteABCI(client trace.Tracer, traceType ABCIUpdate, height int64, round int32) { + client.Write(ABCI{ + TraceType: string(traceType), + Height: height, + Round: round, + }) +} diff --git a/libs/trace/schema/p2p.go b/libs/trace/schema/p2p.go new file mode 100644 index 00000000000..4ffae7bf10f --- /dev/null +++ b/libs/trace/schema/p2p.go @@ -0,0 +1,82 @@ +package schema + +import "github.com/cometbft/cometbft/libs/trace" + +// P2PTables returns the list of tables that are used for p2p tracing. 
+func P2PTables() []string { + return []string{ + PeersTable, + PendingBytesTable, + ReceivedBytesTable, + } +} + +const ( + // PeerUpdateTable is the name of the table that stores the p2p peer + // updates. + PeersTable = "peers" +) + +// P2PPeerUpdate is an enum that represents the different types of p2p +// trace data. +type P2PPeerUpdate string + +const ( + // PeerJoin is the action for when a peer is connected. + PeerJoin P2PPeerUpdate = "connect" + // PeerDisconnect is the action for when a peer is disconnected. + PeerDisconnect P2PPeerUpdate = "disconnect" +) + +// PeerUpdate describes schema for the "peer_update" table. +type PeerUpdate struct { + PeerID string `json:"peer_id"` + Action string `json:"action"` + Reason string `json:"reason"` +} + +// Table returns the table name for the PeerUpdate struct. +func (PeerUpdate) Table() string { + return PeersTable +} + +// WritePeerUpdate writes a tracing point for a peer update using the predetermined +// schema for p2p tracing. +func WritePeerUpdate(client trace.Tracer, peerID string, action P2PPeerUpdate, reason string) { + client.Write(PeerUpdate{PeerID: peerID, Action: string(action), Reason: reason}) +} + +const ( + PendingBytesTable = "pending_bytes" +) + +type PendingBytes struct { + PeerID string `json:"peer_id"` + Bytes map[byte]int `json:"bytes"` +} + +func (PendingBytes) Table() string { + return PendingBytesTable +} + +func WritePendingBytes(client trace.Tracer, peerID string, bytes map[byte]int) { + client.Write(PendingBytes{PeerID: peerID, Bytes: bytes}) +} + +const ( + ReceivedBytesTable = "received_bytes" +) + +type ReceivedBytes struct { + PeerID string `json:"peer_id"` + Channel byte `json:"channel"` + Bytes int `json:"bytes"` +} + +func (ReceivedBytes) Table() string { + return ReceivedBytesTable +} + +func WriteReceivedBytes(client trace.Tracer, peerID string, channel byte, bytes int) { + client.Write(ReceivedBytes{PeerID: peerID, Channel: channel, Bytes: bytes}) +} diff --git 
a/libs/trace/schema/schema.go b/libs/trace/schema/schema.go new file mode 100644 index 00000000000..78c2fb81407 --- /dev/null +++ b/libs/trace/schema/schema.go @@ -0,0 +1,42 @@ +package schema + +import ( + "strings" + + "github.com/cometbft/cometbft/config" +) + +func init() { + config.DefaultTracingTables = strings.Join(AllTables(), ",") +} + +func AllTables() []string { + tables := []string{} + tables = append(tables, MempoolTables()...) + tables = append(tables, ConsensusTables()...) + tables = append(tables, P2PTables()...) + tables = append(tables, ABCITable) + return tables +} + +const ( + Broadcast = "broadcast" +) + +type TransferType int + +const ( + Download TransferType = iota + Upload +) + +func (t TransferType) String() string { + switch t { + case Download: + return "download" + case Upload: + return "upload" + default: + return "unknown" + } +} diff --git a/libs/trace/schema/schema_test.go b/libs/trace/schema/schema_test.go new file mode 100644 index 00000000000..c1bffd1bc98 --- /dev/null +++ b/libs/trace/schema/schema_test.go @@ -0,0 +1,17 @@ +package schema + +// Define a test struct with various field types and json tags. +type TestStruct struct { + Name string `json:"name"` + Age int `json:"age"` + Email string `json:"email"` +} + +// Mock for a custom type with String method. +type CustomType int + +// TestStructWithCustomType includes a field with a custom type having a String method. +type TestStructWithCustomType struct { + ID int `json:"id"` + Type CustomType `json:"type"` +} diff --git a/libs/trace/tracer.go b/libs/trace/tracer.go new file mode 100644 index 00000000000..f68bfbb04df --- /dev/null +++ b/libs/trace/tracer.go @@ -0,0 +1,48 @@ +package trace + +import ( + "errors" + "os" + + "github.com/cometbft/cometbft/config" + "github.com/cometbft/cometbft/libs/log" +) + +// Entry is an interface for all structs that are used to define the schema for +// traces. +type Entry interface { + // Table defines which table the struct belongs to. 
+ Table() string +} + +// Tracer defines the methods for a client that can write and read trace data. +type Tracer interface { + Write(Entry) + IsCollecting(table string) bool + Stop() +} + +func NewTracer(cfg *config.Config, logger log.Logger, chainID, nodeID string) (Tracer, error) { + switch cfg.Instrumentation.TraceType { + case "local": + return NewLocalTracer(cfg, logger, chainID, nodeID) + case "noop": + return NoOpTracer(), nil + default: + logger.Error("unknown tracer type, using noop", "type", cfg.Instrumentation.TraceType) + return NoOpTracer(), nil + } +} + +func NoOpTracer() Tracer { + return &noOpTracer{} +} + +type noOpTracer struct{} + +func (*noOpTracer) Write(_ Entry) {} +func (*noOpTracer) ReadTable(_ string) (*os.File, error) { + return nil, errors.New("no-op tracer does not support reading") +} +func (*noOpTracer) IsCollecting(_ string) bool { return false } +func (*noOpTracer) Stop() {} diff --git a/light/detector.go b/light/detector.go index 5b742761eba..8de310447bb 100644 --- a/light/detector.go +++ b/light/detector.go @@ -26,7 +26,7 @@ import ( // If there are no conflicting headers, the light client deems the verified target header // trusted and saves it to the trusted store. 
func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.LightBlock, now time.Time) error { - if primaryTrace == nil || len(primaryTrace) < 2 { + if len(primaryTrace) == 0 || len(primaryTrace) < 2 { return errors.New("nil or single block primary trace") } var ( diff --git a/light/provider/http/http.go b/light/provider/http/http.go index b73f7bd4524..17cbf753e5f 100644 --- a/light/provider/http/http.go +++ b/light/provider/http/http.go @@ -159,6 +159,11 @@ OUTER_LOOP: // we wait and try again with exponential backoff time.Sleep(backoffTimeout(uint16(attempt))) continue + case strings.Contains(err.Error(), context.DeadlineExceeded.Error()): + return nil, context.DeadlineExceeded + + case ctx.Err() != nil: + return nil, ctx.Err() // context canceled or connection refused we return the error default: @@ -202,6 +207,12 @@ func (p *http) signedHeader(ctx context.Context, height *int64) (*types.SignedHe time.Sleep(backoffTimeout(uint16(attempt))) continue + case strings.Contains(err.Error(), context.DeadlineExceeded.Error()): + return nil, context.DeadlineExceeded + + case ctx.Err() != nil: + return nil, ctx.Err() + // either context was canceled or connection refused. 
default: return nil, err diff --git a/light/provider/http/http_test.go b/light/provider/http/http_test.go index 3ef7dbb1795..c0aa35fc682 100644 --- a/light/provider/http/http_test.go +++ b/light/provider/http/http_test.go @@ -15,6 +15,7 @@ import ( lighthttp "github.com/cometbft/cometbft/light/provider/http" rpcclient "github.com/cometbft/cometbft/rpc/client" rpchttp "github.com/cometbft/cometbft/rpc/client/http" + ctypes "github.com/cometbft/cometbft/rpc/core/types" rpctest "github.com/cometbft/cometbft/rpc/test" "github.com/cometbft/cometbft/types" ) @@ -82,6 +83,21 @@ func TestProvider(t *testing.T) { require.Nil(t, lb) assert.Equal(t, provider.ErrLightBlockNotFound, err) + // fetching with the context canceled + ctx, cancel := context.WithCancel(context.Background()) + cancel() + _, err = p.LightBlock(ctx, lower+3) + require.Error(t, err) + require.Equal(t, context.Canceled, err) + + // fetching with the deadline exceeded (a mock RPC client is used to simulate this) + c2, err := newMockHTTP(rpcAddr) + require.NoError(t, err) + p2 := lighthttp.NewWithClient(chainID, c2) + _, err = p2.LightBlock(context.Background(), 0) + require.Error(t, err) + require.Equal(t, context.DeadlineExceeded, err) + // stop the full node and check that a no response error is returned rpctest.StopTendermint(node) time.Sleep(10 * time.Second) @@ -91,3 +107,19 @@ func TestProvider(t *testing.T) { require.Contains(t, err.Error(), "connection refused") require.Nil(t, lb) } + +type mockHTTP struct { + *rpchttp.HTTP +} + +func newMockHTTP(remote string) (*mockHTTP, error) { + c, err := rpchttp.New(remote, "/websocket") + if err != nil { + return nil, err + } + return &mockHTTP{c}, nil +} + +func (m *mockHTTP) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) { + return nil, fmt.Errorf("post failed: %w", context.DeadlineExceeded) +} diff --git a/light/proxy/routes.go b/light/proxy/routes.go index 62e20712706..8eb47628b47 100644 --- 
a/light/proxy/routes.go +++ b/light/proxy/routes.go @@ -51,6 +51,9 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc { // evidence API "broadcast_evidence": rpcserver.NewRPCFunc(makeBroadcastEvidenceFunc(c), "evidence"), + + // Celestia Specific RPCs + "tx_status": rpcserver.NewRPCFunc(makeTxStatusFunc(c), "hash"), } } @@ -300,3 +303,11 @@ func makeBroadcastEvidenceFunc(c *lrpc.Client) rpcBroadcastEvidenceFunc { return c.BroadcastEvidence(ctx.Context(), ev) } } + +type rpcTxStatusFunc func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultTxStatus, error) + +func makeTxStatusFunc(c *lrpc.Client) rpcTxStatusFunc { + return func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultTxStatus, error) { + return c.TxStatus(ctx.Context(), hash) + } +} diff --git a/light/rpc/client.go b/light/rpc/client.go index b26c51a9d23..9bb0c5e5315 100644 --- a/light/rpc/client.go +++ b/light/rpc/client.go @@ -678,3 +678,70 @@ func validateSkipCount(page, perPage int) int { return skipCount } + +// DataCommitment returns the data commitment for the given height. +func (c *Client) DataCommitment(ctx context.Context, start, end uint64) (*ctypes.ResultDataCommitment, error) { + return c.next.DataCommitment(ctx, start, end) +} + +func (c *Client) DataRootInclusionProof(ctx context.Context, height, start, end uint64) (*ctypes.ResultDataRootInclusionProof, error) { + return c.next.DataRootInclusionProof(ctx, height, start, end) +} + +func (c *Client) ProveShares(ctx context.Context, height, start, end uint64) (types.ShareProof, error) { + return c.next.ProveShares(ctx, height, start, end) +} + +func (c *Client) ProveSharesV2(ctx context.Context, height, start, end uint64) (*ctypes.ResultShareProof, error) { + return c.next.ProveSharesV2(ctx, height, start, end) +} + +func (c *Client) TxStatus(ctx context.Context, hash []byte) (*ctypes.ResultTxStatus, error) { + return c.next.TxStatus(ctx, hash) +} + +// SignedBlock calls rpcclient#SignedBlock and then verifies the result. 
+func (c *Client) SignedBlock(ctx context.Context, height *int64) (*ctypes.ResultSignedBlock, error) { + res, err := c.next.SignedBlock(ctx, height) + if err != nil { + return nil, err + } + + // Validate res. + if err := res.Header.ValidateBasic(); err != nil { + return nil, err + } + if height != nil && res.Header.Height != *height { + return nil, fmt.Errorf("incorrect height returned. Expected %d, got %d", *height, res.Header.Height) + } + if err := res.Commit.ValidateBasic(); err != nil { + return nil, err + } + if err := res.ValidatorSet.ValidateBasic(); err != nil { + return nil, err + } + + // NOTE: this will re-request the header and commit from the primary. Ideally, you'd just + // fetch the data from the primary and use the light client to verify it. + l, err := c.updateLightClientIfNeededTo(ctx, &res.Header.Height) + if err != nil { + return nil, err + } + + if bmH, bH := l.Header.Hash(), res.Header.Hash(); !bytes.Equal(bmH, bH) { + return nil, fmt.Errorf("light client header %X does not match with response header %X", + bmH, bH) + } + + if bmH, bH := l.Header.DataHash, res.Data.Hash(); !bytes.Equal(bmH, bH) { + return nil, fmt.Errorf("light client data hash %X does not match with response data %X", + bmH, bH) + } + + return &ctypes.ResultSignedBlock{ + Header: res.Header, + Commit: *l.Commit, + ValidatorSet: *l.ValidatorSet, + Data: res.Data, + }, nil +} diff --git a/mempool/cache.go b/mempool/cache.go index 37977e8fc5e..8141f2ae23d 100644 --- a/mempool/cache.go +++ b/mempool/cache.go @@ -26,6 +26,9 @@ type TxCache interface { // Has reports whether tx is present in the cache. Checking for presence is // not treated as an access of the value. Has(tx types.Tx) bool + + // HasKey reports whether the given key is present in the cache. 
+ HasKey(key types.TxKey) bool } var _ TxCache = (*LRUTxCache)(nil) @@ -89,10 +92,14 @@ func (c *LRUTxCache) Push(tx types.Tx) bool { } func (c *LRUTxCache) Remove(tx types.Tx) { + key := tx.Key() + c.RemoveTxByKey(key) +} + +func (c *LRUTxCache) RemoveTxByKey(key types.TxKey) { c.mtx.Lock() defer c.mtx.Unlock() - key := tx.Key() e := c.cacheMap[key] delete(c.cacheMap, key) @@ -109,12 +116,21 @@ func (c *LRUTxCache) Has(tx types.Tx) bool { return ok } +func (c *LRUTxCache) HasKey(key types.TxKey) bool { + c.mtx.Lock() + defer c.mtx.Unlock() + + _, ok := c.cacheMap[key] + return ok +} + // NopTxCache defines a no-op raw transaction cache. type NopTxCache struct{} var _ TxCache = (*NopTxCache)(nil) -func (NopTxCache) Reset() {} -func (NopTxCache) Push(types.Tx) bool { return true } -func (NopTxCache) Remove(types.Tx) {} -func (NopTxCache) Has(types.Tx) bool { return false } +func (NopTxCache) Reset() {} +func (NopTxCache) Push(types.Tx) bool { return true } +func (NopTxCache) Remove(types.Tx) {} +func (NopTxCache) Has(types.Tx) bool { return false } +func (NopTxCache) HasKey(types.TxKey) bool { return false } diff --git a/mempool/cache_test.go b/mempool/cache_test.go index b71d9c6122e..7fa4ba23ad1 100644 --- a/mempool/cache_test.go +++ b/mempool/cache_test.go @@ -14,33 +14,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestCacheRemove(t *testing.T) { - cache := NewLRUTxCache(100) - numTxs := 10 - - txs := make([][]byte, numTxs) - for i := 0; i < numTxs; i++ { - // probability of collision is 2**-256 - txBytes := make([]byte, 32) - _, err := rand.Read(txBytes) - require.NoError(t, err) - - txs[i] = txBytes - cache.Push(txBytes) - - // make sure its added to both the linked list and the map - require.Equal(t, i+1, len(cache.cacheMap)) - require.Equal(t, i+1, cache.list.Len()) - } - - for i := 0; i < numTxs; i++ { - cache.Remove(txs[i]) - // make sure its removed from both the map and the linked list - require.Equal(t, numTxs-(i+1), len(cache.cacheMap)) - 
require.Equal(t, numTxs-(i+1), cache.list.Len()) - } -} - func TestCacheAfterUpdate(t *testing.T) { app := kvstore.NewInMemoryApplication() cc := proxy.NewLocalClientCreator(app) @@ -112,3 +85,53 @@ func TestCacheAfterUpdate(t *testing.T) { mp.Flush() } } + +func TestCacheRemove(t *testing.T) { + cache := NewLRUTxCache(100) + numTxs := 10 + + txs, err := populate(cache, numTxs) + require.NoError(t, err) + require.Equal(t, numTxs, len(cache.cacheMap)) + require.Equal(t, numTxs, cache.list.Len()) + + for i := 0; i < numTxs; i++ { + cache.Remove(txs[i]) + // make sure its removed from both the map and the linked list + require.Len(t, cache.cacheMap, numTxs-(i+1)) + require.Equal(t, numTxs-(i+1), cache.list.Len()) + } +} + +func populate(cache TxCache, numTxs int) ([][]byte, error) { + txs := make([][]byte, numTxs) + for i := 0; i < numTxs; i++ { + // probability of collision is 2**-256 + txBytes := make([]byte, 32) + _, err := rand.Read(txBytes) + if err != nil { + return nil, err + } + + txs[i] = txBytes + cache.Push(txBytes) + } + return txs, nil +} + +func TestCacheRemoveByKey(t *testing.T) { + cache := NewLRUTxCache(100) + numTxs := 10 + + txs, err := populate(cache, numTxs) + require.NoError(t, err) + require.Equal(t, numTxs, len(cache.cacheMap)) + require.Equal(t, numTxs, cache.list.Len()) + + for i := 0; i < numTxs; i++ { + cache.RemoveTxByKey(types.Tx(txs[i]).Key()) + // make sure its removed from both the map and the linked list + require.Equal(t, numTxs-(i+1), len(cache.cacheMap)) + require.Equal(t, numTxs-(i+1), cache.list.Len()) + } +} diff --git a/mempool/clist_mempool.go b/mempool/clist_mempool.go index 455cce3ab4e..c0e05304331 100644 --- a/mempool/clist_mempool.go +++ b/mempool/clist_mempool.go @@ -14,6 +14,7 @@ import ( "github.com/cometbft/cometbft/libs/log" cmtmath "github.com/cometbft/cometbft/libs/math" cmtsync "github.com/cometbft/cometbft/libs/sync" + "github.com/cometbft/cometbft/libs/trace" "github.com/cometbft/cometbft/proxy" 
"github.com/cometbft/cometbft/types" ) @@ -55,6 +56,7 @@ type CListMempool struct { logger log.Logger metrics *Metrics + trace trace.Tracer } var _ Mempool = &CListMempool{} @@ -77,6 +79,7 @@ func NewCListMempool( recheck: newRecheck(), logger: log.NewNopLogger(), metrics: NopMetrics(), + trace: trace.NoOpTracer(), } mp.height.Store(height) @@ -95,6 +98,21 @@ func NewCListMempool( return mp } +// GetTxByKey retrieves a transaction from the mempool using its key. +func (mem *CListMempool) GetTxByKey(key types.TxKey) (types.Tx, bool) { + e, ok := mem.txsMap.Load(key) + if !ok { + return nil, false + } + memTx, ok := e.(*clist.CElement).Value.(*mempoolTx) + return memTx.tx, ok +} + +// WasRecentlyEvicted returns false consistently as this implementation does not support transaction eviction. +func (*CListMempool) WasRecentlyEvicted(key types.TxKey) bool { + return false +} + func (mem *CListMempool) getCElement(txKey types.TxKey) (*clist.CElement, bool) { if e, ok := mem.txsMap.Load(txKey); ok { return e.(*clist.CElement), true @@ -150,6 +168,12 @@ func WithMetrics(metrics *Metrics) CListMempoolOption { return func(mem *CListMempool) { mem.metrics = metrics } } +func WithTraceClient(tc trace.Tracer) CListMempoolOption { + return func(txmp *CListMempool) { + txmp.trace = tc + } +} + // Safe for concurrent use by multiple goroutines. func (mem *CListMempool) Lock() { if mem.recheck.setRecheckFull() { @@ -368,7 +392,10 @@ func (mem *CListMempool) RemoveTxByKey(txKey types.TxKey) error { mem.txs.Remove(elem) elem.DetachPrev() mem.txsMap.Delete(txKey) - tx := elem.Value.(*mempoolTx).tx + var tx types.Tx + if memtx, ok := elem.Value.(*mempoolTx); ok { + tx = memtx.tx + } mem.txsBytes.Add(int64(-len(tx))) return nil } @@ -599,6 +626,7 @@ func (mem *CListMempool) Update( mem.postCheck = postCheck } + mem.metrics.SuccessfulTxs.Add(float64(len(txs))) for i, tx := range txs { if txResults[i].Code == abci.CodeTypeOK { // Add valid committed tx to the cache (if missing). 
diff --git a/mempool/mempool.go b/mempool/mempool.go index 8ab37162641..e2530383854 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -99,6 +99,17 @@ type Mempool interface { // SizeBytes returns the total size of all txs in the mempool. SizeBytes() int64 + + // Celestia Specific Methods + + // GetTxByKey gets a tx by its key from the mempool. Returns the tx and a bool indicating its presence in the tx cache. + // Used in the RPC endpoint: TxStatus. + GetTxByKey(key types.TxKey) (types.Tx, bool) + + // WasRecentlyEvicted returns true if the tx was evicted from the mempool and exists in the + // evicted cache. + // Used in the RPC endpoint: TxStatus. + WasRecentlyEvicted(key types.TxKey) bool } // PreCheckFunc is an optional filter executed before CheckTx and rejects diff --git a/mempool/metrics.gen.go b/mempool/metrics.gen.go index 3d202e320ad..092b249c48d 100644 --- a/mempool/metrics.gen.go +++ b/mempool/metrics.gen.go @@ -64,6 +64,36 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "active_outbound_connections", Help: "Number of connections being actively used for gossiping transactions (experimental feature).", }, labels).With(labelsAndValues...), + ExpiredTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "expired_txs", + Help: "ExpiredTxs defines transactions that were removed from the mempool due to a TTL", + }, labels).With(labelsAndValues...), + SuccessfulTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "successful_txs", + Help: "SuccessfulTxs defines the number of transactions that successfully made it into a block.", + }, labels).With(labelsAndValues...), + AlreadySeenTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "already_seen_txs", + Help: "AlreadySeenTxs defines the number of transactions 
that entered the mempool which were already present in the mempool. This is a good indicator of the degree of duplication in message gossiping.", + }, labels).With(labelsAndValues...), + RequestedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "requested_txs", + Help: "RequestedTxs defines the number of times that the node requested a tx to a peer", + }, labels).With(labelsAndValues...), + RerequestedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "rerequested_txs", + Help: "RerequestedTxs defines the number of times that a requested tx never received a response in time and a new request was made.", + }, labels).With(labelsAndValues...), } } @@ -77,5 +107,10 @@ func NopMetrics() *Metrics { EvictedTxs: discard.NewCounter(), RecheckTimes: discard.NewCounter(), ActiveOutboundConnections: discard.NewGauge(), + ExpiredTxs: discard.NewCounter(), + SuccessfulTxs: discard.NewCounter(), + AlreadySeenTxs: discard.NewCounter(), + RequestedTxs: discard.NewCounter(), + RerequestedTxs: discard.NewCounter(), } } diff --git a/mempool/metrics.go b/mempool/metrics.go index 6a24a5b48a7..d9ae70d7c7b 100644 --- a/mempool/metrics.go +++ b/mempool/metrics.go @@ -48,4 +48,25 @@ type Metrics struct { // Number of connections being actively used for gossiping transactions // (experimental feature). ActiveOutboundConnections metrics.Gauge + + // ExpiredTxs defines transactions that were removed from the mempool due + // to a TTL + ExpiredTxs metrics.Counter + + // SuccessfulTxs defines the number of transactions that successfully made + // it into a block. + SuccessfulTxs metrics.Counter + + // AlreadySeenTxs defines the number of transactions that entered the + // mempool which were already present in the mempool. This is a good + // indicator of the degree of duplication in message gossiping. 
+ AlreadySeenTxs metrics.Counter + + // RequestedTxs defines the number of times that the node requested a + // tx to a peer + RequestedTxs metrics.Counter + + // RerequestedTxs defines the number of times that a requested tx + // never received a response in time and a new request was made. + RerequestedTxs metrics.Counter } diff --git a/mempool/mocks/mempool.go b/mempool/mocks/mempool.go index 8e78bce4a60..c0314d65521 100644 --- a/mempool/mocks/mempool.go +++ b/mempool/mocks/mempool.go @@ -62,6 +62,36 @@ func (_m *Mempool) FlushAppConn() error { return r0 } +// GetTxByKey provides a mock function with given fields: key +func (_m *Mempool) GetTxByKey(key types.TxKey) (types.Tx, bool) { + ret := _m.Called(key) + + if len(ret) == 0 { + panic("no return value specified for GetTxByKey") + } + + var r0 types.Tx + var r1 bool + if rf, ok := ret.Get(0).(func(types.TxKey) (types.Tx, bool)); ok { + return rf(key) + } + if rf, ok := ret.Get(0).(func(types.TxKey) types.Tx); ok { + r0 = rf(key) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Tx) + } + } + + if rf, ok := ret.Get(1).(func(types.TxKey) bool); ok { + r1 = rf(key) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + // Lock provides a mock function with no fields func (_m *Mempool) Lock() { _m.Called() @@ -204,6 +234,24 @@ func (_m *Mempool) Update(blockHeight int64, blockTxs types.Txs, deliverTxRespon return r0 } +// WasRecentlyEvicted provides a mock function with given fields: key +func (_m *Mempool) WasRecentlyEvicted(key types.TxKey) bool { + ret := _m.Called(key) + + if len(ret) == 0 { + panic("no return value specified for WasRecentlyEvicted") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(types.TxKey) bool); ok { + r0 = rf(key) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + // NewMempool creates a new instance of Mempool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
// The first argument is typically a *testing.T value. func NewMempool(t interface { diff --git a/mempool/nop_mempool.go b/mempool/nop_mempool.go index 6bfff3b04d4..812d554dbde 100644 --- a/mempool/nop_mempool.go +++ b/mempool/nop_mempool.go @@ -74,6 +74,12 @@ func (*NopMempool) Size() int { return 0 } // SizeBytes always returns 0. func (*NopMempool) SizeBytes() int64 { return 0 } +// GetTxByKey always returns nil. +func (*NopMempool) GetTxByKey(types.TxKey) (types.Tx, bool) { return nil, false } + +// WasRecentlyEvicted always returns false. +func (*NopMempool) WasRecentlyEvicted(types.TxKey) bool { return false } + // NopMempoolReactor is a mempool reactor that does nothing. type NopMempoolReactor struct { service.BaseService @@ -105,3 +111,6 @@ func (*NopMempoolReactor) Receive(p2p.Envelope) {} // SetSwitch does nothing. func (*NopMempoolReactor) SetSwitch(*p2p.Switch) {} + +// QueueUnprocessedEnvelope does nothing. +func (*NopMempoolReactor) QueueUnprocessedEnvelope(p2p.UnprocessedEnvelope) {} diff --git a/mempool/reactor.go b/mempool/reactor.go index ef3e9a7c382..16960afa8dd 100644 --- a/mempool/reactor.go +++ b/mempool/reactor.go @@ -10,6 +10,7 @@ import ( cfg "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/libs/clist" "github.com/cometbft/cometbft/libs/log" + "github.com/cometbft/cometbft/libs/trace" "github.com/cometbft/cometbft/p2p" protomem "github.com/cometbft/cometbft/proto/tendermint/mempool" "github.com/cometbft/cometbft/types" @@ -30,14 +31,16 @@ type Reactor struct { // connections for different groups of peers. activePersistentPeersSemaphore *semaphore.Weighted activeNonPersistentPeersSemaphore *semaphore.Weighted + traceClient trace.Tracer } // NewReactor returns a new Reactor with the given config and mempool. 
-func NewReactor(config *cfg.MempoolConfig, mempool *CListMempool) *Reactor { +func NewReactor(config *cfg.MempoolConfig, mempool *CListMempool, traceClient trace.Tracer) *Reactor { memR := &Reactor{ - config: config, - mempool: mempool, - ids: newMempoolIDs(), + config: config, + mempool: mempool, + ids: newMempoolIDs(), + traceClient: traceClient, } memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR) memR.activePersistentPeersSemaphore = semaphore.NewWeighted(int64(memR.config.ExperimentalMaxGossipConnectionsToPersistentPeers)) diff --git a/mempool/reactor_test.go b/mempool/reactor_test.go index f79ed97d6a9..599ed36c8b3 100644 --- a/mempool/reactor_test.go +++ b/mempool/reactor_test.go @@ -16,6 +16,7 @@ import ( abci "github.com/cometbft/cometbft/abci/types" cfg "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/libs/log" + "github.com/cometbft/cometbft/libs/trace" "github.com/cometbft/cometbft/p2p" "github.com/cometbft/cometbft/p2p/mock" memproto "github.com/cometbft/cometbft/proto/tendermint/mempool" @@ -63,7 +64,7 @@ func TestReactorBroadcastTxsMessage(t *testing.T) { waitForTxsOnReactors(t, txs, reactors) } -// regression test for https://github.com/cometbft/cometbft/issues/5408 +// regression test for https://github.com/tendermint/tendermint/issues/5408 func TestReactorConcurrency(t *testing.T) { config := cfg.TestConfig() const N = 2 @@ -84,6 +85,8 @@ func TestReactorConcurrency(t *testing.T) { const numTxs = 5 + reactors[0].mempool.config.Size = 10000 + reactors[1].mempool.config.Size = 10000 for i := 0; i < 1000; i++ { wg.Add(2) @@ -117,7 +120,7 @@ func TestReactorConcurrency(t *testing.T) { }() // 1. 
flush the mempool - reactors[1].mempool.Flush() + // reactors[1].mempool.Flush() } wg.Wait() @@ -327,7 +330,7 @@ func makeAndConnectReactors(config *cfg.Config, n int) ([]*Reactor, []*p2p.Switc mempool, cleanup := newMempoolWithApp(cc) defer cleanup() - reactors[i] = NewReactor(config.Mempool, mempool) // so we dont start the consensus states + reactors[i] = NewReactor(config.Mempool, mempool, trace.NoOpTracer()) // so we dont start the consensus states reactors[i].SetLogger(logger.With("validator", i)) } diff --git a/node/node.go b/node/node.go index 4e6ac83de66..8a77a16d390 100644 --- a/node/node.go +++ b/node/node.go @@ -9,9 +9,11 @@ import ( "os" "time" + "github.com/grafana/pyroscope-go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/rs/cors" + sdktrace "go.opentelemetry.io/otel/sdk/trace" bc "github.com/cometbft/cometbft/blocksync" cfg "github.com/cometbft/cometbft/config" @@ -22,6 +24,7 @@ import ( "github.com/cometbft/cometbft/libs/log" cmtpubsub "github.com/cometbft/cometbft/libs/pubsub" "github.com/cometbft/cometbft/libs/service" + "github.com/cometbft/cometbft/libs/trace" mempl "github.com/cometbft/cometbft/mempool" "github.com/cometbft/cometbft/p2p" "github.com/cometbft/cometbft/p2p/pex" @@ -82,6 +85,11 @@ type Node struct { indexerService *txindex.IndexerService prometheusSrv *http.Server pprofSrv *http.Server + + // Celestia specific fields + tracer trace.Tracer + pyroscopeProfiler *pyroscope.Profiler + pyroscopeTracer *sdktrace.TracerProvider } // Option sets a parameter for the node. @@ -356,8 +364,10 @@ func NewNodeWithContext(ctx context.Context, // Create the handshaker, which calls RequestInfo, sets the AppVersion on the state, // and replays any blocks as necessary to sync CometBFT with the app. 
consensusLogger := logger.With("module", "consensus") + var softwareVersion string if !stateSync { - if err := doHandshake(ctx, stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil { + softwareVersion, err = doHandshake(ctx, stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger) + if err != nil { return nil, err } @@ -376,7 +386,18 @@ func NewNodeWithContext(ctx context.Context, logNodeStartupInfo(state, pubKey, logger, consensusLogger) - mempool, mempoolReactor := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger) + // create an optional tracer client to collect trace data. + tracer, err := trace.NewTracer( + config, + logger, + genDoc.ChainID, + string(nodeKey.ID()), + ) + if err != nil { + return nil, err + } + + mempool, mempoolReactor := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger, tracer) evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateStore, blockStore, logger) if err != nil { @@ -409,7 +430,7 @@ func NewNodeWithContext(ctx context.Context, consensusReactor, consensusState := createConsensusReactor( config, state, blockExec, blockStore, mempool, evidencePool, - privValidator, csMetrics, stateSync || blockSync, eventBus, consensusLogger, offlineStateSyncHeight, + privValidator, csMetrics, stateSync || blockSync, eventBus, consensusLogger, offlineStateSyncHeight, tracer, ) err = stateStore.SetOfflineStateSyncHeight(0) @@ -428,17 +449,17 @@ func NewNodeWithContext(ctx context.Context, ) stateSyncReactor.SetLogger(logger.With("module", "statesync")) - nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state) + nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state, softwareVersion) if err != nil { return nil, err } - transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp) + transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp, 
tracer) p2pLogger := logger.With("module", "p2p") sw := createSwitch( config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor, - stateSyncReactor, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger, + stateSyncReactor, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger, tracer, ) err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ")) @@ -543,6 +564,18 @@ func (n *Node) OnStart() error { n.rpcListeners = listeners } + if n.config.Instrumentation.PyroscopeURL != "" { + profiler, tracer, err := setupPyroscope( + n.config.Instrumentation, + string(n.nodeKey.ID()), + ) + if err != nil { + return err + } + n.pyroscopeProfiler = profiler + n.pyroscopeTracer = tracer + } + // Start the transport. addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress)) if err != nil { @@ -633,6 +666,21 @@ func (n *Node) OnStop() { n.Logger.Error("Pprof HTTP server Shutdown", "err", err) } } + if n.tracer != nil { + n.tracer.Stop() + } + + if n.pyroscopeProfiler != nil { + if err := n.pyroscopeProfiler.Stop(); err != nil { + n.Logger.Error("Pyroscope profiler Stop", "err", err) + } + } + + if n.pyroscopeTracer != nil { + if err := n.pyroscopeTracer.Shutdown(context.Background()); err != nil { + n.Logger.Error("Pyroscope tracer Shutdown", "err", err) + } + } if n.blockStore != nil { n.Logger.Info("Closing blockstore") if err := n.blockStore.Close(); err != nil { @@ -931,6 +979,7 @@ func makeNodeInfo( txIndexer txindex.TxIndexer, genDoc *types.GenesisDoc, state sm.State, + softwareVersion string, ) (p2p.DefaultNodeInfo, error) { txIndexerStatus := "on" if _, ok := txIndexer.(*null.TxIndex); ok { diff --git a/node/setup.go b/node/setup.go index 5215b18c7e7..e2d0ee52d03 100644 --- a/node/setup.go +++ b/node/setup.go @@ -23,6 +23,7 @@ import ( cmtjson "github.com/cometbft/cometbft/libs/json" "github.com/cometbft/cometbft/libs/log" + "github.com/cometbft/cometbft/libs/trace" 
"github.com/cometbft/cometbft/light" mempl "github.com/cometbft/cometbft/mempool" "github.com/cometbft/cometbft/p2p" @@ -179,14 +180,15 @@ func doHandshake( eventBus types.BlockEventPublisher, proxyApp proxy.AppConns, consensusLogger log.Logger, -) error { +) (string, error) { handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc) handshaker.SetLogger(consensusLogger) handshaker.SetEventBus(eventBus) - if err := handshaker.HandshakeWithContext(ctx, proxyApp); err != nil { - return fmt.Errorf("error during handshake: %v", err) + softwareVersion, err := handshaker.HandshakeWithContext(ctx, proxyApp) + if err != nil { + return "", fmt.Errorf("error during handshake: %v", err) } - return nil + return softwareVersion, nil } func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) { @@ -231,6 +233,7 @@ func createMempoolAndMempoolReactor( state sm.State, memplMetrics *mempl.Metrics, logger log.Logger, + traceClient trace.Tracer, ) (mempl.Mempool, p2p.Reactor) { switch config.Mempool.Type { // allow empty string for backward compatibility @@ -243,11 +246,13 @@ func createMempoolAndMempoolReactor( mempl.WithMetrics(memplMetrics), mempl.WithPreCheck(sm.TxPreCheck(state)), mempl.WithPostCheck(sm.TxPostCheck(state)), + mempl.WithTraceClient(traceClient), ) mp.SetLogger(logger) reactor := mempl.NewReactor( config.Mempool, mp, + traceClient, ) if config.Consensus.WaitForTxs() { mp.EnableTxsAvailable() @@ -259,6 +264,8 @@ func createMempoolAndMempoolReactor( // Strictly speaking, there's no need to have a `mempl.NopMempoolReactor`, but // adding it leads to a cleaner code. 
return &mempl.NopMempool{}, mempl.NewNopMempoolReactor() + case cfg.MempoolTypeCAT: + panic("not implemented") default: panic(fmt.Sprintf("unknown mempool type: %q", config.Mempool.Type)) } @@ -316,6 +323,7 @@ func createConsensusReactor(config *cfg.Config, eventBus *types.EventBus, consensusLogger log.Logger, offlineStateSyncHeight int64, + traceClient trace.Tracer, ) (*cs.Reactor, *cs.State) { consensusState := cs.NewState( config.Consensus, @@ -326,12 +334,13 @@ func createConsensusReactor(config *cfg.Config, evidencePool, cs.StateMetrics(csMetrics), cs.OfflineStateSyncHeight(offlineStateSyncHeight), + cs.SetTraceClient(traceClient), ) consensusState.SetLogger(consensusLogger) if privValidator != nil { consensusState.SetPrivValidator(privValidator) } - consensusReactor := cs.NewReactor(consensusState, waitSync, cs.ReactorMetrics(csMetrics)) + consensusReactor := cs.NewReactor(consensusState, waitSync, cs.ReactorMetrics(csMetrics), cs.ReactorTracing(traceClient)) consensusReactor.SetLogger(consensusLogger) // services which will be publishing and/or subscribing for messages (events) // consensusReactor will set it on consensusState and blockExecutor @@ -344,13 +353,14 @@ func createTransport( nodeInfo p2p.NodeInfo, nodeKey *p2p.NodeKey, proxyApp proxy.AppConns, + traceClient trace.Tracer, ) ( *p2p.MultiplexTransport, []p2p.PeerFilterFunc, ) { var ( mConnConfig = p2p.MConnConfig(config.P2P) - transport = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig) + transport = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig, traceClient) connFilters = []p2p.ConnFilterFunc{} peerFilters = []p2p.PeerFilterFunc{} ) @@ -420,12 +430,14 @@ func createSwitch(config *cfg.Config, nodeInfo p2p.NodeInfo, nodeKey *p2p.NodeKey, p2pLogger log.Logger, + traceClient trace.Tracer, ) *p2p.Switch { sw := p2p.NewSwitch( config.P2P, transport, p2p.WithMetrics(p2pMetrics), p2p.SwitchPeerFilters(peerFilters...), + p2p.WithTracer(traceClient), ) sw.SetLogger(p2pLogger) if 
config.Mempool.Type != cfg.MempoolTypeNop { diff --git a/node/tracing.go b/node/tracing.go new file mode 100644 index 00000000000..671d3912058 --- /dev/null +++ b/node/tracing.go @@ -0,0 +1,84 @@ +package node + +import ( + otelpyroscope "github.com/grafana/otel-profiling-go" + "github.com/grafana/pyroscope-go" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" + "go.opentelemetry.io/otel/propagation" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + + "github.com/cometbft/cometbft/config" +) + +// setupPyroscope sets up pyroscope profiler and optionally tracing. +func setupPyroscope(instCfg *config.InstrumentationConfig, nodeID string) (*pyroscope.Profiler, *sdktrace.TracerProvider, error) { + tp, err := tracerProviderDebug() + if err != nil { + return nil, nil, err + } + + labels := map[string]string{"node_id": nodeID} + + if instCfg.PyroscopeTrace { + if _, err = setupTracing(instCfg.PyroscopeURL, labels); err != nil { + return nil, nil, err + } + } else { + tp = nil + } + + pflr, err := pyroscope.Start(pyroscope.Config{ + ApplicationName: "celestia", + ServerAddress: instCfg.PyroscopeURL, + Logger: nil, // use the noop logger by passing nil + Tags: labels, + ProfileTypes: toPyroscopeProfiles(instCfg.PyroscopeProfileTypes), + }) + + return pflr, tp, err +} + +func setupTracing(addr string, labels map[string]string) (tp *sdktrace.TracerProvider, err error) { + tp, err = tracerProviderDebug() + if err != nil { + return nil, err + } + + // Set the Tracer Provider and the W3C Trace Context propagator as globals. + // We wrap the tracer provider to also annotate goroutines with Span ID so + // that pprof would add corresponding labels to profiling samples. 
+ otel.SetTracerProvider(otelpyroscope.NewTracerProvider(tp, + otelpyroscope.WithAppName("celestia"), + otelpyroscope.WithRootSpanOnly(true), + otelpyroscope.WithAddSpanName(true), + otelpyroscope.WithPyroscopeURL(addr), + otelpyroscope.WithProfileBaselineLabels(labels), + otelpyroscope.WithProfileBaselineURL(true), + otelpyroscope.WithProfileURL(true), + )) + + // Register the trace context and baggage propagators so data is propagated across services/processes. + otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator( + propagation.TraceContext{}, + propagation.Baggage{}, + )) + + return tp, err +} + +func tracerProviderDebug() (*sdktrace.TracerProvider, error) { + exp, err := stdouttrace.New(stdouttrace.WithPrettyPrint()) + if err != nil { + return nil, err + } + return sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sdktrace.NewBatchSpanProcessor(exp))), nil +} + +func toPyroscopeProfiles(profiles []string) []pyroscope.ProfileType { + pts := make([]pyroscope.ProfileType, 0, len(profiles)) + for _, p := range profiles { + pts = append(pts, pyroscope.ProfileType(p)) + } + return pts +} diff --git a/p2p/base_reactor.go b/p2p/base_reactor.go index bfac2340847..40f20e3e81a 100644 --- a/p2p/base_reactor.go +++ b/p2p/base_reactor.go @@ -1,10 +1,15 @@ package p2p import ( + "context" + "github.com/cometbft/cometbft/libs/service" "github.com/cometbft/cometbft/p2p/conn" ) +// ProcessorFunc is the message processor function type. +type ProcessorFunc func(context.Context, <-chan UnprocessedEnvelope) error + // Reactor is responsible for handling incoming messages on one or more // Channel. Switch calls GetChannels when reactor is added to it. When a new // peer joins our node, InitPeer and AddPeer are called. 
RemovePeer is called @@ -41,6 +46,15 @@ type Reactor interface { // Receive is called by the switch when an envelope is received from any connected // peer on any of the channels registered by the reactor Receive(Envelope) + + // QueueUnprocessedEnvelope is called by the switch when an unprocessed + // envelope is received. Unprocessed envelopes are immediately buffered in a + // queue to avoid blocking. Incoming messages are then passed to a + // processing function. The default processing function unmarshals the + // messages in the order the sender sent them and then calls Receive on the + // reactor. The queue size and the processing function can be changed via + // passing options to the base reactor. + // QueueUnprocessedEnvelope(e UnprocessedEnvelope) } //-------------------------------------- @@ -48,12 +62,63 @@ type Reactor interface { type BaseReactor struct { service.BaseService // Provides Start, Stop, .Quit Switch *Switch + + // incoming chan UnprocessedEnvelope + + // ctx context.Context + // cancel context.CancelFunc + // // processor is called with the incoming channel and is responsible for + // // unmarshalling the messages and calling Receive on the reactor. 
+ // processor ProcessorFunc } -func NewBaseReactor(name string, impl Reactor) *BaseReactor { - return &BaseReactor{ +type ReactorOptions func(*BaseReactor) + +func NewBaseReactor(name string, impl Reactor, opts ...ReactorOptions) *BaseReactor { + // ctx := context.Background() + // ctx, cancel := context.WithCancel(ctx) + + br := &BaseReactor{ BaseService: *service.NewBaseService(nil, name, impl), Switch: nil, + + // ctx: ctx, + // cancel: cancel, + // incoming: make(chan UnprocessedEnvelope, 100), + // processor: DefaultProcessor(impl), + } + + // for _, opt := range opts { + // opt(br) + // } + + // go func() { + // err := br.processor(ctx, br.incoming) + // if err != nil { + // err = br.Stop() + // if err != nil { + // panic(err) + // } + // } + // }() + + return br +} + +// // WithProcessor sets the processor function for the reactor. The processor +// // function is called with the incoming channel and is responsible for +// // unmarshalling the messages and calling Receive on the reactor. +// func WithProcessor(processor ProcessorFunc) ReactorOptions { +// return func(br *BaseReactor) { +// br.processor = processor +// } +// } + +// WithIncomingQueueSize sets the size of the incoming message queue for a +// reactor. +func WithIncomingQueueSize(size int) ReactorOptions { + return func(br *BaseReactor) { + // br.incoming = make(chan UnprocessedEnvelope, size) } } @@ -65,3 +130,79 @@ func (*BaseReactor) AddPeer(Peer) {} func (*BaseReactor) RemovePeer(Peer, interface{}) {} func (*BaseReactor) Receive(Envelope) {} func (*BaseReactor) InitPeer(peer Peer) Peer { return peer } + +// // QueueUnprocessedEnvelope is called by the switch when an unprocessed +// // envelope is received. Unprocessed envelopes are immediately buffered in a +// // queue to avoid blocking. The size of the queue can be changed by passing +// // options to the base reactor. 
+// func (br *BaseReactor) QueueUnprocessedEnvelope(e UnprocessedEnvelope) { +// select { +// // if the context is done, do nothing. +// case <-br.ctx.Done(): +// // if not, add the item to the channel. +// case br.incoming <- e: +// } +// } + +// func (br *BaseReactor) OnStop() { +// br.cancel() +// close(br.incoming) +// } + +// // DefaultProcessor unmarshalls the message and calls Receive on the reactor. +// // This preserves the sender's original order for all messages. +// func DefaultProcessor(impl Reactor) func(context.Context, <-chan UnprocessedEnvelope) error { +// implChannels := impl.GetChannels() + +// chIDs := make(map[byte]proto.Message, len(implChannels)) +// for _, chDesc := range implChannels { +// chIDs[chDesc.ID] = chDesc.MessageType +// } +// return func(ctx context.Context, incoming <-chan UnprocessedEnvelope) error { +// for { +// select { +// case <-ctx.Done(): +// return nil +// case ue, ok := <-incoming: +// if !ok { +// // this means the channel was closed. +// return nil +// } +// mt := chIDs[ue.ChannelID] + +// if mt == nil { +// return fmt.Errorf("no message type registered for channel %d", ue.ChannelID) +// } + +// msg := proto.Clone(mt) + +// err := proto.Unmarshal(ue.Message, msg) +// if err != nil { +// return fmt.Errorf("unmarshaling message: %v into type: %s resulted in error %w", msg, reflect.TypeOf(mt), err) +// } + +// if w, ok := msg.(Unwrapper); ok { +// msg, err = w.Unwrap() +// if err != nil { +// return fmt.Errorf("unwrapping message: %v", err) +// } +// } + +// labels := []string{ +// "peer_id", string(ue.Src.ID()), +// "chID", fmt.Sprintf("%#x", ue.ChannelID), +// } + +// ue.Src.Metrics().PeerReceiveBytesTotal.With(labels...).Add(float64(len(ue.Message))) +// ue.Src.Metrics().MessageReceiveBytesTotal.With(append(labels, "message_type", ue.Src.ValueToMetricLabel(msg))...).Add(float64(len(ue.Message))) +// schema.WriteReceivedBytes(ue.Src.TraceClient(), string(ue.Src.ID()), ue.ChannelID, len(ue.Message)) + +// 
impl.Receive(Envelope{ +// ChannelID: ue.ChannelID, +// Src: ue.Src, +// Message: msg, +// }) +// } +// } +// } +// } diff --git a/p2p/base_reactor_test.go b/p2p/base_reactor_test.go new file mode 100644 index 00000000000..aba6d461dd9 --- /dev/null +++ b/p2p/base_reactor_test.go @@ -0,0 +1,123 @@ +package p2p_test + +// import ( +// "net" +// "sync" +// "testing" +// "time" + +// "github.com/cosmos/gogoproto/proto" +// "github.com/stretchr/testify/require" + +// "github.com/cometbft/cometbft/libs/service" +// "github.com/cometbft/cometbft/libs/trace" +// "github.com/cometbft/cometbft/p2p" +// "github.com/cometbft/cometbft/p2p/conn" +// "github.com/cometbft/cometbft/proto/tendermint/mempool" +// ) + +// // TestBaseReactorProcessor tests the BaseReactor's message processing by +// // queueing encoded messages and adding artificial delay to the first message. +// // Depending on the processors used, the ordering of the sender could be lost. +// func TestBaseReactorProcessor(t *testing.T) { +// // a reactor using the default processor should be able to queue +// // messages, and they get processed in order. +// or := NewOrderedReactor() + +// msgs := []string{"msg1", "msg2", "msg3"} +// or.fillQueue(t, msgs...) + +// time.Sleep(300 * time.Millisecond) // wait plenty of time for the processing to finish + +// or.Lock() +// require.Equal(t, len(msgs), len(or.received)) +// require.Equal(t, msgs, or.received) +// or.Unlock() +// } + +// var _ p2p.Reactor = &orderedReactor{} + +// // orderedReactor is used for testing. It saves each envelope in the order it +// // receives it. 
+// type orderedReactor struct { +// p2p.BaseReactor + +// sync.Mutex +// received []string +// receivedFirst bool +// } + +// func NewOrderedReactor() *orderedReactor { +// r := &orderedReactor{Mutex: sync.Mutex{}} +// r.BaseReactor = *p2p.NewBaseReactor("Ordered Reactor", r, p2p.WithIncomingQueueSize(10)) +// return r +// } + +// func (r *orderedReactor) GetChannels() []*conn.ChannelDescriptor { +// return []*conn.ChannelDescriptor{ +// { +// ID: 0x99, +// Priority: 1, +// RecvMessageCapacity: 10, +// MessageType: &mempool.Txs{}, +// }, +// } +// } + +// // ReceiveEnvelope adds a delay to the first processed envelope to test ordering. +// func (r *orderedReactor) Receive(e p2p.Envelope) { +// r.Lock() +// f := r.receivedFirst +// if !f { +// r.receivedFirst = true +// r.Unlock() +// time.Sleep(100 * time.Millisecond) +// } else { +// r.Unlock() +// } +// r.Lock() +// defer r.Unlock() + +// envMsg := e.Message.(*mempool.Txs) +// r.received = append(r.received, string(envMsg.Txs[0])) +// } + +// func (r *orderedReactor) fillQueue(t *testing.T, msgs ...string) { +// peer := &imaginaryPeer{} +// for _, msg := range msgs { +// s, err := proto.Marshal(&mempool.Txs{Txs: [][]byte{[]byte(msg)}}) +// require.NoError(t, err) +// r.QueueUnprocessedEnvelope(p2p.UnprocessedEnvelope{ +// Src: peer, +// Message: s, +// ChannelID: 0x99, +// }) +// } +// } + +// var _ p2p.IntrospectivePeer = &imaginaryPeer{} + +// type imaginaryPeer struct { +// service.BaseService +// } + +// func (ip *imaginaryPeer) TraceClient() trace.Tracer { return trace.NoOpTracer() } +// func (ip *imaginaryPeer) HasIPChanged() bool { return false } +// func (ip *imaginaryPeer) FlushStop() {} +// func (ip *imaginaryPeer) ID() p2p.ID { return "" } +// func (ip *imaginaryPeer) RemoteIP() net.IP { return []byte{} } +// func (ip *imaginaryPeer) RemoteAddr() net.Addr { return nil } +// func (ip *imaginaryPeer) IsOutbound() bool { return true } +// func (ip *imaginaryPeer) CloseConn() error { return nil } +// func 
(ip *imaginaryPeer) IsPersistent() bool { return false } +// func (ip *imaginaryPeer) NodeInfo() p2p.NodeInfo { return nil } +// func (ip *imaginaryPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} } +// func (ip *imaginaryPeer) SocketAddr() *p2p.NetAddress { return nil } +// func (ip *imaginaryPeer) Send(p2p.Envelope) bool { return true } +// func (ip *imaginaryPeer) TrySend(p2p.Envelope) bool { return true } +// func (ip *imaginaryPeer) Set(key string, value any) {} +// func (ip *imaginaryPeer) Get(key string) any { return nil } +// func (ip *imaginaryPeer) SetRemovalFailed() {} +// func (ip *imaginaryPeer) GetRemovalFailed() bool { return false } +// func (ip *imaginaryPeer) Metrics() *p2p.Metrics { return p2p.NopMetrics() } +// func (ip *imaginaryPeer) ValueToMetricLabel(i any) string { return "" } diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go index 34eb66e9615..6ab6cb6aa83 100644 --- a/p2p/conn/connection.go +++ b/p2p/conn/connection.go @@ -898,7 +898,8 @@ func (ch *Channel) recvPacketMsg(packet tmp2p.PacketMsg) ([]byte, error) { } ch.recving = append(ch.recving, packet.Data...) if packet.EOF { - msgBytes := ch.recving + msgBytes := make([]byte, len(ch.recving)) + copy(msgBytes, ch.recving) // clear the slice without re-allocating. 
// http://stackoverflow.com/questions/16971741/how-do-you-clear-a-slice-in-go diff --git a/p2p/metrics.gen.go b/p2p/metrics.gen.go index e452f16535e..e55132fdb9d 100644 --- a/p2p/metrics.gen.go +++ b/p2p/metrics.gen.go @@ -49,13 +49,13 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Subsystem: MetricsSubsystem, Name: "message_receive_bytes_total", Help: "Number of bytes of each message type received.", - }, append(labels, "message_type")).With(labelsAndValues...), + }, append(labels, "message_type", "chID", "peer_id")).With(labelsAndValues...), MessageSendBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, Name: "message_send_bytes_total", Help: "Number of bytes of each message type sent.", - }, append(labels, "message_type")).With(labelsAndValues...), + }, append(labels, "message_type", "chID", "peer_id")).With(labelsAndValues...), } } diff --git a/p2p/metrics.go b/p2p/metrics.go index 808142e9afc..009f16fd52e 100644 --- a/p2p/metrics.go +++ b/p2p/metrics.go @@ -37,9 +37,9 @@ type Metrics struct { // Number of transactions submitted by each peer. NumTxs metrics.Gauge `metrics_labels:"peer_id"` // Number of bytes of each message type received. - MessageReceiveBytesTotal metrics.Counter `metrics_labels:"message_type"` + MessageReceiveBytesTotal metrics.Counter `metrics_labels:"message_type,chID,peer_id"` // Number of bytes of each message type sent. 
- MessageSendBytesTotal metrics.Counter `metrics_labels:"message_type"` + MessageSendBytesTotal metrics.Counter `metrics_labels:"message_type,chID,peer_id"` } type metricsLabelCache struct { diff --git a/p2p/mock/peer.go b/p2p/mock/peer.go index b4111004c81..82d1183722d 100644 --- a/p2p/mock/peer.go +++ b/p2p/mock/peer.go @@ -71,3 +71,4 @@ func (mp *Peer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Po func (mp *Peer) CloseConn() error { return nil } func (mp *Peer) SetRemovalFailed() {} func (mp *Peer) GetRemovalFailed() bool { return false } +func (*Peer) HasIPChanged() bool { return false } diff --git a/p2p/mock/reactor.go b/p2p/mock/reactor.go index 64d93a97358..28993a056fd 100644 --- a/p2p/mock/reactor.go +++ b/p2p/mock/reactor.go @@ -14,7 +14,7 @@ type Reactor struct { func NewReactor() *Reactor { r := &Reactor{} - r.BaseReactor = *p2p.NewBaseReactor("Mock-PEX", r) + r.BaseReactor = *p2p.NewBaseReactor("Mock-PEX", r, p2p.WithIncomingQueueSize(1)) r.SetLogger(log.TestingLogger()) return r } diff --git a/p2p/mocks/peer.go b/p2p/mocks/peer.go index 267aa4ed149..bcfcab6c386 100644 --- a/p2p/mocks/peer.go +++ b/p2p/mocks/peer.go @@ -79,6 +79,24 @@ func (_m *Peer) GetRemovalFailed() bool { return r0 } +// HasIPChanged provides a mock function with no fields +func (_m *Peer) HasIPChanged() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HasIPChanged") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + // ID provides a mock function with no fields func (_m *Peer) ID() p2p.ID { ret := _m.Called() diff --git a/p2p/peer.go b/p2p/peer.go index dc88152df6d..d2292bfad5d 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -11,6 +11,8 @@ import ( "github.com/cometbft/cometbft/libs/cmap" "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/libs/service" + "github.com/cometbft/cometbft/libs/trace" + 
"github.com/cometbft/cometbft/libs/trace/schema" cmtconn "github.com/cometbft/cometbft/p2p/conn" ) @@ -45,6 +47,15 @@ type Peer interface { SetRemovalFailed() GetRemovalFailed() bool + + HasIPChanged() bool // has the peer's IP changed +} + +type IntrospectivePeer interface { + Peer + Metrics() *Metrics + ValueToMetricLabel(i any) string + TraceClient() trace.Tracer } //---------------------------------------------------------- @@ -127,10 +138,18 @@ type peer struct { // When removal of a peer fails, we set this flag removalAttemptFailed bool + + traceClient trace.Tracer } type PeerOption func(*peer) +func WithPeerTracer(t trace.Tracer) PeerOption { + return func(p *peer) { + p.traceClient = t + } +} + func newPeer( pc peerConn, mConfig cmtconn.MConnConfig, @@ -150,6 +169,7 @@ func newPeer( metricsTicker: time.NewTicker(metricsTickerDuration), metrics: NopMetrics(), mlc: mlc, + traceClient: trace.NoOpTracer(), } p.mconn = createMConnection( @@ -210,6 +230,18 @@ func (p *peer) FlushStop() { p.mconn.FlushStop() // stop everything and close the conn } +func (p *peer) Metrics() *Metrics { + return p.metrics +} + +func (p *peer) ValueToMetricLabel(i any) string { + return p.mlc.ValueToMetricLabel(i) +} + +func (p *peer) TraceClient() trace.Tracer { + return p.traceClient +} + // OnStop implements BaseService. func (p *peer) OnStop() { p.metricsTicker.Stop() @@ -242,6 +274,18 @@ func (p *peer) NodeInfo() NodeInfo { return p.nodeInfo } +// HasIPChanged returns true and the new IP if the peer's IP has changed. +func (p *peer) HasIPChanged() bool { + oldIP := p.ip + if oldIP == nil { + return false + } + // Reset the IP so we can get the new one + p.ip = nil + newIP := p.RemoteIP() + return !oldIP.Equal(newIP) +} + // SocketAddr returns the address of the socket. // For outbound peers, it's the address dialed (after DNS resolution). 
// For inbound peers, it's the address returned by the underlying connection @@ -368,6 +412,7 @@ func PeerMetrics(metrics *Metrics) PeerOption { } func (p *peer) metricsReporter() { + queues := make(map[byte]int, len(p.mconn.Status().Channels)) for { select { case <-p.metricsTicker.C: @@ -375,9 +420,11 @@ func (p *peer) metricsReporter() { var sendQueueSize float64 for _, chStatus := range status.Channels { sendQueueSize += float64(chStatus.SendQueueSize) + queues[chStatus.ID] = chStatus.SendQueueSize } p.metrics.PeerPendingSendBytes.With("peer_id", string(p.ID())).Set(sendQueueSize) + schema.WritePendingBytes(p.traceClient, string(p.ID()), queues) case <-p.Quit(): return } @@ -420,6 +467,7 @@ func createMConnection( panic(fmt.Errorf("unwrapping message: %s", err)) } } + schema.WriteReceivedBytes(p.traceClient, string(p.ID()), chID, len(msgBytes)) p.metrics.PeerReceiveBytesTotal.With(labels...).Add(float64(len(msgBytes))) p.metrics.MessageReceiveBytesTotal.With("message_type", p.mlc.ValueToMetricLabel(msg)).Add(float64(len(msgBytes))) reactor.Receive(Envelope{ @@ -427,6 +475,11 @@ func createMConnection( Src: p, Message: msg, }) + // reactor.QueueUnprocessedEnvelope(UnprocessedEnvelope{ + // ChannelID: chID, + // Src: p, + // Message: msgBytes, + // }) } onError := func(r interface{}) { diff --git a/p2p/peer_set_test.go b/p2p/peer_set_test.go index 64911ecebff..24e58eb56d8 100644 --- a/p2p/peer_set_test.go +++ b/p2p/peer_set_test.go @@ -35,6 +35,8 @@ func (mp *mockPeer) CloseConn() error { return nil } func (mp *mockPeer) SetRemovalFailed() {} func (mp *mockPeer) GetRemovalFailed() bool { return false } +func (mp *mockPeer) HasIPChanged() bool { return false } + // Returns a mock peer func newMockPeer(ip net.IP) *mockPeer { if ip == nil { diff --git a/p2p/switch.go b/p2p/switch.go index 4afe4ba45e0..b9d5255b20e 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -12,6 +12,8 @@ import ( "github.com/cometbft/cometbft/libs/cmap" "github.com/cometbft/cometbft/libs/rand" 
"github.com/cometbft/cometbft/libs/service" + "github.com/cometbft/cometbft/libs/trace" + "github.com/cometbft/cometbft/libs/trace/schema" "github.com/cometbft/cometbft/p2p/conn" ) @@ -94,8 +96,9 @@ type Switch struct { rng *rand.Rand // seed for randomizing dial times and orders - metrics *Metrics - mlc *metricsLabelCache + metrics *Metrics + mlc *metricsLabelCache + traceClient trace.Tracer } // NetAddress returns the address the switch is listening on. @@ -129,6 +132,7 @@ func NewSwitch( persistentPeersAddrs: make([]*NetAddress, 0), unconditionalPeerIDs: make(map[ID]struct{}), mlc: newMetricsLabelCache(), + traceClient: trace.NoOpTracer(), } // Ensure we have a completely undeterministic PRNG. @@ -158,6 +162,11 @@ func WithMetrics(metrics *Metrics) SwitchOption { return func(sw *Switch) { sw.metrics = metrics } } +// WithTracer sets the tracer. +func WithTracer(tracer trace.Tracer) SwitchOption { + return func(sw *Switch) { sw.traceClient = tracer } +} + //--------------------------------------------------------------------- // Switch setup @@ -341,22 +350,39 @@ func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) { sw.stopAndRemovePeer(peer, reason) if peer.IsPersistent() { - var addr *NetAddress - if peer.IsOutbound() { // socket address for outbound peers - addr = peer.SocketAddr() - } else { // self-reported address for inbound peers - var err error - addr, err = peer.NodeInfo().NetAddress() - if err != nil { - sw.Logger.Error("Wanted to reconnect to inbound peer, but self-reported address is wrong", - "peer", peer, "err", err) - return - } + addr, err := sw.getPeerAddress(peer) + if err != nil { + sw.Logger.Error("Failed to get address for persistent peer", "peer", peer, "err", err) + return + } + go sw.reconnectToPeer(addr) + } + + if peer.HasIPChanged() { + addr, err := sw.getPeerAddress(peer) + if err != nil { + sw.Logger.Error("Failed to get address for peer with changed IP", "peer", peer, "err", err) } go sw.reconnectToPeer(addr) } } +// 
getPeerAddress returns the appropriate NetAddress for a given peer, +// handling both outbound and inbound peers. +func (sw *Switch) getPeerAddress(peer Peer) (*NetAddress, error) { + if peer.IsOutbound() { + return peer.SocketAddr(), nil + } + // For inbound peers, get the self-reported address + addr, err := peer.NodeInfo().NetAddress() + if err != nil { + sw.Logger.Error("Failed to get address for inbound peer", + "peer", peer, "err", err) + return nil, err + } + return addr, nil +} + // StopPeerGracefully disconnects from a peer gracefully. // TODO: handle graceful disconnects. func (sw *Switch) StopPeerGracefully(peer Peer) { @@ -369,7 +395,7 @@ func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) { if err := peer.Stop(); err != nil { sw.Logger.Error("error while stopping peer", "error", err) // TODO: should return error to be handled accordingly } - + schema.WritePeerUpdate(sw.traceClient, string(peer.ID()), schema.PeerDisconnect, fmt.Sprintf("%v", reason)) for _, reactor := range sw.reactors { reactor.RemovePeer(peer, reason) } @@ -853,6 +879,7 @@ func (sw *Switch) addPeer(p Peer) error { return err } sw.metrics.Peers.Add(float64(1)) + schema.WritePeerUpdate(sw.traceClient, string(p.ID()), schema.PeerJoin, "") // Start all the reactor protocols on the peer. 
for _, reactor := range sw.reactors { diff --git a/p2p/test_util.go b/p2p/test_util.go index 3fbb68bb655..97f3375403d 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -10,6 +10,7 @@ import ( "github.com/cometbft/cometbft/libs/log" cmtnet "github.com/cometbft/cometbft/libs/net" cmtrand "github.com/cometbft/cometbft/libs/rand" + "github.com/cometbft/cometbft/libs/trace" "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/p2p/conn" @@ -192,7 +193,7 @@ func MakeSwitch( panic(err) } - t := NewMultiplexTransport(nodeInfo, nodeKey, MConnConfig(cfg)) + t := NewMultiplexTransport(nodeInfo, nodeKey, MConnConfig(cfg), trace.NoOpTracer()) if err := t.Listen(*addr); err != nil { panic(err) diff --git a/p2p/transport.go b/p2p/transport.go index d6043da3beb..5946017f1f2 100644 --- a/p2p/transport.go +++ b/p2p/transport.go @@ -12,6 +12,7 @@ import ( "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/libs/protoio" + "github.com/cometbft/cometbft/libs/trace" "github.com/cometbft/cometbft/p2p/conn" tmp2p "github.com/cometbft/cometbft/proto/tendermint/p2p" ) @@ -159,6 +160,8 @@ type MultiplexTransport struct { // peer currently. All relevant configuration should be refactored into options // with sane defaults. mConfig conn.MConnConfig + // the tracer is passed to peers for collecting trace data + tracer trace.Tracer } // Test multiplexTransport for interface completeness. 
@@ -170,6 +173,7 @@ func NewMultiplexTransport( nodeInfo NodeInfo, nodeKey NodeKey, mConfig conn.MConnConfig, + tracer trace.Tracer, ) *MultiplexTransport { return &MultiplexTransport{ acceptc: make(chan accept), @@ -182,6 +186,7 @@ func NewMultiplexTransport( nodeKey: nodeKey, conns: NewConnSet(), resolver: net.DefaultResolver, + tracer: tracer, } } @@ -533,6 +538,7 @@ func (mt *MultiplexTransport) wrapPeer( cfg.onPeerError, cfg.mlc, PeerMetrics(cfg.metrics), + WithPeerTracer(mt.tracer), ) return p diff --git a/p2p/transport_test.go b/p2p/transport_test.go index 3c404f6a8dd..75300568bc9 100644 --- a/p2p/transport_test.go +++ b/p2p/transport_test.go @@ -12,6 +12,7 @@ import ( "github.com/cometbft/cometbft/crypto/ed25519" "github.com/cometbft/cometbft/libs/protoio" + "github.com/cometbft/cometbft/libs/trace" "github.com/cometbft/cometbft/p2p/conn" tmp2p "github.com/cometbft/cometbft/proto/tendermint/p2p" ) @@ -30,7 +31,7 @@ func newMultiplexTransport( nodeKey NodeKey, ) *MultiplexTransport { return NewMultiplexTransport( - nodeInfo, nodeKey, conn.DefaultMConnConfig(), + nodeInfo, nodeKey, conn.DefaultMConnConfig(), trace.NoOpTracer(), ) } diff --git a/p2p/types.go b/p2p/types.go index 48a6746ceba..17760239a62 100644 --- a/p2p/types.go +++ b/p2p/types.go @@ -10,6 +10,12 @@ import ( type ChannelDescriptor = conn.ChannelDescriptor type ConnectionStatus = conn.ConnectionStatus +type UnprocessedEnvelope struct { + Src IntrospectivePeer + Message []byte + ChannelID byte +} + // Envelope contains a message with sender routing info. 
type Envelope struct { Src Peer // sender (empty if outbound) diff --git a/proto/buf.yaml b/proto/buf.yaml index a646c2030a7..7e259131567 100644 --- a/proto/buf.yaml +++ b/proto/buf.yaml @@ -9,4 +9,3 @@ lint: use: - BASIC - FILE_LOWER_SNAKE_CASE - - UNARY_RPC diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index 89bafb6cd54..e71d58c7d41 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -110,6 +110,7 @@ message RequestListSnapshots {} message RequestOfferSnapshot { Snapshot snapshot = 1; // snapshot offered by peers bytes app_hash = 2; // light client-verified app hash for snapshot height + uint64 app_version = 3; } // loads a snapshot chunk @@ -152,6 +153,8 @@ message RequestProcessProposal { bytes next_validators_hash = 7; // address of the public key of the original proposer of the block. bytes proposer_address = 8; + uint64 square_size = 9; + bytes data_root_hash = 10; } // Extends a vote with application-injected data @@ -319,6 +322,8 @@ message ResponseApplySnapshotChunk { message ResponsePrepareProposal { repeated bytes txs = 1; + uint64 square_size = 2; + bytes data_root_hash = 3; } message ResponseProcessProposal { diff --git a/proto/tendermint/mempool/message.go b/proto/tendermint/mempool/message.go index 270a744faec..bf8ca0bf40a 100644 --- a/proto/tendermint/mempool/message.go +++ b/proto/tendermint/mempool/message.go @@ -18,13 +18,31 @@ func (m *Txs) Wrap() proto.Message { return mm } +// Wrap implements the p2p Wrapper interface and wraps a mempool seen tx message. +func (m *SeenTx) Wrap() proto.Message { + mm := &Message{} + mm.Sum = &Message_SeenTx{SeenTx: m} + return mm +} + +// Wrap implements the p2p Wrapper interface and wraps a mempool want tx message. +func (m *WantTx) Wrap() proto.Message { + mm := &Message{} + mm.Sum = &Message_WantTx{WantTx: m} + return mm +} + // Unwrap implements the p2p Wrapper interface and unwraps a wrapped mempool // message. 
func (m *Message) Unwrap() (proto.Message, error) { switch msg := m.Sum.(type) { case *Message_Txs: return m.GetTxs(), nil + case *Message_SeenTx: + return m.GetSeenTx(), nil + case *Message_WantTx: + return m.GetWantTx(), nil default: return nil, fmt.Errorf("unknown message: %T", msg) } diff --git a/proto/tendermint/mempool/types.pb.go b/proto/tendermint/mempool/types.pb.go index 4a6a40ef341..03ed7e7d340 100644 --- a/proto/tendermint/mempool/types.pb.go +++ b/proto/tendermint/mempool/types.pb.go @@ -66,10 +66,100 @@ func (m *Txs) GetTxs() [][]byte { return nil } +type SeenTx struct { + TxKey []byte `protobuf:"bytes,1,opt,name=tx_key,json=txKey,proto3" json:"tx_key,omitempty"` +} + +func (m *SeenTx) Reset() { *m = SeenTx{} } +func (m *SeenTx) String() string { return proto.CompactTextString(m) } +func (*SeenTx) ProtoMessage() {} +func (*SeenTx) Descriptor() ([]byte, []int) { + return fileDescriptor_2af51926fdbcbc05, []int{1} +} +func (m *SeenTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeenTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SeenTx.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SeenTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeenTx.Merge(m, src) +} +func (m *SeenTx) XXX_Size() int { + return m.Size() +} +func (m *SeenTx) XXX_DiscardUnknown() { + xxx_messageInfo_SeenTx.DiscardUnknown(m) +} + +var xxx_messageInfo_SeenTx proto.InternalMessageInfo + +func (m *SeenTx) GetTxKey() []byte { + if m != nil { + return m.TxKey + } + return nil +} + +type WantTx struct { + TxKey []byte `protobuf:"bytes,1,opt,name=tx_key,json=txKey,proto3" json:"tx_key,omitempty"` +} + +func (m *WantTx) Reset() { *m = WantTx{} } +func (m *WantTx) String() string { return proto.CompactTextString(m) } +func (*WantTx) ProtoMessage() {} +func (*WantTx) Descriptor() 
([]byte, []int) { + return fileDescriptor_2af51926fdbcbc05, []int{2} +} +func (m *WantTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WantTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WantTx.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WantTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_WantTx.Merge(m, src) +} +func (m *WantTx) XXX_Size() int { + return m.Size() +} +func (m *WantTx) XXX_DiscardUnknown() { + xxx_messageInfo_WantTx.DiscardUnknown(m) +} + +var xxx_messageInfo_WantTx proto.InternalMessageInfo + +func (m *WantTx) GetTxKey() []byte { + if m != nil { + return m.TxKey + } + return nil +} + type Message struct { // Types that are valid to be assigned to Sum: // // *Message_Txs + // *Message_SeenTx + // *Message_WantTx Sum isMessage_Sum `protobuf_oneof:"sum"` } @@ -77,7 +167,7 @@ func (m *Message) Reset() { *m = Message{} } func (m *Message) String() string { return proto.CompactTextString(m) } func (*Message) ProtoMessage() {} func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_2af51926fdbcbc05, []int{1} + return fileDescriptor_2af51926fdbcbc05, []int{3} } func (m *Message) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -115,8 +205,16 @@ type isMessage_Sum interface { type Message_Txs struct { Txs *Txs `protobuf:"bytes,1,opt,name=txs,proto3,oneof" json:"txs,omitempty"` } +type Message_SeenTx struct { + SeenTx *SeenTx `protobuf:"bytes,2,opt,name=seen_tx,json=seenTx,proto3,oneof" json:"seen_tx,omitempty"` +} +type Message_WantTx struct { + WantTx *WantTx `protobuf:"bytes,3,opt,name=want_tx,json=wantTx,proto3,oneof" json:"want_tx,omitempty"` +} -func (*Message_Txs) isMessage_Sum() {} +func (*Message_Txs) isMessage_Sum() {} +func (*Message_SeenTx) isMessage_Sum() {} +func (*Message_WantTx) isMessage_Sum() {} func (m 
*Message) GetSum() isMessage_Sum { if m != nil { @@ -132,34 +230,58 @@ func (m *Message) GetTxs() *Txs { return nil } +func (m *Message) GetSeenTx() *SeenTx { + if x, ok := m.GetSum().(*Message_SeenTx); ok { + return x.SeenTx + } + return nil +} + +func (m *Message) GetWantTx() *WantTx { + if x, ok := m.GetSum().(*Message_WantTx); ok { + return x.WantTx + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. func (*Message) XXX_OneofWrappers() []interface{} { return []interface{}{ (*Message_Txs)(nil), + (*Message_SeenTx)(nil), + (*Message_WantTx)(nil), } } func init() { proto.RegisterType((*Txs)(nil), "tendermint.mempool.Txs") + proto.RegisterType((*SeenTx)(nil), "tendermint.mempool.SeenTx") + proto.RegisterType((*WantTx)(nil), "tendermint.mempool.WantTx") proto.RegisterType((*Message)(nil), "tendermint.mempool.Message") } func init() { proto.RegisterFile("tendermint/mempool/types.proto", fileDescriptor_2af51926fdbcbc05) } var fileDescriptor_2af51926fdbcbc05 = []byte{ - // 184 bytes of a gzipped FileDescriptorProto + // 274 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2b, 0x49, 0xcd, 0x4b, 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0xcf, 0x4d, 0xcd, 0x2d, 0xc8, 0xcf, 0xcf, 0xd1, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x42, 0xc8, 0xeb, 0x41, 0xe5, 0x95, 0xc4, 0xb9, 0x98, 0x43, 0x2a, 0x8a, 0x85, 0x04, 0xb8, 0x98, 0x4b, 0x2a, 0x8a, 0x25, - 0x18, 0x15, 0x98, 0x35, 0x78, 0x82, 0x40, 0x4c, 0x25, 0x5b, 0x2e, 0x76, 0xdf, 0xd4, 0xe2, 0xe2, - 0xc4, 0xf4, 0x54, 0x21, 0x6d, 0x98, 0x24, 0xa3, 0x06, 0xb7, 0x91, 0xb8, 0x1e, 0xa6, 0x29, 0x7a, - 0x21, 0x15, 0xc5, 0x1e, 0x0c, 0x60, 0x7d, 0x4e, 0xac, 0x5c, 0xcc, 0xc5, 0xa5, 0xb9, 0x4e, 0xfe, - 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, - 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0x65, 0x9a, 0x9e, 0x59, 0x92, 0x51, - 0x9a, 0xa4, 
0x97, 0x9c, 0x9f, 0xab, 0x9f, 0x9c, 0x9f, 0x9b, 0x5a, 0x92, 0x94, 0x56, 0x82, 0x60, - 0x80, 0x5d, 0xaa, 0x8f, 0xe9, 0x91, 0x24, 0x36, 0xb0, 0x8c, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, - 0x53, 0xc3, 0xc4, 0x0a, 0xe5, 0x00, 0x00, 0x00, + 0x18, 0x15, 0x98, 0x35, 0x78, 0x82, 0x40, 0x4c, 0x25, 0x79, 0x2e, 0xb6, 0xe0, 0xd4, 0xd4, 0xbc, + 0x90, 0x0a, 0x21, 0x51, 0x2e, 0xb6, 0x92, 0x8a, 0xf8, 0xec, 0xd4, 0x4a, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0x9e, 0x20, 0xd6, 0x92, 0x0a, 0xef, 0xd4, 0x4a, 0x90, 0x82, 0xf0, 0xc4, 0xbc, 0x12, 0xdc, + 0x0a, 0x56, 0x33, 0x72, 0xb1, 0xfb, 0xa6, 0x16, 0x17, 0x27, 0xa6, 0xa7, 0x0a, 0x69, 0xc3, 0xcc, + 0x67, 0xd4, 0xe0, 0x36, 0x12, 0xd7, 0xc3, 0x74, 0x88, 0x5e, 0x48, 0x45, 0xb1, 0x07, 0x03, 0xd8, + 0x6a, 0x21, 0x53, 0x2e, 0xf6, 0xe2, 0xd4, 0xd4, 0xbc, 0xf8, 0x92, 0x0a, 0x09, 0x26, 0xb0, 0x06, + 0x29, 0x6c, 0x1a, 0x20, 0xae, 0xf3, 0x60, 0x08, 0x62, 0x2b, 0x86, 0xb8, 0xd3, 0x94, 0x8b, 0xbd, + 0x3c, 0x31, 0xaf, 0x04, 0xa4, 0x8d, 0x19, 0xb7, 0x36, 0x88, 0x9b, 0x41, 0xda, 0xca, 0xc1, 0x2c, + 0x27, 0x56, 0x2e, 0xe6, 0xe2, 0xd2, 0x5c, 0x27, 0xff, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, + 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, + 0x96, 0x63, 0x88, 0x32, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, + 0xce, 0xcf, 0x4d, 0x2d, 0x49, 0x4a, 0x2b, 0x41, 0x30, 0xc0, 0x41, 0xab, 0x8f, 0x19, 0xf2, 0x49, + 0x6c, 0x60, 0x19, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x66, 0x7b, 0xde, 0x37, 0x96, 0x01, + 0x00, 0x00, } func (m *Txs) Marshal() (dAtA []byte, err error) { @@ -194,6 +316,66 @@ func (m *Txs) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SeenTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeenTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeenTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TxKey) > 0 { + i -= len(m.TxKey) + copy(dAtA[i:], m.TxKey) + i = encodeVarintTypes(dAtA, i, uint64(len(m.TxKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WantTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WantTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WantTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TxKey) > 0 { + i -= len(m.TxKey) + copy(dAtA[i:], m.TxKey) + i = encodeVarintTypes(dAtA, i, uint64(len(m.TxKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *Message) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -247,6 +429,48 @@ func (m *Message_Txs) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } +func (m *Message_SeenTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_SeenTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SeenTx != nil { + { + size, err := m.SeenTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Message_WantTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_WantTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.WantTx != nil { + { + size, err := m.WantTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, 
err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { offset -= sovTypes(v) base := offset @@ -273,6 +497,32 @@ func (m *Txs) Size() (n int) { return n } +func (m *SeenTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TxKey) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *WantTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TxKey) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + func (m *Message) Size() (n int) { if m == nil { return 0 @@ -297,6 +547,30 @@ func (m *Message_Txs) Size() (n int) { } return n } +func (m *Message_SeenTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SeenTx != nil { + l = m.SeenTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_WantTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.WantTx != nil { + l = m.WantTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} func sovTypes(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 @@ -386,6 +660,174 @@ func (m *Txs) Unmarshal(dAtA []byte) error { } return nil } +func (m *SeenTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeenTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeenTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field TxKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TxKey = append(m.TxKey[:0], dAtA[iNdEx:postIndex]...) + if m.TxKey == nil { + m.TxKey = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WantTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WantTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WantTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TxKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { 
+ break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TxKey = append(m.TxKey[:0], dAtA[iNdEx:postIndex]...) + if m.TxKey == nil { + m.TxKey = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Message) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -450,6 +892,76 @@ func (m *Message) Unmarshal(dAtA []byte) error { } m.Sum = &Message_Txs{v} iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SeenTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SeenTx{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_SeenTx{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WantTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &WantTx{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_WantTx{v} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/proto/tendermint/mempool/types.proto b/proto/tendermint/mempool/types.proto index 60bafff03d1..7b1a89800b8 100644 --- a/proto/tendermint/mempool/types.proto +++ b/proto/tendermint/mempool/types.proto @@ -7,8 +7,18 @@ message Txs { repeated bytes txs = 1; } +message SeenTx { + bytes tx_key = 1; +} + +message WantTx { + bytes tx_key = 1; +} + message Message { oneof sum { - Txs txs = 1; + Txs txs = 1; + SeenTx seen_tx = 2; + WantTx want_tx = 3; } } diff --git a/proto/tendermint/rpc/grpc/types.pb.go b/proto/tendermint/rpc/grpc/types.pb.go index c27afb4e246..1ab1e08e452 100644 --- a/proto/tendermint/rpc/grpc/types.pb.go +++ b/proto/tendermint/rpc/grpc/types.pb.go @@ -7,20 +7,28 @@ import ( context "context" fmt "fmt" types "github.com/cometbft/cometbft/abci/types" + crypto "github.com/cometbft/cometbft/proto/tendermint/crypto" + p2p "github.com/cometbft/cometbft/proto/tendermint/p2p" + types1 "github.com/cometbft/cometbft/proto/tendermint/types" + _ "github.com/cosmos/gogoproto/gogoproto" grpc1 "github.com/cosmos/gogoproto/grpc" proto "github.com/cosmos/gogoproto/proto" + _ "github.com/cosmos/gogoproto/types" + github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" io "io" math "math" math_bits "math/bits" + time "time" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +var _ = time.Kitchen // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. 
@@ -196,374 +204,4557 @@ func (m *ResponseBroadcastTx) GetTxResult() *types.ExecTxResult { return nil } -func init() { - proto.RegisterType((*RequestPing)(nil), "tendermint.rpc.grpc.RequestPing") - proto.RegisterType((*RequestBroadcastTx)(nil), "tendermint.rpc.grpc.RequestBroadcastTx") - proto.RegisterType((*ResponsePing)(nil), "tendermint.rpc.grpc.ResponsePing") - proto.RegisterType((*ResponseBroadcastTx)(nil), "tendermint.rpc.grpc.ResponseBroadcastTx") +// BlockByHashRequest is a request to get a block by its hash. +type BlockByHashRequest struct { + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + Prove bool `protobuf:"varint,2,opt,name=prove,proto3" json:"prove,omitempty"` } -func init() { proto.RegisterFile("tendermint/rpc/grpc/types.proto", fileDescriptor_0ffff5682c662b95) } - -var fileDescriptor_0ffff5682c662b95 = []byte{ - // 324 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0x31, 0x4f, 0x02, 0x31, - 0x14, 0xc7, 0x29, 0x31, 0x8a, 0x05, 0x19, 0xca, 0x42, 0x30, 0x9e, 0x48, 0x4c, 0x64, 0x2a, 0x09, - 0x6e, 0x32, 0x89, 0x31, 0xd1, 0xb8, 0x90, 0x86, 0xc9, 0x05, 0xb9, 0xf2, 0x84, 0x8b, 0x72, 0x3d, - 0xdb, 0x47, 0x52, 0xbf, 0x84, 0xf1, 0x0b, 0xb9, 0x3b, 0x32, 0x3a, 0x1a, 0xf8, 0x22, 0xa6, 0x27, - 0x27, 0x35, 0x46, 0x96, 0xe6, 0xdf, 0xe6, 0xff, 0x7b, 0xfd, 0xbf, 0xd7, 0xd2, 0x43, 0x84, 0x78, - 0x04, 0x7a, 0x1a, 0xc5, 0xd8, 0xd2, 0x89, 0x6c, 0x8d, 0xdd, 0x82, 0xcf, 0x09, 0x18, 0x9e, 0x68, - 0x85, 0x8a, 0x55, 0xd6, 0x06, 0xae, 0x13, 0xc9, 0x9d, 0xa1, 0xb6, 0xef, 0x51, 0xc3, 0x50, 0x46, - 0x3e, 0xd1, 0xd8, 0xa3, 0x45, 0x01, 0x4f, 0x33, 0x30, 0xd8, 0x8b, 0xe2, 0x71, 0xe3, 0x98, 0xb2, - 0xd5, 0xb6, 0xab, 0xd5, 0x70, 0x24, 0x87, 0x06, 0xfb, 0x96, 0x95, 0x69, 0x1e, 0x6d, 0x95, 0xd4, - 0x49, 0xb3, 0x24, 0xf2, 0x68, 0x1b, 0x65, 0x5a, 0x12, 0x60, 0x12, 0x15, 0x1b, 0x48, 0xa9, 0x17, - 0x42, 0x2b, 0xd9, 0x81, 0xcf, 0x75, 0x68, 0x41, 0x4e, 0x40, 0x3e, 0x0c, 0x56, 0x74, 0xb1, 0x5d, - 
0xe7, 0x5e, 0x42, 0x17, 0x86, 0x67, 0xdc, 0x85, 0x33, 0xf6, 0xad, 0xd8, 0x91, 0xdf, 0x82, 0x9d, - 0xd1, 0x5d, 0xb4, 0x03, 0x0d, 0x66, 0xf6, 0x88, 0xd5, 0x7c, 0x4a, 0x1f, 0xfc, 0xa1, 0x2f, 0x2d, - 0xc8, 0xbe, 0x15, 0xa9, 0x49, 0x14, 0x70, 0xa5, 0xda, 0x6f, 0x84, 0x96, 0x7e, 0x82, 0x9c, 0xf7, - 0xae, 0xd9, 0x0d, 0xdd, 0x72, 0x49, 0xd9, 0xaf, 0xfb, 0xb3, 0x09, 0x71, 0x6f, 0x02, 0xb5, 0xa3, - 0x7f, 0x1c, 0xeb, 0x76, 0xd9, 0x1d, 0x2d, 0xfa, 0x5d, 0x9e, 0x6c, 0xaa, 0xe9, 0x19, 0x6b, 0xcd, - 0x8d, 0xa5, 0x3d, 0x67, 0xf7, 0xea, 0x7d, 0x11, 0x90, 0xf9, 0x22, 0x20, 0x9f, 0x8b, 0x80, 0xbc, - 0x2e, 0x83, 0xdc, 0x7c, 0x19, 0xe4, 0x3e, 0x96, 0x41, 0xee, 0x96, 0x8f, 0x23, 0x9c, 0xcc, 0x42, - 0x2e, 0xd5, 0xb4, 0x25, 0xd5, 0x14, 0x30, 0xbc, 0xc7, 0xb5, 0xc8, 0x3e, 0x45, 0x47, 0x2a, 0x0d, - 0x4e, 0x84, 0xdb, 0xe9, 0x33, 0x9f, 0x7e, 0x05, 0x00, 0x00, 0xff, 0xff, 0x0c, 0xca, 0xdb, 0xe7, - 0x3b, 0x02, 0x00, 0x00, +func (m *BlockByHashRequest) Reset() { *m = BlockByHashRequest{} } +func (m *BlockByHashRequest) String() string { return proto.CompactTextString(m) } +func (*BlockByHashRequest) ProtoMessage() {} +func (*BlockByHashRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{4} } - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// BroadcastAPIClient is the client API for BroadcastAPI service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type BroadcastAPIClient interface { - Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) - BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) +func (m *BlockByHashRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - -type broadcastAPIClient struct { - cc grpc1.ClientConn +func (m *BlockByHashRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockByHashRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } - -func NewBroadcastAPIClient(cc grpc1.ClientConn) BroadcastAPIClient { - return &broadcastAPIClient{cc} +func (m *BlockByHashRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockByHashRequest.Merge(m, src) +} +func (m *BlockByHashRequest) XXX_Size() int { + return m.Size() +} +func (m *BlockByHashRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BlockByHashRequest.DiscardUnknown(m) } -func (c *broadcastAPIClient) Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) { - out := new(ResponsePing) - err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/Ping", in, out, opts...) - if err != nil { - return nil, err +var xxx_messageInfo_BlockByHashRequest proto.InternalMessageInfo + +func (m *BlockByHashRequest) GetHash() []byte { + if m != nil { + return m.Hash } - return out, nil + return nil } -func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) { - out := new(ResponseBroadcastTx) - err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx", in, out, opts...) 
- if err != nil { - return nil, err +func (m *BlockByHashRequest) GetProve() bool { + if m != nil { + return m.Prove } - return out, nil + return false } -// BroadcastAPIServer is the server API for BroadcastAPI service. -type BroadcastAPIServer interface { - Ping(context.Context, *RequestPing) (*ResponsePing, error) - BroadcastTx(context.Context, *RequestBroadcastTx) (*ResponseBroadcastTx, error) +// BlockByHeightRequest is a request to get a block by its height. +type BlockByHeightRequest struct { + // Height the requested block height. + // If height is equal to 0, the latest height stored in the block store + // will be used. + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + // Prove set to true to return the parts proofs. + Prove bool `protobuf:"varint,2,opt,name=prove,proto3" json:"prove,omitempty"` } -// UnimplementedBroadcastAPIServer can be embedded to have forward compatible implementations. -type UnimplementedBroadcastAPIServer struct { +func (m *BlockByHeightRequest) Reset() { *m = BlockByHeightRequest{} } +func (m *BlockByHeightRequest) String() string { return proto.CompactTextString(m) } +func (*BlockByHeightRequest) ProtoMessage() {} +func (*BlockByHeightRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{5} } - -func (*UnimplementedBroadcastAPIServer) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) { - return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") +func (m *BlockByHeightRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } -func (*UnimplementedBroadcastAPIServer) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { - return nil, status.Errorf(codes.Unimplemented, "method BroadcastTx not implemented") +func (m *BlockByHeightRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockByHeightRequest.Marshal(b, m, 
deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } - -func RegisterBroadcastAPIServer(s grpc1.Server, srv BroadcastAPIServer) { - s.RegisterService(&_BroadcastAPI_serviceDesc, srv) +func (m *BlockByHeightRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockByHeightRequest.Merge(m, src) +} +func (m *BlockByHeightRequest) XXX_Size() int { + return m.Size() +} +func (m *BlockByHeightRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BlockByHeightRequest.DiscardUnknown(m) } -func _BroadcastAPI_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestPing) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BroadcastAPIServer).Ping(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/Ping", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BroadcastAPIServer).Ping(ctx, req.(*RequestPing)) +var xxx_messageInfo_BlockByHeightRequest proto.InternalMessageInfo + +func (m *BlockByHeightRequest) GetHeight() int64 { + if m != nil { + return m.Height } - return interceptor(ctx, in, info, handler) + return 0 } -func _BroadcastAPI_BroadcastTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestBroadcastTx) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BroadcastAPIServer).BroadcastTx(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BroadcastAPIServer).BroadcastTx(ctx, req.(*RequestBroadcastTx)) +func (m 
*BlockByHeightRequest) GetProve() bool { + if m != nil { + return m.Prove } - return interceptor(ctx, in, info, handler) + return false } -var BroadcastAPI_serviceDesc = _BroadcastAPI_serviceDesc -var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{ - ServiceName: "tendermint.rpc.grpc.BroadcastAPI", - HandlerType: (*BroadcastAPIServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Ping", - Handler: _BroadcastAPI_Ping_Handler, - }, - { - MethodName: "BroadcastTx", - Handler: _BroadcastAPI_BroadcastTx_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "tendermint/rpc/grpc/types.proto", +// CommitRequest is a request to get the commit of a block. +type CommitRequest struct { + // Height the requested block commit height. + // If height is equal to 0, the latest height stored in the block store + // will be used. + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` } -func (m *RequestPing) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *CommitRequest) Reset() { *m = CommitRequest{} } +func (m *CommitRequest) String() string { return proto.CompactTextString(m) } +func (*CommitRequest) ProtoMessage() {} +func (*CommitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{6} +} +func (m *CommitRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CommitRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return dAtA[:n], nil } - -func (m *RequestPing) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *CommitRequest) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_CommitRequest.Merge(m, src) } - -func (m *RequestPing) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil +func (m *CommitRequest) XXX_Size() int { + return m.Size() +} +func (m *CommitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CommitRequest.DiscardUnknown(m) } -func (m *RequestBroadcastTx) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +var xxx_messageInfo_CommitRequest proto.InternalMessageInfo + +func (m *CommitRequest) GetHeight() int64 { + if m != nil { + return m.Height } - return dAtA[:n], nil + return 0 } -func (m *RequestBroadcastTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// ValidatorSetRequest is a request to get the validator set of a block. +type ValidatorSetRequest struct { + // Height the requested validator set height. + // If height is equal to 0, the latest height stored in the block store + // will be used. 
+ Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` } -func (m *RequestBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Tx) > 0 { - i -= len(m.Tx) - copy(dAtA[i:], m.Tx) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) - i-- - dAtA[i] = 0xa +func (m *ValidatorSetRequest) Reset() { *m = ValidatorSetRequest{} } +func (m *ValidatorSetRequest) String() string { return proto.CompactTextString(m) } +func (*ValidatorSetRequest) ProtoMessage() {} +func (*ValidatorSetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{7} +} +func (m *ValidatorSetRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatorSetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValidatorSetRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return len(dAtA) - i, nil +} +func (m *ValidatorSetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorSetRequest.Merge(m, src) +} +func (m *ValidatorSetRequest) XXX_Size() int { + return m.Size() +} +func (m *ValidatorSetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorSetRequest.DiscardUnknown(m) } -func (m *ResponsePing) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +var xxx_messageInfo_ValidatorSetRequest proto.InternalMessageInfo + +func (m *ValidatorSetRequest) GetHeight() int64 { + if m != nil { + return m.Height } - return dAtA[:n], nil + return 0 } -func (m *ResponsePing) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// SubscribeNewHeightsRequest is a request to subscribe to new heights. 
+type SubscribeNewHeightsRequest struct { } -func (m *ResponsePing) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil +func (m *SubscribeNewHeightsRequest) Reset() { *m = SubscribeNewHeightsRequest{} } +func (m *SubscribeNewHeightsRequest) String() string { return proto.CompactTextString(m) } +func (*SubscribeNewHeightsRequest) ProtoMessage() {} +func (*SubscribeNewHeightsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{8} } - -func (m *ResponseBroadcastTx) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *SubscribeNewHeightsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubscribeNewHeightsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SubscribeNewHeightsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return dAtA[:n], nil +} +func (m *SubscribeNewHeightsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubscribeNewHeightsRequest.Merge(m, src) +} +func (m *SubscribeNewHeightsRequest) XXX_Size() int { + return m.Size() +} +func (m *SubscribeNewHeightsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SubscribeNewHeightsRequest.DiscardUnknown(m) } -func (m *ResponseBroadcastTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +var xxx_messageInfo_SubscribeNewHeightsRequest proto.InternalMessageInfo + +// StatusRequest is a request to get the status of the node. 
+type StatusRequest struct { } -func (m *ResponseBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.TxResult != nil { - { - size, err := m.TxResult.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) +func (m *StatusRequest) Reset() { *m = StatusRequest{} } +func (m *StatusRequest) String() string { return proto.CompactTextString(m) } +func (*StatusRequest) ProtoMessage() {} +func (*StatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{9} +} +func (m *StatusRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i-- - dAtA[i] = 0x12 + return b[:n], nil } - if m.CheckTx != nil { - { - size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) +} +func (m *StatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusRequest.Merge(m, src) +} +func (m *StatusRequest) XXX_Size() int { + return m.Size() +} +func (m *StatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusRequest proto.InternalMessageInfo + +// BlockByHashResponse is a response to a BlockByHashRequest. +type BlockByHashResponse struct { + BlockPart *types1.Part `protobuf:"bytes,1,opt,name=block_part,json=blockPart,proto3" json:"block_part,omitempty"` + // Commit is only set in the first part, and + // it stays nil in the remaining ones. 
+ Commit *types1.Commit `protobuf:"bytes,2,opt,name=commit,proto3" json:"commit,omitempty"` + // ValidatorSet is only set in the first part, and + // it stays nil in the remaining ones. + ValidatorSet *types1.ValidatorSet `protobuf:"bytes,3,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` + IsLast bool `protobuf:"varint,4,opt,name=is_last,json=isLast,proto3" json:"is_last,omitempty"` +} + +func (m *BlockByHashResponse) Reset() { *m = BlockByHashResponse{} } +func (m *BlockByHashResponse) String() string { return proto.CompactTextString(m) } +func (*BlockByHashResponse) ProtoMessage() {} +func (*BlockByHashResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{10} +} +func (m *BlockByHashResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockByHashResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockByHashResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i-- - dAtA[i] = 0xa + return b[:n], nil } - return len(dAtA) - i, nil +} +func (m *BlockByHashResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockByHashResponse.Merge(m, src) +} +func (m *BlockByHashResponse) XXX_Size() int { + return m.Size() +} +func (m *BlockByHashResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BlockByHashResponse.DiscardUnknown(m) } -func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { - offset -= sovTypes(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +var xxx_messageInfo_BlockByHashResponse proto.InternalMessageInfo + +func (m *BlockByHashResponse) GetBlockPart() *types1.Part { + if m != nil { + return m.BlockPart } - dAtA[offset] = uint8(v) - return base + return nil } -func (m *RequestPing) Size() (n int) { - if m == nil { - return 0 + +func (m 
*BlockByHashResponse) GetCommit() *types1.Commit { + if m != nil { + return m.Commit } - var l int - _ = l - return n + return nil } -func (m *RequestBroadcastTx) Size() (n int) { - if m == nil { - return 0 +func (m *BlockByHashResponse) GetValidatorSet() *types1.ValidatorSet { + if m != nil { + return m.ValidatorSet } - var l int - _ = l - l = len(m.Tx) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + return nil +} + +func (m *BlockByHashResponse) GetIsLast() bool { + if m != nil { + return m.IsLast } - return n + return false } -func (m *ResponsePing) Size() (n int) { - if m == nil { - return 0 +// BlockByHeightResponse is a response to a BlockByHeightRequest. +type BlockByHeightResponse struct { + BlockPart *types1.Part `protobuf:"bytes,1,opt,name=block_part,json=blockPart,proto3" json:"block_part,omitempty"` + // Commit is only set in the first part, and + // it stays nil in the remaining ones. + Commit *types1.Commit `protobuf:"bytes,2,opt,name=commit,proto3" json:"commit,omitempty"` + // ValidatorSet is only set in the first part, and + // it stays nil in the remaining ones. 
+ ValidatorSet *types1.ValidatorSet `protobuf:"bytes,3,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` + IsLast bool `protobuf:"varint,4,opt,name=is_last,json=isLast,proto3" json:"is_last,omitempty"` +} + +func (m *BlockByHeightResponse) Reset() { *m = BlockByHeightResponse{} } +func (m *BlockByHeightResponse) String() string { return proto.CompactTextString(m) } +func (*BlockByHeightResponse) ProtoMessage() {} +func (*BlockByHeightResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{11} +} +func (m *BlockByHeightResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockByHeightResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockByHeightResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - var l int - _ = l - return n +} +func (m *BlockByHeightResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockByHeightResponse.Merge(m, src) +} +func (m *BlockByHeightResponse) XXX_Size() int { + return m.Size() +} +func (m *BlockByHeightResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BlockByHeightResponse.DiscardUnknown(m) } -func (m *ResponseBroadcastTx) Size() (n int) { - if m == nil { - return 0 +var xxx_messageInfo_BlockByHeightResponse proto.InternalMessageInfo + +func (m *BlockByHeightResponse) GetBlockPart() *types1.Part { + if m != nil { + return m.BlockPart } - var l int - _ = l - if m.CheckTx != nil { - l = m.CheckTx.Size() - n += 1 + l + sovTypes(uint64(l)) + return nil +} + +func (m *BlockByHeightResponse) GetCommit() *types1.Commit { + if m != nil { + return m.Commit } - if m.TxResult != nil { - l = m.TxResult.Size() - n += 1 + l + sovTypes(uint64(l)) + return nil +} + +func (m *BlockByHeightResponse) GetValidatorSet() *types1.ValidatorSet { + if m != nil { + return m.ValidatorSet 
} - return n + return nil } -func sovTypes(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 +func (m *BlockByHeightResponse) GetIsLast() bool { + if m != nil { + return m.IsLast + } + return false } -func sozTypes(x uint64) (n int) { - return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + +// CommitResponse is a response to a CommitRequest. +type CommitResponse struct { + Commit *types1.Commit `protobuf:"bytes,1,opt,name=commit,proto3" json:"commit,omitempty"` } -func (m *RequestPing) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + +func (m *CommitResponse) Reset() { *m = CommitResponse{} } +func (m *CommitResponse) String() string { return proto.CompactTextString(m) } +func (*CommitResponse) ProtoMessage() {} +func (*CommitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{12} +} +func (m *CommitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CommitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitResponse.Merge(m, src) +} +func (m *CommitResponse) XXX_Size() int { + return m.Size() +} +func (m *CommitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CommitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CommitResponse proto.InternalMessageInfo + +func (m *CommitResponse) GetCommit() *types1.Commit { + if m != nil { + return m.Commit + } + return nil +} + +// ValidatorSetResponse is a response to a ValidatorSetRequest. 
+type ValidatorSetResponse struct { + // ValidatorSet the requested validator set. + ValidatorSet *types1.ValidatorSet `protobuf:"bytes,1,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` + // Height the height corresponding to the returned + // validator set. + Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *ValidatorSetResponse) Reset() { *m = ValidatorSetResponse{} } +func (m *ValidatorSetResponse) String() string { return proto.CompactTextString(m) } +func (*ValidatorSetResponse) ProtoMessage() {} +func (*ValidatorSetResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{13} +} +func (m *ValidatorSetResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatorSetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValidatorSetResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ValidatorSetResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorSetResponse.Merge(m, src) +} +func (m *ValidatorSetResponse) XXX_Size() int { + return m.Size() +} +func (m *ValidatorSetResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorSetResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatorSetResponse proto.InternalMessageInfo + +func (m *ValidatorSetResponse) GetValidatorSet() *types1.ValidatorSet { + if m != nil { + return m.ValidatorSet + } + return nil +} + +func (m *ValidatorSetResponse) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +// NewHeightEvent is an event that indicates a new height. 
+type SubscribeNewHeightsResponse struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` +} + +func (m *SubscribeNewHeightsResponse) Reset() { *m = SubscribeNewHeightsResponse{} } +func (m *SubscribeNewHeightsResponse) String() string { return proto.CompactTextString(m) } +func (*SubscribeNewHeightsResponse) ProtoMessage() {} +func (*SubscribeNewHeightsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{14} +} +func (m *SubscribeNewHeightsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubscribeNewHeightsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SubscribeNewHeightsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SubscribeNewHeightsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubscribeNewHeightsResponse.Merge(m, src) +} +func (m *SubscribeNewHeightsResponse) XXX_Size() int { + return m.Size() +} +func (m *SubscribeNewHeightsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SubscribeNewHeightsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SubscribeNewHeightsResponse proto.InternalMessageInfo + +func (m *SubscribeNewHeightsResponse) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *SubscribeNewHeightsResponse) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +// StatusResponse is a response to a StatusRequest. 
+type StatusResponse struct { + NodeInfo *p2p.DefaultNodeInfo `protobuf:"bytes,1,opt,name=node_info,json=nodeInfo,proto3" json:"node_info,omitempty"` + SyncInfo *SyncInfo `protobuf:"bytes,2,opt,name=sync_info,json=syncInfo,proto3" json:"sync_info,omitempty"` + ValidatorInfo *ValidatorInfo `protobuf:"bytes,3,opt,name=validator_info,json=validatorInfo,proto3" json:"validator_info,omitempty"` +} + +func (m *StatusResponse) Reset() { *m = StatusResponse{} } +func (m *StatusResponse) String() string { return proto.CompactTextString(m) } +func (*StatusResponse) ProtoMessage() {} +func (*StatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{15} +} +func (m *StatusResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusResponse.Merge(m, src) +} +func (m *StatusResponse) XXX_Size() int { + return m.Size() +} +func (m *StatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusResponse proto.InternalMessageInfo + +func (m *StatusResponse) GetNodeInfo() *p2p.DefaultNodeInfo { + if m != nil { + return m.NodeInfo + } + return nil +} + +func (m *StatusResponse) GetSyncInfo() *SyncInfo { + if m != nil { + return m.SyncInfo + } + return nil +} + +func (m *StatusResponse) GetValidatorInfo() *ValidatorInfo { + if m != nil { + return m.ValidatorInfo + } + return nil +} + +// SyncInfo is information about the node's sync status. 
+type SyncInfo struct { + LatestBlockHash []byte `protobuf:"bytes,1,opt,name=latest_block_hash,json=latestBlockHash,proto3" json:"latest_block_hash,omitempty"` + LatestAppHash []byte `protobuf:"bytes,2,opt,name=latest_app_hash,json=latestAppHash,proto3" json:"latest_app_hash,omitempty"` + LatestBlockHeight int64 `protobuf:"varint,3,opt,name=latest_block_height,json=latestBlockHeight,proto3" json:"latest_block_height,omitempty"` + LatestBlockTime time.Time `protobuf:"bytes,4,opt,name=latest_block_time,json=latestBlockTime,proto3,stdtime" json:"latest_block_time"` + EarliestBlockHash []byte `protobuf:"bytes,5,opt,name=earliest_block_hash,json=earliestBlockHash,proto3" json:"earliest_block_hash,omitempty"` + EarliestAppHash []byte `protobuf:"bytes,6,opt,name=earliest_app_hash,json=earliestAppHash,proto3" json:"earliest_app_hash,omitempty"` + EarliestBlockHeight int64 `protobuf:"varint,7,opt,name=earliest_block_height,json=earliestBlockHeight,proto3" json:"earliest_block_height,omitempty"` + EarliestBlockTime time.Time `protobuf:"bytes,8,opt,name=earliest_block_time,json=earliestBlockTime,proto3,stdtime" json:"earliest_block_time"` + CatchingUp bool `protobuf:"varint,9,opt,name=catching_up,json=catchingUp,proto3" json:"catching_up,omitempty"` +} + +func (m *SyncInfo) Reset() { *m = SyncInfo{} } +func (m *SyncInfo) String() string { return proto.CompactTextString(m) } +func (*SyncInfo) ProtoMessage() {} +func (*SyncInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{16} +} +func (m *SyncInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SyncInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SyncInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SyncInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SyncInfo.Merge(m, src) +} 
+func (m *SyncInfo) XXX_Size() int { + return m.Size() +} +func (m *SyncInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SyncInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SyncInfo proto.InternalMessageInfo + +func (m *SyncInfo) GetLatestBlockHash() []byte { + if m != nil { + return m.LatestBlockHash + } + return nil +} + +func (m *SyncInfo) GetLatestAppHash() []byte { + if m != nil { + return m.LatestAppHash + } + return nil +} + +func (m *SyncInfo) GetLatestBlockHeight() int64 { + if m != nil { + return m.LatestBlockHeight + } + return 0 +} + +func (m *SyncInfo) GetLatestBlockTime() time.Time { + if m != nil { + return m.LatestBlockTime + } + return time.Time{} +} + +func (m *SyncInfo) GetEarliestBlockHash() []byte { + if m != nil { + return m.EarliestBlockHash + } + return nil +} + +func (m *SyncInfo) GetEarliestAppHash() []byte { + if m != nil { + return m.EarliestAppHash + } + return nil +} + +func (m *SyncInfo) GetEarliestBlockHeight() int64 { + if m != nil { + return m.EarliestBlockHeight + } + return 0 +} + +func (m *SyncInfo) GetEarliestBlockTime() time.Time { + if m != nil { + return m.EarliestBlockTime + } + return time.Time{} +} + +func (m *SyncInfo) GetCatchingUp() bool { + if m != nil { + return m.CatchingUp + } + return false +} + +// ValidatorInfo is information about a validator. 
+type ValidatorInfo struct { + Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + PubKey *crypto.PublicKey `protobuf:"bytes,2,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` + VotingPower int64 `protobuf:"varint,3,opt,name=voting_power,json=votingPower,proto3" json:"voting_power,omitempty"` +} + +func (m *ValidatorInfo) Reset() { *m = ValidatorInfo{} } +func (m *ValidatorInfo) String() string { return proto.CompactTextString(m) } +func (*ValidatorInfo) ProtoMessage() {} +func (*ValidatorInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{17} +} +func (m *ValidatorInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatorInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValidatorInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ValidatorInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorInfo.Merge(m, src) +} +func (m *ValidatorInfo) XXX_Size() int { + return m.Size() +} +func (m *ValidatorInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatorInfo proto.InternalMessageInfo + +func (m *ValidatorInfo) GetAddress() []byte { + if m != nil { + return m.Address + } + return nil +} + +func (m *ValidatorInfo) GetPubKey() *crypto.PublicKey { + if m != nil { + return m.PubKey + } + return nil +} + +func (m *ValidatorInfo) GetVotingPower() int64 { + if m != nil { + return m.VotingPower + } + return 0 +} + +type DataRootInclusionProofRequest struct { + // Height the height of block we want to prove. + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + // Start the start of the data commitment range containing the block. 
+ Start uint64 `protobuf:"varint,2,opt,name=start,proto3" json:"start,omitempty"` + // End the end exclusive of the data commitment range containing the block. + End uint64 `protobuf:"varint,3,opt,name=end,proto3" json:"end,omitempty"` +} + +func (m *DataRootInclusionProofRequest) Reset() { *m = DataRootInclusionProofRequest{} } +func (m *DataRootInclusionProofRequest) String() string { return proto.CompactTextString(m) } +func (*DataRootInclusionProofRequest) ProtoMessage() {} +func (*DataRootInclusionProofRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{18} +} +func (m *DataRootInclusionProofRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DataRootInclusionProofRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DataRootInclusionProofRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DataRootInclusionProofRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DataRootInclusionProofRequest.Merge(m, src) +} +func (m *DataRootInclusionProofRequest) XXX_Size() int { + return m.Size() +} +func (m *DataRootInclusionProofRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DataRootInclusionProofRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DataRootInclusionProofRequest proto.InternalMessageInfo + +func (m *DataRootInclusionProofRequest) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *DataRootInclusionProofRequest) GetStart() uint64 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *DataRootInclusionProofRequest) GetEnd() uint64 { + if m != nil { + return m.End + } + return 0 +} + +type DataRootInclusionProofResponse struct { + Proof crypto.Proof `protobuf:"bytes,1,opt,name=proof,proto3" json:"proof"` +} + +func (m *DataRootInclusionProofResponse) Reset() { *m 
= DataRootInclusionProofResponse{} } +func (m *DataRootInclusionProofResponse) String() string { return proto.CompactTextString(m) } +func (*DataRootInclusionProofResponse) ProtoMessage() {} +func (*DataRootInclusionProofResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{19} +} +func (m *DataRootInclusionProofResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DataRootInclusionProofResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DataRootInclusionProofResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DataRootInclusionProofResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DataRootInclusionProofResponse.Merge(m, src) +} +func (m *DataRootInclusionProofResponse) XXX_Size() int { + return m.Size() +} +func (m *DataRootInclusionProofResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DataRootInclusionProofResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DataRootInclusionProofResponse proto.InternalMessageInfo + +func (m *DataRootInclusionProofResponse) GetProof() crypto.Proof { + if m != nil { + return m.Proof + } + return crypto.Proof{} +} + +func init() { + proto.RegisterType((*RequestPing)(nil), "tendermint.rpc.grpc.RequestPing") + proto.RegisterType((*RequestBroadcastTx)(nil), "tendermint.rpc.grpc.RequestBroadcastTx") + proto.RegisterType((*ResponsePing)(nil), "tendermint.rpc.grpc.ResponsePing") + proto.RegisterType((*ResponseBroadcastTx)(nil), "tendermint.rpc.grpc.ResponseBroadcastTx") + proto.RegisterType((*BlockByHashRequest)(nil), "tendermint.rpc.grpc.BlockByHashRequest") + proto.RegisterType((*BlockByHeightRequest)(nil), "tendermint.rpc.grpc.BlockByHeightRequest") + proto.RegisterType((*CommitRequest)(nil), "tendermint.rpc.grpc.CommitRequest") + 
proto.RegisterType((*ValidatorSetRequest)(nil), "tendermint.rpc.grpc.ValidatorSetRequest") + proto.RegisterType((*SubscribeNewHeightsRequest)(nil), "tendermint.rpc.grpc.SubscribeNewHeightsRequest") + proto.RegisterType((*StatusRequest)(nil), "tendermint.rpc.grpc.StatusRequest") + proto.RegisterType((*BlockByHashResponse)(nil), "tendermint.rpc.grpc.BlockByHashResponse") + proto.RegisterType((*BlockByHeightResponse)(nil), "tendermint.rpc.grpc.BlockByHeightResponse") + proto.RegisterType((*CommitResponse)(nil), "tendermint.rpc.grpc.CommitResponse") + proto.RegisterType((*ValidatorSetResponse)(nil), "tendermint.rpc.grpc.ValidatorSetResponse") + proto.RegisterType((*SubscribeNewHeightsResponse)(nil), "tendermint.rpc.grpc.SubscribeNewHeightsResponse") + proto.RegisterType((*StatusResponse)(nil), "tendermint.rpc.grpc.StatusResponse") + proto.RegisterType((*SyncInfo)(nil), "tendermint.rpc.grpc.SyncInfo") + proto.RegisterType((*ValidatorInfo)(nil), "tendermint.rpc.grpc.ValidatorInfo") + proto.RegisterType((*DataRootInclusionProofRequest)(nil), "tendermint.rpc.grpc.DataRootInclusionProofRequest") + proto.RegisterType((*DataRootInclusionProofResponse)(nil), "tendermint.rpc.grpc.DataRootInclusionProofResponse") +} + +func init() { proto.RegisterFile("tendermint/rpc/grpc/types.proto", fileDescriptor_0ffff5682c662b95) } + +var fileDescriptor_0ffff5682c662b95 = []byte{ + // 1207 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x57, 0x4b, 0x6f, 0xdb, 0xc6, + 0x13, 0x37, 0x6d, 0x45, 0x96, 0x47, 0x92, 0xf3, 0xcf, 0xca, 0xc9, 0x5f, 0x60, 0x12, 0xc9, 0x61, + 0x8b, 0xe6, 0x01, 0x94, 0x32, 0x94, 0xe6, 0xd2, 0x14, 0x05, 0x2c, 0xbb, 0x40, 0x04, 0x17, 0x81, + 0x4a, 0xbb, 0x39, 0xf4, 0xc2, 0x92, 0xd4, 0x4a, 0x22, 0x2c, 0x71, 0x59, 0xee, 0xd2, 0x91, 0x7a, + 0x2b, 0x7a, 0x2f, 0x02, 0xf4, 0xf3, 0xf4, 0x9e, 0x63, 0x2e, 0x05, 0x7a, 0x28, 0xd2, 0xc2, 0x06, + 0xda, 0xaf, 0x51, 0xec, 0x83, 0x12, 0x69, 0x3d, 0xe2, 0xf4, 0xd8, 0x8b, 0x30, 
0x9c, 0x99, 0xdf, + 0xec, 0xcc, 0xec, 0xcc, 0xce, 0x08, 0xea, 0x0c, 0x07, 0x5d, 0x1c, 0x8d, 0xfc, 0x80, 0x35, 0xa2, + 0xd0, 0x6b, 0xf4, 0xf9, 0x0f, 0x9b, 0x84, 0x98, 0x9a, 0x61, 0x44, 0x18, 0x41, 0x95, 0x99, 0x82, + 0x19, 0x85, 0x9e, 0xc9, 0x15, 0xf4, 0xdb, 0x29, 0x94, 0xe3, 0x7a, 0x7e, 0x1a, 0xa1, 0xdf, 0x49, + 0x09, 0x05, 0x3f, 0x23, 0xd5, 0x53, 0xd2, 0xb0, 0x19, 0x2e, 0x45, 0x7a, 0xd1, 0x24, 0x64, 0xa4, + 0x71, 0x8a, 0x27, 0x89, 0xf4, 0xee, 0xbc, 0x34, 0x8c, 0x08, 0xe9, 0x29, 0xf1, 0xee, 0xdc, 0xb1, + 0x67, 0xce, 0xd0, 0xef, 0x3a, 0x8c, 0x44, 0x4a, 0xa3, 0xde, 0x27, 0xa4, 0x3f, 0xc4, 0x0d, 0xf1, + 0xe5, 0xc6, 0xbd, 0x06, 0xf3, 0x47, 0x98, 0x32, 0x67, 0x14, 0x2a, 0x85, 0x9d, 0x3e, 0xe9, 0x13, + 0x41, 0x36, 0x38, 0x25, 0xb9, 0x46, 0x19, 0x8a, 0x16, 0xfe, 0x2e, 0xc6, 0x94, 0x75, 0xfc, 0xa0, + 0x6f, 0x7c, 0x08, 0x48, 0x7d, 0xb6, 0x22, 0xe2, 0x74, 0x3d, 0x87, 0xb2, 0x93, 0x31, 0xda, 0x86, + 0x75, 0x36, 0xae, 0x6a, 0xbb, 0xda, 0x83, 0x92, 0xb5, 0xce, 0xc6, 0xc6, 0x36, 0x94, 0x2c, 0x4c, + 0x43, 0x12, 0x50, 0x2c, 0x50, 0x3f, 0x69, 0x50, 0x49, 0x18, 0x69, 0xdc, 0x53, 0x28, 0x78, 0x03, + 0xec, 0x9d, 0xda, 0x0a, 0x5d, 0x6c, 0xee, 0x9a, 0xa9, 0x8c, 0xf3, 0xe4, 0x9a, 0x09, 0xee, 0x80, + 0x2b, 0x9e, 0x8c, 0xad, 0x4d, 0x4f, 0x12, 0xe8, 0x53, 0xd8, 0x62, 0x63, 0x3b, 0xc2, 0x34, 0x1e, + 0xb2, 0xea, 0xba, 0x40, 0xdf, 0x9d, 0x43, 0x7f, 0x31, 0xc6, 0xde, 0xc9, 0xd8, 0x12, 0x4a, 0x56, + 0x81, 0x29, 0xca, 0xf8, 0x1c, 0x50, 0x6b, 0x48, 0xbc, 0xd3, 0xd6, 0xe4, 0x99, 0x43, 0x07, 0x2a, + 0x22, 0x84, 0x20, 0x37, 0x70, 0xe8, 0x40, 0x05, 0x22, 0x68, 0xb4, 0x03, 0xd7, 0xc2, 0x88, 0x9c, + 0x61, 0x71, 0x42, 0xc1, 0x92, 0x1f, 0xc6, 0x21, 0xec, 0x24, 0x78, 0xec, 0xf7, 0x07, 0x2c, 0xb1, + 0x70, 0x0b, 0xf2, 0x03, 0xc1, 0x10, 0x36, 0x36, 0x2c, 0xf5, 0xb5, 0xc4, 0xca, 0x7d, 0x28, 0x1f, + 0x90, 0xd1, 0xc8, 0x7f, 0x17, 0xdc, 0xf8, 0x18, 0x2a, 0x2f, 0x92, 0xeb, 0x3c, 0xc6, 0xef, 0x54, + 0xbf, 0x03, 0xfa, 0x71, 0xec, 0x52, 0x2f, 0xf2, 0x5d, 0xfc, 0x1c, 0xbf, 0x94, 0x2e, 0x52, 0x85, + 0x32, 0xae, 0x43, 
0xf9, 0x98, 0x39, 0x2c, 0x9e, 0x32, 0x7e, 0xd7, 0xa0, 0x92, 0xc9, 0x86, 0x4c, + 0x38, 0x7a, 0x02, 0xe0, 0x72, 0xb6, 0x1d, 0x3a, 0x11, 0x53, 0xf7, 0x73, 0x2b, 0x9d, 0x61, 0x59, + 0xbd, 0x1d, 0x27, 0x62, 0xd6, 0x96, 0xd0, 0xe4, 0x24, 0xda, 0x83, 0xbc, 0x27, 0xa2, 0x52, 0x97, + 0x52, 0x9d, 0x87, 0xa8, 0xa8, 0x95, 0x1e, 0x3a, 0x80, 0xf2, 0xb4, 0x5a, 0x6d, 0x8a, 0x59, 0x75, + 0x43, 0x00, 0x6b, 0xf3, 0xc0, 0x4c, 0x16, 0x4a, 0x67, 0xa9, 0x2f, 0xf4, 0x7f, 0xd8, 0xf4, 0xa9, + 0x3d, 0x74, 0x28, 0xab, 0xe6, 0x44, 0x92, 0xf3, 0x3e, 0xfd, 0xd2, 0xa1, 0xcc, 0x78, 0xab, 0xc1, + 0xcd, 0x4b, 0x97, 0xf5, 0xdf, 0x0a, 0xb0, 0x05, 0xdb, 0x49, 0x19, 0xa9, 0xc0, 0x66, 0x1e, 0x6a, + 0x57, 0xf3, 0xd0, 0xa0, 0xb0, 0x93, 0xad, 0x30, 0x65, 0x69, 0xce, 0x73, 0xed, 0x5f, 0x78, 0x3e, + 0xab, 0xd3, 0xf5, 0x4c, 0x9d, 0xb6, 0xe1, 0xf6, 0xc2, 0x3a, 0x55, 0x67, 0x2f, 0x6b, 0xa6, 0xa4, + 0x4d, 0xd7, 0x67, 0x6d, 0x6a, 0xfc, 0xaa, 0xc1, 0x76, 0x52, 0xd5, 0x0a, 0xfe, 0x19, 0x6c, 0x05, + 0xa4, 0x8b, 0x6d, 0x3f, 0xe8, 0x11, 0xe5, 0x76, 0x3d, 0xed, 0x76, 0xd8, 0x0c, 0xcd, 0x43, 0xdc, + 0x73, 0xe2, 0x21, 0x7b, 0x4e, 0xba, 0xb8, 0x1d, 0xf4, 0x88, 0x55, 0x08, 0x14, 0xc5, 0x5f, 0x17, + 0x3a, 0x09, 0x3c, 0x89, 0x5e, 0xf0, 0xba, 0x24, 0xd3, 0xc0, 0x3c, 0x9e, 0x04, 0x9e, 0xc4, 0x52, + 0x45, 0xa1, 0x36, 0x6c, 0xcf, 0x92, 0x26, 0x0c, 0xc8, 0xfb, 0x36, 0x16, 0x1a, 0x98, 0x26, 0x4e, + 0x58, 0x99, 0xa5, 0x9b, 0x7f, 0x1a, 0x7f, 0x6f, 0x40, 0x21, 0x39, 0x01, 0x3d, 0x82, 0x1b, 0x43, + 0x87, 0x61, 0xca, 0x6c, 0x59, 0xb6, 0xa9, 0xc7, 0xea, 0xba, 0x14, 0x88, 0x3a, 0xe7, 0x4d, 0x8c, + 0x3e, 0x02, 0xc5, 0xb2, 0x9d, 0x30, 0xb4, 0x53, 0xf9, 0x2a, 0x4b, 0xf6, 0x7e, 0x18, 0x0a, 0x3d, + 0x13, 0x2a, 0x59, 0x9b, 0x32, 0xe3, 0x1b, 0x22, 0xe3, 0x37, 0xd2, 0x56, 0x65, 0xf2, 0x3b, 0x97, + 0x7c, 0xe0, 0x53, 0x44, 0xd4, 0x63, 0xb1, 0xa9, 0x9b, 0x72, 0xc4, 0x98, 0xc9, 0x88, 0x31, 0x4f, + 0x92, 0x11, 0xd3, 0x2a, 0xbc, 0x7e, 0x5b, 0x5f, 0x7b, 0xf5, 0x47, 0x5d, 0xcb, 0x78, 0xca, 0xe5, + 0xdc, 0x03, 0xec, 0x44, 0x43, 0xff, 0x52, 0x5c, 0xd7, 
0x84, 0xb7, 0x37, 0x12, 0xd1, 0x2c, 0xb2, + 0x47, 0x30, 0x65, 0xce, 0x62, 0xcb, 0xcb, 0x2c, 0x24, 0x82, 0x24, 0xba, 0x26, 0xdc, 0xbc, 0x6c, + 0x5b, 0xc6, 0xb7, 0x29, 0xe2, 0xab, 0x64, 0xad, 0xcb, 0x08, 0x4f, 0xe6, 0xfc, 0x11, 0x31, 0x16, + 0xde, 0x23, 0xc6, 0xac, 0xd7, 0x22, 0xca, 0x3a, 0x14, 0x3d, 0x87, 0x79, 0x03, 0x3f, 0xe8, 0xdb, + 0x71, 0x58, 0xdd, 0x12, 0x1d, 0x0c, 0x09, 0xeb, 0xeb, 0xd0, 0xf8, 0x51, 0x83, 0x72, 0xa6, 0x14, + 0x50, 0x15, 0x36, 0x9d, 0x6e, 0x37, 0xc2, 0x94, 0xaa, 0x4b, 0x4e, 0x3e, 0xd1, 0x13, 0xd8, 0x0c, + 0x63, 0xd7, 0x3e, 0xc5, 0x13, 0x55, 0x9a, 0x77, 0xd2, 0x95, 0x25, 0xd7, 0x03, 0xb3, 0x13, 0xbb, + 0x43, 0xdf, 0x3b, 0xc2, 0x13, 0x2b, 0x1f, 0xc6, 0xee, 0x11, 0x9e, 0xa0, 0x7b, 0x50, 0x3a, 0x23, + 0x8c, 0x7b, 0x10, 0x92, 0x97, 0x38, 0x52, 0x97, 0x5c, 0x94, 0xbc, 0x0e, 0x67, 0x19, 0x36, 0xdc, + 0x3d, 0x74, 0x98, 0x63, 0x11, 0xc2, 0xda, 0x81, 0x37, 0x8c, 0xa9, 0x4f, 0x82, 0x0e, 0xdf, 0x33, + 0xae, 0x30, 0xe1, 0x28, 0xe3, 0xcf, 0x28, 0x77, 0x28, 0x67, 0xc9, 0x0f, 0xf4, 0x3f, 0xd8, 0xc0, + 0x41, 0x57, 0x1c, 0x94, 0xb3, 0x38, 0x69, 0xbc, 0x80, 0xda, 0xb2, 0x03, 0x54, 0xdf, 0x7e, 0x22, + 0x66, 0x25, 0xe9, 0x2d, 0x7a, 0xbb, 0x92, 0xd0, 0xb8, 0xbc, 0x95, 0xe3, 0xf9, 0xb6, 0xa4, 0x72, + 0xf3, 0x17, 0x0d, 0x4a, 0xd3, 0xd5, 0x62, 0xbf, 0xd3, 0x46, 0x47, 0x90, 0xe3, 0xbb, 0x07, 0xda, + 0x5d, 0xd8, 0x74, 0xa9, 0x9d, 0x46, 0xbf, 0xb7, 0x44, 0x63, 0xb6, 0xc0, 0xa0, 0x6f, 0xa1, 0x98, + 0xde, 0x5b, 0xee, 0xaf, 0xb2, 0x99, 0x52, 0xd4, 0x1f, 0xac, 0x34, 0x9d, 0xd2, 0x6c, 0xfe, 0x95, + 0x83, 0xeb, 0xa2, 0x5a, 0xf6, 0x3b, 0xed, 0x63, 0x1c, 0x9d, 0xf9, 0x1e, 0x46, 0x2e, 0x14, 0x53, + 0x73, 0x79, 0xc9, 0xa9, 0xf3, 0x7b, 0xcc, 0x92, 0x53, 0x17, 0x8c, 0xf8, 0x3d, 0x0d, 0x0d, 0xa0, + 0x9c, 0x19, 0x8e, 0xe8, 0xe1, 0x4a, 0x70, 0x7a, 0xdb, 0xd1, 0x1f, 0x5d, 0x45, 0x75, 0x7a, 0xd2, + 0x57, 0x90, 0x97, 0x43, 0x07, 0x2d, 0x7e, 0x07, 0x33, 0xab, 0x90, 0xfe, 0xc1, 0x4a, 0x1d, 0x55, + 0x2a, 0x1e, 0x94, 0xd2, 0x63, 0x07, 0x3d, 0x58, 0xfd, 0xc0, 0xce, 0x56, 0x27, 0xfd, 0xe1, 
0x15, + 0x34, 0xd5, 0x21, 0xdf, 0x43, 0x65, 0xc1, 0x94, 0x42, 0x8d, 0xc5, 0xd3, 0x60, 0xe9, 0xde, 0xa5, + 0xef, 0x5d, 0x1d, 0x90, 0xce, 0x99, 0x9c, 0x6a, 0x4b, 0x72, 0x96, 0x59, 0xe4, 0x96, 0xe4, 0x2c, + 0x3b, 0x16, 0x9b, 0x3f, 0x6b, 0xe2, 0xc6, 0x5d, 0xca, 0x22, 0xec, 0x8c, 0x78, 0xa7, 0xfc, 0xa0, + 0xc1, 0xad, 0xc5, 0x3d, 0x89, 0x9a, 0x0b, 0x2d, 0xae, 0x7c, 0x21, 0xf4, 0xc7, 0xef, 0x85, 0x51, + 0x9d, 0xf0, 0xec, 0xf5, 0x79, 0x4d, 0x7b, 0x73, 0x5e, 0xd3, 0xfe, 0x3c, 0xaf, 0x69, 0xaf, 0x2e, + 0x6a, 0x6b, 0x6f, 0x2e, 0x6a, 0x6b, 0xbf, 0x5d, 0xd4, 0xd6, 0xbe, 0x31, 0xfb, 0x3e, 0x1b, 0xc4, + 0xae, 0xe9, 0x91, 0x51, 0xc3, 0x23, 0x23, 0xcc, 0xdc, 0x1e, 0x9b, 0x11, 0xc9, 0xbf, 0xb6, 0xa7, + 0x1e, 0x89, 0x30, 0x27, 0xdc, 0xbc, 0x78, 0x99, 0x1f, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0x27, + 0xa6, 0x79, 0x23, 0xdc, 0x0d, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// BroadcastAPIClient is the client API for BroadcastAPI service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type BroadcastAPIClient interface { + Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) + BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) +} + +type broadcastAPIClient struct { + cc grpc1.ClientConn +} + +func NewBroadcastAPIClient(cc grpc1.ClientConn) BroadcastAPIClient { + return &broadcastAPIClient{cc} +} + +func (c *broadcastAPIClient) Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) { + out := new(ResponsePing) + err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/Ping", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) { + out := new(ResponseBroadcastTx) + err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BroadcastAPIServer is the server API for BroadcastAPI service. +type BroadcastAPIServer interface { + Ping(context.Context, *RequestPing) (*ResponsePing, error) + BroadcastTx(context.Context, *RequestBroadcastTx) (*ResponseBroadcastTx, error) +} + +// UnimplementedBroadcastAPIServer can be embedded to have forward compatible implementations. 
+type UnimplementedBroadcastAPIServer struct { +} + +func (*UnimplementedBroadcastAPIServer) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) { + return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") +} +func (*UnimplementedBroadcastAPIServer) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { + return nil, status.Errorf(codes.Unimplemented, "method BroadcastTx not implemented") +} + +func RegisterBroadcastAPIServer(s grpc1.Server, srv BroadcastAPIServer) { + s.RegisterService(&_BroadcastAPI_serviceDesc, srv) +} + +func _BroadcastAPI_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestPing) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BroadcastAPIServer).Ping(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/Ping", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BroadcastAPIServer).Ping(ctx, req.(*RequestPing)) + } + return interceptor(ctx, in, info, handler) +} + +func _BroadcastAPI_BroadcastTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestBroadcastTx) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BroadcastAPIServer).BroadcastTx(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BroadcastAPIServer).BroadcastTx(ctx, req.(*RequestBroadcastTx)) + } + return interceptor(ctx, in, info, handler) +} + +var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{ + ServiceName: 
"tendermint.rpc.grpc.BroadcastAPI", + HandlerType: (*BroadcastAPIServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Ping", + Handler: _BroadcastAPI_Ping_Handler, + }, + { + MethodName: "BroadcastTx", + Handler: _BroadcastAPI_BroadcastTx_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "tendermint/rpc/grpc/types.proto", +} + +// BlockAPIServiceClient is the client API for BlockAPIService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type BlockAPIServiceClient interface { + // BlockByHash returns a block by its hash. + BlockByHash(ctx context.Context, in *BlockByHashRequest, opts ...grpc.CallOption) (BlockAPIService_BlockByHashClient, error) + // BlockByHeight returns a block by its height. + BlockByHeight(ctx context.Context, in *BlockByHeightRequest, opts ...grpc.CallOption) (BlockAPIService_BlockByHeightClient, error) + // Commit returns the commit of a block. + Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error) + // ValidatorSet returns the validator set of a block. + ValidatorSet(ctx context.Context, in *ValidatorSetRequest, opts ...grpc.CallOption) (*ValidatorSetResponse, error) + // SubscribeNewHeights subscribes to new heights. + SubscribeNewHeights(ctx context.Context, in *SubscribeNewHeightsRequest, opts ...grpc.CallOption) (BlockAPIService_SubscribeNewHeightsClient, error) + // Status returns the status of the node. 
+ Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) +} + +type blockAPIServiceClient struct { + cc grpc1.ClientConn +} + +func NewBlockAPIServiceClient(cc grpc1.ClientConn) BlockAPIServiceClient { + return &blockAPIServiceClient{cc} +} + +func (c *blockAPIServiceClient) BlockByHash(ctx context.Context, in *BlockByHashRequest, opts ...grpc.CallOption) (BlockAPIService_BlockByHashClient, error) { + stream, err := c.cc.NewStream(ctx, &_BlockAPIService_serviceDesc.Streams[0], "/tendermint.rpc.grpc.BlockAPIService/BlockByHash", opts...) + if err != nil { + return nil, err + } + x := &blockAPIServiceBlockByHashClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type BlockAPIService_BlockByHashClient interface { + Recv() (*BlockByHashResponse, error) + grpc.ClientStream +} + +type blockAPIServiceBlockByHashClient struct { + grpc.ClientStream +} + +func (x *blockAPIServiceBlockByHashClient) Recv() (*BlockByHashResponse, error) { + m := new(BlockByHashResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *blockAPIServiceClient) BlockByHeight(ctx context.Context, in *BlockByHeightRequest, opts ...grpc.CallOption) (BlockAPIService_BlockByHeightClient, error) { + stream, err := c.cc.NewStream(ctx, &_BlockAPIService_serviceDesc.Streams[1], "/tendermint.rpc.grpc.BlockAPIService/BlockByHeight", opts...) 
+ if err != nil { + return nil, err + } + x := &blockAPIServiceBlockByHeightClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type BlockAPIService_BlockByHeightClient interface { + Recv() (*BlockByHeightResponse, error) + grpc.ClientStream +} + +type blockAPIServiceBlockByHeightClient struct { + grpc.ClientStream +} + +func (x *blockAPIServiceBlockByHeightClient) Recv() (*BlockByHeightResponse, error) { + m := new(BlockByHeightResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *blockAPIServiceClient) Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error) { + out := new(CommitResponse) + err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BlockAPIService/Commit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *blockAPIServiceClient) ValidatorSet(ctx context.Context, in *ValidatorSetRequest, opts ...grpc.CallOption) (*ValidatorSetResponse, error) { + out := new(ValidatorSetResponse) + err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BlockAPIService/ValidatorSet", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *blockAPIServiceClient) SubscribeNewHeights(ctx context.Context, in *SubscribeNewHeightsRequest, opts ...grpc.CallOption) (BlockAPIService_SubscribeNewHeightsClient, error) { + stream, err := c.cc.NewStream(ctx, &_BlockAPIService_serviceDesc.Streams[2], "/tendermint.rpc.grpc.BlockAPIService/SubscribeNewHeights", opts...) 
+ if err != nil { + return nil, err + } + x := &blockAPIServiceSubscribeNewHeightsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type BlockAPIService_SubscribeNewHeightsClient interface { + Recv() (*SubscribeNewHeightsResponse, error) + grpc.ClientStream +} + +type blockAPIServiceSubscribeNewHeightsClient struct { + grpc.ClientStream +} + +func (x *blockAPIServiceSubscribeNewHeightsClient) Recv() (*SubscribeNewHeightsResponse, error) { + m := new(SubscribeNewHeightsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *blockAPIServiceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { + out := new(StatusResponse) + err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BlockAPIService/Status", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BlockAPIServiceServer is the server API for BlockAPIService service. +type BlockAPIServiceServer interface { + // BlockByHash returns a block by its hash. + BlockByHash(*BlockByHashRequest, BlockAPIService_BlockByHashServer) error + // BlockByHeight returns a block by its height. + BlockByHeight(*BlockByHeightRequest, BlockAPIService_BlockByHeightServer) error + // Commit returns the commit of a block. + Commit(context.Context, *CommitRequest) (*CommitResponse, error) + // ValidatorSet returns the validator set of a block. + ValidatorSet(context.Context, *ValidatorSetRequest) (*ValidatorSetResponse, error) + // SubscribeNewHeights subscribes to new heights. + SubscribeNewHeights(*SubscribeNewHeightsRequest, BlockAPIService_SubscribeNewHeightsServer) error + // Status returns the status of the node. 
+ Status(context.Context, *StatusRequest) (*StatusResponse, error) +} + +// UnimplementedBlockAPIServiceServer can be embedded to have forward compatible implementations. +type UnimplementedBlockAPIServiceServer struct { +} + +func (*UnimplementedBlockAPIServiceServer) BlockByHash(req *BlockByHashRequest, srv BlockAPIService_BlockByHashServer) error { + return status.Errorf(codes.Unimplemented, "method BlockByHash not implemented") +} +func (*UnimplementedBlockAPIServiceServer) BlockByHeight(req *BlockByHeightRequest, srv BlockAPIService_BlockByHeightServer) error { + return status.Errorf(codes.Unimplemented, "method BlockByHeight not implemented") +} +func (*UnimplementedBlockAPIServiceServer) Commit(ctx context.Context, req *CommitRequest) (*CommitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Commit not implemented") +} +func (*UnimplementedBlockAPIServiceServer) ValidatorSet(ctx context.Context, req *ValidatorSetRequest) (*ValidatorSetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidatorSet not implemented") +} +func (*UnimplementedBlockAPIServiceServer) SubscribeNewHeights(req *SubscribeNewHeightsRequest, srv BlockAPIService_SubscribeNewHeightsServer) error { + return status.Errorf(codes.Unimplemented, "method SubscribeNewHeights not implemented") +} +func (*UnimplementedBlockAPIServiceServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") +} + +func RegisterBlockAPIServiceServer(s grpc1.Server, srv BlockAPIServiceServer) { + s.RegisterService(&_BlockAPIService_serviceDesc, srv) +} + +func _BlockAPIService_BlockByHash_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(BlockByHashRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(BlockAPIServiceServer).BlockByHash(m, &blockAPIServiceBlockByHashServer{stream}) +} + +type 
BlockAPIService_BlockByHashServer interface { + Send(*BlockByHashResponse) error + grpc.ServerStream +} + +type blockAPIServiceBlockByHashServer struct { + grpc.ServerStream +} + +func (x *blockAPIServiceBlockByHashServer) Send(m *BlockByHashResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _BlockAPIService_BlockByHeight_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(BlockByHeightRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(BlockAPIServiceServer).BlockByHeight(m, &blockAPIServiceBlockByHeightServer{stream}) +} + +type BlockAPIService_BlockByHeightServer interface { + Send(*BlockByHeightResponse) error + grpc.ServerStream +} + +type blockAPIServiceBlockByHeightServer struct { + grpc.ServerStream +} + +func (x *blockAPIServiceBlockByHeightServer) Send(m *BlockByHeightResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _BlockAPIService_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CommitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BlockAPIServiceServer).Commit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.rpc.grpc.BlockAPIService/Commit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BlockAPIServiceServer).Commit(ctx, req.(*CommitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BlockAPIService_ValidatorSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidatorSetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BlockAPIServiceServer).ValidatorSet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/tendermint.rpc.grpc.BlockAPIService/ValidatorSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BlockAPIServiceServer).ValidatorSet(ctx, req.(*ValidatorSetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BlockAPIService_SubscribeNewHeights_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscribeNewHeightsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(BlockAPIServiceServer).SubscribeNewHeights(m, &blockAPIServiceSubscribeNewHeightsServer{stream}) +} + +type BlockAPIService_SubscribeNewHeightsServer interface { + Send(*SubscribeNewHeightsResponse) error + grpc.ServerStream +} + +type blockAPIServiceSubscribeNewHeightsServer struct { + grpc.ServerStream +} + +func (x *blockAPIServiceSubscribeNewHeightsServer) Send(m *SubscribeNewHeightsResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _BlockAPIService_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BlockAPIServiceServer).Status(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.rpc.grpc.BlockAPIService/Status", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BlockAPIServiceServer).Status(ctx, req.(*StatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _BlockAPIService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tendermint.rpc.grpc.BlockAPIService", + HandlerType: (*BlockAPIServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Commit", + Handler: _BlockAPIService_Commit_Handler, + }, + { + MethodName: "ValidatorSet", + Handler: _BlockAPIService_ValidatorSet_Handler, + }, + { + MethodName: "Status", + Handler: 
_BlockAPIService_Status_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "BlockByHash", + Handler: _BlockAPIService_BlockByHash_Handler, + ServerStreams: true, + }, + { + StreamName: "BlockByHeight", + Handler: _BlockAPIService_BlockByHeight_Handler, + ServerStreams: true, + }, + { + StreamName: "SubscribeNewHeights", + Handler: _BlockAPIService_SubscribeNewHeights_Handler, + ServerStreams: true, + }, + }, + Metadata: "tendermint/rpc/grpc/types.proto", +} + +// BlobstreamAPIClient is the client API for BlobstreamAPI service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type BlobstreamAPIClient interface { + // DataRootInclusionProof creates an inclusion proof for the data root of block + // height `height` in the set of blocks defined by `start` and `end`. The range + // is end exclusive. + DataRootInclusionProof(ctx context.Context, in *DataRootInclusionProofRequest, opts ...grpc.CallOption) (*DataRootInclusionProofResponse, error) +} + +type blobstreamAPIClient struct { + cc grpc1.ClientConn +} + +func NewBlobstreamAPIClient(cc grpc1.ClientConn) BlobstreamAPIClient { + return &blobstreamAPIClient{cc} +} + +func (c *blobstreamAPIClient) DataRootInclusionProof(ctx context.Context, in *DataRootInclusionProofRequest, opts ...grpc.CallOption) (*DataRootInclusionProofResponse, error) { + out := new(DataRootInclusionProofResponse) + err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BlobstreamAPI/DataRootInclusionProof", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BlobstreamAPIServer is the server API for BlobstreamAPI service. +type BlobstreamAPIServer interface { + // DataRootInclusionProof creates an inclusion proof for the data root of block + // height `height` in the set of blocks defined by `start` and `end`. The range + // is end exclusive. 
+ DataRootInclusionProof(context.Context, *DataRootInclusionProofRequest) (*DataRootInclusionProofResponse, error) +} + +// UnimplementedBlobstreamAPIServer can be embedded to have forward compatible implementations. +type UnimplementedBlobstreamAPIServer struct { +} + +func (*UnimplementedBlobstreamAPIServer) DataRootInclusionProof(ctx context.Context, req *DataRootInclusionProofRequest) (*DataRootInclusionProofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DataRootInclusionProof not implemented") +} + +func RegisterBlobstreamAPIServer(s grpc1.Server, srv BlobstreamAPIServer) { + s.RegisterService(&_BlobstreamAPI_serviceDesc, srv) +} + +func _BlobstreamAPI_DataRootInclusionProof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DataRootInclusionProofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BlobstreamAPIServer).DataRootInclusionProof(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.rpc.grpc.BlobstreamAPI/DataRootInclusionProof", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BlobstreamAPIServer).DataRootInclusionProof(ctx, req.(*DataRootInclusionProofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _BlobstreamAPI_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tendermint.rpc.grpc.BlobstreamAPI", + HandlerType: (*BlobstreamAPIServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DataRootInclusionProof", + Handler: _BlobstreamAPI_DataRootInclusionProof_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "tendermint/rpc/grpc/types.proto", +} + +func (m *RequestPing) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *RequestPing) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestPing) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *RequestBroadcastTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestBroadcastTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Tx) > 0 { + i -= len(m.Tx) + copy(dAtA[i:], m.Tx) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResponsePing) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponsePing) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponsePing) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *ResponseBroadcastTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseBroadcastTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TxResult != nil { + { + size, 
err := m.TxResult.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.CheckTx != nil { + { + size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BlockByHashRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockByHashRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockByHashRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Prove { + i-- + if m.Prove { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BlockByHeightRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockByHeightRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockByHeightRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Prove { + i-- + if m.Prove { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CommitRequest) Marshal() (dAtA []byte, err error) { + 
size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommitRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ValidatorSetRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatorSetRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatorSetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SubscribeNewHeightsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubscribeNewHeightsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubscribeNewHeightsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *StatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusRequest) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *BlockByHashResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockByHashResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockByHashResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.IsLast { + i-- + if m.IsLast { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.ValidatorSet != nil { + { + size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Commit != nil { + { + size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.BlockPart != nil { + { + size, err := m.BlockPart.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BlockByHeightResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockByHeightResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockByHeightResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l 
int + _ = l + if m.IsLast { + i-- + if m.IsLast { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.ValidatorSet != nil { + { + size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Commit != nil { + { + size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.BlockPart != nil { + { + size, err := m.BlockPart.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Commit != nil { + { + size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ValidatorSetResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatorSetResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatorSetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + 
var l int + _ = l + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 + } + if m.ValidatorSet != nil { + { + size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SubscribeNewHeightsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubscribeNewHeightsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubscribeNewHeightsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x12 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ValidatorInfo != nil { + { + size, err := m.ValidatorInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.SyncInfo != nil { + { + size, err := m.SyncInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i 
-= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.NodeInfo != nil { + { + size, err := m.NodeInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SyncInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SyncInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SyncInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CatchingUp { + i-- + if m.CatchingUp { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } + n14, err14 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.EarliestBlockTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.EarliestBlockTime):]) + if err14 != nil { + return 0, err14 + } + i -= n14 + i = encodeVarintTypes(dAtA, i, uint64(n14)) + i-- + dAtA[i] = 0x42 + if m.EarliestBlockHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.EarliestBlockHeight)) + i-- + dAtA[i] = 0x38 + } + if len(m.EarliestAppHash) > 0 { + i -= len(m.EarliestAppHash) + copy(dAtA[i:], m.EarliestAppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.EarliestAppHash))) + i-- + dAtA[i] = 0x32 + } + if len(m.EarliestBlockHash) > 0 { + i -= len(m.EarliestBlockHash) + copy(dAtA[i:], m.EarliestBlockHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.EarliestBlockHash))) + i-- + dAtA[i] = 0x2a + } + n15, err15 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.LatestBlockTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.LatestBlockTime):]) + if err15 != nil { + return 0, err15 + } + i -= n15 + i = encodeVarintTypes(dAtA, i, uint64(n15)) + i-- + dAtA[i] = 0x22 
+ if m.LatestBlockHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LatestBlockHeight)) + i-- + dAtA[i] = 0x18 + } + if len(m.LatestAppHash) > 0 { + i -= len(m.LatestAppHash) + copy(dAtA[i:], m.LatestAppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.LatestAppHash))) + i-- + dAtA[i] = 0x12 + } + if len(m.LatestBlockHash) > 0 { + i -= len(m.LatestBlockHash) + copy(dAtA[i:], m.LatestBlockHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.LatestBlockHash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ValidatorInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatorInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatorInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.VotingPower != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.VotingPower)) + i-- + dAtA[i] = 0x18 + } + if m.PubKey != nil { + { + size, err := m.PubKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DataRootInclusionProofRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DataRootInclusionProofRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DataRootInclusionProofRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i 
:= len(dAtA) + _ = i + var l int + _ = l + if m.End != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.End)) + i-- + dAtA[i] = 0x18 + } + if m.Start != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *DataRootInclusionProofResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DataRootInclusionProofResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DataRootInclusionProofResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *RequestPing) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *RequestBroadcastTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Tx) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponsePing) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *ResponseBroadcastTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CheckTx != nil { + l = m.CheckTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.TxResult != nil { + l = m.TxResult.Size() + n += 1 + l + sovTypes(uint64(l)) 
+ } + return n +} + +func (m *BlockByHashRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Prove { + n += 2 + } + return n +} + +func (m *BlockByHeightRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Prove { + n += 2 + } + return n +} + +func (m *CommitRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + return n +} + +func (m *ValidatorSetRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + return n +} + +func (m *SubscribeNewHeightsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *StatusRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *BlockByHashResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BlockPart != nil { + l = m.BlockPart.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Commit != nil { + l = m.Commit.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.ValidatorSet != nil { + l = m.ValidatorSet.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.IsLast { + n += 2 + } + return n +} + +func (m *BlockByHeightResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BlockPart != nil { + l = m.BlockPart.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Commit != nil { + l = m.Commit.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.ValidatorSet != nil { + l = m.ValidatorSet.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.IsLast { + n += 2 + } + return n +} + +func (m *CommitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Commit != nil { + l = m.Commit.Size() + n += 1 + 
l + sovTypes(uint64(l)) + } + return n +} + +func (m *ValidatorSetResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValidatorSet != nil { + l = m.ValidatorSet.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + return n +} + +func (m *SubscribeNewHeightsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *StatusResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NodeInfo != nil { + l = m.NodeInfo.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.SyncInfo != nil { + l = m.SyncInfo.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.ValidatorInfo != nil { + l = m.ValidatorInfo.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *SyncInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LatestBlockHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.LatestAppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.LatestBlockHeight != 0 { + n += 1 + sovTypes(uint64(m.LatestBlockHeight)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.LatestBlockTime) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.EarliestBlockHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.EarliestAppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.EarliestBlockHeight != 0 { + n += 1 + sovTypes(uint64(m.EarliestBlockHeight)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.EarliestBlockTime) + n += 1 + l + sovTypes(uint64(l)) + if m.CatchingUp { + n += 2 + } + return n +} + +func (m *ValidatorInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.PubKey != nil 
{ + l = m.PubKey.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.VotingPower != 0 { + n += 1 + sovTypes(uint64(m.VotingPower)) + } + return n +} + +func (m *DataRootInclusionProofRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Start != 0 { + n += 1 + sovTypes(uint64(m.Start)) + } + if m.End != 0 { + n += 1 + sovTypes(uint64(m.End)) + } + return n +} + +func (m *DataRootInclusionProofResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Proof.Size() + n += 1 + l + sovTypes(uint64(l)) + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *RequestPing) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestPing: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestPing: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestBroadcastTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Tx == nil { + m.Tx = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponsePing) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponsePing: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponsePing: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseBroadcastTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: ResponseBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CheckTx == nil { + m.CheckTx = &types.ResponseCheckTx{} + } + if err := m.CheckTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TxResult", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TxResult == nil { + m.TxResult = &types.ExecTxResult{} + } + if err := m.TxResult.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockByHashRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockByHashRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockByHashRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Prove = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockByHeightRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockByHeightRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockByHeightRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) + } + var v int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Prove = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommitRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommitRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommitRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatorSetRequest) Unmarshal(dAtA 
[]byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatorSetRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatorSetRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubscribeNewHeightsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubscribeNewHeightsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
SubscribeNewHeightsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockByHashResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockByHashResponse: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockByHashResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockPart", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BlockPart == nil { + m.BlockPart = &types1.Part{} + } + if err := m.BlockPart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Commit == nil { + m.Commit = &types1.Commit{} + } + if err := m.Commit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + 
return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValidatorSet == nil { + m.ValidatorSet = &types1.ValidatorSet{} + } + if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsLast", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsLast = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockByHeightResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockByHeightResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockByHeightResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockPart", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BlockPart == nil { + m.BlockPart = &types1.Part{} + } + if err := m.BlockPart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Commit == nil { + m.Commit = &types1.Commit{} + } + if err := m.Commit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValidatorSet == nil { + m.ValidatorSet = &types1.ValidatorSet{} + } + if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsLast", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsLast = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Commit == nil { + m.Commit = &types1.Commit{} + 
} + if err := m.Commit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatorSetResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatorSetResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatorSetResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValidatorSet == nil { + m.ValidatorSet = &types1.ValidatorSet{} + } + if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", 
wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubscribeNewHeightsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubscribeNewHeightsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubscribeNewHeightsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeInfo == nil { + m.NodeInfo = 
&p2p.DefaultNodeInfo{} + } + if err := m.NodeInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SyncInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SyncInfo == nil { + m.SyncInfo = &SyncInfo{} + } + if err := m.SyncInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValidatorInfo == nil { + m.ValidatorInfo = &ValidatorInfo{} + } + if err := m.ValidatorInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SyncInfo) Unmarshal(dAtA []byte) error { + l 
:= len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SyncInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SyncInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LatestBlockHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LatestBlockHash = append(m.LatestBlockHash[:0], dAtA[iNdEx:postIndex]...) + if m.LatestBlockHash == nil { + m.LatestBlockHash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LatestAppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LatestAppHash = append(m.LatestAppHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.LatestAppHash == nil { + m.LatestAppHash = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LatestBlockHeight", wireType) + } + m.LatestBlockHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LatestBlockHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LatestBlockTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.LatestBlockTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EarliestBlockHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes } - if iNdEx >= l { + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.EarliestBlockHash = append(m.EarliestBlockHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.EarliestBlockHash == nil { + m.EarliestBlockHash = []byte{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestPing: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestPing: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EarliestAppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EarliestAppHash = append(m.EarliestAppHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.EarliestAppHash == nil { + m.EarliestAppHash = []byte{} + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EarliestBlockHeight", wireType) + } + m.EarliestBlockHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EarliestBlockHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EarliestBlockTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.EarliestBlockTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CatchingUp", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CatchingUp = bool(v != 0) default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -585,7 +4776,7 @@ func (m *RequestPing) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) error { +func (m *ValidatorInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -608,15 +4799,15 @@ func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) 
error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestBroadcastTx: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatorInfo: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatorInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -643,11 +4834,66 @@ func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) - if m.Tx == nil { - m.Tx = []byte{} + m.Address = append(m.Address[:0], dAtA[iNdEx:postIndex]...) + if m.Address == nil { + m.Address = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PubKey == nil { + m.PubKey = &crypto.PublicKey{} + } + if err := m.PubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field VotingPower", wireType) + } + m.VotingPower = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.VotingPower |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -669,7 +4915,7 @@ func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponsePing) Unmarshal(dAtA []byte) error { +func (m *DataRootInclusionProofRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -692,12 +4938,69 @@ func (m *ResponsePing) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponsePing: wiretype end group for non-group") + return fmt.Errorf("proto: DataRootInclusionProofRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponsePing: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DataRootInclusionProofRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + m.Start = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Start |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + m.End = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.End |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -719,7 +5022,7 @@ func (m *ResponsePing) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { +func (m *DataRootInclusionProofResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -742,51 +5045,15 @@ func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseBroadcastTx: wiretype end group for non-group") + return fmt.Errorf("proto: DataRootInclusionProofResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DataRootInclusionProofResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CheckTx == nil { - m.CheckTx = &types.ResponseCheckTx{} - } - if err := m.CheckTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TxResult", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Proof", 
wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -813,10 +5080,7 @@ func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TxResult == nil { - m.TxResult = &types.ExecTxResult{} - } - if err := m.TxResult.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex diff --git a/proto/tendermint/rpc/grpc/types.proto b/proto/tendermint/rpc/grpc/types.proto index 68ff0cad71b..38f0d6d6bd0 100644 --- a/proto/tendermint/rpc/grpc/types.proto +++ b/proto/tendermint/rpc/grpc/types.proto @@ -3,6 +3,13 @@ package tendermint.rpc.grpc; option go_package = "github.com/cometbft/cometbft/rpc/grpc;coregrpc"; import "tendermint/abci/types.proto"; +import "tendermint/types/types.proto"; +import "tendermint/p2p/types.proto"; +import "tendermint/crypto/keys.proto"; +import "tendermint/crypto/proof.proto"; +import "tendermint/types/validator.proto"; +import "google/protobuf/timestamp.proto"; +import "gogoproto/gogo.proto"; //---------------------------------------- // Request types @@ -34,3 +41,155 @@ service BroadcastAPI { rpc Ping(RequestPing) returns (ResponsePing); rpc BroadcastTx(RequestBroadcastTx) returns (ResponseBroadcastTx); } + +// BlockAPI is an API for querying blocks. +service BlockAPIService { + // BlockByHash returns a block by its hash. + rpc BlockByHash(BlockByHashRequest) returns (stream BlockByHashResponse); + // BlockByHeight returns a block by its height. + rpc BlockByHeight(BlockByHeightRequest) returns (stream BlockByHeightResponse); + // Commit returns the commit of a block. + rpc Commit(CommitRequest) returns (CommitResponse); + // ValidatorSet returns the validator set of a block. + rpc ValidatorSet(ValidatorSetRequest) returns (ValidatorSetResponse); + // SubscribeNewHeights subscribes to new heights. 
+ rpc SubscribeNewHeights(SubscribeNewHeightsRequest) returns (stream SubscribeNewHeightsResponse); + // Status returns the status of the node. + rpc Status(StatusRequest) returns (StatusResponse); +} + +service BlobstreamAPI { + // DataRootInclusionProof creates an inclusion proof for the data root of block + // height `height` in the set of blocks defined by `start` and `end`. The range + // is end exclusive. + rpc DataRootInclusionProof(DataRootInclusionProofRequest) returns (DataRootInclusionProofResponse); +} + + +// BlockByHashRequest is a request to get a block by its hash. +message BlockByHashRequest { + bytes hash = 1; + bool prove = 2; + } + + // BlockByHeightRequest is a request to get a block by its height. + message BlockByHeightRequest { + // Height the requested block height. + // If height is equal to 0, the latest height stored in the block store + // will be used. + int64 height = 1; + // Prove set to true to return the parts proofs. + bool prove = 2; + } + + // CommitRequest is a request to get the commit of a block. + message CommitRequest { + // Height the requested block commit height. + // If height is equal to 0, the latest height stored in the block store + // will be used. + int64 height = 1; + } + + // ValidatorSetRequest is a request to get the validator set of a block. + message ValidatorSetRequest { + // Height the requested validator set height. + // If height is equal to 0, the latest height stored in the block store + // will be used. + int64 height = 1; + } + + // SubscribeNewHeightsRequest is a request to subscribe to new heights. + message SubscribeNewHeightsRequest {} + + // StatusRequest is a request to get the status of the node. + message StatusRequest {} + + + // BlockByHashResponse is a response to a BlockByHashRequest. + message BlockByHashResponse { + tendermint.types.Part block_part = 1; + // Commit is only set in the first part, and + // it stays nil in the remaining ones. 
+ tendermint.types.Commit commit = 2; + // ValidatorSet is only set in the first part, and + // it stays nil in the remaining ones. + tendermint.types.ValidatorSet validator_set = 3; + bool is_last = 4; + } + + // BlockByHeightResponse is a response to a BlockByHeightRequest. + message BlockByHeightResponse { + tendermint.types.Part block_part = 1; + // Commit is only set in the first part, and + // it stays nil in the remaining ones. + tendermint.types.Commit commit = 2; + // ValidatorSet is only set in the first part, and + // it stays nil in the remaining ones. + tendermint.types.ValidatorSet validator_set = 3; + bool is_last = 4; + } + + // CommitResponse is a response to a CommitRequest. + message CommitResponse { + tendermint.types.Commit commit = 1; + } + + // ValidatorSetResponse is a response to a ValidatorSetRequest. + message ValidatorSetResponse { + // ValidatorSet the requested validator set. + tendermint.types.ValidatorSet validator_set = 1; + // Height the height corresponding to the returned + // validator set. + int64 height = 2; + } + + // NewHeightEvent is an event that indicates a new height. + message SubscribeNewHeightsResponse { + int64 height = 1; + bytes hash = 2; + } + + // StatusResponse is a response to a StatusRequest. + message StatusResponse { + tendermint.p2p.DefaultNodeInfo node_info = 1; + SyncInfo sync_info = 2; + ValidatorInfo validator_info = 3; + } + + // SyncInfo is information about the node's sync status. + message SyncInfo { + bytes latest_block_hash = 1; + bytes latest_app_hash = 2; + int64 latest_block_height = 3; + google.protobuf.Timestamp latest_block_time = 4 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + + bytes earliest_block_hash = 5; + bytes earliest_app_hash = 6; + int64 earliest_block_height = 7; + google.protobuf.Timestamp earliest_block_time = 8 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + + bool catching_up = 9; + } + + // ValidatorInfo is information about a validator. 
+ message ValidatorInfo { + bytes address = 1; + tendermint.crypto.PublicKey pub_key = 2; + int64 voting_power = 3; + } + + + message DataRootInclusionProofRequest { + // Height the height of block we want to prove. + int64 height = 1; + // Start the start of the data commitment range containing the block. + uint64 start = 2; + // End the end exclusive of the data commitment range containing the block. + uint64 end = 3; + } + + message DataRootInclusionProofResponse { + tendermint.crypto.Proof proof = 1 [(gogoproto.nullable) = false]; + } \ No newline at end of file diff --git a/proto/tendermint/store/types.pb.go b/proto/tendermint/store/types.pb.go index e7e553e0e2c..c36a141d626 100644 --- a/proto/tendermint/store/types.pb.go +++ b/proto/tendermint/store/types.pb.go @@ -74,25 +74,103 @@ func (m *BlockStoreState) GetHeight() int64 { return 0 } +// TxInfo describes the location of a tx inside a committed block +// as well as the result of executing the transaction and the error log output. +type TxInfo struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Index uint32 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` + // The response code of executing the tx. 0 means + // successfully executed, all others are error codes. + Code uint32 `protobuf:"varint,3,opt,name=code,proto3" json:"code,omitempty"` + // The error log output generated if the transaction execution fails. 
+ Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *TxInfo) Reset() { *m = TxInfo{} } +func (m *TxInfo) String() string { return proto.CompactTextString(m) } +func (*TxInfo) ProtoMessage() {} +func (*TxInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9e53a0a74267f7, []int{1} +} +func (m *TxInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TxInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TxInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TxInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_TxInfo.Merge(m, src) +} +func (m *TxInfo) XXX_Size() int { + return m.Size() +} +func (m *TxInfo) XXX_DiscardUnknown() { + xxx_messageInfo_TxInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_TxInfo proto.InternalMessageInfo + +func (m *TxInfo) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *TxInfo) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *TxInfo) GetCode() uint32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *TxInfo) GetError() string { + if m != nil { + return m.Error + } + return "" +} + func init() { proto.RegisterType((*BlockStoreState)(nil), "tendermint.store.BlockStoreState") + proto.RegisterType((*TxInfo)(nil), "tendermint.store.TxInfo") } func init() { proto.RegisterFile("tendermint/store/types.proto", fileDescriptor_ff9e53a0a74267f7) } var fileDescriptor_ff9e53a0a74267f7 = []byte{ - // 171 bytes of a gzipped FileDescriptorProto + // 228 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x29, 0x49, 0xcd, 0x4b, 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x2e, 0xc9, 0x2f, 0x4a, 0xd5, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 
0xc9, 0x17, 0x12, 0x40, 0xc8, 0xea, 0x81, 0x65, 0x95, 0x6c, 0xb9, 0xf8, 0x9d, 0x72, 0xf2, 0x93, 0xb3, 0x83, 0x41, 0xbc, 0xe0, 0x92, 0xc4, 0x92, 0x54, 0x21, 0x21, 0x2e, 0x96, 0xa4, 0xc4, 0xe2, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xe6, 0x20, 0x30, 0x5b, 0x48, 0x8c, 0x8b, 0x2d, 0x23, 0x35, 0x33, 0x3d, 0xa3, 0x44, 0x82, 0x09, 0x2c, 0x0a, 0xe5, - 0x39, 0xf9, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, - 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x94, 0x71, 0x7a, 0x66, - 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x72, 0x7e, 0x6e, 0x6a, 0x49, 0x52, 0x5a, - 0x09, 0x82, 0x01, 0x76, 0x8e, 0x3e, 0xba, 0x5b, 0x93, 0xd8, 0xc0, 0xe2, 0xc6, 0x80, 0x00, 0x00, - 0x00, 0xff, 0xff, 0xb7, 0x2b, 0x34, 0x2a, 0xc6, 0x00, 0x00, 0x00, + 0x29, 0x25, 0x70, 0xb1, 0x85, 0x54, 0x78, 0xe6, 0xa5, 0xe5, 0x23, 0xa9, 0x60, 0x44, 0x56, 0x21, + 0x24, 0xc2, 0xc5, 0x9a, 0x99, 0x97, 0x92, 0x5a, 0x01, 0xd6, 0xc8, 0x1b, 0x04, 0xe1, 0x80, 0xec, + 0x48, 0xce, 0x4f, 0x49, 0x95, 0x60, 0x06, 0x0b, 0x82, 0xd9, 0x20, 0x95, 0xa9, 0x45, 0x45, 0xf9, + 0x45, 0x12, 0x2c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x10, 0x8e, 0x93, 0xef, 0x89, 0x47, 0x72, 0x8c, + 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, + 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x19, 0xa7, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, + 0xe7, 0xea, 0x27, 0xe7, 0xe7, 0xa6, 0x96, 0x24, 0xa5, 0x95, 0x20, 0x18, 0x60, 0x0f, 0xeb, 0xa3, + 0x87, 0x46, 0x12, 0x1b, 0x58, 0xdc, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xce, 0x99, 0xda, 0x87, + 0x28, 0x01, 0x00, 0x00, } func (m *BlockStoreState) Marshal() (dAtA []byte, err error) { @@ -128,6 +206,51 @@ func (m *BlockStoreState) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *TxInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *TxInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TxInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x22 + } + if m.Code != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x18 + } + if m.Index != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { offset -= sovTypes(v) base := offset @@ -154,6 +277,28 @@ func (m *BlockStoreState) Size() (n int) { return n } +func (m *TxInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) + } + if m.Code != 0 { + n += 1 + sovTypes(uint64(m.Code)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + func sovTypes(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -248,6 +393,145 @@ func (m *BlockStoreState) Unmarshal(dAtA []byte) error { } return nil } +func (m *TxInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TxInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: TxInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || 
(iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipTypes(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/proto/tendermint/store/types.proto b/proto/tendermint/store/types.proto index b510169a4c0..891f1e70899 100644 --- a/proto/tendermint/store/types.proto +++ b/proto/tendermint/store/types.proto @@ -7,3 +7,16 @@ message BlockStoreState { int64 base = 1; int64 height = 2; } + + +// TxInfo describes the location of a tx inside a committed block +// as well as the result of executing the transaction and the error log output. +message TxInfo { + int64 height = 1; + uint32 index = 2; + // The response code of executing the tx. 0 means + // successfully executed, all others are error codes. + uint32 code = 3; + // The error log output generated if the transaction execution fails. + string error = 4; +} \ No newline at end of file diff --git a/proto/tendermint/types/types.pb.go b/proto/tendermint/types/types.pb.go index 2b2c819b4f4..4c3bd8af2a2 100644 --- a/proto/tendermint/types/types.pb.go +++ b/proto/tendermint/types/types.pb.go @@ -389,6 +389,12 @@ type Data struct { // NOTE: not all txs here are valid. We're just agreeing on the order first. // This means that block.AppHash does not include these txs. Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` + // SquareSize is the number of rows or columns in the original data square. + SquareSize uint64 `protobuf:"varint,5,opt,name=square_size,json=squareSize,proto3" json:"square_size,omitempty"` + // Hash is the root of a binary Merkle tree where the leaves of the tree are + // the row and column roots of an extended data square. Hash is often referred + // to as the "data root". 
+ Hash []byte `protobuf:"bytes,6,opt,name=hash,proto3" json:"hash,omitempty"` } func (m *Data) Reset() { *m = Data{} } @@ -431,6 +437,92 @@ func (m *Data) GetTxs() [][]byte { return nil } +func (m *Data) GetSquareSize() uint64 { + if m != nil { + return m.SquareSize + } + return 0 +} + +func (m *Data) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +// Blob (named after binary large object) is a chunk of data submitted by a user +// to be published to the Celestia blockchain. The data of a Blob is published +// to a namespace and is encoded into shares based on the format specified by +// share_version. +type Blob struct { + NamespaceId []byte `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + ShareVersion uint32 `protobuf:"varint,3,opt,name=share_version,json=shareVersion,proto3" json:"share_version,omitempty"` + NamespaceVersion uint32 `protobuf:"varint,4,opt,name=namespace_version,json=namespaceVersion,proto3" json:"namespace_version,omitempty"` +} + +func (m *Blob) Reset() { *m = Blob{} } +func (m *Blob) String() string { return proto.CompactTextString(m) } +func (*Blob) ProtoMessage() {} +func (*Blob) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{5} +} +func (m *Blob) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Blob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Blob.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Blob) XXX_Merge(src proto.Message) { + xxx_messageInfo_Blob.Merge(m, src) +} +func (m *Blob) XXX_Size() int { + return m.Size() +} +func (m *Blob) XXX_DiscardUnknown() { + xxx_messageInfo_Blob.DiscardUnknown(m) +} + +var xxx_messageInfo_Blob 
proto.InternalMessageInfo + +func (m *Blob) GetNamespaceId() []byte { + if m != nil { + return m.NamespaceId + } + return nil +} + +func (m *Blob) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *Blob) GetShareVersion() uint32 { + if m != nil { + return m.ShareVersion + } + return 0 +} + +func (m *Blob) GetNamespaceVersion() uint32 { + if m != nil { + return m.NamespaceVersion + } + return 0 +} + // Vote represents a prevote or precommit vote from validators for // consensus. type Vote struct { @@ -457,7 +549,7 @@ func (m *Vote) Reset() { *m = Vote{} } func (m *Vote) String() string { return proto.CompactTextString(m) } func (*Vote) ProtoMessage() {} func (*Vote) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{5} + return fileDescriptor_d3a6e55e2345de56, []int{6} } func (m *Vote) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -568,7 +660,7 @@ func (m *Commit) Reset() { *m = Commit{} } func (m *Commit) String() string { return proto.CompactTextString(m) } func (*Commit) ProtoMessage() {} func (*Commit) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{6} + return fileDescriptor_d3a6e55e2345de56, []int{7} } func (m *Commit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -637,7 +729,7 @@ func (m *CommitSig) Reset() { *m = CommitSig{} } func (m *CommitSig) String() string { return proto.CompactTextString(m) } func (*CommitSig) ProtoMessage() {} func (*CommitSig) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{7} + return fileDescriptor_d3a6e55e2345de56, []int{8} } func (m *CommitSig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -705,7 +797,7 @@ func (m *ExtendedCommit) Reset() { *m = ExtendedCommit{} } func (m *ExtendedCommit) String() string { return proto.CompactTextString(m) } func (*ExtendedCommit) ProtoMessage() {} func (*ExtendedCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, 
[]int{8} + return fileDescriptor_d3a6e55e2345de56, []int{9} } func (m *ExtendedCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -780,7 +872,7 @@ func (m *ExtendedCommitSig) Reset() { *m = ExtendedCommitSig{} } func (m *ExtendedCommitSig) String() string { return proto.CompactTextString(m) } func (*ExtendedCommitSig) ProtoMessage() {} func (*ExtendedCommitSig) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{9} + return fileDescriptor_d3a6e55e2345de56, []int{10} } func (m *ExtendedCommitSig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -865,7 +957,7 @@ func (m *Proposal) Reset() { *m = Proposal{} } func (m *Proposal) String() string { return proto.CompactTextString(m) } func (*Proposal) ProtoMessage() {} func (*Proposal) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{10} + return fileDescriptor_d3a6e55e2345de56, []int{11} } func (m *Proposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -952,7 +1044,7 @@ func (m *SignedHeader) Reset() { *m = SignedHeader{} } func (m *SignedHeader) String() string { return proto.CompactTextString(m) } func (*SignedHeader) ProtoMessage() {} func (*SignedHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{11} + return fileDescriptor_d3a6e55e2345de56, []int{12} } func (m *SignedHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1004,7 +1096,7 @@ func (m *LightBlock) Reset() { *m = LightBlock{} } func (m *LightBlock) String() string { return proto.CompactTextString(m) } func (*LightBlock) ProtoMessage() {} func (*LightBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{12} + return fileDescriptor_d3a6e55e2345de56, []int{13} } func (m *LightBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1058,7 +1150,7 @@ func (m *BlockMeta) Reset() { *m = BlockMeta{} } func (m *BlockMeta) String() string { return proto.CompactTextString(m) } func 
(*BlockMeta) ProtoMessage() {} func (*BlockMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{13} + return fileDescriptor_d3a6e55e2345de56, []int{14} } func (m *BlockMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1126,7 +1218,7 @@ func (m *TxProof) Reset() { *m = TxProof{} } func (m *TxProof) String() string { return proto.CompactTextString(m) } func (*TxProof) ProtoMessage() {} func (*TxProof) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{14} + return fileDescriptor_d3a6e55e2345de56, []int{15} } func (m *TxProof) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1176,360 +1268,506 @@ func (m *TxProof) GetProof() *crypto.Proof { return nil } -func init() { - proto.RegisterEnum("tendermint.types.SignedMsgType", SignedMsgType_name, SignedMsgType_value) - proto.RegisterType((*PartSetHeader)(nil), "tendermint.types.PartSetHeader") - proto.RegisterType((*Part)(nil), "tendermint.types.Part") - proto.RegisterType((*BlockID)(nil), "tendermint.types.BlockID") - proto.RegisterType((*Header)(nil), "tendermint.types.Header") - proto.RegisterType((*Data)(nil), "tendermint.types.Data") - proto.RegisterType((*Vote)(nil), "tendermint.types.Vote") - proto.RegisterType((*Commit)(nil), "tendermint.types.Commit") - proto.RegisterType((*CommitSig)(nil), "tendermint.types.CommitSig") - proto.RegisterType((*ExtendedCommit)(nil), "tendermint.types.ExtendedCommit") - proto.RegisterType((*ExtendedCommitSig)(nil), "tendermint.types.ExtendedCommitSig") - proto.RegisterType((*Proposal)(nil), "tendermint.types.Proposal") - proto.RegisterType((*SignedHeader)(nil), "tendermint.types.SignedHeader") - proto.RegisterType((*LightBlock)(nil), "tendermint.types.LightBlock") - proto.RegisterType((*BlockMeta)(nil), "tendermint.types.BlockMeta") - proto.RegisterType((*TxProof)(nil), "tendermint.types.TxProof") +// IndexWrapper adds index metadata to a transaction. 
This is used to track +// transactions that pay for blobs, and where the blobs start in the square. +type IndexWrapper struct { + Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` + ShareIndexes []uint32 `protobuf:"varint,2,rep,packed,name=share_indexes,json=shareIndexes,proto3" json:"share_indexes,omitempty"` + TypeId string `protobuf:"bytes,3,opt,name=type_id,json=typeId,proto3" json:"type_id,omitempty"` } -func init() { proto.RegisterFile("tendermint/types/types.proto", fileDescriptor_d3a6e55e2345de56) } - -var fileDescriptor_d3a6e55e2345de56 = []byte{ - // 1310 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x57, 0xcf, 0x6f, 0x1b, 0x45, - 0x14, 0xce, 0xda, 0xeb, 0x5f, 0xcf, 0x76, 0xe2, 0x2c, 0x11, 0x75, 0xdd, 0xc6, 0xb1, 0x5c, 0x01, - 0xa1, 0xa0, 0x4d, 0x95, 0x22, 0x04, 0x07, 0x0e, 0xf9, 0x45, 0x1b, 0x51, 0x27, 0xd6, 0xda, 0x2d, - 0xa2, 0x97, 0xd5, 0xda, 0x3b, 0xb1, 0x97, 0xda, 0x3b, 0xab, 0xdd, 0x71, 0x70, 0xfa, 0x17, 0xa0, - 0x9e, 0x7a, 0xe2, 0xd6, 0x13, 0x1c, 0xb8, 0x83, 0xc4, 0x15, 0x71, 0xea, 0xb1, 0x37, 0xb8, 0x50, - 0x20, 0x95, 0xf8, 0x3b, 0xd0, 0xbc, 0x99, 0xdd, 0xb5, 0xe3, 0x18, 0xaa, 0xa8, 0x02, 0x89, 0x8b, - 0xb5, 0xf3, 0xde, 0xf7, 0xde, 0xbc, 0x79, 0xdf, 0x37, 0xa3, 0x67, 0xb8, 0xca, 0x88, 0x6b, 0x13, - 0x7f, 0xe8, 0xb8, 0x6c, 0x83, 0x9d, 0x78, 0x24, 0x10, 0xbf, 0xba, 0xe7, 0x53, 0x46, 0xb5, 0x52, - 0xec, 0xd5, 0xd1, 0x5e, 0x59, 0xe9, 0xd1, 0x1e, 0x45, 0xe7, 0x06, 0xff, 0x12, 0xb8, 0xca, 0x5a, - 0x8f, 0xd2, 0xde, 0x80, 0x6c, 0xe0, 0xaa, 0x33, 0x3a, 0xda, 0x60, 0xce, 0x90, 0x04, 0xcc, 0x1a, - 0x7a, 0x12, 0xb0, 0x3a, 0xb1, 0x4d, 0xd7, 0x3f, 0xf1, 0x18, 0xe5, 0x58, 0x7a, 0x24, 0xdd, 0xd5, - 0x09, 0xf7, 0x31, 0xf1, 0x03, 0x87, 0xba, 0x93, 0x75, 0x54, 0x6a, 0x33, 0x55, 0x1e, 0x5b, 0x03, - 0xc7, 0xb6, 0x18, 0xf5, 0x05, 0xa2, 0xfe, 0x21, 0x14, 0x9b, 0x96, 0xcf, 0x5a, 0x84, 0xdd, 0x26, - 0x96, 0x4d, 0x7c, 0x6d, 0x05, 0x52, 0x8c, 0x32, 0x6b, 0x50, 0x56, 0x6a, 0xca, 0x7a, 0xd1, 
0x10, - 0x0b, 0x4d, 0x03, 0xb5, 0x6f, 0x05, 0xfd, 0x72, 0xa2, 0xa6, 0xac, 0x17, 0x0c, 0xfc, 0xae, 0xf7, - 0x41, 0xe5, 0xa1, 0x3c, 0xc2, 0x71, 0x6d, 0x32, 0x0e, 0x23, 0x70, 0xc1, 0xad, 0x9d, 0x13, 0x46, - 0x02, 0x19, 0x22, 0x16, 0xda, 0x7b, 0x90, 0xc2, 0xfa, 0xcb, 0xc9, 0x9a, 0xb2, 0x9e, 0xdf, 0x2c, - 0xeb, 0x13, 0x8d, 0x12, 0xe7, 0xd3, 0x9b, 0xdc, 0xbf, 0xad, 0x3e, 0x7d, 0xbe, 0xb6, 0x60, 0x08, - 0x70, 0x7d, 0x00, 0x99, 0xed, 0x01, 0xed, 0x3e, 0xd8, 0xdf, 0x8d, 0x0a, 0x51, 0xe2, 0x42, 0xb4, - 0x06, 0x2c, 0x79, 0x96, 0xcf, 0xcc, 0x80, 0x30, 0xb3, 0x8f, 0xa7, 0xc0, 0x4d, 0xf3, 0x9b, 0x6b, - 0xfa, 0x59, 0x1e, 0xf4, 0xa9, 0xc3, 0xca, 0x5d, 0x8a, 0xde, 0xa4, 0xb1, 0xfe, 0xa7, 0x0a, 0x69, - 0xd9, 0x8c, 0x8f, 0x20, 0x23, 0xdb, 0x8a, 0x1b, 0xe6, 0x37, 0x57, 0x27, 0x33, 0x4a, 0x97, 0xbe, - 0x43, 0xdd, 0x80, 0xb8, 0xc1, 0x28, 0x90, 0xf9, 0xc2, 0x18, 0xed, 0x4d, 0xc8, 0x76, 0xfb, 0x96, - 0xe3, 0x9a, 0x8e, 0x8d, 0x15, 0xe5, 0xb6, 0xf3, 0xa7, 0xcf, 0xd7, 0x32, 0x3b, 0xdc, 0xb6, 0xbf, - 0x6b, 0x64, 0xd0, 0xb9, 0x6f, 0x6b, 0xaf, 0x43, 0xba, 0x4f, 0x9c, 0x5e, 0x9f, 0x61, 0x5b, 0x92, - 0x86, 0x5c, 0x69, 0x1f, 0x80, 0xca, 0x05, 0x51, 0x56, 0x71, 0xef, 0x8a, 0x2e, 0xd4, 0xa2, 0x87, - 0x6a, 0xd1, 0xdb, 0xa1, 0x5a, 0xb6, 0xb3, 0x7c, 0xe3, 0xc7, 0xbf, 0xad, 0x29, 0x06, 0x46, 0x68, - 0x3b, 0x50, 0x1c, 0x58, 0x01, 0x33, 0x3b, 0xbc, 0x6d, 0x7c, 0xfb, 0x14, 0xa6, 0xb8, 0x3c, 0xdb, - 0x10, 0xd9, 0x58, 0x59, 0x7a, 0x9e, 0x47, 0x09, 0x93, 0xad, 0xad, 0x43, 0x09, 0x93, 0x74, 0xe9, - 0x70, 0xe8, 0x30, 0x13, 0xfb, 0x9e, 0xc6, 0xbe, 0x2f, 0x72, 0xfb, 0x0e, 0x9a, 0x6f, 0x73, 0x06, - 0xae, 0x40, 0xce, 0xb6, 0x98, 0x25, 0x20, 0x19, 0x84, 0x64, 0xb9, 0x01, 0x9d, 0x6f, 0xc1, 0x52, - 0xa4, 0xba, 0x40, 0x40, 0xb2, 0x22, 0x4b, 0x6c, 0x46, 0xe0, 0x0d, 0x58, 0x71, 0xc9, 0x98, 0x99, - 0x67, 0xd1, 0x39, 0x44, 0x6b, 0xdc, 0x77, 0x6f, 0x3a, 0xe2, 0x0d, 0x58, 0xec, 0x86, 0xcd, 0x17, - 0x58, 0x40, 0x6c, 0x31, 0xb2, 0x22, 0xec, 0x32, 0x64, 0x2d, 0xcf, 0x13, 0x80, 0x3c, 0x02, 0x32, - 0x96, 0xe7, 0xa1, 0xeb, 0x3a, 
0x2c, 0xe3, 0x19, 0x7d, 0x12, 0x8c, 0x06, 0x4c, 0x26, 0x29, 0x20, - 0x66, 0x89, 0x3b, 0x0c, 0x61, 0x47, 0xec, 0x35, 0x28, 0x92, 0x63, 0xc7, 0x26, 0x6e, 0x97, 0x08, - 0x5c, 0x11, 0x71, 0x85, 0xd0, 0x88, 0xa0, 0xb7, 0xa1, 0xe4, 0xf9, 0xd4, 0xa3, 0x01, 0xf1, 0x4d, - 0xcb, 0xb6, 0x7d, 0x12, 0x04, 0xe5, 0x45, 0x91, 0x2f, 0xb4, 0x6f, 0x09, 0x73, 0xbd, 0x0c, 0xea, - 0xae, 0xc5, 0x2c, 0xad, 0x04, 0x49, 0x36, 0x0e, 0xca, 0x4a, 0x2d, 0xb9, 0x5e, 0x30, 0xf8, 0x67, - 0xfd, 0x87, 0x24, 0xa8, 0xf7, 0x28, 0x23, 0xda, 0x4d, 0x50, 0x39, 0x4d, 0xa8, 0xbe, 0xc5, 0xf3, - 0xf4, 0xdc, 0x72, 0x7a, 0x2e, 0xb1, 0x1b, 0x41, 0xaf, 0x7d, 0xe2, 0x11, 0x03, 0xc1, 0x13, 0x72, - 0x4a, 0x4c, 0xc9, 0x69, 0x05, 0x52, 0x3e, 0x1d, 0xb9, 0x36, 0xaa, 0x2c, 0x65, 0x88, 0x85, 0xb6, - 0x07, 0xd9, 0x48, 0x25, 0xea, 0x3f, 0xa9, 0x64, 0x89, 0xab, 0x84, 0x6b, 0x58, 0x1a, 0x8c, 0x4c, - 0x47, 0x8a, 0x65, 0x1b, 0x72, 0xd1, 0xe3, 0x25, 0xd5, 0xf6, 0x72, 0x82, 0x8d, 0xc3, 0xb4, 0x77, - 0x60, 0x39, 0xe2, 0x3e, 0x6a, 0x9e, 0x50, 0x5c, 0x29, 0x72, 0xc8, 0xee, 0x4d, 0xc9, 0xca, 0x14, - 0x0f, 0x50, 0x06, 0xcf, 0x15, 0xcb, 0x6a, 0x1f, 0x5f, 0xa2, 0xab, 0x90, 0x0b, 0x9c, 0x9e, 0x6b, - 0xb1, 0x91, 0x4f, 0xa4, 0xf2, 0x62, 0x03, 0xf7, 0x92, 0x31, 0x23, 0x2e, 0x5e, 0x72, 0xa1, 0xb4, - 0xd8, 0xa0, 0x6d, 0xc0, 0x6b, 0xd1, 0xc2, 0x8c, 0xb3, 0x08, 0x95, 0x69, 0x91, 0xab, 0x15, 0x7a, - 0xea, 0x3f, 0x2a, 0x90, 0x16, 0x17, 0x63, 0x82, 0x06, 0xe5, 0x7c, 0x1a, 0x12, 0xf3, 0x68, 0x48, - 0x5e, 0x9c, 0x86, 0x2d, 0x80, 0xa8, 0xcc, 0xa0, 0xac, 0xd6, 0x92, 0xeb, 0xf9, 0xcd, 0x2b, 0xb3, - 0x89, 0x44, 0x89, 0x2d, 0xa7, 0x27, 0xef, 0xfd, 0x44, 0x50, 0xfd, 0x57, 0x05, 0x72, 0x91, 0x5f, - 0xdb, 0x82, 0x62, 0x58, 0x97, 0x79, 0x34, 0xb0, 0x7a, 0x52, 0x8a, 0xab, 0x73, 0x8b, 0xfb, 0x78, - 0x60, 0xf5, 0x8c, 0xbc, 0xac, 0x87, 0x2f, 0xce, 0xa7, 0x35, 0x31, 0x87, 0xd6, 0x29, 0x1d, 0x25, - 0x2f, 0xa6, 0xa3, 0x29, 0xc6, 0xd5, 0x33, 0x8c, 0xd7, 0xff, 0x50, 0x60, 0x71, 0x6f, 0x8c, 0xe5, - 0xdb, 0xff, 0x25, 0x55, 0xf7, 0xa5, 0xb6, 0x6c, 0x62, 0x9b, 0x33, 
0x9c, 0x5d, 0x9b, 0xcd, 0x38, - 0x5d, 0x73, 0xcc, 0x9d, 0x16, 0x66, 0x69, 0xc5, 0x1c, 0x7e, 0x9f, 0x80, 0xe5, 0x19, 0xfc, 0xff, - 0x8f, 0xcb, 0xe9, 0xdb, 0x9b, 0x7a, 0xc9, 0xdb, 0x9b, 0x9e, 0x7b, 0x7b, 0xbf, 0x4b, 0x40, 0xb6, - 0x89, 0xaf, 0xb4, 0x35, 0xf8, 0x37, 0xde, 0xde, 0x2b, 0x90, 0xf3, 0xe8, 0xc0, 0x14, 0x1e, 0x15, - 0x3d, 0x59, 0x8f, 0x0e, 0x8c, 0x19, 0x99, 0xa5, 0x5e, 0xd1, 0xc3, 0x9c, 0x7e, 0x05, 0x24, 0x64, - 0xce, 0x5e, 0x28, 0x1f, 0x0a, 0xa2, 0x15, 0x72, 0x6a, 0xba, 0xc1, 0x7b, 0x80, 0x63, 0x98, 0x32, - 0x3b, 0xe5, 0x89, 0xb2, 0x05, 0xd2, 0x90, 0x38, 0x1e, 0x21, 0x86, 0x0c, 0x39, 0xb8, 0x95, 0xe7, - 0xbd, 0x58, 0x86, 0xc4, 0xd5, 0xbf, 0x52, 0x00, 0xee, 0xf0, 0xce, 0xe2, 0x79, 0xf9, 0xbc, 0x13, - 0x60, 0x09, 0xe6, 0xd4, 0xce, 0xd5, 0x79, 0xa4, 0xc9, 0xfd, 0x0b, 0xc1, 0x64, 0xdd, 0x3b, 0x50, - 0x8c, 0xb5, 0x1d, 0x90, 0xb0, 0x98, 0x73, 0x92, 0x44, 0x63, 0x48, 0x8b, 0x30, 0xa3, 0x70, 0x3c, - 0xb1, 0xaa, 0xff, 0xa4, 0x40, 0x0e, 0x6b, 0x6a, 0x10, 0x66, 0x4d, 0x71, 0xa8, 0x5c, 0x9c, 0xc3, - 0x55, 0x00, 0x91, 0x26, 0x70, 0x1e, 0x12, 0xa9, 0xac, 0x1c, 0x5a, 0x5a, 0xce, 0x43, 0xa2, 0xbd, - 0x1f, 0x35, 0x3c, 0xf9, 0xf7, 0x0d, 0x97, 0x2f, 0x46, 0xd8, 0xf6, 0x4b, 0x90, 0x71, 0x47, 0x43, - 0x93, 0x0f, 0x1f, 0xaa, 0x50, 0xab, 0x3b, 0x1a, 0xb6, 0xc7, 0x41, 0xfd, 0x73, 0xc8, 0xb4, 0xc7, - 0x38, 0x88, 0x73, 0x89, 0xfa, 0x94, 0xca, 0xe9, 0x4f, 0x4c, 0xdd, 0x59, 0x6e, 0xc0, 0x61, 0x47, - 0x03, 0x95, 0x8f, 0x79, 0xe1, 0xdf, 0x02, 0xfe, 0xad, 0xe9, 0x2f, 0x39, 0xe2, 0xcb, 0xe1, 0xfe, - 0xfa, 0xcf, 0x0a, 0x14, 0xa7, 0x6e, 0x92, 0xf6, 0x2e, 0x5c, 0x6a, 0xed, 0xdf, 0x3a, 0xd8, 0xdb, - 0x35, 0x1b, 0xad, 0x5b, 0x66, 0xfb, 0xb3, 0xe6, 0x9e, 0x79, 0xf7, 0xe0, 0x93, 0x83, 0xc3, 0x4f, - 0x0f, 0x4a, 0x0b, 0x95, 0xa5, 0x47, 0x4f, 0x6a, 0xf9, 0xbb, 0xee, 0x03, 0x97, 0x7e, 0xe1, 0xce, - 0x43, 0x37, 0x8d, 0xbd, 0x7b, 0x87, 0xed, 0xbd, 0x92, 0x22, 0xd0, 0x4d, 0x9f, 0x1c, 0x53, 0x46, - 0x10, 0x7d, 0x03, 0x2e, 0x9f, 0x83, 0xde, 0x39, 0x6c, 0x34, 0xf6, 0xdb, 0xa5, 0x44, 0x65, 0xf9, - 0xd1, 
0x93, 0x5a, 0xb1, 0xe9, 0x13, 0xa1, 0x32, 0x8c, 0xd0, 0xa1, 0x3c, 0x1b, 0x71, 0xd8, 0x3c, - 0x6c, 0x6d, 0xdd, 0x29, 0xd5, 0x2a, 0xa5, 0x47, 0x4f, 0x6a, 0x85, 0xf0, 0xc9, 0xe0, 0xf8, 0x4a, - 0xf6, 0xcb, 0xaf, 0xab, 0x0b, 0xdf, 0x7e, 0x53, 0x55, 0xb6, 0x1b, 0x4f, 0x4f, 0xab, 0xca, 0xb3, - 0xd3, 0xaa, 0xf2, 0xfb, 0x69, 0x55, 0x79, 0xfc, 0xa2, 0xba, 0xf0, 0xec, 0x45, 0x75, 0xe1, 0x97, - 0x17, 0xd5, 0x85, 0xfb, 0x37, 0x7b, 0x0e, 0xeb, 0x8f, 0x3a, 0x7a, 0x97, 0x0e, 0x37, 0xba, 0x74, - 0x48, 0x58, 0xe7, 0x88, 0xc5, 0x1f, 0xe2, 0x6f, 0xe2, 0xd9, 0xbf, 0x6e, 0x9d, 0x34, 0xda, 0x6f, - 0xfe, 0x15, 0x00, 0x00, 0xff, 0xff, 0x8c, 0xb6, 0xa1, 0x4e, 0x7b, 0x0e, 0x00, 0x00, +func (m *IndexWrapper) Reset() { *m = IndexWrapper{} } +func (m *IndexWrapper) String() string { return proto.CompactTextString(m) } +func (*IndexWrapper) ProtoMessage() {} +func (*IndexWrapper) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{16} } - -func (m *PartSetHeader) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IndexWrapper) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IndexWrapper) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_IndexWrapper.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return dAtA[:n], nil } - -func (m *PartSetHeader) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *IndexWrapper) XXX_Merge(src proto.Message) { + xxx_messageInfo_IndexWrapper.Merge(m, src) +} +func (m *IndexWrapper) XXX_Size() int { + return m.Size() +} +func (m *IndexWrapper) XXX_DiscardUnknown() { + xxx_messageInfo_IndexWrapper.DiscardUnknown(m) } -func (m *PartSetHeader) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) - i-- - dAtA[i] = 0x12 +var xxx_messageInfo_IndexWrapper proto.InternalMessageInfo + +func (m *IndexWrapper) GetTx() []byte { + if m != nil { + return m.Tx } - if m.Total != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Total)) - i-- - dAtA[i] = 0x8 + return nil +} + +func (m *IndexWrapper) GetShareIndexes() []uint32 { + if m != nil { + return m.ShareIndexes } - return len(dAtA) - i, nil + return nil } -func (m *Part) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IndexWrapper) GetTypeId() string { + if m != nil { + return m.TypeId } - return dAtA[:n], nil + return "" } -func (m *Part) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// BlobTx wraps an encoded sdk.Tx with a second field to contain blobs of data. +// The raw bytes of the blobs are not signed over, instead we verify each blob +// using the relevant MsgPayForBlobs that is signed over in the encoded sdk.Tx. 
+type BlobTx struct { + Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` + Blobs []*Blob `protobuf:"bytes,2,rep,name=blobs,proto3" json:"blobs,omitempty"` + TypeId string `protobuf:"bytes,3,opt,name=type_id,json=typeId,proto3" json:"type_id,omitempty"` } -func (m *Part) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) +func (m *BlobTx) Reset() { *m = BlobTx{} } +func (m *BlobTx) String() string { return proto.CompactTextString(m) } +func (*BlobTx) ProtoMessage() {} +func (*BlobTx) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{17} +} +func (m *BlobTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlobTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlobTx.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + return b[:n], nil } - i-- - dAtA[i] = 0x1a - if len(m.Bytes) > 0 { - i -= len(m.Bytes) - copy(dAtA[i:], m.Bytes) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Bytes))) - i-- - dAtA[i] = 0x12 +} +func (m *BlobTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlobTx.Merge(m, src) +} +func (m *BlobTx) XXX_Size() int { + return m.Size() +} +func (m *BlobTx) XXX_DiscardUnknown() { + xxx_messageInfo_BlobTx.DiscardUnknown(m) +} + +var xxx_messageInfo_BlobTx proto.InternalMessageInfo + +func (m *BlobTx) GetTx() []byte { + if m != nil { + return m.Tx } - if m.Index != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Index)) - i-- - dAtA[i] = 0x8 + return nil +} + +func (m *BlobTx) GetBlobs() []*Blob { + if m != nil { + return m.Blobs } - return len(dAtA) - i, nil + return nil } -func (m *BlockID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, 
err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *BlobTx) GetTypeId() string { + if m != nil { + return m.TypeId } - return dAtA[:n], nil + return "" } -func (m *BlockID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// ShareProof is an NMT proof that a set of shares exist in a set of rows and a +// Merkle proof that those rows exist in a Merkle tree with a given data root. +type ShareProof struct { + Data [][]byte `protobuf:"bytes,1,rep,name=data,proto3" json:"data,omitempty"` + ShareProofs []*NMTProof `protobuf:"bytes,2,rep,name=share_proofs,json=shareProofs,proto3" json:"share_proofs,omitempty"` + NamespaceId []byte `protobuf:"bytes,3,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + RowProof *RowProof `protobuf:"bytes,4,opt,name=row_proof,json=rowProof,proto3" json:"row_proof,omitempty"` + NamespaceVersion uint32 `protobuf:"varint,5,opt,name=namespace_version,json=namespaceVersion,proto3" json:"namespace_version,omitempty"` } -func (m *BlockID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.PartSetHeader.MarshalToSizedBuffer(dAtA[:i]) +func (m *ShareProof) Reset() { *m = ShareProof{} } +func (m *ShareProof) String() string { return proto.CompactTextString(m) } +func (*ShareProof) ProtoMessage() {} +func (*ShareProof) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{18} +} +func (m *ShareProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ShareProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ShareProof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Hash) > 0 { - 
i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) - i-- - dAtA[i] = 0xa + return b[:n], nil } - return len(dAtA) - i, nil +} +func (m *ShareProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShareProof.Merge(m, src) +} +func (m *ShareProof) XXX_Size() int { + return m.Size() +} +func (m *ShareProof) XXX_DiscardUnknown() { + xxx_messageInfo_ShareProof.DiscardUnknown(m) } -func (m *Header) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +var xxx_messageInfo_ShareProof proto.InternalMessageInfo + +func (m *ShareProof) GetData() [][]byte { + if m != nil { + return m.Data } - return dAtA[:n], nil + return nil } -func (m *Header) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *ShareProof) GetShareProofs() []*NMTProof { + if m != nil { + return m.ShareProofs + } + return nil } -func (m *Header) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ProposerAddress) > 0 { - i -= len(m.ProposerAddress) - copy(dAtA[i:], m.ProposerAddress) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerAddress))) - i-- - dAtA[i] = 0x72 +func (m *ShareProof) GetNamespaceId() []byte { + if m != nil { + return m.NamespaceId } - if len(m.EvidenceHash) > 0 { - i -= len(m.EvidenceHash) - copy(dAtA[i:], m.EvidenceHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.EvidenceHash))) - i-- - dAtA[i] = 0x6a + return nil +} + +func (m *ShareProof) GetRowProof() *RowProof { + if m != nil { + return m.RowProof } - if len(m.LastResultsHash) > 0 { - i -= len(m.LastResultsHash) - copy(dAtA[i:], m.LastResultsHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.LastResultsHash))) - i-- - dAtA[i] = 0x62 + return nil +} + +func (m *ShareProof) GetNamespaceVersion() uint32 { + if m != nil { + return m.NamespaceVersion } - if 
len(m.AppHash) > 0 { - i -= len(m.AppHash) - copy(dAtA[i:], m.AppHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) - i-- - dAtA[i] = 0x5a + return 0 +} + +// RowProof is a Merkle proof that a set of rows exist in a Merkle tree with a +// given data root. +type RowProof struct { + RowRoots [][]byte `protobuf:"bytes,1,rep,name=row_roots,json=rowRoots,proto3" json:"row_roots,omitempty"` + Proofs []*crypto.Proof `protobuf:"bytes,2,rep,name=proofs,proto3" json:"proofs,omitempty"` + Root []byte `protobuf:"bytes,3,opt,name=root,proto3" json:"root,omitempty"` + StartRow uint32 `protobuf:"varint,4,opt,name=start_row,json=startRow,proto3" json:"start_row,omitempty"` + EndRow uint32 `protobuf:"varint,5,opt,name=end_row,json=endRow,proto3" json:"end_row,omitempty"` +} + +func (m *RowProof) Reset() { *m = RowProof{} } +func (m *RowProof) String() string { return proto.CompactTextString(m) } +func (*RowProof) ProtoMessage() {} +func (*RowProof) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{19} +} +func (m *RowProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RowProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RowProof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - if len(m.ConsensusHash) > 0 { - i -= len(m.ConsensusHash) - copy(dAtA[i:], m.ConsensusHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ConsensusHash))) - i-- - dAtA[i] = 0x52 +} +func (m *RowProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_RowProof.Merge(m, src) +} +func (m *RowProof) XXX_Size() int { + return m.Size() +} +func (m *RowProof) XXX_DiscardUnknown() { + xxx_messageInfo_RowProof.DiscardUnknown(m) +} + +var xxx_messageInfo_RowProof proto.InternalMessageInfo + +func (m *RowProof) GetRowRoots() [][]byte { + if m != nil { + return m.RowRoots } - if 
len(m.NextValidatorsHash) > 0 { - i -= len(m.NextValidatorsHash) - copy(dAtA[i:], m.NextValidatorsHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.NextValidatorsHash))) - i-- - dAtA[i] = 0x4a + return nil +} + +func (m *RowProof) GetProofs() []*crypto.Proof { + if m != nil { + return m.Proofs } - if len(m.ValidatorsHash) > 0 { - i -= len(m.ValidatorsHash) - copy(dAtA[i:], m.ValidatorsHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorsHash))) - i-- - dAtA[i] = 0x42 + return nil +} + +func (m *RowProof) GetRoot() []byte { + if m != nil { + return m.Root } - if len(m.DataHash) > 0 { - i -= len(m.DataHash) - copy(dAtA[i:], m.DataHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.DataHash))) - i-- - dAtA[i] = 0x3a + return nil +} + +func (m *RowProof) GetStartRow() uint32 { + if m != nil { + return m.StartRow } - if len(m.LastCommitHash) > 0 { - i -= len(m.LastCommitHash) - copy(dAtA[i:], m.LastCommitHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.LastCommitHash))) - i-- - dAtA[i] = 0x32 + return 0 +} + +func (m *RowProof) GetEndRow() uint32 { + if m != nil { + return m.EndRow } - { - size, err := m.LastBlockId.MarshalToSizedBuffer(dAtA[:i]) + return 0 +} + +// NMTProof is a proof of a namespace.ID in an NMT. +// In case this proof proves the absence of a namespace.ID +// in a tree it also contains the leaf hashes of the range +// where that namespace would be. +type NMTProof struct { + // Start index of this proof. + Start int32 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` + // End index of this proof. + End int32 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` + // Nodes that together with the corresponding leaf values can be used to + // recompute the root and verify this proof. 
Nodes should consist of the max + // and min namespaces along with the actual hash, resulting in each being 48 + // bytes each + Nodes [][]byte `protobuf:"bytes,3,rep,name=nodes,proto3" json:"nodes,omitempty"` + // leafHash are nil if the namespace is present in the NMT. In case the + // namespace to be proved is in the min/max range of the tree but absent, this + // will contain the leaf hash necessary to verify the proof of absence. Leaf + // hashes should consist of the namespace along with the actual hash, + // resulting 40 bytes total. + LeafHash []byte `protobuf:"bytes,4,opt,name=leaf_hash,json=leafHash,proto3" json:"leaf_hash,omitempty"` +} + +func (m *NMTProof) Reset() { *m = NMTProof{} } +func (m *NMTProof) String() string { return proto.CompactTextString(m) } +func (*NMTProof) ProtoMessage() {} +func (*NMTProof) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{20} +} +func (m *NMTProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NMTProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NMTProof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + return b[:n], nil } - i-- - dAtA[i] = 0x2a - n4, err4 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time):]) - if err4 != nil { - return 0, err4 +} +func (m *NMTProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_NMTProof.Merge(m, src) +} +func (m *NMTProof) XXX_Size() int { + return m.Size() +} +func (m *NMTProof) XXX_DiscardUnknown() { + xxx_messageInfo_NMTProof.DiscardUnknown(m) +} + +var xxx_messageInfo_NMTProof proto.InternalMessageInfo + +func (m *NMTProof) GetStart() int32 { + if m != nil { + return m.Start } - i -= n4 - i = encodeVarintTypes(dAtA, i, uint64(n4)) - 
i-- - dAtA[i] = 0x22 - if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x18 + return 0 +} + +func (m *NMTProof) GetEnd() int32 { + if m != nil { + return m.End } - if len(m.ChainID) > 0 { - i -= len(m.ChainID) - copy(dAtA[i:], m.ChainID) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainID))) - i-- - dAtA[i] = 0x12 + return 0 +} + +func (m *NMTProof) GetNodes() [][]byte { + if m != nil { + return m.Nodes } - { - size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + return nil +} + +func (m *NMTProof) GetLeafHash() []byte { + if m != nil { + return m.LeafHash } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return nil } -func (m *Data) Marshal() (dAtA []byte, err error) { +func init() { + proto.RegisterEnum("tendermint.types.SignedMsgType", SignedMsgType_name, SignedMsgType_value) + proto.RegisterType((*PartSetHeader)(nil), "tendermint.types.PartSetHeader") + proto.RegisterType((*Part)(nil), "tendermint.types.Part") + proto.RegisterType((*BlockID)(nil), "tendermint.types.BlockID") + proto.RegisterType((*Header)(nil), "tendermint.types.Header") + proto.RegisterType((*Data)(nil), "tendermint.types.Data") + proto.RegisterType((*Blob)(nil), "tendermint.types.Blob") + proto.RegisterType((*Vote)(nil), "tendermint.types.Vote") + proto.RegisterType((*Commit)(nil), "tendermint.types.Commit") + proto.RegisterType((*CommitSig)(nil), "tendermint.types.CommitSig") + proto.RegisterType((*ExtendedCommit)(nil), "tendermint.types.ExtendedCommit") + proto.RegisterType((*ExtendedCommitSig)(nil), "tendermint.types.ExtendedCommitSig") + proto.RegisterType((*Proposal)(nil), "tendermint.types.Proposal") + proto.RegisterType((*SignedHeader)(nil), "tendermint.types.SignedHeader") + proto.RegisterType((*LightBlock)(nil), "tendermint.types.LightBlock") + proto.RegisterType((*BlockMeta)(nil), "tendermint.types.BlockMeta") + 
proto.RegisterType((*TxProof)(nil), "tendermint.types.TxProof") + proto.RegisterType((*IndexWrapper)(nil), "tendermint.types.IndexWrapper") + proto.RegisterType((*BlobTx)(nil), "tendermint.types.BlobTx") + proto.RegisterType((*ShareProof)(nil), "tendermint.types.ShareProof") + proto.RegisterType((*RowProof)(nil), "tendermint.types.RowProof") + proto.RegisterType((*NMTProof)(nil), "tendermint.types.NMTProof") +} + +func init() { proto.RegisterFile("tendermint/types/types.proto", fileDescriptor_d3a6e55e2345de56) } + +var fileDescriptor_d3a6e55e2345de56 = []byte{ + // 1661 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x8f, 0x1a, 0xc9, + 0x15, 0x9f, 0x86, 0x06, 0x9a, 0x07, 0xcc, 0x30, 0x9d, 0x91, 0x8d, 0xb1, 0x87, 0x21, 0x58, 0x49, + 0x26, 0x8e, 0xc5, 0x58, 0xe3, 0x28, 0x1f, 0x07, 0x1f, 0xe6, 0x2b, 0x36, 0x8e, 0x99, 0x41, 0x0d, + 0x1e, 0x2b, 0x56, 0xa4, 0x56, 0x43, 0xd7, 0x40, 0xc7, 0xd0, 0xd5, 0xe9, 0x2e, 0x66, 0x18, 0xff, + 0x05, 0x91, 0x2f, 0xf1, 0x29, 0x37, 0x9f, 0x9c, 0x43, 0xee, 0x89, 0x94, 0x6b, 0x94, 0x93, 0x8f, + 0xbe, 0x25, 0x97, 0x38, 0xbb, 0x63, 0x69, 0xb5, 0x7f, 0xc6, 0xaa, 0x5e, 0x55, 0x37, 0x30, 0xc0, + 0xae, 0x65, 0x59, 0xbb, 0xd2, 0x5e, 0x50, 0xd5, 0x7b, 0xbf, 0xf7, 0x51, 0xef, 0xa3, 0xfa, 0x15, + 0x70, 0x83, 0x11, 0xd7, 0x26, 0xfe, 0xc0, 0x71, 0xd9, 0x16, 0x3b, 0xf7, 0x48, 0x20, 0x7e, 0xab, + 0x9e, 0x4f, 0x19, 0xd5, 0xf3, 0x63, 0x6e, 0x15, 0xe9, 0xc5, 0xb5, 0x2e, 0xed, 0x52, 0x64, 0x6e, + 0xf1, 0x95, 0xc0, 0x15, 0x37, 0xba, 0x94, 0x76, 0xfb, 0x64, 0x0b, 0x77, 0xed, 0xe1, 0xc9, 0x16, + 0x73, 0x06, 0x24, 0x60, 0xd6, 0xc0, 0x93, 0x80, 0xf5, 0x09, 0x33, 0x1d, 0xff, 0xdc, 0x63, 0x94, + 0x63, 0xe9, 0x89, 0x64, 0x97, 0x26, 0xd8, 0xa7, 0xc4, 0x0f, 0x1c, 0xea, 0x4e, 0xfa, 0x51, 0x2c, + 0xcf, 0x78, 0x79, 0x6a, 0xf5, 0x1d, 0xdb, 0x62, 0xd4, 0x17, 0x88, 0xca, 0xaf, 0x21, 0xd7, 0xb0, + 0x7c, 0xd6, 0x24, 0xec, 0x01, 0xb1, 0x6c, 0xe2, 0xeb, 0x6b, 0x90, 0x60, 0x94, 0x59, 0xfd, 0x82, + 0x52, 
0x56, 0x36, 0x73, 0x86, 0xd8, 0xe8, 0x3a, 0xa8, 0x3d, 0x2b, 0xe8, 0x15, 0x62, 0x65, 0x65, + 0x33, 0x6b, 0xe0, 0xba, 0xd2, 0x03, 0x95, 0x8b, 0x72, 0x09, 0xc7, 0xb5, 0xc9, 0x28, 0x94, 0xc0, + 0x0d, 0xa7, 0xb6, 0xcf, 0x19, 0x09, 0xa4, 0x88, 0xd8, 0xe8, 0x3f, 0x87, 0x04, 0xfa, 0x5f, 0x88, + 0x97, 0x95, 0xcd, 0xcc, 0x76, 0xa1, 0x3a, 0x11, 0x28, 0x71, 0xbe, 0x6a, 0x83, 0xf3, 0x77, 0xd5, + 0x37, 0xef, 0x36, 0x96, 0x0c, 0x01, 0xae, 0xf4, 0x21, 0xb5, 0xdb, 0xa7, 0x9d, 0x67, 0xb5, 0xfd, + 0xc8, 0x11, 0x65, 0xec, 0x88, 0x5e, 0x87, 0x15, 0xcf, 0xf2, 0x99, 0x19, 0x10, 0x66, 0xf6, 0xf0, + 0x14, 0x68, 0x34, 0xb3, 0xbd, 0x51, 0xbd, 0x9c, 0x87, 0xea, 0xd4, 0x61, 0xa5, 0x95, 0x9c, 0x37, + 0x49, 0xac, 0x7c, 0xa1, 0x42, 0x52, 0x06, 0xe3, 0x1e, 0xa4, 0x64, 0x58, 0xd1, 0x60, 0x66, 0x7b, + 0x7d, 0x52, 0xa3, 0x64, 0x55, 0xf7, 0xa8, 0x1b, 0x10, 0x37, 0x18, 0x06, 0x52, 0x5f, 0x28, 0xa3, + 0xff, 0x18, 0xb4, 0x4e, 0xcf, 0x72, 0x5c, 0xd3, 0xb1, 0xd1, 0xa3, 0xf4, 0x6e, 0xe6, 0xe2, 0xdd, + 0x46, 0x6a, 0x8f, 0xd3, 0x6a, 0xfb, 0x46, 0x0a, 0x99, 0x35, 0x5b, 0xbf, 0x02, 0xc9, 0x1e, 0x71, + 0xba, 0x3d, 0x86, 0x61, 0x89, 0x1b, 0x72, 0xa7, 0xff, 0x0a, 0x54, 0x5e, 0x10, 0x05, 0x15, 0x6d, + 0x17, 0xab, 0xa2, 0x5a, 0xaa, 0x61, 0xb5, 0x54, 0x5b, 0x61, 0xb5, 0xec, 0x6a, 0xdc, 0xf0, 0xcb, + 0xff, 0x6f, 0x28, 0x06, 0x4a, 0xe8, 0x7b, 0x90, 0xeb, 0x5b, 0x01, 0x33, 0xdb, 0x3c, 0x6c, 0xdc, + 0x7c, 0x02, 0x55, 0x5c, 0x9b, 0x0d, 0x88, 0x0c, 0xac, 0x74, 0x3d, 0xc3, 0xa5, 0x04, 0xc9, 0xd6, + 0x37, 0x21, 0x8f, 0x4a, 0x3a, 0x74, 0x30, 0x70, 0x98, 0x89, 0x71, 0x4f, 0x62, 0xdc, 0x97, 0x39, + 0x7d, 0x0f, 0xc9, 0x0f, 0x78, 0x06, 0xae, 0x43, 0xda, 0xb6, 0x98, 0x25, 0x20, 0x29, 0x84, 0x68, + 0x9c, 0x80, 0xcc, 0x9f, 0xc0, 0x4a, 0x54, 0x75, 0x81, 0x80, 0x68, 0x42, 0xcb, 0x98, 0x8c, 0xc0, + 0x3b, 0xb0, 0xe6, 0x92, 0x11, 0x33, 0x2f, 0xa3, 0xd3, 0x88, 0xd6, 0x39, 0xef, 0x78, 0x5a, 0xe2, + 0x47, 0xb0, 0xdc, 0x09, 0x83, 0x2f, 0xb0, 0x80, 0xd8, 0x5c, 0x44, 0x45, 0xd8, 0x35, 0xd0, 0x2c, + 0xcf, 0x13, 0x80, 0x0c, 0x02, 0x52, 0x96, 
0xe7, 0x21, 0xeb, 0x16, 0xac, 0xe2, 0x19, 0x7d, 0x12, + 0x0c, 0xfb, 0x4c, 0x2a, 0xc9, 0x22, 0x66, 0x85, 0x33, 0x0c, 0x41, 0x47, 0xec, 0x4d, 0xc8, 0x91, + 0x53, 0xc7, 0x26, 0x6e, 0x87, 0x08, 0x5c, 0x0e, 0x71, 0xd9, 0x90, 0x88, 0xa0, 0x9f, 0x42, 0xde, + 0xf3, 0xa9, 0x47, 0x03, 0xe2, 0x9b, 0x96, 0x6d, 0xfb, 0x24, 0x08, 0x0a, 0xcb, 0x42, 0x5f, 0x48, + 0xdf, 0x11, 0xe4, 0x8a, 0x09, 0xea, 0xbe, 0xc5, 0x2c, 0x3d, 0x0f, 0x71, 0x36, 0x0a, 0x0a, 0x4a, + 0x39, 0xbe, 0x99, 0x35, 0xf8, 0x52, 0xdf, 0x80, 0x4c, 0xf0, 0xc7, 0xa1, 0xe5, 0x13, 0x33, 0x70, + 0x9e, 0x13, 0x4c, 0x9e, 0x6a, 0x80, 0x20, 0x35, 0x9d, 0xe7, 0x24, 0x6a, 0x83, 0xe4, 0xb8, 0x0d, + 0x1e, 0xaa, 0x5a, 0x2c, 0x1f, 0x7f, 0xa8, 0x6a, 0xf1, 0xbc, 0xfa, 0x50, 0xd5, 0xd4, 0x7c, 0xa2, + 0xf2, 0x67, 0x05, 0xd4, 0xdd, 0x3e, 0x6d, 0xeb, 0x3f, 0x84, 0xac, 0x6b, 0x0d, 0x48, 0xe0, 0x59, + 0x1d, 0xc2, 0xab, 0x41, 0x74, 0x4f, 0x26, 0xa2, 0xd5, 0x6c, 0xae, 0x91, 0x67, 0x2c, 0xec, 0x70, + 0xbe, 0xe6, 0x07, 0x0e, 0x7a, 0xdc, 0x8b, 0xb0, 0x09, 0xe2, 0xd8, 0xe1, 0x59, 0x24, 0x1e, 0xcb, + 0x22, 0xff, 0x19, 0xac, 0x8e, 0x75, 0x87, 0x40, 0x15, 0x81, 0xf9, 0x88, 0x21, 0xc1, 0x95, 0x7f, + 0xc6, 0x41, 0x3d, 0xa6, 0x8c, 0xe8, 0x77, 0x41, 0xe5, 0xf5, 0x87, 0x9e, 0x2c, 0xcf, 0x6b, 0xd4, + 0xa6, 0xd3, 0x75, 0x89, 0x5d, 0x0f, 0xba, 0xad, 0x73, 0x8f, 0x18, 0x08, 0x9e, 0xe8, 0x93, 0xd8, + 0x54, 0x9f, 0xac, 0x41, 0xc2, 0xa7, 0x43, 0xd7, 0x46, 0xff, 0x12, 0x86, 0xd8, 0xe8, 0x07, 0xa0, + 0x45, 0xe5, 0xaf, 0x7e, 0x53, 0xf9, 0xaf, 0xf0, 0xf2, 0xe7, 0xcd, 0x29, 0x09, 0x46, 0xaa, 0x2d, + 0xbb, 0x60, 0x17, 0xd2, 0xd1, 0xad, 0x2c, 0xdb, 0xe8, 0xc3, 0x3a, 0x71, 0x2c, 0xc6, 0x63, 0x14, + 0x15, 0x75, 0x54, 0x15, 0x22, 0x77, 0xf9, 0x88, 0x21, 0xcb, 0x62, 0xaa, 0x5f, 0x4c, 0x71, 0xb3, + 0xa6, 0xf0, 0x5c, 0xe3, 0x7e, 0xa9, 0xe1, 0x15, 0x7b, 0x03, 0xd2, 0x81, 0xd3, 0x75, 0x2d, 0x36, + 0xf4, 0x89, 0x6c, 0xa9, 0x31, 0x81, 0x73, 0xc9, 0x88, 0x11, 0x17, 0xf3, 0x21, 0x5a, 0x68, 0x4c, + 0xd0, 0xb7, 0xe0, 0x07, 0xd1, 0xc6, 0x1c, 0x6b, 0x11, 0xed, 0xa3, 0x47, 0xac, 
0x66, 0xc8, 0xa9, + 0xfc, 0x4b, 0x81, 0xa4, 0xe8, 0xf8, 0x89, 0x34, 0x28, 0xf3, 0xd3, 0x10, 0x5b, 0x94, 0x86, 0xf8, + 0xc7, 0xa7, 0x61, 0x07, 0x20, 0x72, 0x33, 0x28, 0xa8, 0xe5, 0xf8, 0x66, 0x66, 0xfb, 0xfa, 0xac, + 0x22, 0xe1, 0x62, 0xd3, 0xe9, 0xca, 0x0b, 0x6d, 0x42, 0xa8, 0xf2, 0x3f, 0x05, 0xd2, 0x11, 0x5f, + 0xdf, 0x81, 0x5c, 0xe8, 0x97, 0x79, 0xd2, 0xb7, 0xba, 0xb2, 0x14, 0xd7, 0x17, 0x3a, 0xf7, 0x9b, + 0xbe, 0xd5, 0x35, 0x32, 0xd2, 0x1f, 0xbe, 0x99, 0x9f, 0xd6, 0xd8, 0x82, 0xb4, 0x4e, 0xd5, 0x51, + 0xfc, 0xe3, 0xea, 0x68, 0x2a, 0xe3, 0xea, 0xa5, 0x8c, 0x57, 0x3e, 0x57, 0x60, 0xf9, 0x60, 0x84, + 0xee, 0xdb, 0xdf, 0x65, 0xaa, 0x9e, 0xca, 0xda, 0xb2, 0x89, 0x6d, 0xce, 0xe4, 0xec, 0xe6, 0xac, + 0xc6, 0x69, 0x9f, 0xc7, 0xb9, 0xd3, 0x43, 0x2d, 0xcd, 0x71, 0x0e, 0xff, 0x11, 0x83, 0xd5, 0x19, + 0xfc, 0xf7, 0x2f, 0x97, 0xd3, 0xdd, 0x9b, 0xf8, 0xc0, 0xee, 0x4d, 0x2e, 0xec, 0xde, 0xbf, 0xc7, + 0x40, 0x6b, 0xe0, 0xe7, 0xc7, 0xea, 0x7f, 0x1b, 0x77, 0xef, 0x75, 0x48, 0x7b, 0xb4, 0x6f, 0x0a, + 0x8e, 0x8a, 0x1c, 0xcd, 0xa3, 0x7d, 0x63, 0xa6, 0xcc, 0x12, 0x9f, 0xe8, 0x62, 0x4e, 0x7e, 0x82, + 0x24, 0xa4, 0x2e, 0x37, 0x94, 0x0f, 0x59, 0x11, 0x0a, 0x39, 0x0e, 0xde, 0xe1, 0x31, 0xc0, 0xf9, + 0x52, 0x99, 0x1d, 0x5f, 0x85, 0xdb, 0x02, 0x69, 0x48, 0x1c, 0x97, 0x10, 0xd3, 0x93, 0x9c, 0x48, + 0x0b, 0x8b, 0x6e, 0x2c, 0x43, 0xe2, 0x2a, 0x7f, 0x51, 0x00, 0x1e, 0xf1, 0xc8, 0xe2, 0x79, 0xf9, + 0x20, 0x17, 0xa0, 0x0b, 0xe6, 0x94, 0xe5, 0xd2, 0xa2, 0xa4, 0x49, 0xfb, 0xd9, 0x60, 0xd2, 0xef, + 0x3d, 0xc8, 0x8d, 0x6b, 0x3b, 0x20, 0xa1, 0x33, 0x73, 0x94, 0x44, 0xf3, 0x55, 0x93, 0x30, 0x23, + 0x7b, 0x3a, 0xb1, 0xab, 0xfc, 0x5b, 0x81, 0x34, 0xfa, 0x54, 0x27, 0xcc, 0x9a, 0xca, 0xa1, 0xf2, + 0xf1, 0x39, 0x5c, 0x07, 0x10, 0x6a, 0x70, 0xce, 0x11, 0x95, 0x95, 0x46, 0x0a, 0x8e, 0x39, 0xbf, + 0x88, 0x02, 0x1e, 0xff, 0xfa, 0x80, 0xcb, 0x1b, 0x23, 0x0c, 0xfb, 0x55, 0x48, 0xb9, 0xc3, 0x81, + 0xc9, 0xa7, 0x2a, 0x55, 0x54, 0xab, 0x3b, 0x1c, 0xb4, 0x46, 0x41, 0xe5, 0x0f, 0x90, 0x6a, 0x8d, + 0xf0, 0x85, 0xc1, 
0x4b, 0xd4, 0xa7, 0x54, 0x8e, 0xb5, 0x62, 0x20, 0xd2, 0x38, 0x01, 0xa7, 0xb8, + 0x79, 0xd3, 0x50, 0xf5, 0x03, 0xdf, 0x2e, 0xe1, 0xab, 0xe5, 0xf7, 0x90, 0xc5, 0xef, 0xf4, 0x13, + 0xdf, 0xf2, 0x3c, 0xe2, 0xeb, 0xcb, 0x10, 0x63, 0x23, 0x69, 0x29, 0xc6, 0x46, 0xe3, 0xe9, 0x0a, + 0xbf, 0xf1, 0xf8, 0x52, 0x8a, 0x47, 0xd3, 0x55, 0x4d, 0xd0, 0xf8, 0x49, 0xf8, 0x39, 0xc3, 0x1b, + 0x39, 0x6d, 0x24, 0xf9, 0xb6, 0x66, 0x57, 0x4c, 0x48, 0xf2, 0xd1, 0xae, 0x35, 0x9a, 0xd1, 0x7b, + 0x1b, 0x12, 0xed, 0x3e, 0x6d, 0x0b, 0x7d, 0x99, 0xed, 0x2b, 0x73, 0xf3, 0xd2, 0x36, 0x04, 0x68, + 0xb1, 0x81, 0x2f, 0x15, 0x80, 0x26, 0x77, 0x45, 0x84, 0x2b, 0x8c, 0x88, 0x98, 0x52, 0x45, 0x44, + 0xee, 0x81, 0x70, 0xd6, 0xc4, 0x03, 0x87, 0x06, 0x8b, 0xb3, 0x06, 0x0f, 0xeb, 0x2d, 0x11, 0x9a, + 0x4c, 0x10, 0x69, 0x0c, 0x66, 0xa6, 0xd2, 0xf8, 0xec, 0x54, 0xfa, 0x4b, 0x9e, 0xa4, 0x33, 0xa1, + 0x3f, 0x7a, 0x06, 0xcd, 0xa8, 0x37, 0xe8, 0x99, 0x50, 0xaf, 0xf9, 0x72, 0x35, 0x7f, 0x2a, 0x4d, + 0x2c, 0x98, 0x4a, 0x5f, 0x2b, 0xa0, 0x85, 0x3a, 0x44, 0x5d, 0x9c, 0x99, 0xbc, 0x14, 0xc2, 0x99, + 0x9c, 0xab, 0x35, 0xf8, 0x9e, 0xf7, 0xf3, 0xd4, 0x59, 0x17, 0x17, 0x81, 0xc4, 0xf1, 0xb8, 0x71, + 0x55, 0xf2, 0x70, 0xb8, 0xe6, 0x26, 0x02, 0xc6, 0x5f, 0xac, 0x3e, 0x3d, 0x93, 0xa3, 0xb2, 0x86, + 0x04, 0x83, 0x9e, 0xf1, 0x84, 0x10, 0xd7, 0x46, 0x96, 0xf0, 0x37, 0x49, 0x5c, 0xdb, 0xa0, 0x67, + 0x15, 0x02, 0x5a, 0x18, 0x47, 0x7e, 0xeb, 0xa2, 0x00, 0xa6, 0x3d, 0x61, 0x88, 0x0d, 0x7f, 0x48, + 0x90, 0xe8, 0x9b, 0xce, 0x97, 0x1c, 0xe7, 0x52, 0x9b, 0x04, 0x85, 0x38, 0x1e, 0x44, 0x6c, 0xb8, + 0xfd, 0x3e, 0xb1, 0x4e, 0x44, 0xe9, 0x8b, 0x4f, 0x8f, 0xc6, 0x09, 0xbc, 0xf4, 0x6f, 0xfd, 0x47, + 0x81, 0xdc, 0xd4, 0x07, 0x40, 0xbf, 0x0d, 0x57, 0x9b, 0xb5, 0xfb, 0x87, 0x07, 0xfb, 0x66, 0xbd, + 0x79, 0xdf, 0x6c, 0xfd, 0xae, 0x71, 0x60, 0x3e, 0x3e, 0xfc, 0xed, 0xe1, 0xd1, 0x93, 0xc3, 0xfc, + 0x52, 0x71, 0xe5, 0xc5, 0xab, 0x72, 0xe6, 0xb1, 0xfb, 0xcc, 0xa5, 0x67, 0xee, 0x22, 0x74, 0xc3, + 0x38, 0x38, 0x3e, 0x6a, 0x1d, 0xe4, 0x15, 0x81, 0x6e, 
0xf8, 0xe4, 0x94, 0x32, 0x82, 0xe8, 0x3b, + 0x70, 0x6d, 0x0e, 0x7a, 0xef, 0xa8, 0x5e, 0xaf, 0xb5, 0xf2, 0xb1, 0xe2, 0xea, 0x8b, 0x57, 0xe5, + 0x5c, 0xc3, 0x27, 0xe2, 0x72, 0x44, 0x89, 0x2a, 0x14, 0x66, 0x25, 0x8e, 0x1a, 0x47, 0xcd, 0x9d, + 0x47, 0xf9, 0x72, 0x31, 0xff, 0xe2, 0x55, 0x39, 0x1b, 0x7e, 0xe9, 0x38, 0xbe, 0xa8, 0xfd, 0xe9, + 0x75, 0x69, 0xe9, 0x6f, 0x7f, 0x2d, 0x29, 0xbb, 0xf5, 0x37, 0x17, 0x25, 0xe5, 0xed, 0x45, 0x49, + 0xf9, 0xec, 0xa2, 0xa4, 0xbc, 0x7c, 0x5f, 0x5a, 0x7a, 0xfb, 0xbe, 0xb4, 0xf4, 0xdf, 0xf7, 0xa5, + 0xa5, 0xa7, 0x77, 0xbb, 0x0e, 0xeb, 0x0d, 0xdb, 0xd5, 0x0e, 0x1d, 0x6c, 0x75, 0xe8, 0x80, 0xb0, + 0xf6, 0x09, 0x1b, 0x2f, 0xc4, 0xdf, 0x36, 0x97, 0xff, 0x4a, 0x69, 0x27, 0x91, 0x7e, 0xf7, 0xab, + 0x00, 0x00, 0x00, 0xff, 0xff, 0xf2, 0xee, 0xae, 0xad, 0x0b, 0x12, 0x00, 0x00, +} + +func (m *PartSetHeader) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1539,29 +1777,32 @@ func (m *Data) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Data) MarshalTo(dAtA []byte) (int, error) { +func (m *PartSetHeader) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Data) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PartSetHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Txs) > 0 { - for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Txs[iNdEx]) - copy(dAtA[i:], m.Txs[iNdEx]) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) - i-- - dAtA[i] = 0xa - } + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x12 + } + if m.Total != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Total)) + i-- + dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *Vote) Marshal() (dAtA []byte, err error) { +func (m *Part) Marshal() (dAtA []byte, 
err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1571,86 +1812,42 @@ func (m *Vote) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Vote) MarshalTo(dAtA []byte) (int, error) { +func (m *Part) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Vote) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Part) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.ExtensionSignature) > 0 { - i -= len(m.ExtensionSignature) - copy(dAtA[i:], m.ExtensionSignature) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ExtensionSignature))) - i-- - dAtA[i] = 0x52 + { + size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } - if len(m.Extension) > 0 { - i -= len(m.Extension) - copy(dAtA[i:], m.Extension) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Extension))) + i-- + dAtA[i] = 0x1a + if len(m.Bytes) > 0 { + i -= len(m.Bytes) + copy(dAtA[i:], m.Bytes) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Bytes))) i-- - dAtA[i] = 0x4a - } - if len(m.Signature) > 0 { - i -= len(m.Signature) - copy(dAtA[i:], m.Signature) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) - i-- - dAtA[i] = 0x42 - } - if m.ValidatorIndex != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.ValidatorIndex)) - i-- - dAtA[i] = 0x38 - } - if len(m.ValidatorAddress) > 0 { - i -= len(m.ValidatorAddress) - copy(dAtA[i:], m.ValidatorAddress) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress))) - i-- - dAtA[i] = 0x32 - } - n6, err6 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp):]) - if err6 != nil { - return 0, err6 - } - i -= n6 - i = encodeVarintTypes(dAtA, i, uint64(n6)) - i-- - dAtA[i] = 0x2a - { - size, err := 
m.BlockID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if m.Round != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Round)) - i-- - dAtA[i] = 0x18 - } - if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x10 + dAtA[i] = 0x12 } - if m.Type != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + if m.Index != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *Commit) Marshal() (dAtA []byte, err error) { +func (m *BlockID) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1660,32 +1857,18 @@ func (m *Commit) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Commit) MarshalTo(dAtA []byte) (int, error) { +func (m *BlockID) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *BlockID) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Signatures) > 0 { - for iNdEx := len(m.Signatures) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Signatures[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } { - size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.PartSetHeader.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1693,21 +1876,18 @@ func (m *Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a - if m.Round != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Round)) - i-- - dAtA[i] = 0x10 - } - if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + dAtA[i] = 0x12 + 
if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *CommitSig) Marshal() (dAtA []byte, err error) { +func (m *Header) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1717,82 +1897,81 @@ func (m *CommitSig) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *CommitSig) MarshalTo(dAtA []byte) (int, error) { +func (m *Header) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *CommitSig) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Header) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Signature) > 0 { - i -= len(m.Signature) - copy(dAtA[i:], m.Signature) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) + if len(m.ProposerAddress) > 0 { + i -= len(m.ProposerAddress) + copy(dAtA[i:], m.ProposerAddress) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerAddress))) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x72 } - n9, err9 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp):]) - if err9 != nil { - return 0, err9 + if len(m.EvidenceHash) > 0 { + i -= len(m.EvidenceHash) + copy(dAtA[i:], m.EvidenceHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.EvidenceHash))) + i-- + dAtA[i] = 0x6a } - i -= n9 - i = encodeVarintTypes(dAtA, i, uint64(n9)) - i-- - dAtA[i] = 0x1a - if len(m.ValidatorAddress) > 0 { - i -= len(m.ValidatorAddress) - copy(dAtA[i:], m.ValidatorAddress) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress))) + if len(m.LastResultsHash) > 0 { + i -= len(m.LastResultsHash) + copy(dAtA[i:], m.LastResultsHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.LastResultsHash))) i-- - dAtA[i] = 0x12 + dAtA[i] = 
0x62 } - if m.BlockIdFlag != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.BlockIdFlag)) + if len(m.AppHash) > 0 { + i -= len(m.AppHash) + copy(dAtA[i:], m.AppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0x5a } - return len(dAtA) - i, nil -} - -func (m *ExtendedCommit) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if len(m.ConsensusHash) > 0 { + i -= len(m.ConsensusHash) + copy(dAtA[i:], m.ConsensusHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ConsensusHash))) + i-- + dAtA[i] = 0x52 } - return dAtA[:n], nil -} - -func (m *ExtendedCommit) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExtendedCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ExtendedSignatures) > 0 { - for iNdEx := len(m.ExtendedSignatures) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ExtendedSignatures[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } + if len(m.NextValidatorsHash) > 0 { + i -= len(m.NextValidatorsHash) + copy(dAtA[i:], m.NextValidatorsHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.NextValidatorsHash))) + i-- + dAtA[i] = 0x4a + } + if len(m.ValidatorsHash) > 0 { + i -= len(m.ValidatorsHash) + copy(dAtA[i:], m.ValidatorsHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorsHash))) + i-- + dAtA[i] = 0x42 + } + if len(m.DataHash) > 0 { + i -= len(m.DataHash) + copy(dAtA[i:], m.DataHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.DataHash))) + i-- + dAtA[i] = 0x3a + } + if len(m.LastCommitHash) > 0 { + i -= len(m.LastCommitHash) + copy(dAtA[i:], m.LastCommitHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.LastCommitHash))) + i-- + dAtA[i] = 
0x32 } { - size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.LastBlockId.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1800,21 +1979,41 @@ func (m *ExtendedCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a - if m.Round != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Round)) - i-- - dAtA[i] = 0x10 + dAtA[i] = 0x2a + n4, err4 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time):]) + if err4 != nil { + return 0, err4 } + i -= n4 + i = encodeVarintTypes(dAtA, i, uint64(n4)) + i-- + dAtA[i] = 0x22 if m.Height != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.Height)) i-- - dAtA[i] = 0x8 + dAtA[i] = 0x18 + } + if len(m.ChainID) > 0 { + i -= len(m.ChainID) + copy(dAtA[i:], m.ChainID) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainID))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ExtendedCommitSig) Marshal() (dAtA []byte, err error) { +func (m *Data) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1824,61 +2023,88 @@ func (m *ExtendedCommitSig) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ExtendedCommitSig) MarshalTo(dAtA []byte) (int, error) { +func (m *Data) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ExtendedCommitSig) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Data) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.ExtensionSignature) > 0 { - i -= len(m.ExtensionSignature) - copy(dAtA[i:], m.ExtensionSignature) - i = encodeVarintTypes(dAtA, i, 
uint64(len(m.ExtensionSignature))) + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) i-- dAtA[i] = 0x32 } - if len(m.Extension) > 0 { - i -= len(m.Extension) - copy(dAtA[i:], m.Extension) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Extension))) + if m.SquareSize != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.SquareSize)) i-- - dAtA[i] = 0x2a + dAtA[i] = 0x28 } - if len(m.Signature) > 0 { - i -= len(m.Signature) - copy(dAtA[i:], m.Signature) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) + if len(m.Txs) > 0 { + for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Txs[iNdEx]) + copy(dAtA[i:], m.Txs[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Blob) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Blob) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Blob) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NamespaceVersion != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.NamespaceVersion)) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x20 } - n11, err11 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp):]) - if err11 != nil { - return 0, err11 + if m.ShareVersion != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.ShareVersion)) + i-- + dAtA[i] = 0x18 } - i -= n11 - i = encodeVarintTypes(dAtA, i, uint64(n11)) - i-- - dAtA[i] = 0x1a - if len(m.ValidatorAddress) > 0 { - i -= len(m.ValidatorAddress) - copy(dAtA[i:], m.ValidatorAddress) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress))) + if len(m.Data) > 0 { + i -= 
len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) i-- dAtA[i] = 0x12 } - if m.BlockIdFlag != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.BlockIdFlag)) + if len(m.NamespaceId) > 0 { + i -= len(m.NamespaceId) + copy(dAtA[i:], m.NamespaceId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.NamespaceId))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *Proposal) Marshal() (dAtA []byte, err error) { +func (m *Vote) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1888,31 +2114,57 @@ func (m *Proposal) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Proposal) MarshalTo(dAtA []byte) (int, error) { +func (m *Vote) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Proposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Vote) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + if len(m.ExtensionSignature) > 0 { + i -= len(m.ExtensionSignature) + copy(dAtA[i:], m.ExtensionSignature) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ExtensionSignature))) + i-- + dAtA[i] = 0x52 + } + if len(m.Extension) > 0 { + i -= len(m.Extension) + copy(dAtA[i:], m.Extension) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Extension))) + i-- + dAtA[i] = 0x4a + } if len(m.Signature) > 0 { i -= len(m.Signature) copy(dAtA[i:], m.Signature) i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) i-- - dAtA[i] = 0x3a + dAtA[i] = 0x42 } - n12, err12 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp):]) - if err12 != nil { - return 0, err12 + if m.ValidatorIndex != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.ValidatorIndex)) + i-- + dAtA[i] = 0x38 } - i -= n12 - i = encodeVarintTypes(dAtA, i, uint64(n12)) + if len(m.ValidatorAddress) > 0 { + i 
-= len(m.ValidatorAddress) + copy(dAtA[i:], m.ValidatorAddress) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress))) + i-- + dAtA[i] = 0x32 + } + n6, err6 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp):]) + if err6 != nil { + return 0, err6 + } + i -= n6 + i = encodeVarintTypes(dAtA, i, uint64(n6)) i-- - dAtA[i] = 0x32 + dAtA[i] = 0x2a { size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -1922,12 +2174,7 @@ func (m *Proposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x2a - if m.PolRound != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.PolRound)) - i-- - dAtA[i] = 0x20 - } + dAtA[i] = 0x22 if m.Round != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.Round)) i-- @@ -1946,7 +2193,7 @@ func (m *Proposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *SignedHeader) Marshal() (dAtA []byte, err error) { +func (m *Commit) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1956,44 +2203,54 @@ func (m *SignedHeader) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SignedHeader) MarshalTo(dAtA []byte) (int, error) { +func (m *Commit) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SignedHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Commit != nil { - { - size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Signatures) > 0 { + for iNdEx := len(m.Signatures) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Signatures[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintTypes(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 } - i-- - dAtA[i] = 0x12 } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *LightBlock) Marshal() (dAtA []byte, err error) { +func (m *CommitSig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2003,44 +2260,47 @@ func (m *LightBlock) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *LightBlock) MarshalTo(dAtA []byte) (int, error) { +func (m *CommitSig) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *LightBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *CommitSig) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.ValidatorSet != nil { - { - size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } + if len(m.Signature) > 0 { + i -= len(m.Signature) + copy(dAtA[i:], m.Signature) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) + i-- + dAtA[i] = 0x22 + } + n9, err9 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp):]) + if err9 != nil { + return 0, err9 + } + i -= n9 + i = 
encodeVarintTypes(dAtA, i, uint64(n9)) + i-- + dAtA[i] = 0x1a + if len(m.ValidatorAddress) > 0 { + i -= len(m.ValidatorAddress) + copy(dAtA[i:], m.ValidatorAddress) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress))) i-- dAtA[i] = 0x12 } - if m.SignedHeader != nil { - { - size, err := m.SignedHeader.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } + if m.BlockIdFlag != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.BlockIdFlag)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *BlockMeta) Marshal() (dAtA []byte, err error) { +func (m *ExtendedCommit) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2050,23 +2310,32 @@ func (m *BlockMeta) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *BlockMeta) MarshalTo(dAtA []byte) (int, error) { +func (m *ExtendedCommit) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *BlockMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ExtendedCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.NumTxs != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.NumTxs)) - i-- - dAtA[i] = 0x20 + if len(m.ExtendedSignatures) > 0 { + for iNdEx := len(m.ExtendedSignatures) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ExtendedSignatures[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } } { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2075,25 +2344,20 @@ func (m *BlockMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x1a - if m.BlockSize != 0 { - i = encodeVarintTypes(dAtA, i, 
uint64(m.BlockSize)) + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) i-- dAtA[i] = 0x10 } - { - size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 } - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *TxProof) Marshal() (dAtA []byte, err error) { +func (m *ExtendedCommitSig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2103,460 +2367,1868 @@ func (m *TxProof) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TxProof) MarshalTo(dAtA []byte) (int, error) { +func (m *ExtendedCommitSig) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TxProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ExtendedCommitSig) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Proof != nil { - { - size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } + if len(m.ExtensionSignature) > 0 { + i -= len(m.ExtensionSignature) + copy(dAtA[i:], m.ExtensionSignature) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ExtensionSignature))) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x32 } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + if len(m.Extension) > 0 { + i -= len(m.Extension) + copy(dAtA[i:], m.Extension) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Extension))) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x2a } - if len(m.RootHash) > 0 { - i -= len(m.RootHash) - copy(dAtA[i:], m.RootHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.RootHash))) + if len(m.Signature) > 0 { + i -= len(m.Signature) + 
copy(dAtA[i:], m.Signature) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { - offset -= sovTypes(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ + dAtA[i] = 0x22 } - dAtA[offset] = uint8(v) - return base -} -func (m *PartSetHeader) Size() (n int) { - if m == nil { - return 0 + n11, err11 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp):]) + if err11 != nil { + return 0, err11 } - var l int - _ = l - if m.Total != 0 { - n += 1 + sovTypes(uint64(m.Total)) + i -= n11 + i = encodeVarintTypes(dAtA, i, uint64(n11)) + i-- + dAtA[i] = 0x1a + if len(m.ValidatorAddress) > 0 { + i -= len(m.ValidatorAddress) + copy(dAtA[i:], m.ValidatorAddress) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress))) + i-- + dAtA[i] = 0x12 } - l = len(m.Hash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if m.BlockIdFlag != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.BlockIdFlag)) + i-- + dAtA[i] = 0x8 } - return n + return len(dAtA) - i, nil } -func (m *Part) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Index != 0 { - n += 1 + sovTypes(uint64(m.Index)) - } - l = len(m.Bytes) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) +func (m *Proposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - l = m.Proof.Size() - n += 1 + l + sovTypes(uint64(l)) - return n + return dAtA[:n], nil } -func (m *BlockID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Hash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = m.PartSetHeader.Size() - n += 1 + l + sovTypes(uint64(l)) - return n +func (m *Proposal) MarshalTo(dAtA []byte) (int, error) { + 
size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Header) Size() (n int) { - if m == nil { - return 0 - } +func (m *Proposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.Version.Size() - n += 1 + l + sovTypes(uint64(l)) - l = len(m.ChainID) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) - } - l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time) - n += 1 + l + sovTypes(uint64(l)) - l = m.LastBlockId.Size() - n += 1 + l + sovTypes(uint64(l)) - l = len(m.LastCommitHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.DataHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.ValidatorsHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if len(m.Signature) > 0 { + i -= len(m.Signature) + copy(dAtA[i:], m.Signature) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) + i-- + dAtA[i] = 0x3a } - l = len(m.NextValidatorsHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + n12, err12 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp):]) + if err12 != nil { + return 0, err12 } - l = len(m.ConsensusHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + i -= n12 + i = encodeVarintTypes(dAtA, i, uint64(n12)) + i-- + dAtA[i] = 0x32 + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } - l = len(m.AppHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + i-- + dAtA[i] = 0x2a + if m.PolRound != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.PolRound)) + i-- + dAtA[i] = 0x20 } - l = len(m.LastResultsHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x18 } - l = len(m.EvidenceHash) - if l > 0 { - n += 1 + l + 
sovTypes(uint64(l)) + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 } - l = len(m.ProposerAddress) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 } - return n + return len(dAtA) - i, nil } -func (m *Data) Size() (n int) { - if m == nil { - return 0 +func (m *SignedHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *SignedHeader) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignedHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Txs) > 0 { - for _, b := range m.Txs { - l = len(b) - n += 1 + l + sovTypes(uint64(l)) + if m.Commit != nil { + { + size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *Vote) Size() (n int) { - if m == nil { - return 0 +func (m *LightBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *LightBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LightBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Type != 0 { - n += 1 + sovTypes(uint64(m.Type)) - } - if 
m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) - } - if m.Round != 0 { - n += 1 + sovTypes(uint64(m.Round)) - } - l = m.BlockID.Size() - n += 1 + l + sovTypes(uint64(l)) - l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovTypes(uint64(l)) - l = len(m.ValidatorAddress) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.ValidatorIndex != 0 { - n += 1 + sovTypes(uint64(m.ValidatorIndex)) - } - l = len(m.Signature) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.Extension) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if m.ValidatorSet != nil { + { + size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - l = len(m.ExtensionSignature) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if m.SignedHeader != nil { + { + size, err := m.SignedHeader.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *Commit) Size() (n int) { - if m == nil { - return 0 +func (m *BlockMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *BlockMeta) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) + if m.NumTxs != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.NumTxs)) + i-- + dAtA[i] = 0x20 } - if m.Round != 0 { - n += 1 + sovTypes(uint64(m.Round)) + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintTypes(dAtA, i, uint64(size)) } - l = m.BlockID.Size() - n += 1 + l + sovTypes(uint64(l)) - if len(m.Signatures) > 0 { - for _, e := range m.Signatures { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) + i-- + dAtA[i] = 0x1a + if m.BlockSize != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.BlockSize)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *CommitSig) Size() (n int) { - if m == nil { - return 0 +func (m *TxProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *TxProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TxProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.BlockIdFlag != 0 { - n += 1 + sovTypes(uint64(m.BlockIdFlag)) + if m.Proof != nil { + { + size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - l = len(m.ValidatorAddress) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 } - l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovTypes(uint64(l)) - l = len(m.Signature) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if len(m.RootHash) > 0 { + i -= len(m.RootHash) + copy(dAtA[i:], m.RootHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.RootHash))) + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *ExtendedCommit) Size() (n int) { - 
if m == nil { - return 0 +func (m *IndexWrapper) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *IndexWrapper) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IndexWrapper) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) - } - if m.Round != 0 { - n += 1 + sovTypes(uint64(m.Round)) + if len(m.TypeId) > 0 { + i -= len(m.TypeId) + copy(dAtA[i:], m.TypeId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.TypeId))) + i-- + dAtA[i] = 0x1a } - l = m.BlockID.Size() - n += 1 + l + sovTypes(uint64(l)) - if len(m.ExtendedSignatures) > 0 { - for _, e := range m.ExtendedSignatures { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) + if len(m.ShareIndexes) > 0 { + dAtA22 := make([]byte, len(m.ShareIndexes)*10) + var j21 int + for _, num := range m.ShareIndexes { + for num >= 1<<7 { + dAtA22[j21] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j21++ + } + dAtA22[j21] = uint8(num) + j21++ } + i -= j21 + copy(dAtA[i:], dAtA22[:j21]) + i = encodeVarintTypes(dAtA, i, uint64(j21)) + i-- + dAtA[i] = 0x12 } - return n + if len(m.Tx) > 0 { + i -= len(m.Tx) + copy(dAtA[i:], m.Tx) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ExtendedCommitSig) Size() (n int) { - if m == nil { - return 0 +func (m *BlobTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *BlobTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlobTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i var l int _ = l - if m.BlockIdFlag != 0 { - n += 1 + sovTypes(uint64(m.BlockIdFlag)) + if len(m.TypeId) > 0 { + i -= len(m.TypeId) + copy(dAtA[i:], m.TypeId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.TypeId))) + i-- + dAtA[i] = 0x1a } - l = len(m.ValidatorAddress) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if len(m.Blobs) > 0 { + for iNdEx := len(m.Blobs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Blobs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovTypes(uint64(l)) - l = len(m.Signature) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if len(m.Tx) > 0 { + i -= len(m.Tx) + copy(dAtA[i:], m.Tx) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) + i-- + dAtA[i] = 0xa } - l = len(m.Extension) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *ShareProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - l = len(m.ExtensionSignature) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + return dAtA[:n], nil +} + +func (m *ShareProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ShareProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NamespaceVersion != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.NamespaceVersion)) + i-- + dAtA[i] = 0x28 } - return n + if m.RowProof != nil { + { + size, err := m.RowProof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.NamespaceId) > 0 { + i -= len(m.NamespaceId) + copy(dAtA[i:], m.NamespaceId) + 
i = encodeVarintTypes(dAtA, i, uint64(len(m.NamespaceId))) + i-- + dAtA[i] = 0x1a + } + if len(m.ShareProofs) > 0 { + for iNdEx := len(m.ShareProofs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ShareProofs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Data) > 0 { + for iNdEx := len(m.Data) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Data[iNdEx]) + copy(dAtA[i:], m.Data[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *Proposal) Size() (n int) { - if m == nil { - return 0 +func (m *RowProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *RowProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RowProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Type != 0 { - n += 1 + sovTypes(uint64(m.Type)) + if m.EndRow != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.EndRow)) + i-- + dAtA[i] = 0x28 } - if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) + if m.StartRow != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.StartRow)) + i-- + dAtA[i] = 0x20 } - if m.Round != 0 { - n += 1 + sovTypes(uint64(m.Round)) + if len(m.Root) > 0 { + i -= len(m.Root) + copy(dAtA[i:], m.Root) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Root))) + i-- + dAtA[i] = 0x1a } - if m.PolRound != 0 { - n += 1 + sovTypes(uint64(m.PolRound)) + if len(m.Proofs) > 0 { + for iNdEx := len(m.Proofs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Proofs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 
0x12 + } } - l = m.BlockID.Size() - n += 1 + l + sovTypes(uint64(l)) - l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovTypes(uint64(l)) - l = len(m.Signature) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if len(m.RowRoots) > 0 { + for iNdEx := len(m.RowRoots) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RowRoots[iNdEx]) + copy(dAtA[i:], m.RowRoots[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.RowRoots[iNdEx]))) + i-- + dAtA[i] = 0xa + } } - return n + return len(dAtA) - i, nil } -func (m *SignedHeader) Size() (n int) { +func (m *NMTProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NMTProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NMTProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LeafHash) > 0 { + i -= len(m.LeafHash) + copy(dAtA[i:], m.LeafHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.LeafHash))) + i-- + dAtA[i] = 0x22 + } + if len(m.Nodes) > 0 { + for iNdEx := len(m.Nodes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Nodes[iNdEx]) + copy(dAtA[i:], m.Nodes[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Nodes[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.End != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.End)) + i-- + dAtA[i] = 0x10 + } + if m.Start != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PartSetHeader) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Header != nil { - l = 
m.Header.Size() - n += 1 + l + sovTypes(uint64(l)) + if m.Total != 0 { + n += 1 + sovTypes(uint64(m.Total)) } - if m.Commit != nil { - l = m.Commit.Size() + l = len(m.Hash) + if l > 0 { n += 1 + l + sovTypes(uint64(l)) } return n } -func (m *LightBlock) Size() (n int) { +func (m *Part) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.SignedHeader != nil { - l = m.SignedHeader.Size() - n += 1 + l + sovTypes(uint64(l)) + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) } - if m.ValidatorSet != nil { - l = m.ValidatorSet.Size() + l = len(m.Bytes) + if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + l = m.Proof.Size() + n += 1 + l + sovTypes(uint64(l)) return n } -func (m *BlockMeta) Size() (n int) { +func (m *BlockID) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.BlockID.Size() - n += 1 + l + sovTypes(uint64(l)) - if m.BlockSize != 0 { - n += 1 + sovTypes(uint64(m.BlockSize)) + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) } - l = m.Header.Size() + l = m.PartSetHeader.Size() n += 1 + l + sovTypes(uint64(l)) - if m.NumTxs != 0 { - n += 1 + sovTypes(uint64(m.NumTxs)) - } return n } -func (m *TxProof) Size() (n int) { +func (m *Header) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.RootHash) + l = m.Version.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.ChainID) if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - l = len(m.Data) + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Time) + n += 1 + l + sovTypes(uint64(l)) + l = m.LastBlockId.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.LastCommitHash) if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if m.Proof != nil { - l = m.Proof.Size() + l = len(m.DataHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ValidatorsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.NextValidatorsHash) + if l > 0 { + n += 1 + l + 
sovTypes(uint64(l)) + } + l = len(m.ConsensusHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.AppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.LastResultsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.EvidenceHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ProposerAddress) + if l > 0 { n += 1 + l + sovTypes(uint64(l)) } return n } -func sovTypes(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTypes(x uint64) (n int) { - return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *PartSetHeader) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { +func (m *Data) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Txs) > 0 { + for _, b := range m.Txs { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.SquareSize != 0 { + n += 1 + sovTypes(uint64(m.SquareSize)) + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Blob) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.NamespaceId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.ShareVersion != 0 { + n += 1 + sovTypes(uint64(m.ShareVersion)) + } + if m.NamespaceVersion != 0 { + n += 1 + sovTypes(uint64(m.NamespaceVersion)) + } + return n +} + +func (m *Vote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + l = m.BlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovTypes(uint64(l)) + l = 
len(m.ValidatorAddress) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.ValidatorIndex != 0 { + n += 1 + sovTypes(uint64(m.ValidatorIndex)) + } + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Extension) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ExtensionSignature) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Commit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + l = m.BlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + if len(m.Signatures) > 0 { + for _, e := range m.Signatures { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *CommitSig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BlockIdFlag != 0 { + n += 1 + sovTypes(uint64(m.BlockIdFlag)) + } + l = len(m.ValidatorAddress) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ExtendedCommit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + l = m.BlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + if len(m.ExtendedSignatures) > 0 { + for _, e := range m.ExtendedSignatures { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *ExtendedCommitSig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BlockIdFlag != 0 { + n += 1 + sovTypes(uint64(m.BlockIdFlag)) + } + l = len(m.ValidatorAddress) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp) + 
n += 1 + l + sovTypes(uint64(l)) + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Extension) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ExtensionSignature) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Proposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + if m.PolRound != 0 { + n += 1 + sovTypes(uint64(m.PolRound)) + } + l = m.BlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *SignedHeader) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Commit != nil { + l = m.Commit.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *LightBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SignedHeader != nil { + l = m.SignedHeader.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.ValidatorSet != nil { + l = m.ValidatorSet.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *BlockMeta) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.BlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.BlockSize != 0 { + n += 1 + sovTypes(uint64(m.BlockSize)) + } + l = m.Header.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.NumTxs != 0 { + n += 1 + sovTypes(uint64(m.NumTxs)) + } + return n +} + +func (m *TxProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RootHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Data) + if l > 0 { + n += 1 
+ l + sovTypes(uint64(l)) + } + if m.Proof != nil { + l = m.Proof.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *IndexWrapper) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Tx) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.ShareIndexes) > 0 { + l = 0 + for _, e := range m.ShareIndexes { + l += sovTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + l = len(m.TypeId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *BlobTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Tx) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Blobs) > 0 { + for _, e := range m.Blobs { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.TypeId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ShareProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Data) > 0 { + for _, b := range m.Data { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.ShareProofs) > 0 { + for _, e := range m.ShareProofs { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.NamespaceId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.RowProof != nil { + l = m.RowProof.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.NamespaceVersion != 0 { + n += 1 + sovTypes(uint64(m.NamespaceVersion)) + } + return n +} + +func (m *RowProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RowRoots) > 0 { + for _, b := range m.RowRoots { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Proofs) > 0 { + for _, e := range m.Proofs { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.Root) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.StartRow != 0 { + n += 1 + sovTypes(uint64(m.StartRow)) + } + if m.EndRow != 0 { + n += 1 + sovTypes(uint64(m.EndRow)) + } + return n +} + 
+func (m *NMTProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Start != 0 { + n += 1 + sovTypes(uint64(m.Start)) + } + if m.End != 0 { + n += 1 + sovTypes(uint64(m.End)) + } + if len(m.Nodes) > 0 { + for _, b := range m.Nodes { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.LeafHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PartSetHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartSetHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartSetHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Part) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Part: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Part: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bytes", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bytes = append(m.Bytes[:0], dAtA[iNdEx:postIndex]...) + if m.Bytes == nil { + m.Bytes = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockID: illegal tag %d (wire type %d)", fieldNum, wire) + } + 
switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PartSetHeader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PartSetHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Header) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { return ErrIntOverflowTypes } - if iNdEx >= l { + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Header: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastBlockId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastCommitHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return 
ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastCommitHash = append(m.LastCommitHash[:0], dAtA[iNdEx:postIndex]...) + if m.LastCommitHash == nil { + m.LastCommitHash = []byte{} + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataHash = append(m.DataHash[:0], dAtA[iNdEx:postIndex]...) + if m.DataHash == nil { + m.DataHash = []byte{} + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorsHash = append(m.ValidatorsHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ValidatorsHash == nil { + m.ValidatorsHash = []byte{} + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NextValidatorsHash = append(m.NextValidatorsHash[:0], dAtA[iNdEx:postIndex]...) + if m.NextValidatorsHash == nil { + m.NextValidatorsHash = []byte{} + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.ConsensusHash = append(m.ConsensusHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ConsensusHash == nil { + m.ConsensusHash = []byte{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PartSetHeader: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PartSetHeader: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) } - m.Total = 0 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2566,14 +4238,29 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Total |= uint32(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 2: + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) + if m.AppHash == nil { + m.AppHash = []byte{} + } + iNdEx = postIndex + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastResultsHash", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -2600,9 +4287,77 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) - if m.Hash == nil { - m.Hash = []byte{} + m.LastResultsHash = append(m.LastResultsHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.LastResultsHash == nil { + m.LastResultsHash = []byte{} + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvidenceHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvidenceHash = append(m.EvidenceHash[:0], dAtA[iNdEx:postIndex]...) + if m.EvidenceHash == nil { + m.EvidenceHash = []byte{} + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposerAddress", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProposerAddress = append(m.ProposerAddress[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ProposerAddress == nil { + m.ProposerAddress = []byte{} } iNdEx = postIndex default: @@ -2626,7 +4381,7 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { } return nil } -func (m *Part) Unmarshal(dAtA []byte) error { +func (m *Data) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2649,34 +4404,15 @@ func (m *Part) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Part: wiretype end group for non-group") + return fmt.Errorf("proto: Data: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Part: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Data: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - m.Index = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Index |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bytes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -2703,16 +4439,33 @@ func (m *Part) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Bytes = append(m.Bytes[:0], dAtA[iNdEx:postIndex]...) 
- if m.Bytes == nil { - m.Bytes = []byte{} - } + m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) + copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SquareSize", wireType) + } + m.SquareSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SquareSize |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2722,23 +4475,24 @@ func (m *Part) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} } iNdEx = postIndex default: @@ -2762,7 +4516,7 @@ func (m *Part) Unmarshal(dAtA []byte) error { } return nil } -func (m *BlockID) Unmarshal(dAtA []byte) error { +func (m *Blob) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2785,15 +4539,15 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: BlockID: wiretype end group for non-group") + return fmt.Errorf("proto: Blob: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: BlockID: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Blob: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceId", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -2820,16 +4574,50 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) - if m.Hash == nil { - m.Hash = []byte{} + m.NamespaceId = append(m.NamespaceId[:0], dAtA[iNdEx:postIndex]...) 
+ if m.NamespaceId == nil { + m.NamespaceId = []byte{} } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PartSetHeader", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ShareVersion", wireType) } - var msglen int + m.ShareVersion = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2839,25 +4627,30 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.ShareVersion |= uint32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceVersion", wireType) } - if err := m.PartSetHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.NamespaceVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NamespaceVersion |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex default: iNdEx = preIndex skippy, 
err := skipTypes(dAtA[iNdEx:]) @@ -2879,7 +4672,7 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { } return nil } -func (m *Header) Unmarshal(dAtA []byte) error { +func (m *Vote) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2902,17 +4695,17 @@ func (m *Header) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Header: wiretype end group for non-group") + return fmt.Errorf("proto: Vote: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Vote: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + m.Type = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2922,30 +4715,16 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Type |= SignedMsgType(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } - var stringLen uint64 + m.Height = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2955,29 +4734,16 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
stringLen |= uint64(b&0x7F) << shift + m.Height |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChainID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) } - m.Height = 0 + m.Round = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -2987,14 +4753,14 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Height |= int64(b&0x7F) << shift + m.Round |= int32(b&0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3021,13 +4787,13 @@ func (m *Header) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastBlockId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3054,13 +4820,13 @@ func (m *Header) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastBlockId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil 
{ return err } iNdEx = postIndex case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastCommitHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorAddress", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -3087,16 +4853,16 @@ func (m *Header) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.LastCommitHash = append(m.LastCommitHash[:0], dAtA[iNdEx:postIndex]...) - if m.LastCommitHash == nil { - m.LastCommitHash = []byte{} + m.ValidatorAddress = append(m.ValidatorAddress[:0], dAtA[iNdEx:postIndex]...) + if m.ValidatorAddress == nil { + m.ValidatorAddress = []byte{} } iNdEx = postIndex case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataHash", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorIndex", wireType) } - var byteLen int + m.ValidatorIndex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -3106,29 +4872,14 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + m.ValidatorIndex |= int32(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DataHash = append(m.DataHash[:0], dAtA[iNdEx:postIndex]...) 
- if m.DataHash == nil { - m.DataHash = []byte{} - } - iNdEx = postIndex case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidatorsHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -3155,14 +4906,14 @@ func (m *Header) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ValidatorsHash = append(m.ValidatorsHash[:0], dAtA[iNdEx:postIndex]...) - if m.ValidatorsHash == nil { - m.ValidatorsHash = []byte{} + m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) + if m.Signature == nil { + m.Signature = []byte{} } iNdEx = postIndex case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Extension", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -3189,14 +4940,14 @@ func (m *Header) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.NextValidatorsHash = append(m.NextValidatorsHash[:0], dAtA[iNdEx:postIndex]...) - if m.NextValidatorsHash == nil { - m.NextValidatorsHash = []byte{} + m.Extension = append(m.Extension[:0], dAtA[iNdEx:postIndex]...) + if m.Extension == nil { + m.Extension = []byte{} } iNdEx = postIndex case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsensusHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExtensionSignature", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -3223,16 +4974,66 @@ func (m *Header) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ConsensusHash = append(m.ConsensusHash[:0], dAtA[iNdEx:postIndex]...) - if m.ConsensusHash == nil { - m.ConsensusHash = []byte{} + m.ExtensionSignature = append(m.ExtensionSignature[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ExtensionSignature == nil { + m.ExtensionSignature = []byte{} } iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err } - var byteLen int + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Commit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Commit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Commit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -3242,31 +5043,16 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + m.Height |= int64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) 
- if m.AppHash == nil { - m.AppHash = []byte{} - } - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastResultsHash", wireType) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) } - var byteLen int + m.Round = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -3276,31 +5062,16 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + m.Round |= int32(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LastResultsHash = append(m.LastResultsHash[:0], dAtA[iNdEx:postIndex]...) - if m.LastResultsHash == nil { - m.LastResultsHash = []byte{} - } - iNdEx = postIndex - case 13: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EvidenceHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -3310,31 +5081,30 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.EvidenceHash = append(m.EvidenceHash[:0], dAtA[iNdEx:postIndex]...) 
- if m.EvidenceHash == nil { - m.EvidenceHash = []byte{} + if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex - case 14: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProposerAddress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Signatures", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -3344,24 +5114,24 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.ProposerAddress = append(m.ProposerAddress[:0], dAtA[iNdEx:postIndex]...) - if m.ProposerAddress == nil { - m.ProposerAddress = []byte{} + m.Signatures = append(m.Signatures, CommitSig{}) + if err := m.Signatures[len(m.Signatures)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex default: @@ -3385,7 +5155,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } return nil } -func (m *Data) Unmarshal(dAtA []byte) error { +func (m *CommitSig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3408,15 +5178,34 @@ func (m *Data) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Data: wiretype end group for non-group") + return fmt.Errorf("proto: CommitSig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Data: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CommitSig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field BlockIdFlag", wireType) + } + m.BlockIdFlag = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockIdFlag |= BlockIDFlag(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorAddress", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -3443,8 +5232,77 @@ func (m *Data) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) - copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) + m.ValidatorAddress = append(m.ValidatorAddress[:0], dAtA[iNdEx:postIndex]...) + if m.ValidatorAddress == nil { + m.ValidatorAddress = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) + if m.Signature == nil { + m.Signature = []byte{} + } iNdEx = postIndex default: iNdEx = preIndex @@ -3467,7 +5325,7 @@ func (m *Data) Unmarshal(dAtA []byte) error { } return nil } -func (m *Vote) Unmarshal(dAtA []byte) error { +func (m *ExtendedCommit) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3490,32 +5348,13 @@ func (m *Vote) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Vote: wiretype end group for non-group") + return fmt.Errorf("proto: ExtendedCommit: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Vote: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExtendedCommit: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= SignedMsgType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } @@ -3534,7 +5373,7 @@ func (m *Vote) Unmarshal(dAtA []byte) error { break } } - case 3: + case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) } @@ -3553,7 +5392,7 @@ func (m *Vote) Unmarshal(dAtA []byte) error { break } } - case 4: + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = 
%d for field BlockID", wireType) } @@ -3586,9 +5425,9 @@ func (m *Vote) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 5: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExtendedSignatures", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3615,11 +5454,81 @@ func (m *Vote) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + m.ExtendedSignatures = append(m.ExtendedSignatures, ExtendedCommitSig{}) + if err := m.ExtendedSignatures[len(m.ExtendedSignatures)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExtendedCommitSig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtendedCommitSig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtendedCommitSig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockIdFlag", wireType) + } 
+ m.BlockIdFlag = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockIdFlag |= BlockIDFlag(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ValidatorAddress", wireType) } @@ -3653,11 +5562,11 @@ func (m *Vote) Unmarshal(dAtA []byte) error { m.ValidatorAddress = []byte{} } iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidatorIndex", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) } - m.ValidatorIndex = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -3667,12 +5576,26 @@ func (m *Vote) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ValidatorIndex |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 8: + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) } @@ -3706,7 +5629,7 @@ func (m *Vote) Unmarshal(dAtA []byte) error { m.Signature = []byte{} } iNdEx = postIndex - case 9: + case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Extension", wireType) } @@ -3740,7 +5663,7 @@ func (m *Vote) Unmarshal(dAtA []byte) error { m.Extension = []byte{} } iNdEx = postIndex - case 10: + case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ExtensionSignature", wireType) } @@ -3795,7 
+5718,7 @@ func (m *Vote) Unmarshal(dAtA []byte) error { } return nil } -func (m *Commit) Unmarshal(dAtA []byte) error { +func (m *Proposal) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3818,13 +5741,32 @@ func (m *Commit) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Commit: wiretype end group for non-group") + return fmt.Errorf("proto: Proposal: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Commit: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Proposal: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= SignedMsgType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } @@ -3843,7 +5785,7 @@ func (m *Commit) Unmarshal(dAtA []byte) error { break } } - case 2: + case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) } @@ -3862,7 +5804,26 @@ func (m *Commit) Unmarshal(dAtA []byte) error { break } } - case 3: + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PolRound", wireType) + } + m.PolRound = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PolRound |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) } @@ -3895,9 +5856,9 @@ func (m *Commit) 
Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 4: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Signatures", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3924,11 +5885,44 @@ func (m *Commit) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Signatures = append(m.Signatures, CommitSig{}) - if err := m.Signatures[len(m.Signatures)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Signature == nil { + m.Signature = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -3950,7 +5944,7 @@ func (m *Commit) Unmarshal(dAtA []byte) error { } return nil } -func (m *CommitSig) Unmarshal(dAtA []byte) error { +func (m *SignedHeader) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3973,17 +5967,17 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CommitSig: wiretype end group for non-group") + return fmt.Errorf("proto: SignedHeader: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CommitSig: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SignedHeader: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockIdFlag", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) } - m.BlockIdFlag = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -3993,16 +5987,33 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.BlockIdFlag |= BlockIDFlag(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &Header{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidatorAddress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) } - var byteLen int + var 
msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -4012,29 +6023,81 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.ValidatorAddress = append(m.ValidatorAddress[:0], dAtA[iNdEx:postIndex]...) - if m.ValidatorAddress == nil { - m.ValidatorAddress = []byte{} + if m.Commit == nil { + m.Commit = &Commit{} + } + if err := m.Commit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex - case 3: + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LightBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LightBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LightBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field 
SignedHeader", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4061,15 +6124,18 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + if m.SignedHeader == nil { + m.SignedHeader = &SignedHeader{} + } + if err := m.SignedHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -4079,24 +6145,26 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) 
- if m.Signature == nil { - m.Signature = []byte{} + if m.ValidatorSet == nil { + m.ValidatorSet = &ValidatorSet{} + } + if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex default: @@ -4120,7 +6188,7 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { } return nil } -func (m *ExtendedCommit) Unmarshal(dAtA []byte) error { +func (m *BlockMeta) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4143,17 +6211,17 @@ func (m *ExtendedCommit) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExtendedCommit: wiretype end group for non-group") + return fmt.Errorf("proto: BlockMeta: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExtendedCommit: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: BlockMeta: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) } - m.Height = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -4163,16 +6231,30 @@ func (m *ExtendedCommit) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Height |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BlockSize", 
wireType) } - m.Round = 0 + m.BlockSize = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -4182,14 +6264,14 @@ func (m *ExtendedCommit) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Round |= int32(b&0x7F) << shift + m.BlockSize |= int64(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4216,15 +6298,15 @@ func (m *ExtendedCommit) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExtendedSignatures", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumTxs", wireType) } - var msglen int + m.NumTxs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -4234,26 +6316,11 @@ func (m *ExtendedCommit) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.NumTxs |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExtendedSignatures = append(m.ExtendedSignatures, ExtendedCommitSig{}) - if err := m.ExtendedSignatures[len(m.ExtendedSignatures)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -4275,7 +6342,7 @@ func (m *ExtendedCommit) Unmarshal(dAtA []byte) error { } return nil } -func (m *ExtendedCommitSig) 
Unmarshal(dAtA []byte) error { +func (m *TxProof) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4298,17 +6365,17 @@ func (m *ExtendedCommitSig) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExtendedCommitSig: wiretype end group for non-group") + return fmt.Errorf("proto: TxProof: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExtendedCommitSig: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: TxProof: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockIdFlag", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootHash", wireType) } - m.BlockIdFlag = 0 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -4318,14 +6385,29 @@ func (m *ExtendedCommitSig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.BlockIdFlag |= BlockIDFlag(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RootHash = append(m.RootHash[:0], dAtA[iNdEx:postIndex]...) + if m.RootHash == nil { + m.RootHash = []byte{} + } + iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidatorAddress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -4352,14 +6434,14 @@ func (m *ExtendedCommitSig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ValidatorAddress = append(m.ValidatorAddress[:0], dAtA[iNdEx:postIndex]...) 
- if m.ValidatorAddress == nil { - m.ValidatorAddress = []byte{} + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4386,47 +6468,66 @@ func (m *ExtendedCommitSig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + if m.Proof == nil { + m.Proof = &crypto.Proof{} + } + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err } - if byteLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IndexWrapper) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) 
- if m.Signature == nil { - m.Signature = []byte{} + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 5: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IndexWrapper: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IndexWrapper: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Extension", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -4453,16 +6554,92 @@ func (m *ExtendedCommitSig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Extension = append(m.Extension[:0], dAtA[iNdEx:postIndex]...) - if m.Extension == nil { - m.Extension = []byte{} + m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Tx == nil { + m.Tx = []byte{} } iNdEx = postIndex - case 6: + case 2: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ShareIndexes = append(m.ShareIndexes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.ShareIndexes) == 0 { + m.ShareIndexes = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ShareIndexes = append(m.ShareIndexes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field ShareIndexes", wireType) + } + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExtensionSignature", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TypeId", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -4472,25 +6649,23 @@ func (m *ExtendedCommitSig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen 
|= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.ExtensionSignature = append(m.ExtensionSignature[:0], dAtA[iNdEx:postIndex]...) - if m.ExtensionSignature == nil { - m.ExtensionSignature = []byte{} - } + m.TypeId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -4513,7 +6688,7 @@ func (m *ExtendedCommitSig) Unmarshal(dAtA []byte) error { } return nil } -func (m *Proposal) Unmarshal(dAtA []byte) error { +func (m *BlobTx) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4536,93 +6711,17 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Proposal: wiretype end group for non-group") + return fmt.Errorf("proto: BlobTx: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Proposal: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: BlobTx: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= SignedMsgType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) - } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - m.Height |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) - } - m.Round = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Round |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PolRound", wireType) - } - m.PolRound = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PolRound |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -4632,28 +6731,29 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Tx == nil { + m.Tx = []byte{} + } iNdEx = postIndex - case 6: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Blobs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4680,15 +6780,16 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + m.Blobs = append(m.Blobs, &Blob{}) + if err := m.Blobs[len(m.Blobs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TypeId", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -4698,25 +6799,23 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) 
- if m.Signature == nil { - m.Signature = []byte{} - } + m.TypeId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -4739,7 +6838,7 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { } return nil } -func (m *SignedHeader) Unmarshal(dAtA []byte) error { +func (m *ShareProof) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4762,17 +6861,17 @@ func (m *SignedHeader) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SignedHeader: wiretype end group for non-group") + return fmt.Errorf("proto: ShareProof: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SignedHeader: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShareProof: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -4782,31 +6881,27 @@ func (m *SignedHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Header == nil { - m.Header = &Header{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Data = append(m.Data, make([]byte, postIndex-iNdEx)) + copy(m.Data[len(m.Data)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + return 
fmt.Errorf("proto: wrong wireType = %d for field ShareProofs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4833,68 +6928,16 @@ func (m *SignedHeader) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Commit == nil { - m.Commit = &Commit{} - } - if err := m.Commit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ShareProofs = append(m.ShareProofs, &NMTProof{}) + if err := m.ShareProofs[len(m.ShareProofs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LightBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LightBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LightBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SignedHeader", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceId", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -4904,31 +6947,29 @@ func (m *LightBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= 
int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SignedHeader == nil { - m.SignedHeader = &SignedHeader{} - } - if err := m.SignedHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.NamespaceId = append(m.NamespaceId[:0], dAtA[iNdEx:postIndex]...) + if m.NamespaceId == nil { + m.NamespaceId = []byte{} } iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RowProof", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4955,13 +6996,32 @@ func (m *LightBlock) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ValidatorSet == nil { - m.ValidatorSet = &ValidatorSet{} + if m.RowProof == nil { + m.RowProof = &RowProof{} } - if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.RowProof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceVersion", wireType) + } + m.NamespaceVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NamespaceVersion |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -4983,7 +7043,7 @@ func (m *LightBlock) Unmarshal(dAtA []byte) error { } return nil } -func (m *BlockMeta) Unmarshal(dAtA []byte) error { +func (m *RowProof) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5006,17 +7066,17 @@ 
func (m *BlockMeta) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: BlockMeta: wiretype end group for non-group") + return fmt.Errorf("proto: RowProof: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: BlockMeta: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RowProof: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RowRoots", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -5026,30 +7086,29 @@ func (m *BlockMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.RowRoots = append(m.RowRoots, make([]byte, postIndex-iNdEx)) + copy(m.RowRoots[len(m.RowRoots)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockSize", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proofs", wireType) } - m.BlockSize = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -5059,16 +7118,31 @@ func (m *BlockMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.BlockSize |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return 
ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Proofs = append(m.Proofs, &crypto.Proof{}) + if err := m.Proofs[len(m.Proofs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -5078,30 +7152,31 @@ func (m *BlockMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Root = append(m.Root[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Root == nil { + m.Root = []byte{} } iNdEx = postIndex case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumTxs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartRow", wireType) } - m.NumTxs = 0 + m.StartRow = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -5111,7 +7186,26 @@ func (m *BlockMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumTxs |= int64(b&0x7F) << shift + m.StartRow |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndRow", wireType) + } + m.EndRow = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndRow |= uint32(b&0x7F) << shift if b < 0x80 { break } @@ -5137,7 +7231,7 @@ func (m *BlockMeta) Unmarshal(dAtA []byte) error { } return nil } -func (m *TxProof) Unmarshal(dAtA []byte) error { +func (m *NMTProof) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5160,17 +7254,17 @@ func (m *TxProof) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TxProof: wiretype end group for non-group") + return fmt.Errorf("proto: NMTProof: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TxProof: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NMTProof: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RootHash", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) } - var byteLen int + m.Start = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -5180,29 
+7274,33 @@ func (m *TxProof) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + m.Start |= int32(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) } - m.RootHash = append(m.RootHash[:0], dAtA[iNdEx:postIndex]...) - if m.RootHash == nil { - m.RootHash = []byte{} + m.End = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.End |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -5229,16 +7327,14 @@ func (m *TxProof) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } + m.Nodes = append(m.Nodes, make([]byte, postIndex-iNdEx)) + copy(m.Nodes[len(m.Nodes)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LeafHash", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -5248,26 +7344,24 @@ func (m *TxProof) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Proof == nil { - m.Proof = &crypto.Proof{} - } - if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.LeafHash = append(m.LeafHash[:0], dAtA[iNdEx:postIndex]...) + if m.LeafHash == nil { + m.LeafHash = []byte{} } iNdEx = postIndex default: diff --git a/proto/tendermint/types/types.proto b/proto/tendermint/types/types.proto index a527e2ffb21..e1f030408a5 100644 --- a/proto/tendermint/types/types.proto +++ b/proto/tendermint/types/types.proto @@ -76,6 +76,29 @@ message Data { // NOTE: not all txs here are valid. We're just agreeing on the order first. // This means that block.AppHash does not include these txs. repeated bytes txs = 1; + reserved 2, 3, 4; + // field number 2 is reserved for intermediate state roots + // field number 3 is reserved for evidence + // field number 4 is reserved for blobs + + // SquareSize is the number of rows or columns in the original data square. + uint64 square_size = 5; + + // Hash is the root of a binary Merkle tree where the leaves of the tree are + // the row and column roots of an extended data square. 
Hash is often referred + // to as the "data root". + bytes hash = 6; +} + +// Blob (named after binary large object) is a chunk of data submitted by a user +// to be published to the Celestia blockchain. The data of a Blob is published +// to a namespace and is encoded into shares based on the format specified by +// share_version. +message Blob { + bytes namespace_id = 1; + bytes data = 2; + uint32 share_version = 3; + uint32 namespace_version = 4; } // Vote represents a prevote or precommit vote from validators for @@ -176,3 +199,62 @@ message TxProof { bytes data = 2; tendermint.crypto.Proof proof = 3; } + +// IndexWrapper adds index metadata to a transaction. This is used to track +// transactions that pay for blobs, and where the blobs start in the square. +message IndexWrapper { + bytes tx = 1; + repeated uint32 share_indexes = 2; + string type_id = 3; +} + +// BlobTx wraps an encoded sdk.Tx with a second field to contain blobs of data. +// The raw bytes of the blobs are not signed over, instead we verify each blob +// using the relevant MsgPayForBlobs that is signed over in the encoded sdk.Tx. +message BlobTx { + bytes tx = 1; + repeated Blob blobs = 2; + string type_id = 3; +} + +// ShareProof is an NMT proof that a set of shares exist in a set of rows and a +// Merkle proof that those rows exist in a Merkle tree with a given data root. +message ShareProof { + repeated bytes data = 1; + repeated NMTProof share_proofs = 2; + bytes namespace_id = 3; + RowProof row_proof = 4; + uint32 namespace_version = 5; +} + +// RowProof is a Merkle proof that a set of rows exist in a Merkle tree with a +// given data root. +message RowProof { + repeated bytes row_roots = 1; + repeated tendermint.crypto.Proof proofs = 2; + bytes root = 3; + uint32 start_row = 4; + uint32 end_row = 5; +} + +// NMTProof is a proof of a namespace.ID in an NMT. 
+// In case this proof proves the absence of a namespace.ID +// in a tree it also contains the leaf hashes of the range +// where that namespace would be. +message NMTProof { + // Start index of this proof. + int32 start = 1; + // End index of this proof. + int32 end = 2; + // Nodes that together with the corresponding leaf values can be used to + // recompute the root and verify this proof. Nodes should consist of the max + // and min namespaces along with the actual hash, resulting in each being 48 + // bytes each + repeated bytes nodes = 3; + // leafHash are nil if the namespace is present in the NMT. In case the + // namespace to be proved is in the min/max range of the tree but absent, this + // will contain the leaf hash necessary to verify the proof of absence. Leaf + // hashes should consist of the namespace along with the actual hash, + // resulting 40 bytes total. + bytes leaf_hash = 4; +} diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go index f9ccaeb5a4c..377e1e92d7d 100644 --- a/rpc/client/http/http.go +++ b/rpc/client/http/http.go @@ -591,6 +591,116 @@ func (c *baseRPCClient) BroadcastEvidence( return result, nil } +func (c *baseRPCClient) SignedBlock(ctx context.Context, height *int64) (*ctypes.ResultSignedBlock, error) { + result := new(ctypes.ResultSignedBlock) + params := make(map[string]interface{}) + if height != nil { + params["height"] = height + } + _, err := c.caller.Call(ctx, "signed_block", params, result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) DataCommitment( + ctx context.Context, + start uint64, + end uint64, +) (*ctypes.ResultDataCommitment, error) { + result := new(ctypes.ResultDataCommitment) + params := map[string]interface{}{ + "start": start, + "end": end, + } + + _, err := c.caller.Call(ctx, "data_commitment", params, result) + if err != nil { + return nil, err + } + + return result, nil +} + +func (c *baseRPCClient) TxStatus( + ctx context.Context, + hash []byte, 
+) (*ctypes.ResultTxStatus, error) { + result := new(ctypes.ResultTxStatus) + params := map[string]interface{}{ + "hash": hash, + } + + _, err := c.caller.Call(ctx, "tx_status", params, result) + if err != nil { + return nil, err + } + + return result, nil +} + +func (c *baseRPCClient) DataRootInclusionProof( + ctx context.Context, + height uint64, + start uint64, + end uint64, +) (*ctypes.ResultDataRootInclusionProof, error) { + result := new(ctypes.ResultDataRootInclusionProof) + params := map[string]interface{}{ + "height": height, + "start": start, + "end": end, + } + + _, err := c.caller.Call(ctx, "data_root_inclusion_proof", params, result) + if err != nil { + return nil, err + } + + return result, nil +} + +// ProveShares +// Deprecated: Use ProveSharesV2 instead. +func (c *baseRPCClient) ProveShares( + ctx context.Context, + height uint64, + startShare uint64, + endShare uint64, +) (types.ShareProof, error) { + result := new(types.ShareProof) + params := map[string]interface{}{ + "height": height, + "startShare": startShare, + "endShare": endShare, + } + _, err := c.caller.Call(ctx, "prove_shares", params, result) + if err != nil { + return types.ShareProof{}, err + } + return *result, nil +} + +func (c *baseRPCClient) ProveSharesV2( + ctx context.Context, + height uint64, + startShare uint64, + endShare uint64, +) (*ctypes.ResultShareProof, error) { + result := new(ctypes.ResultShareProof) + params := map[string]interface{}{ + "height": height, + "startShare": startShare, + "endShare": endShare, + } + _, err := c.caller.Call(ctx, "prove_shares_v2", params, result) + if err != nil { + return nil, err + } + return result, nil +} + //----------------------------------------------------------------------------- // WSEvents diff --git a/rpc/client/interface.go b/rpc/client/interface.go index 285ac74e533..2ff02a53a5a 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -91,6 +91,24 @@ type SignClient interface { page, perPage *int, orderBy 
string, ) (*ctypes.ResultBlockSearch, error) + + SignedBlock(ctx context.Context, height *int64) (*ctypes.ResultSignedBlock, error) + + DataCommitment(ctx context.Context, start, end uint64) (*ctypes.ResultDataCommitment, error) + DataRootInclusionProof( + ctx context.Context, + height uint64, + start, + end uint64, + ) (*ctypes.ResultDataRootInclusionProof, error) + + // ProveShares + // Deprecated: Use ProveSharesV2 instead. + ProveShares(_ context.Context, height uint64, startShare uint64, endShare uint64) (types.ShareProof, error) + ProveSharesV2(_ context.Context, height uint64, startShare uint64, endShare uint64) (*ctypes.ResultShareProof, error) + + // TxStatus returns the transaction status for a given transaction hash. + TxStatus(ctx context.Context, hash []byte) (*ctypes.ResultTxStatus, error) } // HistoryClient provides access to data from genesis to now in large chunks. diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go index 7115af1deb8..9a5d1f785fc 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -308,3 +308,51 @@ func (c *Local) Unsubscribe(ctx context.Context, subscriber, query string) error func (c *Local) UnsubscribeAll(ctx context.Context, subscriber string) error { return c.EventBus.UnsubscribeAll(ctx, subscriber) } + +func (c *Local) SignedBlock(ctx context.Context, height *int64) (*ctypes.ResultSignedBlock, error) { + return c.env.SignedBlock(c.ctx, height) +} + +func (c *Local) DataCommitment( + _ context.Context, + start uint64, + end uint64, +) (*ctypes.ResultDataCommitment, error) { + return c.env.DataCommitment(c.ctx, start, end) +} + +func (c *Local) DataRootInclusionProof( + _ context.Context, + height uint64, + start uint64, + end uint64, +) (*ctypes.ResultDataRootInclusionProof, error) { + //nolint:gosec + return c.env.DataRootInclusionProof(c.ctx, int64(height), start, end) +} + +// ProveShares +// Deprecated: Use ProveSharesV2 instead. 
+func (c *Local) ProveShares( + ctx context.Context, + height uint64, + startShare uint64, + endShare uint64, +) (types.ShareProof, error) { + //nolint:gosec + return c.env.ProveShares(c.ctx, int64(height), startShare, endShare) +} + +func (c *Local) ProveSharesV2( + ctx context.Context, + height uint64, + startShare uint64, + endShare uint64, +) (*ctypes.ResultShareProof, error) { + //nolint:gosec + return c.env.ProveSharesV2(c.ctx, int64(height), startShare, endShare) +} + +func (c *Local) TxStatus(ctx context.Context, hash []byte) (*ctypes.ResultTxStatus, error) { + return c.env.TxStatus(c.ctx, hash) +} diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index 0607954251f..a7fcdd2b3a5 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -181,3 +181,21 @@ func (c Client) Validators(_ context.Context, height *int64, page, perPage *int) func (c Client) BroadcastEvidence(_ context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { return c.env.BroadcastEvidence(&rpctypes.Context{}, ev) } + +func (c Client) DataCommitment( + ctx context.Context, + start uint64, + end uint64, +) (*ctypes.ResultDataCommitment, error) { + return c.env.DataCommitment(&rpctypes.Context{}, start, end) +} + +func (c Client) DataRootInclusionProof( + ctx context.Context, + height uint64, + start uint64, + end uint64, +) (*ctypes.ResultDataRootInclusionProof, error) { + //nolint:gosec + return c.env.DataRootInclusionProof(&rpctypes.Context{}, int64(height), start, end) +} diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index 8832ef040c1..efdf94278c4 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -492,11 +492,6 @@ func TestTx(t *testing.T) { assert.True(t, ptx.TxResult.IsOK()) assert.EqualValues(t, txHash, ptx.Hash) - // time to verify the proof - proof := ptx.Proof - if tc.prove && assert.EqualValues(t, tx, proof.Data) { - assert.NoError(t, proof.Proof.Verify(proof.RootHash, txHash)) - } } } } @@ -576,10 
+571,10 @@ func TestTxSearch(t *testing.T) { assert.True(t, ptx.TxResult.IsOK()) assert.EqualValues(t, find.Hash, ptx.Hash) - // time to verify the proof - if assert.EqualValues(t, find.Tx, ptx.Proof.Data) { - assert.NoError(t, ptx.Proof.Proof.Verify(ptx.Proof.RootHash, find.Hash)) - } + // // time to verify the proof + // if assert.EqualValues(t, find.Tx, ptx.Proof.Data) { + // assert.NoError(t, ptx.Proof.Proof.Verify(ptx.Proof.RootHash, find.Hash)) + // } // query by height result, err = c.TxSearch(context.Background(), fmt.Sprintf("tx.height=%d", find.Height), true, nil, nil, "asc") @@ -762,3 +757,79 @@ func TestConcurrentJSONRPCBatching(t *testing.T) { } wg.Wait() } + +func TestTxStatus(t *testing.T) { + c := getHTTPClient() + require := require.New(t) + mempool := node.Mempool() + + // Create a new transaction + _, _, tx := MakeTxKV() + + // Get the initial size of the mempool + initMempoolSize := mempool.Size() + + // Add the transaction to the mempool + err := mempool.CheckTx(tx, nil, mempl.TxInfo{}) + require.NoError(err) + + // Check if the size of the mempool has increased + require.Equal(initMempoolSize+1, mempool.Size()) + + // Get the tx status from the mempool + result, err := c.TxStatus(context.Background(), types.Tx(tx).Hash()) + require.NoError(err) + require.EqualValues(0, result.Height) + require.EqualValues(0, result.Index) + require.Equal("PENDING", result.Status) + + // Flush the mempool + mempool.Flush() + require.Equal(0, mempool.Size()) + + // Get tx status after flushing it from the mempool + result, err = c.TxStatus(context.Background(), types.Tx(tx).Hash()) + require.NoError(err) + require.EqualValues(0, result.Height) + require.EqualValues(0, result.Index) + require.Equal("UNKNOWN", result.Status) + + // Broadcast the tx again + bres, err := c.BroadcastTxCommit(context.Background(), tx) + require.NoError(err) + require.True(bres.CheckTx.IsOK()) + require.True(bres.TxResult.IsOK()) + + // Get the tx status + result, err = 
c.TxStatus(context.Background(), types.Tx(tx).Hash()) + require.NoError(err) + require.EqualValues(bres.Height, result.Height) + require.EqualValues(0, result.Index) + require.Equal("COMMITTED", result.Status) + require.Equal(abci.CodeTypeOK, result.ExecutionCode) + require.Equal("", result.Error) +} + +func TestDataCommitment(t *testing.T) { + c := getHTTPClient() + + // first we broadcast a few tx + expectedHeight := int64(3) + var bres *ctypes.ResultBroadcastTxCommit + var err error + for i := int64(0); i < expectedHeight; i++ { + _, _, tx := MakeTxKV() + bres, err = c.BroadcastTxCommit(context.Background(), tx) + require.Nil(t, err, "%+v when submitting tx %d", err, i) + } + + // check if height >= 3 + actualHeight := bres.Height + require.LessOrEqual(t, expectedHeight, actualHeight, "couldn't create enough blocks for testing the commitment.") + + // check if data commitment is not nil. + // Checking if the commitment is correct is done in `core/blocks_test.go`. + dataCommitment, err := c.DataCommitment(ctx, 1, uint64(expectedHeight)) + require.NotNil(t, dataCommitment, "data commitment shouldn't be nul.") + require.Nil(t, err, "%+v when creating data commitment.", err) +} diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index c7dc2e8e5ee..d0e0667628e 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -1,10 +1,13 @@ package core import ( + "encoding/hex" "errors" "fmt" "sort" + "strconv" + "github.com/cometbft/cometbft/crypto/merkle" "github.com/cometbft/cometbft/libs/bytes" cmtmath "github.com/cometbft/cometbft/libs/math" cmtquery "github.com/cometbft/cometbft/libs/pubsub/query" @@ -14,6 +17,11 @@ import ( "github.com/cometbft/cometbft/types" ) +const ( + asc = "asc" + desc = "desc" +) + // BlockchainInfo gets block headers for minHeight <= height <= maxHeight. 
// // If maxHeight does not yet exist, blocks up to the current height will be @@ -228,10 +236,10 @@ func (env *Environment) BlockSearch( // sort results (must be done before pagination) switch orderBy { - case "desc", "": + case desc, "": sort.Slice(results, func(i, j int) bool { return results[i] > results[j] }) - case "asc": + case asc: sort.Slice(results, func(i, j int) bool { return results[i] < results[j] }) default: @@ -266,3 +274,278 @@ func (env *Environment) BlockSearch( return &ctypes.ResultBlockSearch{Blocks: apiResults, TotalCount: totalCount}, nil } + +// SignedBlock fetches the set of transactions at a specified height and all the relevant +// data to verify the transactions (i.e. using light client verification). +func (env *Environment) SignedBlock(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultSignedBlock, error) { + height, err := env.getHeight(env.BlockStore.Height(), heightPtr) + if err != nil { + return nil, err + } + + block := env.BlockStore.LoadBlock(height) + if block == nil { + return nil, errors.New("block not found") + } + seenCommit := env.BlockStore.LoadSeenCommit(height) + if seenCommit == nil { + return nil, errors.New("seen commit not found") + } + validatorSet, err := env.StateStore.LoadValidators(height) + if validatorSet == nil || err != nil { + return nil, err + } + + return &ctypes.ResultSignedBlock{ + Header: block.Header, + Commit: *seenCommit, + ValidatorSet: *validatorSet, + Data: block.Data, + }, nil +} + +// DataCommitment collects the data roots over a provided ordered range of blocks, +// and then creates a new Merkle root of those data roots. The range is end exclusive. 
+func (env *Environment) DataCommitment(ctx *rpctypes.Context, start, end uint64) (*ctypes.ResultDataCommitment, error) { + err := env.validateDataCommitmentRange(start, end) + if err != nil { + return nil, err + } + tuples, err := env.fetchDataRootTuples(start, end) + if err != nil { + return nil, err + } + root, err := hashDataRootTuples(tuples) + if err != nil { + return nil, err + } + // Create data commitment + return &ctypes.ResultDataCommitment{DataCommitment: root}, nil +} + +// padBytes Pad bytes to given length +func padBytes(byt []byte, length int) ([]byte, error) { + l := len(byt) + if l > length { + return nil, fmt.Errorf( + "cannot pad bytes because length of bytes array: %d is greater than given length: %d", + l, + length, + ) + } + if l == length { + return byt, nil + } + tmp := make([]byte, length) + copy(tmp[length-l:], byt) + return tmp, nil +} + +// To32PaddedHexBytes takes a number and returns its hex representation padded to 32 bytes. +// Used to mimic the result of `abi.encode(number)` in Ethereum. +func To32PaddedHexBytes(number uint64) ([]byte, error) { + hexRepresentation := strconv.FormatUint(number, 16) + // Make sure hex representation has even length. + // The `strconv.FormatUint` can return odd length hex encodings. + // For example, `strconv.FormatUint(10, 16)` returns `a`. + // Thus, we need to pad it. + if len(hexRepresentation)%2 == 1 { + hexRepresentation = "0" + hexRepresentation + } + hexBytes, hexErr := hex.DecodeString(hexRepresentation) + if hexErr != nil { + return nil, hexErr + } + paddedBytes, padErr := padBytes(hexBytes, 32) + if padErr != nil { + return nil, padErr + } + return paddedBytes, nil +} + +// DataRootTuple contains the data that will be used to create the QGB commitments. +// The commitments will be signed by orchestrators and submitted to an EVM chain via a relayer. 
+// For more information: https://github.com/celestiaorg/quantum-gravity-bridge/blob/master/src/DataRootTuple.sol +type DataRootTuple struct { + height uint64 + dataRoot [32]byte +} + +// EncodeDataRootTuple takes a height and a data root, and returns the equivalent of +// `abi.encode(...)` in Ethereum. +// The encoded type is a DataRootTuple, which has the following ABI: +// +// { +// "components":[ +// { +// "internalType":"uint256", +// "name":"height", +// "type":"uint256" +// }, +// { +// "internalType":"bytes32", +// "name":"dataRoot", +// "type":"bytes32" +// }, +// { +// "internalType":"structDataRootTuple", +// "name":"_tuple", +// "type":"tuple" +// } +// ] +// } +// +// padding the hex representation of the height padded to 32 bytes concatenated to the data root. +// For more information, refer to: +// https://github.com/celestiaorg/quantum-gravity-bridge/blob/master/src/DataRootTuple.sol +func EncodeDataRootTuple(height uint64, dataRoot [32]byte) ([]byte, error) { + paddedHeight, err := To32PaddedHexBytes(height) + if err != nil { + return nil, err + } + return append(paddedHeight, dataRoot[:]...), nil +} + +// dataCommitmentBlocksLimit The maximum number of blocks to be used to create a data commitment. +// It's a local parameter to protect the API from creating unnecessarily large commitments. +const dataCommitmentBlocksLimit = 10_000 // ~33 hours of blocks assuming 12-second blocks. + +// validateDataCommitmentRange runs basic checks on the asc sorted list of +// heights that will be used subsequently in generating data commitments over +// the defined set of heights. 
+func (env *Environment) validateDataCommitmentRange(start uint64, end uint64) error { + if start == 0 { + return fmt.Errorf("the first block is 0") + } + heightsRange := end - start + if heightsRange > uint64(dataCommitmentBlocksLimit) { + return fmt.Errorf("the query exceeds the limit of allowed blocks %d", dataCommitmentBlocksLimit) + } + if heightsRange == 0 { + return fmt.Errorf("cannot create the data commitments for an empty set of blocks") + } + if start >= end { + return fmt.Errorf("last block is smaller than first block") + } + // the data commitment range is end exclusive + //nolint:gosec + if end > uint64(env.BlockStore.Height())+1 { + return fmt.Errorf( + "end block %d is higher than current chain height %d", + end, + env.BlockStore.Height(), + ) + } + return nil +} + +// hashDataRootTuples hashes a list of blocks data root tuples, i.e. height, data root and square size, +// then returns their merkle root. +func hashDataRootTuples(tuples []DataRootTuple) ([]byte, error) { + dataRootEncodedTuples := make([][]byte, 0, len(tuples)) + for _, tuple := range tuples { + encodedTuple, err := EncodeDataRootTuple( + tuple.height, + tuple.dataRoot, + ) + if err != nil { + return nil, err + } + dataRootEncodedTuples = append(dataRootEncodedTuples, encodedTuple) + } + root := merkle.HashFromByteSlices(dataRootEncodedTuples) + return root, nil +} + +// validateDataRootInclusionProofRequest validates the request to generate a data root +// inclusion proof. +func (env *Environment) validateDataRootInclusionProofRequest(height uint64, start uint64, end uint64) error { + err := env.validateDataCommitmentRange(start, end) + if err != nil { + return err + } + if height < start || height >= end { + return fmt.Errorf( + "height %d should be in the end exclusive interval first_block %d last_block %d", + height, + start, + end, + ) + } + return nil +} + +// proveDataRootTuples returns the merkle inclusion proof for a height. 
+func (env *Environment) proveDataRootTuples(tuples []DataRootTuple, height int64) (*merkle.Proof, error) { + dataRootEncodedTuples := make([][]byte, 0, len(tuples)) + for _, tuple := range tuples { + encodedTuple, err := EncodeDataRootTuple( + tuple.height, + tuple.dataRoot, + ) + if err != nil { + return nil, err + } + dataRootEncodedTuples = append(dataRootEncodedTuples, encodedTuple) + } + _, proofs := merkle.ProofsFromByteSlices(dataRootEncodedTuples) + //nolint:gosec + return proofs[height-int64(tuples[0].height)], nil +} + +// fetchDataRootTuples takes an end exclusive range of heights and fetches its +// corresponding data root tuples. +func (env *Environment) fetchDataRootTuples(start, end uint64) ([]DataRootTuple, error) { + + tuples := make([]DataRootTuple, 0, end-start) + for height := start; height < end; height++ { + //nolint:gosec + block := env.BlockStore.LoadBlock(int64(height)) + if block == nil { + return nil, fmt.Errorf("couldn't load block %d", height) + } + tuples = append(tuples, DataRootTuple{ + //nolint:gosec + height: uint64(block.Height), + dataRoot: *(*[32]byte)(block.DataHash), + }) + } + return tuples, nil +} + +// DataRootInclusionProof creates an inclusion proof for the data root of block +// height `height` in the set of blocks defined by `start` and `end`. The range +// is end exclusive. +func (env *Environment) DataRootInclusionProof( + ctx *rpctypes.Context, + height int64, + start, + end uint64, +) (*ctypes.ResultDataRootInclusionProof, error) { + //nolint:gosec + proof, err := env.GenerateDataRootInclusionProof(height, start, end) + if err != nil { + return nil, err + } + return &ctypes.ResultDataRootInclusionProof{Proof: *proof}, nil +} + +func (env *Environment) GenerateDataRootInclusionProof(height int64, start, end uint64) (*merkle.Proof, error) { + // if globalEnv == nil { + // return nil, errors.New("global env is nil. 
this can only be called inside celestia-core") + // } + + err := env.validateDataRootInclusionProofRequest(uint64(height), start, end) + if err != nil { + return nil, err + } + tuples, err := env.fetchDataRootTuples(start, end) + if err != nil { + return nil, err + } + proof, err := env.proveDataRootTuples(tuples, height) + if err != nil { + return nil, err + } + return proof, nil +} diff --git a/rpc/core/blocks_test.go b/rpc/core/blocks_test.go index 68c0a1facd2..af149aba4bf 100644 --- a/rpc/core/blocks_test.go +++ b/rpc/core/blocks_test.go @@ -1,6 +1,7 @@ package core import ( + "encoding/hex" "fmt" "testing" @@ -116,3 +117,215 @@ func TestBlockResults(t *testing.T) { } } } + +func TestEncodeDataRootTuple(t *testing.T) { + height := uint64(2) + dataRoot, err := hex.DecodeString("82dc1607d84557d3579ce602a45f5872e821c36dbda7ec926dfa17ebc8d5c013") + require.NoError(t, err) + + expectedEncoding, err := hex.DecodeString( + // hex representation of height padded to 32 bytes + "0000000000000000000000000000000000000000000000000000000000000002" + + // data root + "82dc1607d84557d3579ce602a45f5872e821c36dbda7ec926dfa17ebc8d5c013", + ) + require.NoError(t, err) + require.NotNil(t, expectedEncoding) + + actualEncoding, err := EncodeDataRootTuple(height, *(*[32]byte)(dataRoot)) + require.NoError(t, err) + require.NotNil(t, actualEncoding) + + // Check that the length of packed data is correct + assert.Equal(t, len(actualEncoding), 64) + assert.Equal(t, expectedEncoding, actualEncoding) +} + +// func TestDataCommitmentResults(t *testing.T) { +// env := &Environment{} +// height := int64(2826) + +// blocks := randomBlocks(height) +// blockStore := mockBlockStore{ +// height: height, +// blocks: blocks, +// } +// env.BlockStore = blockStore + +// testCases := []struct { +// beginQuery int +// endQuery int +// expectPass bool +// }{ +// {10, 15, true}, +// {2727, 2828, false}, +// {10, 9, false}, +// {0, 1000, false}, +// {0, 10, false}, +// {10, 8, false}, +// // to test the end 
exclusive support for ranges. +// // the end block could be equal to (height+1), but the data commitment would only +// // take up to height. So we should be able to send request having end block equal +// // to (height+1). +// {int(env.BlockStore.Height()) - 100, int(env.BlockStore.Height()) + 1, true}, +// } + +// for i, tc := range testCases { +// env.BlockIndexer = mockBlockIndexer{ +// height: height, +// beginQueryBlock: tc.beginQuery, +// endQueryBlock: tc.endQuery, +// } +// SetEnvironment(env) + +// actualCommitment, err := DataCommitment(&rpctypes.Context{}, uint64(tc.beginQuery), uint64(tc.endQuery)) +// if tc.expectPass { +// require.Nil(t, err, "should generate the needed data commitment.") + +// size := tc.endQuery - tc.beginQuery +// dataRootEncodedTuples := make([][]byte, size) +// for i := 0; i < size; i++ { +// encodedTuple, err := EncodeDataRootTuple( +// uint64(blocks[tc.beginQuery+i].Height), +// *(*[32]byte)(blocks[tc.beginQuery+i].DataHash), +// ) +// require.NoError(t, err) +// dataRootEncodedTuples[i] = encodedTuple +// } +// expectedCommitment := merkle.HashFromByteSlices(dataRootEncodedTuples) + +// assert.Equal( +// t, +// expectedCommitment, +// actualCommitment.DataCommitment.Bytes(), +// i, +// ) +// } else { +// require.NotNil(t, err, "couldn't generate the needed data commitment.") +// } +// } +// } + +// func TestDataRootInclusionProofResults(t *testing.T) { +// env := &Environment{} +// env.StateStore = sm.NewStore( +// dbm.NewMemDB(), sm.StoreOptions{ +// DiscardABCIResponses: false, +// }, +// ) + +// height := int64(2826) +// env.BlockStore = mockBlockStore{height: height} +// SetEnvironment(env) + +// blocks := randomBlocks(height) +// blockStore := mockBlockStore{ +// height: height, +// blocks: blocks, +// } +// env.BlockStore = blockStore + +// testCases := []struct { +// height int +// firstQuery int +// lastQuery int +// expectPass bool +// }{ +// {8, 10, 15, false}, +// {10, 0, 15, false}, +// {10, 10, 15, true}, +// 
{13, 10, 15, true}, +// {14, 10, 15, true}, +// {15, 10, 15, false}, +// {17, 10, 15, false}, +// } + +// for i, tc := range testCases { +// env.BlockIndexer = mockBlockIndexer{ +// height: height, +// beginQueryBlock: tc.firstQuery, +// endQueryBlock: tc.lastQuery, +// } + +// proof, err := DataRootInclusionProof( +// &rpctypes.Context{}, +// int64(tc.height), +// uint64(tc.firstQuery), +// uint64(tc.lastQuery), +// ) +// if tc.expectPass { +// require.Nil(t, err, "should generate block height data root inclusion proof.", i) + +// size := tc.lastQuery - tc.firstQuery +// dataRootEncodedTuples := make([][]byte, size) +// for i := 0; i < size; i++ { +// encodedTuple, err := EncodeDataRootTuple( +// uint64(blocks[tc.firstQuery+i].Height), +// *(*[32]byte)(blocks[tc.firstQuery+i].DataHash), +// ) +// require.NoError(t, err) +// dataRootEncodedTuples[i] = encodedTuple +// } +// commitment := merkle.HashFromByteSlices(dataRootEncodedTuples) + +// err = proof.Proof.Verify(commitment, dataRootEncodedTuples[tc.height-tc.firstQuery]) +// require.NoError(t, err) +// } else { +// require.NotNil(t, err, "shouldn't be able to generate proof.") +// } +// } +// } + +// // mockBlockIndexer used to mock the set of indexed blocks and return a predefined one. +// type mockBlockIndexer struct { +// height int64 +// beginQueryBlock int // used not to have to parse any query +// endQueryBlock int // used not to have to parse any query +// } + +// func (indexer mockBlockIndexer) Has(height int64) (bool, error) { return true, nil } +// func (indexer mockBlockIndexer) Index(types.EventDataNewBlockHeader) error { return nil } + +// // Search returns a list of block heights corresponding to the values of `indexer.endQueryBlock` +// // and `indexer.beginQueryBlock`. +// // Doesn't use the query parameter for anything. 
+// func (indexer mockBlockIndexer) Search(ctx context.Context, _ *query.Query) ([]int64, error) { +// size := indexer.endQueryBlock - indexer.beginQueryBlock + 1 +// results := make([]int64, size) +// for i := 0; i < size; i++ { +// results[i] = int64(indexer.beginQueryBlock + i) +// } +// return results, nil +// } + +// // randomBlocks generates a set of random blocks up to (and including) the provided height. +// func randomBlocks(height int64) []*types.Block { +// blocks := make([]*types.Block, height+1) +// for i := int64(0); i <= height; i++ { +// blocks[i] = randomBlock(i) +// } +// return blocks +// } + +// func makeTxs(height int64) (txs []types.Tx) { +// for i := 0; i < 10; i++ { +// numBytes := make([]byte, 8) +// binary.BigEndian.PutUint64(numBytes, uint64(height)) + +// txs = append(txs, types.Tx(append(numBytes, byte(i)))) +// } +// return txs +// } + +// // randomBlock generates a Block with a certain height and random data hash. +// func randomBlock(height int64) *types.Block { +// return &types.Block{ +// Header: types.Header{ +// Height: height, +// DataHash: cmtrand.Bytes(32), +// }, +// Data: types.Data{ +// Txs: makeTxs(height), +// }, +// } +// } diff --git a/rpc/core/routes.go b/rpc/core/routes.go index c7c13a52781..767c4afd56a 100644 --- a/rpc/core/routes.go +++ b/rpc/core/routes.go @@ -51,6 +51,14 @@ func (env *Environment) GetRoutes() RoutesMap { // evidence API "broadcast_evidence": rpc.NewRPCFunc(env.BroadcastEvidence, "evidence"), + + // celestia-specific API + "prove_shares": rpc.NewRPCFunc(env.ProveShares, "height,startShare,endShare"), + "prove_shares_v2": rpc.NewRPCFunc(env.ProveSharesV2, "height,startShare,endShare"), + "data_root_inclusion_proof": rpc.NewRPCFunc(env.DataRootInclusionProof, "height,start,end"), + "signed_block": rpc.NewRPCFunc(env.SignedBlock, "height", rpc.Cacheable("height")), + "data_commitment": rpc.NewRPCFunc(env.DataCommitment, "start,end"), + "tx_status": rpc.NewRPCFunc(env.TxStatus, "hash"), } } diff --git 
a/rpc/core/status.go b/rpc/core/status.go index 5e3d6d1892e..1783b3d263a 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -52,7 +52,7 @@ func (env *Environment) Status(*rpctypes.Context) (*ctypes.ResultStatus, error) } result := &ctypes.ResultStatus{ - NodeInfo: env.P2PTransport.NodeInfo().(p2p.DefaultNodeInfo), + NodeInfo: GetNodeInfo(env, latestHeight), SyncInfo: ctypes.SyncInfo{ LatestBlockHash: latestBlockHash, LatestAppHash: latestAppHash, @@ -83,3 +83,27 @@ func (env *Environment) validatorAtHeight(h int64) *types.Validator { _, val := valsWithH.GetByAddress(privValAddress) return val } + +// GetNodeInfo returns the node info with the app version set to the latest app +// version from the state store. +// +// This function is necessary because upstream CometBFT does not support +// upgrading app versions for a running binary. Therefore the +// env.P2PTransport.NodeInfo.ProtocolVersion.App is expected to be set on node +// start-up and never updated. Celestia supports upgrading the app version for a +// running binary so the env.P2PTransport.NodeInfo.ProtocolVersion.App will be +// incorrect if a node upgraded app versions without restarting. This function +// corrects that issue by fetching the latest app version from the state store. +func GetNodeInfo(env *Environment, latestHeight int64) p2p.DefaultNodeInfo { + nodeInfo := env.P2PTransport.NodeInfo().(p2p.DefaultNodeInfo) + + consensusParams, err := env.StateStore.LoadConsensusParams(latestHeight) + if err != nil { + // use the default app version if we can't load the consensus params (i.e. 
height 0) + return nodeInfo + } + + // override the default app version with the latest app version + nodeInfo.ProtocolVersion.App = consensusParams.Version.App + return nodeInfo +} diff --git a/rpc/core/status_test.go b/rpc/core/status_test.go new file mode 100644 index 00000000000..ddfda294678 --- /dev/null +++ b/rpc/core/status_test.go @@ -0,0 +1,76 @@ +package core_test + +import ( + "testing" + + "github.com/cometbft/cometbft/p2p" + "github.com/cometbft/cometbft/rpc/core" + "github.com/cometbft/cometbft/state/mocks" + "github.com/cometbft/cometbft/types" + "github.com/stretchr/testify/assert" +) + +func TestGetNodeInfo(t *testing.T) { + p2pTransport := mockTransport{} + stateStore := &mocks.Store{} + stateStore.On("LoadConsensusParams", int64(1)).Return(types.ConsensusParams{Version: types.VersionParams{App: 1}}, nil) + stateStore.On("LoadConsensusParams", int64(2)).Return(types.ConsensusParams{Version: types.VersionParams{App: 2}}, nil) + + type testCase struct { + name string + env *core.Environment + latestHeight int64 + want uint64 + } + testCases := []testCase{ + { + name: "want 1 when consensus params app version is 1", + env: &core.Environment{P2PTransport: p2pTransport, StateStore: stateStore}, + latestHeight: 1, + want: 1, + }, + { + name: "want 2 if consensus params app version is 2", + env: &core.Environment{P2PTransport: p2pTransport, StateStore: stateStore}, + latestHeight: 2, + want: 2, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + nodeInfo := core.GetNodeInfo(tc.env, tc.latestHeight) + assert.Equal(t, tc.want, nodeInfo.ProtocolVersion.App) + }) + } +} + +// transport is copy + pasted from the core package because it isn't exported. +// https://github.com/celestiaorg/celestia-core/blob/640d115aec834609022c842b2497fc568df53692/rpc/core/env.go#L69-L73 +type transport interface { + Listeners() []string + IsListening() bool + NodeInfo() p2p.NodeInfo +} + +// mockTransport implements the transport interface. 
+var _ transport = (*mockTransport)(nil) + +type mockTransport struct{} + +func (m mockTransport) Listeners() []string { + return []string{} +} +func (m mockTransport) IsListening() bool { + return false +} + +func (m mockTransport) NodeInfo() p2p.NodeInfo { + return p2p.DefaultNodeInfo{ + ProtocolVersion: p2p.ProtocolVersion{ + P2P: 0, + Block: 0, + App: 0, + }, + } +} diff --git a/rpc/core/tx.go b/rpc/core/tx.go index fdd38e327bc..cd4cee62954 100644 --- a/rpc/core/tx.go +++ b/rpc/core/tx.go @@ -1,18 +1,30 @@ package core import ( + "context" "errors" "fmt" "sort" + abcitypes "github.com/cometbft/cometbft/abci/types" + "github.com/cometbft/cometbft/libs/consts" cmtmath "github.com/cometbft/cometbft/libs/math" cmtquery "github.com/cometbft/cometbft/libs/pubsub/query" + cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" ctypes "github.com/cometbft/cometbft/rpc/core/types" rpctypes "github.com/cometbft/cometbft/rpc/jsonrpc/types" + "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/state/txindex/null" "github.com/cometbft/cometbft/types" ) +const ( + TxStatusUnknown string = "UNKNOWN" + TxStatusPending string = "PENDING" + TxStatusEvicted string = "EVICTED" + TxStatusCommitted string = "COMMITTED" +) + // Tx allows you to query the transaction results. `nil` could mean the // transaction is in the mempool, invalidated, or was not sent in the first // place. 
@@ -32,11 +44,14 @@ func (env *Environment) Tx(_ *rpctypes.Context, hash []byte, prove bool) (*ctype return nil, fmt.Errorf("tx (%X) not found", hash) } - var proof types.TxProof + var shareProof types.ShareProof if prove { block := env.BlockStore.LoadBlock(r.Height) if block != nil { - proof = block.Data.Txs.Proof(int(r.Index)) + shareProof, err = env.proveTx(r.Height, r.Index) + if err != nil { + return nil, err + } } } @@ -46,7 +61,7 @@ func (env *Environment) Tx(_ *rpctypes.Context, hash []byte, prove bool) (*ctype Index: r.Index, TxResult: r.Result, Tx: r.Tx, - Proof: proof, + Proof: shareProof, }, nil } @@ -113,11 +128,14 @@ func (env *Environment) TxSearch( for i := skipCount; i < skipCount+pageSize; i++ { r := results[i] - var proof types.TxProof + var shareProof types.ShareProof if prove { block := env.BlockStore.LoadBlock(r.Height) if block != nil { - proof = block.Data.Txs.Proof(int(r.Index)) + shareProof, err = env.proveTx(r.Height, r.Index) + if err != nil { + return nil, err + } } } @@ -127,9 +145,143 @@ func (env *Environment) TxSearch( Index: r.Index, TxResult: r.Result, Tx: r.Tx, - Proof: proof, + Proof: shareProof, }) } return &ctypes.ResultTxSearch{Txs: apiResults, TotalCount: totalCount}, nil } + +func (env *Environment) proveTx(height int64, index uint32) (types.ShareProof, error) { + var ( + pShareProof cmtproto.ShareProof + shareProof types.ShareProof + ) + rawBlock, err := loadRawBlock(env.BlockStore, height) + if err != nil { + return shareProof, err + } + res, err := env.ProxyAppQuery.Query(context.Background(), &abcitypes.RequestQuery{ + Data: rawBlock, + Path: fmt.Sprintf(consts.TxInclusionProofQueryPath, index), + }) + if err != nil { + return shareProof, err + } + err = pShareProof.Unmarshal(res.Value) + if err != nil { + return shareProof, err + } + shareProof, err = types.ShareProofFromProto(pShareProof) + if err != nil { + return shareProof, err + } + return shareProof, nil +} + +// ProveShares creates an NMT proof for a set of 
shares to a set of rows. It is +// end exclusive. +// Deprecated: Use ProveSharesV2 instead. +func (env *Environment) ProveShares( + _ *rpctypes.Context, + height int64, + startShare uint64, + endShare uint64, +) (types.ShareProof, error) { + var ( + pShareProof cmtproto.ShareProof + shareProof types.ShareProof + ) + rawBlock, err := loadRawBlock(env.BlockStore, height) + if err != nil { + return shareProof, err + } + res, err := env.ProxyAppQuery.Query(context.Background(), &abcitypes.RequestQuery{ + Data: rawBlock, + Path: fmt.Sprintf(consts.ShareInclusionProofQueryPath, startShare, endShare), + }) + if err != nil { + return shareProof, err + } + if res.Value == nil && res.Log != "" { + // we can make the assumption that for custom queries, if the value is nil + // and some logs have been emitted, then an error happened. + return types.ShareProof{}, errors.New(res.Log) + } + err = pShareProof.Unmarshal(res.Value) + if err != nil { + return shareProof, err + } + shareProof, err = types.ShareProofFromProto(pShareProof) + if err != nil { + return shareProof, err + } + return shareProof, nil +} + +// TxStatus retrieves the status of a transaction by its hash. It returns a ResultTxStatus +// with the transaction's height and index if committed, or its pending, evicted, or unknown status. +// It also includes the execution code and log for failed txs. 
+func (env *Environment) TxStatus(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultTxStatus, error) { + + // Check if the tx has been committed + txInfo := env.BlockStore.LoadTxInfo(hash) + if txInfo != nil { + return &ctypes.ResultTxStatus{Height: txInfo.Height, Index: txInfo.Index, ExecutionCode: txInfo.Code, Error: txInfo.Error, Status: TxStatusCommitted}, nil + } + + // Get the tx key from the hash + txKey, err := types.TxKeyFromBytes(hash) + if err != nil { + return nil, fmt.Errorf("failed to get tx key from hash: %v", err) + } + + // Check if the tx is in the mempool + txInMempool, ok := env.Mempool.GetTxByKey(txKey) + if txInMempool != nil && ok { + return &ctypes.ResultTxStatus{Status: TxStatusPending}, nil + } + + // Check if the tx is evicted + isEvicted := env.Mempool.WasRecentlyEvicted(txKey) + if isEvicted { + return &ctypes.ResultTxStatus{Status: TxStatusEvicted}, nil + } + + // If the tx is not in the mempool, evicted, or committed, return unknown + return &ctypes.ResultTxStatus{Status: TxStatusUnknown}, nil +} + +// ProveSharesV2 creates a proof for a set of shares to the data root. +// The range is end exclusive. +func (env *Environment) ProveSharesV2( + ctx *rpctypes.Context, + height int64, + startShare uint64, + endShare uint64, +) (*ctypes.ResultShareProof, error) { + shareProof, err := env.ProveShares(ctx, height, startShare, endShare) + if err != nil { + return nil, err + } + return &ctypes.ResultShareProof{ShareProof: shareProof}, nil +} + +func loadRawBlock(bs state.BlockStore, height int64) ([]byte, error) { + var blockMeta = bs.LoadBlockMeta(height) + if blockMeta == nil { + return nil, fmt.Errorf("no block found for height %d", height) + } + + buf := []byte{} + for i := 0; i < int(blockMeta.BlockID.PartSetHeader.Total); i++ { + part := bs.LoadBlockPart(height, i) + // If the part is missing (e.g. since it has been deleted after we + // loaded the block meta) we consider the whole block to be missing. 
+ if part == nil { + return nil, fmt.Errorf("missing block part at height %d part %d", height, i) + } + buf = append(buf, part.Bytes...) + } + return buf, nil +} diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index 36d3ef87306..f50276ac2df 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -6,6 +6,7 @@ import ( abci "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/crypto" + "github.com/cometbft/cometbft/crypto/merkle" "github.com/cometbft/cometbft/libs/bytes" "github.com/cometbft/cometbft/p2p" cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" @@ -201,7 +202,7 @@ type ResultTx struct { Index uint32 `json:"index"` TxResult abci.ExecTxResult `json:"tx_result"` Tx types.Tx `json:"tx"` - Proof types.TxProof `json:"proof,omitempty"` + Proof types.ShareProof `json:"proof,omitempty"` } // Result of searching for txs @@ -254,3 +255,34 @@ type ResultEvent struct { Data types.TMEventData `json:"data"` Events map[string][]string `json:"events"` } + +// Single block with all data for validation +type ResultSignedBlock struct { + Header types.Header `json:"header"` + Commit types.Commit `json:"commit"` + Data types.Data `json:"data"` + ValidatorSet types.ValidatorSet `json:"validator_set"` +} + +// ResultTxStatus represents the status of a transaction during its life cycle. +// It contains info to locate a tx in a committed block as well as its execution code, log if it fails and status. +type ResultTxStatus struct { + Height int64 `json:"height"` + Index uint32 `json:"index"` + ExecutionCode uint32 `json:"execution_code"` + Error string `json:"error"` + Status string `json:"status"` +} + +type ResultDataCommitment struct { + DataCommitment bytes.HexBytes `json:"data_commitment"` +} + +type ResultDataRootInclusionProof struct { + Proof merkle.Proof `json:"proof"` +} + +// ResultShareProof is an API response that contains a ShareProof. 
+type ResultShareProof struct { + ShareProof types.ShareProof `json:"share_proof"` +} diff --git a/rpc/grpc/api.go b/rpc/grpc/api.go index d237953b1a6..3d9bacb5ce6 100644 --- a/rpc/grpc/api.go +++ b/rpc/grpc/api.go @@ -2,10 +2,20 @@ package coregrpc import ( "context" + "errors" + fmt "fmt" + "sync" + time "time" abci "github.com/cometbft/cometbft/abci/types" + "github.com/cometbft/cometbft/crypto/encoding" + "github.com/cometbft/cometbft/libs/pubsub" + "github.com/cometbft/cometbft/libs/rand" + crypto "github.com/cometbft/cometbft/proto/tendermint/crypto" + "github.com/cometbft/cometbft/proto/tendermint/types" core "github.com/cometbft/cometbft/rpc/core" rpctypes "github.com/cometbft/cometbft/rpc/jsonrpc/types" + eventstypes "github.com/cometbft/cometbft/types" ) type broadcastAPI struct { @@ -38,3 +48,391 @@ func (bapi *broadcastAPI) BroadcastTx(_ context.Context, req *RequestBroadcastTx }, }, nil } + +type BlockAPI struct { + env *core.Environment + sync.Mutex + heightListeners map[chan SubscribeNewHeightsResponse]struct{} + newBlockSubscription eventstypes.Subscription + subscriptionID string + subscriptionQuery pubsub.Query +} + +func NewBlockAPI(env *core.Environment) *BlockAPI { + return &BlockAPI{ + env: env, + heightListeners: make(map[chan SubscribeNewHeightsResponse]struct{}, 1000), + subscriptionID: fmt.Sprintf("block-api-subscription-%s", rand.Str(6)), + subscriptionQuery: eventstypes.EventQueryNewBlock, + } +} + +func (blockAPI *BlockAPI) StartNewBlockEventListener(ctx context.Context) error { + if blockAPI.newBlockSubscription == nil { + var err error + blockAPI.newBlockSubscription, err = blockAPI.env.EventBus.Subscribe( + ctx, + blockAPI.subscriptionID, + blockAPI.subscriptionQuery, + 500, + ) + if err != nil { + blockAPI.env.Logger.Error("Failed to subscribe to new blocks", "err", err) + return err + } + } + for { + select { + case <-ctx.Done(): + return nil + case <-blockAPI.newBlockSubscription.Canceled(): + blockAPI.env.Logger.Error("canceled 
grpc subscription. retrying") + ok, err := blockAPI.retryNewBlocksSubscription(ctx) + if err != nil { + return err + } + if !ok { + // this will happen when the context is done. we can stop here + return nil + } + case event, ok := <-blockAPI.newBlockSubscription.Out(): + if !ok { + blockAPI.env.Logger.Error("new blocks subscription closed. re-subscribing") + ok, err := blockAPI.retryNewBlocksSubscription(ctx) + if err != nil { + return err + } + if !ok { + // this will happen when the context is done. we can stop here + return nil + } + continue + } + newBlockEvent, ok := event.Events()[eventstypes.EventTypeKey] + if !ok || len(newBlockEvent) == 0 || newBlockEvent[0] != eventstypes.EventNewBlock { + continue + } + data, ok := event.Data().(eventstypes.EventDataNewBlock) + if !ok { + blockAPI.env.Logger.Error("couldn't cast event data to new block") + return fmt.Errorf("couldn't cast event data to new block. Events: %s", event.Events()) + } + blockAPI.broadcastToListeners(ctx, data.Block.Height, data.Block.Hash()) + } + } +} + +// RetryAttempts the number of retry times when the subscription is closed. +const RetryAttempts = 6 + +// SubscriptionCapacity the maximum number of pending blocks in the subscription. +const SubscriptionCapacity = 500 + +func (blockAPI *BlockAPI) retryNewBlocksSubscription(ctx context.Context) (bool, error) { + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + blockAPI.Lock() + defer blockAPI.Unlock() + for i := 1; i < RetryAttempts; i++ { + select { + case <-ctx.Done(): + return false, nil + case <-ticker.C: + var err error + blockAPI.newBlockSubscription, err = blockAPI.env.EventBus.Subscribe( + ctx, + fmt.Sprintf("block-api-subscription-%s", rand.Str(6)), + blockAPI.subscriptionQuery, + SubscriptionCapacity, + ) + if err != nil { + blockAPI.env.Logger.Error("Failed to subscribe to new blocks. 
retrying", "err", err, "retry_number", i) + } else { + return true, nil + } + } + } + return false, errors.New("couldn't recover from failed blocks subscription. stopping listeners") +} + +func (blockAPI *BlockAPI) broadcastToListeners(ctx context.Context, height int64, hash []byte) { + blockAPI.Lock() + defer blockAPI.Unlock() + for ch := range blockAPI.heightListeners { + func() { + defer func() { + if r := recover(); r != nil { + // logging the error then removing the heights listener + blockAPI.env.Logger.Debug("failed to write to heights listener", "err", r) + blockAPI.removeHeightListener(ch) + } + }() + select { + case <-ctx.Done(): + return + case ch <- SubscribeNewHeightsResponse{Height: height, Hash: hash}: + } + }() + } +} + +func (blockAPI *BlockAPI) addHeightListener() chan SubscribeNewHeightsResponse { + blockAPI.Lock() + defer blockAPI.Unlock() + ch := make(chan SubscribeNewHeightsResponse, 50) + blockAPI.heightListeners[ch] = struct{}{} + return ch +} + +func (blockAPI *BlockAPI) removeHeightListener(ch chan SubscribeNewHeightsResponse) { + blockAPI.Lock() + defer blockAPI.Unlock() + delete(blockAPI.heightListeners, ch) +} + +func (blockAPI *BlockAPI) closeAllListeners() { + blockAPI.Lock() + defer blockAPI.Unlock() + if blockAPI.heightListeners == nil { + // if this is nil, then there is no need to close anything + return + } + for channel := range blockAPI.heightListeners { + delete(blockAPI.heightListeners, channel) + } +} + +// Stop cleans up the BlockAPI instance by closing all listeners +// and ensuring no further events are processed. 
+func (blockAPI *BlockAPI) Stop(ctx context.Context) error { + blockAPI.Lock() + defer blockAPI.Unlock() + + // close all height listeners + blockAPI.closeAllListeners() + + var err error + // stop the events subscription + if blockAPI.newBlockSubscription != nil { + err = blockAPI.env.EventBus.Unsubscribe(ctx, blockAPI.subscriptionID, blockAPI.subscriptionQuery) + blockAPI.newBlockSubscription = nil + } + + blockAPI.env.Logger.Info("gRPC streaming API has been stopped") + return err +} + +func (blockAPI *BlockAPI) BlockByHash(req *BlockByHashRequest, stream BlockAPIService_BlockByHashServer) error { + blockStore := blockAPI.env.BlockStore + blockMeta := blockStore.LoadBlockMetaByHash(req.Hash) + if blockMeta == nil { + return fmt.Errorf("nil block meta for block hash %d", req.Hash) + } + commit := blockStore.LoadBlockCommit(blockMeta.Header.Height) + if commit == nil { + return fmt.Errorf("nil commit for block hash %d", req.Hash) + } + protoCommit := commit.ToProto() + + validatorSet, err := blockAPI.env.StateStore.LoadValidators(blockMeta.Header.Height) + if err != nil { + return err + } + protoValidatorSet, err := validatorSet.ToProto() + if err != nil { + return err + } + + for i := 0; i < int(blockMeta.BlockID.PartSetHeader.Total); i++ { + part, err := blockStore.LoadBlockPart(blockMeta.Header.Height, i).ToProto() + if err != nil { + return err + } + if part == nil { + return fmt.Errorf("nil block part %d for block hash %d", i, req.Hash) + } + if !req.Prove { + part.Proof = crypto.Proof{} + } + isLastPart := i == int(blockMeta.BlockID.PartSetHeader.Total)-1 + resp := BlockByHashResponse{ + BlockPart: part, + IsLast: isLastPart, + } + if i == 0 { + resp.ValidatorSet = protoValidatorSet + resp.Commit = protoCommit + } + err = stream.Send(&resp) + if err != nil { + return err + } + } + return nil +} + +func (blockAPI *BlockAPI) BlockByHeight(req *BlockByHeightRequest, stream BlockAPIService_BlockByHeightServer) error { + blockStore := blockAPI.env.BlockStore + 
height := req.Height + if height == 0 { + height = blockStore.Height() + } + + blockMeta := blockStore.LoadBlockMeta(height) + if blockMeta == nil { + return fmt.Errorf("nil block meta for height %d", height) + } + + commit := blockStore.LoadSeenCommit(height) + if commit == nil { + return fmt.Errorf("nil block commit for height %d", height) + } + protoCommit := commit.ToProto() + + validatorSet, err := blockAPI.env.StateStore.LoadValidators(height) + if err != nil { + return err + } + protoValidatorSet, err := validatorSet.ToProto() + if err != nil { + return err + } + + for i := 0; i < int(blockMeta.BlockID.PartSetHeader.Total); i++ { + part, err := blockStore.LoadBlockPart(height, i).ToProto() + if err != nil { + return err + } + if part == nil { + return fmt.Errorf("nil block part %d for height %d", i, height) + } + if !req.Prove { + part.Proof = crypto.Proof{} + } + isLastPart := i == int(blockMeta.BlockID.PartSetHeader.Total)-1 + resp := BlockByHeightResponse{ + BlockPart: part, + IsLast: isLastPart, + } + if i == 0 { + resp.ValidatorSet = protoValidatorSet + resp.Commit = protoCommit + } + err = stream.Send(&resp) + if err != nil { + return err + } + } + return nil +} + +func (blockAPI *BlockAPI) Status(_ context.Context, _ *StatusRequest) (*StatusResponse, error) { + status, err := blockAPI.env.Status(nil) + if err != nil { + return nil, err + } + + protoPubKey, err := encoding.PubKeyToProto(status.ValidatorInfo.PubKey) + if err != nil { + return nil, err + } + return &StatusResponse{ + NodeInfo: status.NodeInfo.ToProto(), + SyncInfo: &SyncInfo{ + LatestBlockHash: status.SyncInfo.LatestBlockHash, + LatestAppHash: status.SyncInfo.LatestAppHash, + LatestBlockHeight: status.SyncInfo.LatestBlockHeight, + LatestBlockTime: status.SyncInfo.LatestBlockTime, + EarliestBlockHash: status.SyncInfo.EarliestBlockHash, + EarliestAppHash: status.SyncInfo.EarliestAppHash, + EarliestBlockHeight: status.SyncInfo.EarliestBlockHeight, + EarliestBlockTime: 
status.SyncInfo.EarliestBlockTime, + CatchingUp: status.SyncInfo.CatchingUp, + }, + ValidatorInfo: &ValidatorInfo{ + Address: status.ValidatorInfo.Address, + PubKey: &protoPubKey, + VotingPower: status.ValidatorInfo.VotingPower, + }, + }, nil +} + +func (blockAPI *BlockAPI) Commit(_ context.Context, req *CommitRequest) (*CommitResponse, error) { + blockStore := blockAPI.env.BlockStore + height := req.Height + if height == 0 { + height = blockStore.Height() + } + commit := blockStore.LoadSeenCommit(height) + if commit == nil { + return nil, fmt.Errorf("nil block commit for height %d", height) + } + protoCommit := commit.ToProto() + + return &CommitResponse{ + Commit: &types.Commit{ + Height: protoCommit.Height, + Round: protoCommit.Round, + BlockID: protoCommit.BlockID, + Signatures: protoCommit.Signatures, + }, + }, nil +} + +func (blockAPI *BlockAPI) ValidatorSet(_ context.Context, req *ValidatorSetRequest) (*ValidatorSetResponse, error) { + blockStore := blockAPI.env.BlockStore + height := req.Height + if height == 0 { + height = blockStore.Height() + } + validatorSet, err := blockAPI.env.StateStore.LoadValidators(height) + if err != nil { + return nil, err + } + protoValidatorSet, err := validatorSet.ToProto() + if err != nil { + return nil, err + } + return &ValidatorSetResponse{ + ValidatorSet: protoValidatorSet, + Height: height, + }, nil +} + +func (blockAPI *BlockAPI) SubscribeNewHeights(_ *SubscribeNewHeightsRequest, stream BlockAPIService_SubscribeNewHeightsServer) error { + heightListener := blockAPI.addHeightListener() + defer blockAPI.removeHeightListener(heightListener) + + for { + select { + case event, ok := <-heightListener: + if !ok { + return errors.New("blocks subscription closed from the service side") + } + if err := stream.Send(&event); err != nil { + return err + } + case <-stream.Context().Done(): + return nil + } + } +} + +type BlobstreamAPI struct { + env *core.Environment +} + +func NewBlobstreamAPI(env *core.Environment) *BlobstreamAPI 
{ + return &BlobstreamAPI{env: env} +} + +func (blobAPI *BlobstreamAPI) DataRootInclusionProof(_ context.Context, req *DataRootInclusionProofRequest) (*DataRootInclusionProofResponse, error) { + proof, err := blobAPI.env.GenerateDataRootInclusionProof(req.Height, req.Start, req.End) + if err != nil { + return nil, err + } + + return &DataRootInclusionProofResponse{ + Proof: *proof.ToProto(), + }, nil +} diff --git a/rpc/grpc/client_server.go b/rpc/grpc/client_server.go index b2105e84120..097b661d583 100644 --- a/rpc/grpc/client_server.go +++ b/rpc/grpc/client_server.go @@ -5,6 +5,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" cmtnet "github.com/cometbft/cometbft/libs/net" "github.com/cometbft/cometbft/rpc/core" @@ -25,7 +26,33 @@ type Config struct { func StartGRPCServer(env *core.Environment, ln net.Listener) error { grpcServer := grpc.NewServer() RegisterBroadcastAPIServer(grpcServer, &broadcastAPI{env: env}) - return grpcServer.Serve(ln) + + // block api + api := NewBlockAPI(env) + RegisterBlockAPIServiceServer(grpcServer, api) + + // blobstream api + blobstreamAPI := NewBlobstreamAPI(env) + RegisterBlobstreamAPIServer(grpcServer, blobstreamAPI) + + errCh := make(chan error, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + errCh <- api.StartNewBlockEventListener(ctx) + }() + go func() { + errCh <- grpcServer.Serve(ln) + }() + defer grpcServer.GracefulStop() + defer func(api *BlockAPI, ctx context.Context) { + err := api.Stop(ctx) + if err != nil { + env.Logger.Error("error stopping block api", "err", err) + } + }(api, ctx) + // blocks until one errors or returns nil + return <-errCh } // StartGRPCClient dials the gRPC server using protoAddr and returns a new @@ -33,7 +60,7 @@ func StartGRPCServer(env *core.Environment, ln net.Listener) error { // // Deprecated: A new gRPC API will be introduced after v0.38. 
func StartGRPCClient(protoAddr string) BroadcastAPIClient { - conn, err := grpc.Dial(protoAddr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc)) + conn, err := grpc.Dial(protoAddr, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(dialerFunc)) if err != nil { panic(err) } @@ -43,3 +70,37 @@ func StartGRPCClient(protoAddr string) BroadcastAPIClient { func dialerFunc(_ context.Context, addr string) (net.Conn, error) { return cmtnet.Connect(addr) } + +// StartBlockAPIGRPCClient dials the gRPC server using protoAddr and returns a new +// BlockAPIClient. +func StartBlockAPIGRPCClient(protoAddr string, opts ...grpc.DialOption) (BlockAPIServiceClient, error) { + if len(opts) == 0 { + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) + } + opts = append(opts, grpc.WithContextDialer(dialerFunc)) + conn, err := grpc.Dial( //nolint:staticcheck + protoAddr, + opts..., + ) + if err != nil { + return nil, err + } + return NewBlockAPIServiceClient(conn), nil +} + +// StartBlobstreamAPIGRPCClient dials the gRPC server using protoAddr and returns a new +// BlobstreamAPIClient. 
+func StartBlobstreamAPIGRPCClient(protoAddr string, opts ...grpc.DialOption) (BlobstreamAPIClient, error) { + if len(opts) == 0 { + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) + } + opts = append(opts, grpc.WithContextDialer(dialerFunc)) + conn, err := grpc.Dial( //nolint:staticcheck + protoAddr, + opts..., + ) + if err != nil { + return nil, err + } + return NewBlobstreamAPIClient(conn), nil +} diff --git a/rpc/grpc/types.pb.go b/rpc/grpc/types.pb.go index c27afb4e246..1ab1e08e452 100644 --- a/rpc/grpc/types.pb.go +++ b/rpc/grpc/types.pb.go @@ -7,20 +7,28 @@ import ( context "context" fmt "fmt" types "github.com/cometbft/cometbft/abci/types" + crypto "github.com/cometbft/cometbft/proto/tendermint/crypto" + p2p "github.com/cometbft/cometbft/proto/tendermint/p2p" + types1 "github.com/cometbft/cometbft/proto/tendermint/types" + _ "github.com/cosmos/gogoproto/gogoproto" grpc1 "github.com/cosmos/gogoproto/grpc" proto "github.com/cosmos/gogoproto/proto" + _ "github.com/cosmos/gogoproto/types" + github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" io "io" math "math" math_bits "math/bits" + time "time" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +var _ = time.Kitchen // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. 
@@ -196,374 +204,4557 @@ func (m *ResponseBroadcastTx) GetTxResult() *types.ExecTxResult { return nil } -func init() { - proto.RegisterType((*RequestPing)(nil), "tendermint.rpc.grpc.RequestPing") - proto.RegisterType((*RequestBroadcastTx)(nil), "tendermint.rpc.grpc.RequestBroadcastTx") - proto.RegisterType((*ResponsePing)(nil), "tendermint.rpc.grpc.ResponsePing") - proto.RegisterType((*ResponseBroadcastTx)(nil), "tendermint.rpc.grpc.ResponseBroadcastTx") +// BlockByHashRequest is a request to get a block by its hash. +type BlockByHashRequest struct { + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + Prove bool `protobuf:"varint,2,opt,name=prove,proto3" json:"prove,omitempty"` } -func init() { proto.RegisterFile("tendermint/rpc/grpc/types.proto", fileDescriptor_0ffff5682c662b95) } - -var fileDescriptor_0ffff5682c662b95 = []byte{ - // 324 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0x31, 0x4f, 0x02, 0x31, - 0x14, 0xc7, 0x29, 0x31, 0x8a, 0x05, 0x19, 0xca, 0x42, 0x30, 0x9e, 0x48, 0x4c, 0x64, 0x2a, 0x09, - 0x6e, 0x32, 0x89, 0x31, 0xd1, 0xb8, 0x90, 0x86, 0xc9, 0x05, 0xb9, 0xf2, 0x84, 0x8b, 0x72, 0x3d, - 0xdb, 0x47, 0x52, 0xbf, 0x84, 0xf1, 0x0b, 0xb9, 0x3b, 0x32, 0x3a, 0x1a, 0xf8, 0x22, 0xa6, 0x27, - 0x27, 0x35, 0x46, 0x96, 0xe6, 0xdf, 0xe6, 0xff, 0x7b, 0xfd, 0xbf, 0xd7, 0xd2, 0x43, 0x84, 0x78, - 0x04, 0x7a, 0x1a, 0xc5, 0xd8, 0xd2, 0x89, 0x6c, 0x8d, 0xdd, 0x82, 0xcf, 0x09, 0x18, 0x9e, 0x68, - 0x85, 0x8a, 0x55, 0xd6, 0x06, 0xae, 0x13, 0xc9, 0x9d, 0xa1, 0xb6, 0xef, 0x51, 0xc3, 0x50, 0x46, - 0x3e, 0xd1, 0xd8, 0xa3, 0x45, 0x01, 0x4f, 0x33, 0x30, 0xd8, 0x8b, 0xe2, 0x71, 0xe3, 0x98, 0xb2, - 0xd5, 0xb6, 0xab, 0xd5, 0x70, 0x24, 0x87, 0x06, 0xfb, 0x96, 0x95, 0x69, 0x1e, 0x6d, 0x95, 0xd4, - 0x49, 0xb3, 0x24, 0xf2, 0x68, 0x1b, 0x65, 0x5a, 0x12, 0x60, 0x12, 0x15, 0x1b, 0x48, 0xa9, 0x17, - 0x42, 0x2b, 0xd9, 0x81, 0xcf, 0x75, 0x68, 0x41, 0x4e, 0x40, 0x3e, 0x0c, 0x56, 0x74, 0xb1, 0x5d, - 
0xe7, 0x5e, 0x42, 0x17, 0x86, 0x67, 0xdc, 0x85, 0x33, 0xf6, 0xad, 0xd8, 0x91, 0xdf, 0x82, 0x9d, - 0xd1, 0x5d, 0xb4, 0x03, 0x0d, 0x66, 0xf6, 0x88, 0xd5, 0x7c, 0x4a, 0x1f, 0xfc, 0xa1, 0x2f, 0x2d, - 0xc8, 0xbe, 0x15, 0xa9, 0x49, 0x14, 0x70, 0xa5, 0xda, 0x6f, 0x84, 0x96, 0x7e, 0x82, 0x9c, 0xf7, - 0xae, 0xd9, 0x0d, 0xdd, 0x72, 0x49, 0xd9, 0xaf, 0xfb, 0xb3, 0x09, 0x71, 0x6f, 0x02, 0xb5, 0xa3, - 0x7f, 0x1c, 0xeb, 0x76, 0xd9, 0x1d, 0x2d, 0xfa, 0x5d, 0x9e, 0x6c, 0xaa, 0xe9, 0x19, 0x6b, 0xcd, - 0x8d, 0xa5, 0x3d, 0x67, 0xf7, 0xea, 0x7d, 0x11, 0x90, 0xf9, 0x22, 0x20, 0x9f, 0x8b, 0x80, 0xbc, - 0x2e, 0x83, 0xdc, 0x7c, 0x19, 0xe4, 0x3e, 0x96, 0x41, 0xee, 0x96, 0x8f, 0x23, 0x9c, 0xcc, 0x42, - 0x2e, 0xd5, 0xb4, 0x25, 0xd5, 0x14, 0x30, 0xbc, 0xc7, 0xb5, 0xc8, 0x3e, 0x45, 0x47, 0x2a, 0x0d, - 0x4e, 0x84, 0xdb, 0xe9, 0x33, 0x9f, 0x7e, 0x05, 0x00, 0x00, 0xff, 0xff, 0x0c, 0xca, 0xdb, 0xe7, - 0x3b, 0x02, 0x00, 0x00, +func (m *BlockByHashRequest) Reset() { *m = BlockByHashRequest{} } +func (m *BlockByHashRequest) String() string { return proto.CompactTextString(m) } +func (*BlockByHashRequest) ProtoMessage() {} +func (*BlockByHashRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{4} } - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// BroadcastAPIClient is the client API for BroadcastAPI service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type BroadcastAPIClient interface { - Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) - BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) +func (m *BlockByHashRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - -type broadcastAPIClient struct { - cc grpc1.ClientConn +func (m *BlockByHashRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockByHashRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } - -func NewBroadcastAPIClient(cc grpc1.ClientConn) BroadcastAPIClient { - return &broadcastAPIClient{cc} +func (m *BlockByHashRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockByHashRequest.Merge(m, src) +} +func (m *BlockByHashRequest) XXX_Size() int { + return m.Size() +} +func (m *BlockByHashRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BlockByHashRequest.DiscardUnknown(m) } -func (c *broadcastAPIClient) Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) { - out := new(ResponsePing) - err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/Ping", in, out, opts...) - if err != nil { - return nil, err +var xxx_messageInfo_BlockByHashRequest proto.InternalMessageInfo + +func (m *BlockByHashRequest) GetHash() []byte { + if m != nil { + return m.Hash } - return out, nil + return nil } -func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) { - out := new(ResponseBroadcastTx) - err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx", in, out, opts...) 
- if err != nil { - return nil, err +func (m *BlockByHashRequest) GetProve() bool { + if m != nil { + return m.Prove } - return out, nil + return false } -// BroadcastAPIServer is the server API for BroadcastAPI service. -type BroadcastAPIServer interface { - Ping(context.Context, *RequestPing) (*ResponsePing, error) - BroadcastTx(context.Context, *RequestBroadcastTx) (*ResponseBroadcastTx, error) +// BlockByHeightRequest is a request to get a block by its height. +type BlockByHeightRequest struct { + // Height the requested block height. + // If height is equal to 0, the latest height stored in the block store + // will be used. + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + // Prove set to true to return the parts proofs. + Prove bool `protobuf:"varint,2,opt,name=prove,proto3" json:"prove,omitempty"` } -// UnimplementedBroadcastAPIServer can be embedded to have forward compatible implementations. -type UnimplementedBroadcastAPIServer struct { +func (m *BlockByHeightRequest) Reset() { *m = BlockByHeightRequest{} } +func (m *BlockByHeightRequest) String() string { return proto.CompactTextString(m) } +func (*BlockByHeightRequest) ProtoMessage() {} +func (*BlockByHeightRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{5} } - -func (*UnimplementedBroadcastAPIServer) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) { - return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") +func (m *BlockByHeightRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } -func (*UnimplementedBroadcastAPIServer) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { - return nil, status.Errorf(codes.Unimplemented, "method BroadcastTx not implemented") +func (m *BlockByHeightRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockByHeightRequest.Marshal(b, m, 
deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } - -func RegisterBroadcastAPIServer(s grpc1.Server, srv BroadcastAPIServer) { - s.RegisterService(&_BroadcastAPI_serviceDesc, srv) +func (m *BlockByHeightRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockByHeightRequest.Merge(m, src) +} +func (m *BlockByHeightRequest) XXX_Size() int { + return m.Size() +} +func (m *BlockByHeightRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BlockByHeightRequest.DiscardUnknown(m) } -func _BroadcastAPI_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestPing) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BroadcastAPIServer).Ping(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/Ping", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BroadcastAPIServer).Ping(ctx, req.(*RequestPing)) +var xxx_messageInfo_BlockByHeightRequest proto.InternalMessageInfo + +func (m *BlockByHeightRequest) GetHeight() int64 { + if m != nil { + return m.Height } - return interceptor(ctx, in, info, handler) + return 0 } -func _BroadcastAPI_BroadcastTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestBroadcastTx) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BroadcastAPIServer).BroadcastTx(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BroadcastAPIServer).BroadcastTx(ctx, req.(*RequestBroadcastTx)) +func (m 
*BlockByHeightRequest) GetProve() bool { + if m != nil { + return m.Prove } - return interceptor(ctx, in, info, handler) + return false } -var BroadcastAPI_serviceDesc = _BroadcastAPI_serviceDesc -var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{ - ServiceName: "tendermint.rpc.grpc.BroadcastAPI", - HandlerType: (*BroadcastAPIServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Ping", - Handler: _BroadcastAPI_Ping_Handler, - }, - { - MethodName: "BroadcastTx", - Handler: _BroadcastAPI_BroadcastTx_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "tendermint/rpc/grpc/types.proto", +// CommitRequest is a request to get the commit of a block. +type CommitRequest struct { + // Height the requested block commit height. + // If height is equal to 0, the latest height stored in the block store + // will be used. + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` } -func (m *RequestPing) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *CommitRequest) Reset() { *m = CommitRequest{} } +func (m *CommitRequest) String() string { return proto.CompactTextString(m) } +func (*CommitRequest) ProtoMessage() {} +func (*CommitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{6} +} +func (m *CommitRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CommitRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return dAtA[:n], nil } - -func (m *RequestPing) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *CommitRequest) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_CommitRequest.Merge(m, src) } - -func (m *RequestPing) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil +func (m *CommitRequest) XXX_Size() int { + return m.Size() +} +func (m *CommitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CommitRequest.DiscardUnknown(m) } -func (m *RequestBroadcastTx) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +var xxx_messageInfo_CommitRequest proto.InternalMessageInfo + +func (m *CommitRequest) GetHeight() int64 { + if m != nil { + return m.Height } - return dAtA[:n], nil + return 0 } -func (m *RequestBroadcastTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// ValidatorSetRequest is a request to get the validator set of a block. +type ValidatorSetRequest struct { + // Height the requested validator set height. + // If height is equal to 0, the latest height stored in the block store + // will be used. 
+ Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` } -func (m *RequestBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Tx) > 0 { - i -= len(m.Tx) - copy(dAtA[i:], m.Tx) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) - i-- - dAtA[i] = 0xa +func (m *ValidatorSetRequest) Reset() { *m = ValidatorSetRequest{} } +func (m *ValidatorSetRequest) String() string { return proto.CompactTextString(m) } +func (*ValidatorSetRequest) ProtoMessage() {} +func (*ValidatorSetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{7} +} +func (m *ValidatorSetRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatorSetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValidatorSetRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return len(dAtA) - i, nil +} +func (m *ValidatorSetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorSetRequest.Merge(m, src) +} +func (m *ValidatorSetRequest) XXX_Size() int { + return m.Size() +} +func (m *ValidatorSetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorSetRequest.DiscardUnknown(m) } -func (m *ResponsePing) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +var xxx_messageInfo_ValidatorSetRequest proto.InternalMessageInfo + +func (m *ValidatorSetRequest) GetHeight() int64 { + if m != nil { + return m.Height } - return dAtA[:n], nil + return 0 } -func (m *ResponsePing) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// SubscribeNewHeightsRequest is a request to subscribe to new heights. 
+type SubscribeNewHeightsRequest struct { } -func (m *ResponsePing) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil +func (m *SubscribeNewHeightsRequest) Reset() { *m = SubscribeNewHeightsRequest{} } +func (m *SubscribeNewHeightsRequest) String() string { return proto.CompactTextString(m) } +func (*SubscribeNewHeightsRequest) ProtoMessage() {} +func (*SubscribeNewHeightsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{8} } - -func (m *ResponseBroadcastTx) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *SubscribeNewHeightsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubscribeNewHeightsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SubscribeNewHeightsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return dAtA[:n], nil +} +func (m *SubscribeNewHeightsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubscribeNewHeightsRequest.Merge(m, src) +} +func (m *SubscribeNewHeightsRequest) XXX_Size() int { + return m.Size() +} +func (m *SubscribeNewHeightsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SubscribeNewHeightsRequest.DiscardUnknown(m) } -func (m *ResponseBroadcastTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +var xxx_messageInfo_SubscribeNewHeightsRequest proto.InternalMessageInfo + +// StatusRequest is a request to get the status of the node. 
+type StatusRequest struct { } -func (m *ResponseBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.TxResult != nil { - { - size, err := m.TxResult.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) +func (m *StatusRequest) Reset() { *m = StatusRequest{} } +func (m *StatusRequest) String() string { return proto.CompactTextString(m) } +func (*StatusRequest) ProtoMessage() {} +func (*StatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{9} +} +func (m *StatusRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i-- - dAtA[i] = 0x12 + return b[:n], nil } - if m.CheckTx != nil { - { - size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) +} +func (m *StatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusRequest.Merge(m, src) +} +func (m *StatusRequest) XXX_Size() int { + return m.Size() +} +func (m *StatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusRequest proto.InternalMessageInfo + +// BlockByHashResponse is a response to a BlockByHashRequest. +type BlockByHashResponse struct { + BlockPart *types1.Part `protobuf:"bytes,1,opt,name=block_part,json=blockPart,proto3" json:"block_part,omitempty"` + // Commit is only set in the first part, and + // it stays nil in the remaining ones. 
+ Commit *types1.Commit `protobuf:"bytes,2,opt,name=commit,proto3" json:"commit,omitempty"` + // ValidatorSet is only set in the first part, and + // it stays nil in the remaining ones. + ValidatorSet *types1.ValidatorSet `protobuf:"bytes,3,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` + IsLast bool `protobuf:"varint,4,opt,name=is_last,json=isLast,proto3" json:"is_last,omitempty"` +} + +func (m *BlockByHashResponse) Reset() { *m = BlockByHashResponse{} } +func (m *BlockByHashResponse) String() string { return proto.CompactTextString(m) } +func (*BlockByHashResponse) ProtoMessage() {} +func (*BlockByHashResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{10} +} +func (m *BlockByHashResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockByHashResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockByHashResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i-- - dAtA[i] = 0xa + return b[:n], nil } - return len(dAtA) - i, nil +} +func (m *BlockByHashResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockByHashResponse.Merge(m, src) +} +func (m *BlockByHashResponse) XXX_Size() int { + return m.Size() +} +func (m *BlockByHashResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BlockByHashResponse.DiscardUnknown(m) } -func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { - offset -= sovTypes(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +var xxx_messageInfo_BlockByHashResponse proto.InternalMessageInfo + +func (m *BlockByHashResponse) GetBlockPart() *types1.Part { + if m != nil { + return m.BlockPart } - dAtA[offset] = uint8(v) - return base + return nil } -func (m *RequestPing) Size() (n int) { - if m == nil { - return 0 + +func (m 
*BlockByHashResponse) GetCommit() *types1.Commit { + if m != nil { + return m.Commit } - var l int - _ = l - return n + return nil } -func (m *RequestBroadcastTx) Size() (n int) { - if m == nil { - return 0 +func (m *BlockByHashResponse) GetValidatorSet() *types1.ValidatorSet { + if m != nil { + return m.ValidatorSet } - var l int - _ = l - l = len(m.Tx) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + return nil +} + +func (m *BlockByHashResponse) GetIsLast() bool { + if m != nil { + return m.IsLast } - return n + return false } -func (m *ResponsePing) Size() (n int) { - if m == nil { - return 0 +// BlockByHeightResponse is a response to a BlockByHeightRequest. +type BlockByHeightResponse struct { + BlockPart *types1.Part `protobuf:"bytes,1,opt,name=block_part,json=blockPart,proto3" json:"block_part,omitempty"` + // Commit is only set in the first part, and + // it stays nil in the remaining ones. + Commit *types1.Commit `protobuf:"bytes,2,opt,name=commit,proto3" json:"commit,omitempty"` + // ValidatorSet is only set in the first part, and + // it stays nil in the remaining ones. 
+ ValidatorSet *types1.ValidatorSet `protobuf:"bytes,3,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` + IsLast bool `protobuf:"varint,4,opt,name=is_last,json=isLast,proto3" json:"is_last,omitempty"` +} + +func (m *BlockByHeightResponse) Reset() { *m = BlockByHeightResponse{} } +func (m *BlockByHeightResponse) String() string { return proto.CompactTextString(m) } +func (*BlockByHeightResponse) ProtoMessage() {} +func (*BlockByHeightResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{11} +} +func (m *BlockByHeightResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockByHeightResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockByHeightResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - var l int - _ = l - return n +} +func (m *BlockByHeightResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockByHeightResponse.Merge(m, src) +} +func (m *BlockByHeightResponse) XXX_Size() int { + return m.Size() +} +func (m *BlockByHeightResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BlockByHeightResponse.DiscardUnknown(m) } -func (m *ResponseBroadcastTx) Size() (n int) { - if m == nil { - return 0 +var xxx_messageInfo_BlockByHeightResponse proto.InternalMessageInfo + +func (m *BlockByHeightResponse) GetBlockPart() *types1.Part { + if m != nil { + return m.BlockPart } - var l int - _ = l - if m.CheckTx != nil { - l = m.CheckTx.Size() - n += 1 + l + sovTypes(uint64(l)) + return nil +} + +func (m *BlockByHeightResponse) GetCommit() *types1.Commit { + if m != nil { + return m.Commit } - if m.TxResult != nil { - l = m.TxResult.Size() - n += 1 + l + sovTypes(uint64(l)) + return nil +} + +func (m *BlockByHeightResponse) GetValidatorSet() *types1.ValidatorSet { + if m != nil { + return m.ValidatorSet 
} - return n + return nil } -func sovTypes(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 +func (m *BlockByHeightResponse) GetIsLast() bool { + if m != nil { + return m.IsLast + } + return false } -func sozTypes(x uint64) (n int) { - return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + +// CommitResponse is a response to a CommitRequest. +type CommitResponse struct { + Commit *types1.Commit `protobuf:"bytes,1,opt,name=commit,proto3" json:"commit,omitempty"` } -func (m *RequestPing) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + +func (m *CommitResponse) Reset() { *m = CommitResponse{} } +func (m *CommitResponse) String() string { return proto.CompactTextString(m) } +func (*CommitResponse) ProtoMessage() {} +func (*CommitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{12} +} +func (m *CommitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CommitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitResponse.Merge(m, src) +} +func (m *CommitResponse) XXX_Size() int { + return m.Size() +} +func (m *CommitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CommitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CommitResponse proto.InternalMessageInfo + +func (m *CommitResponse) GetCommit() *types1.Commit { + if m != nil { + return m.Commit + } + return nil +} + +// ValidatorSetResponse is a response to a ValidatorSetRequest. 
+type ValidatorSetResponse struct { + // ValidatorSet the requested validator set. + ValidatorSet *types1.ValidatorSet `protobuf:"bytes,1,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` + // Height the height corresponding to the returned + // validator set. + Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *ValidatorSetResponse) Reset() { *m = ValidatorSetResponse{} } +func (m *ValidatorSetResponse) String() string { return proto.CompactTextString(m) } +func (*ValidatorSetResponse) ProtoMessage() {} +func (*ValidatorSetResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{13} +} +func (m *ValidatorSetResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatorSetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValidatorSetResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ValidatorSetResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorSetResponse.Merge(m, src) +} +func (m *ValidatorSetResponse) XXX_Size() int { + return m.Size() +} +func (m *ValidatorSetResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorSetResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatorSetResponse proto.InternalMessageInfo + +func (m *ValidatorSetResponse) GetValidatorSet() *types1.ValidatorSet { + if m != nil { + return m.ValidatorSet + } + return nil +} + +func (m *ValidatorSetResponse) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +// NewHeightEvent is an event that indicates a new height. 
+type SubscribeNewHeightsResponse struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` +} + +func (m *SubscribeNewHeightsResponse) Reset() { *m = SubscribeNewHeightsResponse{} } +func (m *SubscribeNewHeightsResponse) String() string { return proto.CompactTextString(m) } +func (*SubscribeNewHeightsResponse) ProtoMessage() {} +func (*SubscribeNewHeightsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{14} +} +func (m *SubscribeNewHeightsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubscribeNewHeightsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SubscribeNewHeightsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SubscribeNewHeightsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubscribeNewHeightsResponse.Merge(m, src) +} +func (m *SubscribeNewHeightsResponse) XXX_Size() int { + return m.Size() +} +func (m *SubscribeNewHeightsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SubscribeNewHeightsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SubscribeNewHeightsResponse proto.InternalMessageInfo + +func (m *SubscribeNewHeightsResponse) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *SubscribeNewHeightsResponse) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +// StatusResponse is a response to a StatusRequest. 
+type StatusResponse struct { + NodeInfo *p2p.DefaultNodeInfo `protobuf:"bytes,1,opt,name=node_info,json=nodeInfo,proto3" json:"node_info,omitempty"` + SyncInfo *SyncInfo `protobuf:"bytes,2,opt,name=sync_info,json=syncInfo,proto3" json:"sync_info,omitempty"` + ValidatorInfo *ValidatorInfo `protobuf:"bytes,3,opt,name=validator_info,json=validatorInfo,proto3" json:"validator_info,omitempty"` +} + +func (m *StatusResponse) Reset() { *m = StatusResponse{} } +func (m *StatusResponse) String() string { return proto.CompactTextString(m) } +func (*StatusResponse) ProtoMessage() {} +func (*StatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{15} +} +func (m *StatusResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusResponse.Merge(m, src) +} +func (m *StatusResponse) XXX_Size() int { + return m.Size() +} +func (m *StatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusResponse proto.InternalMessageInfo + +func (m *StatusResponse) GetNodeInfo() *p2p.DefaultNodeInfo { + if m != nil { + return m.NodeInfo + } + return nil +} + +func (m *StatusResponse) GetSyncInfo() *SyncInfo { + if m != nil { + return m.SyncInfo + } + return nil +} + +func (m *StatusResponse) GetValidatorInfo() *ValidatorInfo { + if m != nil { + return m.ValidatorInfo + } + return nil +} + +// SyncInfo is information about the node's sync status. 
+type SyncInfo struct { + LatestBlockHash []byte `protobuf:"bytes,1,opt,name=latest_block_hash,json=latestBlockHash,proto3" json:"latest_block_hash,omitempty"` + LatestAppHash []byte `protobuf:"bytes,2,opt,name=latest_app_hash,json=latestAppHash,proto3" json:"latest_app_hash,omitempty"` + LatestBlockHeight int64 `protobuf:"varint,3,opt,name=latest_block_height,json=latestBlockHeight,proto3" json:"latest_block_height,omitempty"` + LatestBlockTime time.Time `protobuf:"bytes,4,opt,name=latest_block_time,json=latestBlockTime,proto3,stdtime" json:"latest_block_time"` + EarliestBlockHash []byte `protobuf:"bytes,5,opt,name=earliest_block_hash,json=earliestBlockHash,proto3" json:"earliest_block_hash,omitempty"` + EarliestAppHash []byte `protobuf:"bytes,6,opt,name=earliest_app_hash,json=earliestAppHash,proto3" json:"earliest_app_hash,omitempty"` + EarliestBlockHeight int64 `protobuf:"varint,7,opt,name=earliest_block_height,json=earliestBlockHeight,proto3" json:"earliest_block_height,omitempty"` + EarliestBlockTime time.Time `protobuf:"bytes,8,opt,name=earliest_block_time,json=earliestBlockTime,proto3,stdtime" json:"earliest_block_time"` + CatchingUp bool `protobuf:"varint,9,opt,name=catching_up,json=catchingUp,proto3" json:"catching_up,omitempty"` +} + +func (m *SyncInfo) Reset() { *m = SyncInfo{} } +func (m *SyncInfo) String() string { return proto.CompactTextString(m) } +func (*SyncInfo) ProtoMessage() {} +func (*SyncInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{16} +} +func (m *SyncInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SyncInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SyncInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SyncInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SyncInfo.Merge(m, src) +} 
+func (m *SyncInfo) XXX_Size() int { + return m.Size() +} +func (m *SyncInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SyncInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SyncInfo proto.InternalMessageInfo + +func (m *SyncInfo) GetLatestBlockHash() []byte { + if m != nil { + return m.LatestBlockHash + } + return nil +} + +func (m *SyncInfo) GetLatestAppHash() []byte { + if m != nil { + return m.LatestAppHash + } + return nil +} + +func (m *SyncInfo) GetLatestBlockHeight() int64 { + if m != nil { + return m.LatestBlockHeight + } + return 0 +} + +func (m *SyncInfo) GetLatestBlockTime() time.Time { + if m != nil { + return m.LatestBlockTime + } + return time.Time{} +} + +func (m *SyncInfo) GetEarliestBlockHash() []byte { + if m != nil { + return m.EarliestBlockHash + } + return nil +} + +func (m *SyncInfo) GetEarliestAppHash() []byte { + if m != nil { + return m.EarliestAppHash + } + return nil +} + +func (m *SyncInfo) GetEarliestBlockHeight() int64 { + if m != nil { + return m.EarliestBlockHeight + } + return 0 +} + +func (m *SyncInfo) GetEarliestBlockTime() time.Time { + if m != nil { + return m.EarliestBlockTime + } + return time.Time{} +} + +func (m *SyncInfo) GetCatchingUp() bool { + if m != nil { + return m.CatchingUp + } + return false +} + +// ValidatorInfo is information about a validator. 
+type ValidatorInfo struct { + Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + PubKey *crypto.PublicKey `protobuf:"bytes,2,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` + VotingPower int64 `protobuf:"varint,3,opt,name=voting_power,json=votingPower,proto3" json:"voting_power,omitempty"` +} + +func (m *ValidatorInfo) Reset() { *m = ValidatorInfo{} } +func (m *ValidatorInfo) String() string { return proto.CompactTextString(m) } +func (*ValidatorInfo) ProtoMessage() {} +func (*ValidatorInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{17} +} +func (m *ValidatorInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatorInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValidatorInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ValidatorInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorInfo.Merge(m, src) +} +func (m *ValidatorInfo) XXX_Size() int { + return m.Size() +} +func (m *ValidatorInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatorInfo proto.InternalMessageInfo + +func (m *ValidatorInfo) GetAddress() []byte { + if m != nil { + return m.Address + } + return nil +} + +func (m *ValidatorInfo) GetPubKey() *crypto.PublicKey { + if m != nil { + return m.PubKey + } + return nil +} + +func (m *ValidatorInfo) GetVotingPower() int64 { + if m != nil { + return m.VotingPower + } + return 0 +} + +type DataRootInclusionProofRequest struct { + // Height the height of block we want to prove. + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + // Start the start of the data commitment range containing the block. 
+ Start uint64 `protobuf:"varint,2,opt,name=start,proto3" json:"start,omitempty"` + // End the end exclusive of the data commitment range containing the block. + End uint64 `protobuf:"varint,3,opt,name=end,proto3" json:"end,omitempty"` +} + +func (m *DataRootInclusionProofRequest) Reset() { *m = DataRootInclusionProofRequest{} } +func (m *DataRootInclusionProofRequest) String() string { return proto.CompactTextString(m) } +func (*DataRootInclusionProofRequest) ProtoMessage() {} +func (*DataRootInclusionProofRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{18} +} +func (m *DataRootInclusionProofRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DataRootInclusionProofRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DataRootInclusionProofRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DataRootInclusionProofRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DataRootInclusionProofRequest.Merge(m, src) +} +func (m *DataRootInclusionProofRequest) XXX_Size() int { + return m.Size() +} +func (m *DataRootInclusionProofRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DataRootInclusionProofRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DataRootInclusionProofRequest proto.InternalMessageInfo + +func (m *DataRootInclusionProofRequest) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *DataRootInclusionProofRequest) GetStart() uint64 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *DataRootInclusionProofRequest) GetEnd() uint64 { + if m != nil { + return m.End + } + return 0 +} + +type DataRootInclusionProofResponse struct { + Proof crypto.Proof `protobuf:"bytes,1,opt,name=proof,proto3" json:"proof"` +} + +func (m *DataRootInclusionProofResponse) Reset() { *m 
= DataRootInclusionProofResponse{} } +func (m *DataRootInclusionProofResponse) String() string { return proto.CompactTextString(m) } +func (*DataRootInclusionProofResponse) ProtoMessage() {} +func (*DataRootInclusionProofResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0ffff5682c662b95, []int{19} +} +func (m *DataRootInclusionProofResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DataRootInclusionProofResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DataRootInclusionProofResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DataRootInclusionProofResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DataRootInclusionProofResponse.Merge(m, src) +} +func (m *DataRootInclusionProofResponse) XXX_Size() int { + return m.Size() +} +func (m *DataRootInclusionProofResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DataRootInclusionProofResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DataRootInclusionProofResponse proto.InternalMessageInfo + +func (m *DataRootInclusionProofResponse) GetProof() crypto.Proof { + if m != nil { + return m.Proof + } + return crypto.Proof{} +} + +func init() { + proto.RegisterType((*RequestPing)(nil), "tendermint.rpc.grpc.RequestPing") + proto.RegisterType((*RequestBroadcastTx)(nil), "tendermint.rpc.grpc.RequestBroadcastTx") + proto.RegisterType((*ResponsePing)(nil), "tendermint.rpc.grpc.ResponsePing") + proto.RegisterType((*ResponseBroadcastTx)(nil), "tendermint.rpc.grpc.ResponseBroadcastTx") + proto.RegisterType((*BlockByHashRequest)(nil), "tendermint.rpc.grpc.BlockByHashRequest") + proto.RegisterType((*BlockByHeightRequest)(nil), "tendermint.rpc.grpc.BlockByHeightRequest") + proto.RegisterType((*CommitRequest)(nil), "tendermint.rpc.grpc.CommitRequest") + 
proto.RegisterType((*ValidatorSetRequest)(nil), "tendermint.rpc.grpc.ValidatorSetRequest") + proto.RegisterType((*SubscribeNewHeightsRequest)(nil), "tendermint.rpc.grpc.SubscribeNewHeightsRequest") + proto.RegisterType((*StatusRequest)(nil), "tendermint.rpc.grpc.StatusRequest") + proto.RegisterType((*BlockByHashResponse)(nil), "tendermint.rpc.grpc.BlockByHashResponse") + proto.RegisterType((*BlockByHeightResponse)(nil), "tendermint.rpc.grpc.BlockByHeightResponse") + proto.RegisterType((*CommitResponse)(nil), "tendermint.rpc.grpc.CommitResponse") + proto.RegisterType((*ValidatorSetResponse)(nil), "tendermint.rpc.grpc.ValidatorSetResponse") + proto.RegisterType((*SubscribeNewHeightsResponse)(nil), "tendermint.rpc.grpc.SubscribeNewHeightsResponse") + proto.RegisterType((*StatusResponse)(nil), "tendermint.rpc.grpc.StatusResponse") + proto.RegisterType((*SyncInfo)(nil), "tendermint.rpc.grpc.SyncInfo") + proto.RegisterType((*ValidatorInfo)(nil), "tendermint.rpc.grpc.ValidatorInfo") + proto.RegisterType((*DataRootInclusionProofRequest)(nil), "tendermint.rpc.grpc.DataRootInclusionProofRequest") + proto.RegisterType((*DataRootInclusionProofResponse)(nil), "tendermint.rpc.grpc.DataRootInclusionProofResponse") +} + +func init() { proto.RegisterFile("tendermint/rpc/grpc/types.proto", fileDescriptor_0ffff5682c662b95) } + +var fileDescriptor_0ffff5682c662b95 = []byte{ + // 1207 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x57, 0x4b, 0x6f, 0xdb, 0xc6, + 0x13, 0x37, 0x6d, 0x45, 0x96, 0x47, 0x92, 0xf3, 0xcf, 0xca, 0xc9, 0x5f, 0x60, 0x12, 0xc9, 0x61, + 0x8b, 0xe6, 0x01, 0x94, 0x32, 0x94, 0xe6, 0xd2, 0x14, 0x05, 0x2c, 0xbb, 0x40, 0x04, 0x17, 0x81, + 0x4a, 0xbb, 0x39, 0xf4, 0xc2, 0x92, 0xd4, 0x4a, 0x22, 0x2c, 0x71, 0x59, 0xee, 0xd2, 0x91, 0x7a, + 0x2b, 0x7a, 0x2f, 0x02, 0xf4, 0xf3, 0xf4, 0x9e, 0x63, 0x2e, 0x05, 0x7a, 0x28, 0xd2, 0xc2, 0x06, + 0xda, 0xaf, 0x51, 0xec, 0x83, 0x12, 0x69, 0x3d, 0xe2, 0xf4, 0xd8, 0x8b, 0x30, 
0x9c, 0x99, 0xdf, + 0xec, 0xcc, 0xec, 0xcc, 0xce, 0x08, 0xea, 0x0c, 0x07, 0x5d, 0x1c, 0x8d, 0xfc, 0x80, 0x35, 0xa2, + 0xd0, 0x6b, 0xf4, 0xf9, 0x0f, 0x9b, 0x84, 0x98, 0x9a, 0x61, 0x44, 0x18, 0x41, 0x95, 0x99, 0x82, + 0x19, 0x85, 0x9e, 0xc9, 0x15, 0xf4, 0xdb, 0x29, 0x94, 0xe3, 0x7a, 0x7e, 0x1a, 0xa1, 0xdf, 0x49, + 0x09, 0x05, 0x3f, 0x23, 0xd5, 0x53, 0xd2, 0xb0, 0x19, 0x2e, 0x45, 0x7a, 0xd1, 0x24, 0x64, 0xa4, + 0x71, 0x8a, 0x27, 0x89, 0xf4, 0xee, 0xbc, 0x34, 0x8c, 0x08, 0xe9, 0x29, 0xf1, 0xee, 0xdc, 0xb1, + 0x67, 0xce, 0xd0, 0xef, 0x3a, 0x8c, 0x44, 0x4a, 0xa3, 0xde, 0x27, 0xa4, 0x3f, 0xc4, 0x0d, 0xf1, + 0xe5, 0xc6, 0xbd, 0x06, 0xf3, 0x47, 0x98, 0x32, 0x67, 0x14, 0x2a, 0x85, 0x9d, 0x3e, 0xe9, 0x13, + 0x41, 0x36, 0x38, 0x25, 0xb9, 0x46, 0x19, 0x8a, 0x16, 0xfe, 0x2e, 0xc6, 0x94, 0x75, 0xfc, 0xa0, + 0x6f, 0x7c, 0x08, 0x48, 0x7d, 0xb6, 0x22, 0xe2, 0x74, 0x3d, 0x87, 0xb2, 0x93, 0x31, 0xda, 0x86, + 0x75, 0x36, 0xae, 0x6a, 0xbb, 0xda, 0x83, 0x92, 0xb5, 0xce, 0xc6, 0xc6, 0x36, 0x94, 0x2c, 0x4c, + 0x43, 0x12, 0x50, 0x2c, 0x50, 0x3f, 0x69, 0x50, 0x49, 0x18, 0x69, 0xdc, 0x53, 0x28, 0x78, 0x03, + 0xec, 0x9d, 0xda, 0x0a, 0x5d, 0x6c, 0xee, 0x9a, 0xa9, 0x8c, 0xf3, 0xe4, 0x9a, 0x09, 0xee, 0x80, + 0x2b, 0x9e, 0x8c, 0xad, 0x4d, 0x4f, 0x12, 0xe8, 0x53, 0xd8, 0x62, 0x63, 0x3b, 0xc2, 0x34, 0x1e, + 0xb2, 0xea, 0xba, 0x40, 0xdf, 0x9d, 0x43, 0x7f, 0x31, 0xc6, 0xde, 0xc9, 0xd8, 0x12, 0x4a, 0x56, + 0x81, 0x29, 0xca, 0xf8, 0x1c, 0x50, 0x6b, 0x48, 0xbc, 0xd3, 0xd6, 0xe4, 0x99, 0x43, 0x07, 0x2a, + 0x22, 0x84, 0x20, 0x37, 0x70, 0xe8, 0x40, 0x05, 0x22, 0x68, 0xb4, 0x03, 0xd7, 0xc2, 0x88, 0x9c, + 0x61, 0x71, 0x42, 0xc1, 0x92, 0x1f, 0xc6, 0x21, 0xec, 0x24, 0x78, 0xec, 0xf7, 0x07, 0x2c, 0xb1, + 0x70, 0x0b, 0xf2, 0x03, 0xc1, 0x10, 0x36, 0x36, 0x2c, 0xf5, 0xb5, 0xc4, 0xca, 0x7d, 0x28, 0x1f, + 0x90, 0xd1, 0xc8, 0x7f, 0x17, 0xdc, 0xf8, 0x18, 0x2a, 0x2f, 0x92, 0xeb, 0x3c, 0xc6, 0xef, 0x54, + 0xbf, 0x03, 0xfa, 0x71, 0xec, 0x52, 0x2f, 0xf2, 0x5d, 0xfc, 0x1c, 0xbf, 0x94, 0x2e, 0x52, 0x85, + 0x32, 0xae, 0x43, 
0xf9, 0x98, 0x39, 0x2c, 0x9e, 0x32, 0x7e, 0xd7, 0xa0, 0x92, 0xc9, 0x86, 0x4c, + 0x38, 0x7a, 0x02, 0xe0, 0x72, 0xb6, 0x1d, 0x3a, 0x11, 0x53, 0xf7, 0x73, 0x2b, 0x9d, 0x61, 0x59, + 0xbd, 0x1d, 0x27, 0x62, 0xd6, 0x96, 0xd0, 0xe4, 0x24, 0xda, 0x83, 0xbc, 0x27, 0xa2, 0x52, 0x97, + 0x52, 0x9d, 0x87, 0xa8, 0xa8, 0x95, 0x1e, 0x3a, 0x80, 0xf2, 0xb4, 0x5a, 0x6d, 0x8a, 0x59, 0x75, + 0x43, 0x00, 0x6b, 0xf3, 0xc0, 0x4c, 0x16, 0x4a, 0x67, 0xa9, 0x2f, 0xf4, 0x7f, 0xd8, 0xf4, 0xa9, + 0x3d, 0x74, 0x28, 0xab, 0xe6, 0x44, 0x92, 0xf3, 0x3e, 0xfd, 0xd2, 0xa1, 0xcc, 0x78, 0xab, 0xc1, + 0xcd, 0x4b, 0x97, 0xf5, 0xdf, 0x0a, 0xb0, 0x05, 0xdb, 0x49, 0x19, 0xa9, 0xc0, 0x66, 0x1e, 0x6a, + 0x57, 0xf3, 0xd0, 0xa0, 0xb0, 0x93, 0xad, 0x30, 0x65, 0x69, 0xce, 0x73, 0xed, 0x5f, 0x78, 0x3e, + 0xab, 0xd3, 0xf5, 0x4c, 0x9d, 0xb6, 0xe1, 0xf6, 0xc2, 0x3a, 0x55, 0x67, 0x2f, 0x6b, 0xa6, 0xa4, + 0x4d, 0xd7, 0x67, 0x6d, 0x6a, 0xfc, 0xaa, 0xc1, 0x76, 0x52, 0xd5, 0x0a, 0xfe, 0x19, 0x6c, 0x05, + 0xa4, 0x8b, 0x6d, 0x3f, 0xe8, 0x11, 0xe5, 0x76, 0x3d, 0xed, 0x76, 0xd8, 0x0c, 0xcd, 0x43, 0xdc, + 0x73, 0xe2, 0x21, 0x7b, 0x4e, 0xba, 0xb8, 0x1d, 0xf4, 0x88, 0x55, 0x08, 0x14, 0xc5, 0x5f, 0x17, + 0x3a, 0x09, 0x3c, 0x89, 0x5e, 0xf0, 0xba, 0x24, 0xd3, 0xc0, 0x3c, 0x9e, 0x04, 0x9e, 0xc4, 0x52, + 0x45, 0xa1, 0x36, 0x6c, 0xcf, 0x92, 0x26, 0x0c, 0xc8, 0xfb, 0x36, 0x16, 0x1a, 0x98, 0x26, 0x4e, + 0x58, 0x99, 0xa5, 0x9b, 0x7f, 0x1a, 0x7f, 0x6f, 0x40, 0x21, 0x39, 0x01, 0x3d, 0x82, 0x1b, 0x43, + 0x87, 0x61, 0xca, 0x6c, 0x59, 0xb6, 0xa9, 0xc7, 0xea, 0xba, 0x14, 0x88, 0x3a, 0xe7, 0x4d, 0x8c, + 0x3e, 0x02, 0xc5, 0xb2, 0x9d, 0x30, 0xb4, 0x53, 0xf9, 0x2a, 0x4b, 0xf6, 0x7e, 0x18, 0x0a, 0x3d, + 0x13, 0x2a, 0x59, 0x9b, 0x32, 0xe3, 0x1b, 0x22, 0xe3, 0x37, 0xd2, 0x56, 0x65, 0xf2, 0x3b, 0x97, + 0x7c, 0xe0, 0x53, 0x44, 0xd4, 0x63, 0xb1, 0xa9, 0x9b, 0x72, 0xc4, 0x98, 0xc9, 0x88, 0x31, 0x4f, + 0x92, 0x11, 0xd3, 0x2a, 0xbc, 0x7e, 0x5b, 0x5f, 0x7b, 0xf5, 0x47, 0x5d, 0xcb, 0x78, 0xca, 0xe5, + 0xdc, 0x03, 0xec, 0x44, 0x43, 0xff, 0x52, 0x5c, 0xd7, 
0x84, 0xb7, 0x37, 0x12, 0xd1, 0x2c, 0xb2, + 0x47, 0x30, 0x65, 0xce, 0x62, 0xcb, 0xcb, 0x2c, 0x24, 0x82, 0x24, 0xba, 0x26, 0xdc, 0xbc, 0x6c, + 0x5b, 0xc6, 0xb7, 0x29, 0xe2, 0xab, 0x64, 0xad, 0xcb, 0x08, 0x4f, 0xe6, 0xfc, 0x11, 0x31, 0x16, + 0xde, 0x23, 0xc6, 0xac, 0xd7, 0x22, 0xca, 0x3a, 0x14, 0x3d, 0x87, 0x79, 0x03, 0x3f, 0xe8, 0xdb, + 0x71, 0x58, 0xdd, 0x12, 0x1d, 0x0c, 0x09, 0xeb, 0xeb, 0xd0, 0xf8, 0x51, 0x83, 0x72, 0xa6, 0x14, + 0x50, 0x15, 0x36, 0x9d, 0x6e, 0x37, 0xc2, 0x94, 0xaa, 0x4b, 0x4e, 0x3e, 0xd1, 0x13, 0xd8, 0x0c, + 0x63, 0xd7, 0x3e, 0xc5, 0x13, 0x55, 0x9a, 0x77, 0xd2, 0x95, 0x25, 0xd7, 0x03, 0xb3, 0x13, 0xbb, + 0x43, 0xdf, 0x3b, 0xc2, 0x13, 0x2b, 0x1f, 0xc6, 0xee, 0x11, 0x9e, 0xa0, 0x7b, 0x50, 0x3a, 0x23, + 0x8c, 0x7b, 0x10, 0x92, 0x97, 0x38, 0x52, 0x97, 0x5c, 0x94, 0xbc, 0x0e, 0x67, 0x19, 0x36, 0xdc, + 0x3d, 0x74, 0x98, 0x63, 0x11, 0xc2, 0xda, 0x81, 0x37, 0x8c, 0xa9, 0x4f, 0x82, 0x0e, 0xdf, 0x33, + 0xae, 0x30, 0xe1, 0x28, 0xe3, 0xcf, 0x28, 0x77, 0x28, 0x67, 0xc9, 0x0f, 0xf4, 0x3f, 0xd8, 0xc0, + 0x41, 0x57, 0x1c, 0x94, 0xb3, 0x38, 0x69, 0xbc, 0x80, 0xda, 0xb2, 0x03, 0x54, 0xdf, 0x7e, 0x22, + 0x66, 0x25, 0xe9, 0x2d, 0x7a, 0xbb, 0x92, 0xd0, 0xb8, 0xbc, 0x95, 0xe3, 0xf9, 0xb6, 0xa4, 0x72, + 0xf3, 0x17, 0x0d, 0x4a, 0xd3, 0xd5, 0x62, 0xbf, 0xd3, 0x46, 0x47, 0x90, 0xe3, 0xbb, 0x07, 0xda, + 0x5d, 0xd8, 0x74, 0xa9, 0x9d, 0x46, 0xbf, 0xb7, 0x44, 0x63, 0xb6, 0xc0, 0xa0, 0x6f, 0xa1, 0x98, + 0xde, 0x5b, 0xee, 0xaf, 0xb2, 0x99, 0x52, 0xd4, 0x1f, 0xac, 0x34, 0x9d, 0xd2, 0x6c, 0xfe, 0x95, + 0x83, 0xeb, 0xa2, 0x5a, 0xf6, 0x3b, 0xed, 0x63, 0x1c, 0x9d, 0xf9, 0x1e, 0x46, 0x2e, 0x14, 0x53, + 0x73, 0x79, 0xc9, 0xa9, 0xf3, 0x7b, 0xcc, 0x92, 0x53, 0x17, 0x8c, 0xf8, 0x3d, 0x0d, 0x0d, 0xa0, + 0x9c, 0x19, 0x8e, 0xe8, 0xe1, 0x4a, 0x70, 0x7a, 0xdb, 0xd1, 0x1f, 0x5d, 0x45, 0x75, 0x7a, 0xd2, + 0x57, 0x90, 0x97, 0x43, 0x07, 0x2d, 0x7e, 0x07, 0x33, 0xab, 0x90, 0xfe, 0xc1, 0x4a, 0x1d, 0x55, + 0x2a, 0x1e, 0x94, 0xd2, 0x63, 0x07, 0x3d, 0x58, 0xfd, 0xc0, 0xce, 0x56, 0x27, 0xfd, 0xe1, 
0x15, + 0x34, 0xd5, 0x21, 0xdf, 0x43, 0x65, 0xc1, 0x94, 0x42, 0x8d, 0xc5, 0xd3, 0x60, 0xe9, 0xde, 0xa5, + 0xef, 0x5d, 0x1d, 0x90, 0xce, 0x99, 0x9c, 0x6a, 0x4b, 0x72, 0x96, 0x59, 0xe4, 0x96, 0xe4, 0x2c, + 0x3b, 0x16, 0x9b, 0x3f, 0x6b, 0xe2, 0xc6, 0x5d, 0xca, 0x22, 0xec, 0x8c, 0x78, 0xa7, 0xfc, 0xa0, + 0xc1, 0xad, 0xc5, 0x3d, 0x89, 0x9a, 0x0b, 0x2d, 0xae, 0x7c, 0x21, 0xf4, 0xc7, 0xef, 0x85, 0x51, + 0x9d, 0xf0, 0xec, 0xf5, 0x79, 0x4d, 0x7b, 0x73, 0x5e, 0xd3, 0xfe, 0x3c, 0xaf, 0x69, 0xaf, 0x2e, + 0x6a, 0x6b, 0x6f, 0x2e, 0x6a, 0x6b, 0xbf, 0x5d, 0xd4, 0xd6, 0xbe, 0x31, 0xfb, 0x3e, 0x1b, 0xc4, + 0xae, 0xe9, 0x91, 0x51, 0xc3, 0x23, 0x23, 0xcc, 0xdc, 0x1e, 0x9b, 0x11, 0xc9, 0xbf, 0xb6, 0xa7, + 0x1e, 0x89, 0x30, 0x27, 0xdc, 0xbc, 0x78, 0x99, 0x1f, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0x27, + 0xa6, 0x79, 0x23, 0xdc, 0x0d, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// BroadcastAPIClient is the client API for BroadcastAPI service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type BroadcastAPIClient interface { + Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) + BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) +} + +type broadcastAPIClient struct { + cc grpc1.ClientConn +} + +func NewBroadcastAPIClient(cc grpc1.ClientConn) BroadcastAPIClient { + return &broadcastAPIClient{cc} +} + +func (c *broadcastAPIClient) Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) { + out := new(ResponsePing) + err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/Ping", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) { + out := new(ResponseBroadcastTx) + err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BroadcastAPIServer is the server API for BroadcastAPI service. +type BroadcastAPIServer interface { + Ping(context.Context, *RequestPing) (*ResponsePing, error) + BroadcastTx(context.Context, *RequestBroadcastTx) (*ResponseBroadcastTx, error) +} + +// UnimplementedBroadcastAPIServer can be embedded to have forward compatible implementations. 
+type UnimplementedBroadcastAPIServer struct { +} + +func (*UnimplementedBroadcastAPIServer) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) { + return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") +} +func (*UnimplementedBroadcastAPIServer) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { + return nil, status.Errorf(codes.Unimplemented, "method BroadcastTx not implemented") +} + +func RegisterBroadcastAPIServer(s grpc1.Server, srv BroadcastAPIServer) { + s.RegisterService(&_BroadcastAPI_serviceDesc, srv) +} + +func _BroadcastAPI_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestPing) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BroadcastAPIServer).Ping(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/Ping", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BroadcastAPIServer).Ping(ctx, req.(*RequestPing)) + } + return interceptor(ctx, in, info, handler) +} + +func _BroadcastAPI_BroadcastTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestBroadcastTx) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BroadcastAPIServer).BroadcastTx(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BroadcastAPIServer).BroadcastTx(ctx, req.(*RequestBroadcastTx)) + } + return interceptor(ctx, in, info, handler) +} + +var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{ + ServiceName: 
"tendermint.rpc.grpc.BroadcastAPI", + HandlerType: (*BroadcastAPIServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Ping", + Handler: _BroadcastAPI_Ping_Handler, + }, + { + MethodName: "BroadcastTx", + Handler: _BroadcastAPI_BroadcastTx_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "tendermint/rpc/grpc/types.proto", +} + +// BlockAPIServiceClient is the client API for BlockAPIService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type BlockAPIServiceClient interface { + // BlockByHash returns a block by its hash. + BlockByHash(ctx context.Context, in *BlockByHashRequest, opts ...grpc.CallOption) (BlockAPIService_BlockByHashClient, error) + // BlockByHeight returns a block by its height. + BlockByHeight(ctx context.Context, in *BlockByHeightRequest, opts ...grpc.CallOption) (BlockAPIService_BlockByHeightClient, error) + // Commit returns the commit of a block. + Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error) + // ValidatorSet returns the validator set of a block. + ValidatorSet(ctx context.Context, in *ValidatorSetRequest, opts ...grpc.CallOption) (*ValidatorSetResponse, error) + // SubscribeNewHeights subscribes to new heights. + SubscribeNewHeights(ctx context.Context, in *SubscribeNewHeightsRequest, opts ...grpc.CallOption) (BlockAPIService_SubscribeNewHeightsClient, error) + // Status returns the status of the node. 
+ Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) +} + +type blockAPIServiceClient struct { + cc grpc1.ClientConn +} + +func NewBlockAPIServiceClient(cc grpc1.ClientConn) BlockAPIServiceClient { + return &blockAPIServiceClient{cc} +} + +func (c *blockAPIServiceClient) BlockByHash(ctx context.Context, in *BlockByHashRequest, opts ...grpc.CallOption) (BlockAPIService_BlockByHashClient, error) { + stream, err := c.cc.NewStream(ctx, &_BlockAPIService_serviceDesc.Streams[0], "/tendermint.rpc.grpc.BlockAPIService/BlockByHash", opts...) + if err != nil { + return nil, err + } + x := &blockAPIServiceBlockByHashClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type BlockAPIService_BlockByHashClient interface { + Recv() (*BlockByHashResponse, error) + grpc.ClientStream +} + +type blockAPIServiceBlockByHashClient struct { + grpc.ClientStream +} + +func (x *blockAPIServiceBlockByHashClient) Recv() (*BlockByHashResponse, error) { + m := new(BlockByHashResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *blockAPIServiceClient) BlockByHeight(ctx context.Context, in *BlockByHeightRequest, opts ...grpc.CallOption) (BlockAPIService_BlockByHeightClient, error) { + stream, err := c.cc.NewStream(ctx, &_BlockAPIService_serviceDesc.Streams[1], "/tendermint.rpc.grpc.BlockAPIService/BlockByHeight", opts...) 
+ if err != nil { + return nil, err + } + x := &blockAPIServiceBlockByHeightClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type BlockAPIService_BlockByHeightClient interface { + Recv() (*BlockByHeightResponse, error) + grpc.ClientStream +} + +type blockAPIServiceBlockByHeightClient struct { + grpc.ClientStream +} + +func (x *blockAPIServiceBlockByHeightClient) Recv() (*BlockByHeightResponse, error) { + m := new(BlockByHeightResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *blockAPIServiceClient) Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error) { + out := new(CommitResponse) + err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BlockAPIService/Commit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *blockAPIServiceClient) ValidatorSet(ctx context.Context, in *ValidatorSetRequest, opts ...grpc.CallOption) (*ValidatorSetResponse, error) { + out := new(ValidatorSetResponse) + err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BlockAPIService/ValidatorSet", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *blockAPIServiceClient) SubscribeNewHeights(ctx context.Context, in *SubscribeNewHeightsRequest, opts ...grpc.CallOption) (BlockAPIService_SubscribeNewHeightsClient, error) { + stream, err := c.cc.NewStream(ctx, &_BlockAPIService_serviceDesc.Streams[2], "/tendermint.rpc.grpc.BlockAPIService/SubscribeNewHeights", opts...) 
+ if err != nil { + return nil, err + } + x := &blockAPIServiceSubscribeNewHeightsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type BlockAPIService_SubscribeNewHeightsClient interface { + Recv() (*SubscribeNewHeightsResponse, error) + grpc.ClientStream +} + +type blockAPIServiceSubscribeNewHeightsClient struct { + grpc.ClientStream +} + +func (x *blockAPIServiceSubscribeNewHeightsClient) Recv() (*SubscribeNewHeightsResponse, error) { + m := new(SubscribeNewHeightsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *blockAPIServiceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { + out := new(StatusResponse) + err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BlockAPIService/Status", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BlockAPIServiceServer is the server API for BlockAPIService service. +type BlockAPIServiceServer interface { + // BlockByHash returns a block by its hash. + BlockByHash(*BlockByHashRequest, BlockAPIService_BlockByHashServer) error + // BlockByHeight returns a block by its height. + BlockByHeight(*BlockByHeightRequest, BlockAPIService_BlockByHeightServer) error + // Commit returns the commit of a block. + Commit(context.Context, *CommitRequest) (*CommitResponse, error) + // ValidatorSet returns the validator set of a block. + ValidatorSet(context.Context, *ValidatorSetRequest) (*ValidatorSetResponse, error) + // SubscribeNewHeights subscribes to new heights. + SubscribeNewHeights(*SubscribeNewHeightsRequest, BlockAPIService_SubscribeNewHeightsServer) error + // Status returns the status of the node. 
+ Status(context.Context, *StatusRequest) (*StatusResponse, error) +} + +// UnimplementedBlockAPIServiceServer can be embedded to have forward compatible implementations. +type UnimplementedBlockAPIServiceServer struct { +} + +func (*UnimplementedBlockAPIServiceServer) BlockByHash(req *BlockByHashRequest, srv BlockAPIService_BlockByHashServer) error { + return status.Errorf(codes.Unimplemented, "method BlockByHash not implemented") +} +func (*UnimplementedBlockAPIServiceServer) BlockByHeight(req *BlockByHeightRequest, srv BlockAPIService_BlockByHeightServer) error { + return status.Errorf(codes.Unimplemented, "method BlockByHeight not implemented") +} +func (*UnimplementedBlockAPIServiceServer) Commit(ctx context.Context, req *CommitRequest) (*CommitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Commit not implemented") +} +func (*UnimplementedBlockAPIServiceServer) ValidatorSet(ctx context.Context, req *ValidatorSetRequest) (*ValidatorSetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidatorSet not implemented") +} +func (*UnimplementedBlockAPIServiceServer) SubscribeNewHeights(req *SubscribeNewHeightsRequest, srv BlockAPIService_SubscribeNewHeightsServer) error { + return status.Errorf(codes.Unimplemented, "method SubscribeNewHeights not implemented") +} +func (*UnimplementedBlockAPIServiceServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") +} + +func RegisterBlockAPIServiceServer(s grpc1.Server, srv BlockAPIServiceServer) { + s.RegisterService(&_BlockAPIService_serviceDesc, srv) +} + +func _BlockAPIService_BlockByHash_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(BlockByHashRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(BlockAPIServiceServer).BlockByHash(m, &blockAPIServiceBlockByHashServer{stream}) +} + +type 
BlockAPIService_BlockByHashServer interface { + Send(*BlockByHashResponse) error + grpc.ServerStream +} + +type blockAPIServiceBlockByHashServer struct { + grpc.ServerStream +} + +func (x *blockAPIServiceBlockByHashServer) Send(m *BlockByHashResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _BlockAPIService_BlockByHeight_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(BlockByHeightRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(BlockAPIServiceServer).BlockByHeight(m, &blockAPIServiceBlockByHeightServer{stream}) +} + +type BlockAPIService_BlockByHeightServer interface { + Send(*BlockByHeightResponse) error + grpc.ServerStream +} + +type blockAPIServiceBlockByHeightServer struct { + grpc.ServerStream +} + +func (x *blockAPIServiceBlockByHeightServer) Send(m *BlockByHeightResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _BlockAPIService_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CommitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BlockAPIServiceServer).Commit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.rpc.grpc.BlockAPIService/Commit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BlockAPIServiceServer).Commit(ctx, req.(*CommitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BlockAPIService_ValidatorSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidatorSetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BlockAPIServiceServer).ValidatorSet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/tendermint.rpc.grpc.BlockAPIService/ValidatorSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BlockAPIServiceServer).ValidatorSet(ctx, req.(*ValidatorSetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BlockAPIService_SubscribeNewHeights_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscribeNewHeightsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(BlockAPIServiceServer).SubscribeNewHeights(m, &blockAPIServiceSubscribeNewHeightsServer{stream}) +} + +type BlockAPIService_SubscribeNewHeightsServer interface { + Send(*SubscribeNewHeightsResponse) error + grpc.ServerStream +} + +type blockAPIServiceSubscribeNewHeightsServer struct { + grpc.ServerStream +} + +func (x *blockAPIServiceSubscribeNewHeightsServer) Send(m *SubscribeNewHeightsResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _BlockAPIService_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BlockAPIServiceServer).Status(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.rpc.grpc.BlockAPIService/Status", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BlockAPIServiceServer).Status(ctx, req.(*StatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _BlockAPIService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tendermint.rpc.grpc.BlockAPIService", + HandlerType: (*BlockAPIServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Commit", + Handler: _BlockAPIService_Commit_Handler, + }, + { + MethodName: "ValidatorSet", + Handler: _BlockAPIService_ValidatorSet_Handler, + }, + { + MethodName: "Status", + Handler: 
_BlockAPIService_Status_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "BlockByHash", + Handler: _BlockAPIService_BlockByHash_Handler, + ServerStreams: true, + }, + { + StreamName: "BlockByHeight", + Handler: _BlockAPIService_BlockByHeight_Handler, + ServerStreams: true, + }, + { + StreamName: "SubscribeNewHeights", + Handler: _BlockAPIService_SubscribeNewHeights_Handler, + ServerStreams: true, + }, + }, + Metadata: "tendermint/rpc/grpc/types.proto", +} + +// BlobstreamAPIClient is the client API for BlobstreamAPI service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type BlobstreamAPIClient interface { + // DataRootInclusionProof creates an inclusion proof for the data root of block + // height `height` in the set of blocks defined by `start` and `end`. The range + // is end exclusive. + DataRootInclusionProof(ctx context.Context, in *DataRootInclusionProofRequest, opts ...grpc.CallOption) (*DataRootInclusionProofResponse, error) +} + +type blobstreamAPIClient struct { + cc grpc1.ClientConn +} + +func NewBlobstreamAPIClient(cc grpc1.ClientConn) BlobstreamAPIClient { + return &blobstreamAPIClient{cc} +} + +func (c *blobstreamAPIClient) DataRootInclusionProof(ctx context.Context, in *DataRootInclusionProofRequest, opts ...grpc.CallOption) (*DataRootInclusionProofResponse, error) { + out := new(DataRootInclusionProofResponse) + err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BlobstreamAPI/DataRootInclusionProof", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BlobstreamAPIServer is the server API for BlobstreamAPI service. +type BlobstreamAPIServer interface { + // DataRootInclusionProof creates an inclusion proof for the data root of block + // height `height` in the set of blocks defined by `start` and `end`. The range + // is end exclusive. 
+ DataRootInclusionProof(context.Context, *DataRootInclusionProofRequest) (*DataRootInclusionProofResponse, error) +} + +// UnimplementedBlobstreamAPIServer can be embedded to have forward compatible implementations. +type UnimplementedBlobstreamAPIServer struct { +} + +func (*UnimplementedBlobstreamAPIServer) DataRootInclusionProof(ctx context.Context, req *DataRootInclusionProofRequest) (*DataRootInclusionProofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DataRootInclusionProof not implemented") +} + +func RegisterBlobstreamAPIServer(s grpc1.Server, srv BlobstreamAPIServer) { + s.RegisterService(&_BlobstreamAPI_serviceDesc, srv) +} + +func _BlobstreamAPI_DataRootInclusionProof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DataRootInclusionProofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BlobstreamAPIServer).DataRootInclusionProof(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.rpc.grpc.BlobstreamAPI/DataRootInclusionProof", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BlobstreamAPIServer).DataRootInclusionProof(ctx, req.(*DataRootInclusionProofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _BlobstreamAPI_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tendermint.rpc.grpc.BlobstreamAPI", + HandlerType: (*BlobstreamAPIServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DataRootInclusionProof", + Handler: _BlobstreamAPI_DataRootInclusionProof_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "tendermint/rpc/grpc/types.proto", +} + +func (m *RequestPing) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *RequestPing) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestPing) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *RequestBroadcastTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestBroadcastTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Tx) > 0 { + i -= len(m.Tx) + copy(dAtA[i:], m.Tx) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResponsePing) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponsePing) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponsePing) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *ResponseBroadcastTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseBroadcastTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TxResult != nil { + { + size, 
err := m.TxResult.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.CheckTx != nil { + { + size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BlockByHashRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockByHashRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockByHashRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Prove { + i-- + if m.Prove { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BlockByHeightRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockByHeightRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockByHeightRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Prove { + i-- + if m.Prove { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CommitRequest) Marshal() (dAtA []byte, err error) { + 
size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommitRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ValidatorSetRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatorSetRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatorSetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SubscribeNewHeightsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubscribeNewHeightsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubscribeNewHeightsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *StatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusRequest) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *BlockByHashResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockByHashResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockByHashResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.IsLast { + i-- + if m.IsLast { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.ValidatorSet != nil { + { + size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Commit != nil { + { + size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.BlockPart != nil { + { + size, err := m.BlockPart.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BlockByHeightResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockByHeightResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockByHeightResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l 
int + _ = l + if m.IsLast { + i-- + if m.IsLast { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.ValidatorSet != nil { + { + size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Commit != nil { + { + size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.BlockPart != nil { + { + size, err := m.BlockPart.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Commit != nil { + { + size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ValidatorSetResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatorSetResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatorSetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + 
var l int + _ = l + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 + } + if m.ValidatorSet != nil { + { + size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SubscribeNewHeightsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubscribeNewHeightsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubscribeNewHeightsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x12 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ValidatorInfo != nil { + { + size, err := m.ValidatorInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.SyncInfo != nil { + { + size, err := m.SyncInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i 
-= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.NodeInfo != nil { + { + size, err := m.NodeInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SyncInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SyncInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SyncInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CatchingUp { + i-- + if m.CatchingUp { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } + n14, err14 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.EarliestBlockTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.EarliestBlockTime):]) + if err14 != nil { + return 0, err14 + } + i -= n14 + i = encodeVarintTypes(dAtA, i, uint64(n14)) + i-- + dAtA[i] = 0x42 + if m.EarliestBlockHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.EarliestBlockHeight)) + i-- + dAtA[i] = 0x38 + } + if len(m.EarliestAppHash) > 0 { + i -= len(m.EarliestAppHash) + copy(dAtA[i:], m.EarliestAppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.EarliestAppHash))) + i-- + dAtA[i] = 0x32 + } + if len(m.EarliestBlockHash) > 0 { + i -= len(m.EarliestBlockHash) + copy(dAtA[i:], m.EarliestBlockHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.EarliestBlockHash))) + i-- + dAtA[i] = 0x2a + } + n15, err15 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.LatestBlockTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.LatestBlockTime):]) + if err15 != nil { + return 0, err15 + } + i -= n15 + i = encodeVarintTypes(dAtA, i, uint64(n15)) + i-- + dAtA[i] = 0x22 
+ if m.LatestBlockHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LatestBlockHeight)) + i-- + dAtA[i] = 0x18 + } + if len(m.LatestAppHash) > 0 { + i -= len(m.LatestAppHash) + copy(dAtA[i:], m.LatestAppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.LatestAppHash))) + i-- + dAtA[i] = 0x12 + } + if len(m.LatestBlockHash) > 0 { + i -= len(m.LatestBlockHash) + copy(dAtA[i:], m.LatestBlockHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.LatestBlockHash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ValidatorInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatorInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatorInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.VotingPower != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.VotingPower)) + i-- + dAtA[i] = 0x18 + } + if m.PubKey != nil { + { + size, err := m.PubKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DataRootInclusionProofRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DataRootInclusionProofRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DataRootInclusionProofRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i 
:= len(dAtA) + _ = i + var l int + _ = l + if m.End != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.End)) + i-- + dAtA[i] = 0x18 + } + if m.Start != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *DataRootInclusionProofResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DataRootInclusionProofResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DataRootInclusionProofResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *RequestPing) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *RequestBroadcastTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Tx) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponsePing) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *ResponseBroadcastTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CheckTx != nil { + l = m.CheckTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.TxResult != nil { + l = m.TxResult.Size() + n += 1 + l + sovTypes(uint64(l)) 
+ } + return n +} + +func (m *BlockByHashRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Prove { + n += 2 + } + return n +} + +func (m *BlockByHeightRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Prove { + n += 2 + } + return n +} + +func (m *CommitRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + return n +} + +func (m *ValidatorSetRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + return n +} + +func (m *SubscribeNewHeightsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *StatusRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *BlockByHashResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BlockPart != nil { + l = m.BlockPart.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Commit != nil { + l = m.Commit.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.ValidatorSet != nil { + l = m.ValidatorSet.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.IsLast { + n += 2 + } + return n +} + +func (m *BlockByHeightResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BlockPart != nil { + l = m.BlockPart.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Commit != nil { + l = m.Commit.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.ValidatorSet != nil { + l = m.ValidatorSet.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.IsLast { + n += 2 + } + return n +} + +func (m *CommitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Commit != nil { + l = m.Commit.Size() + n += 1 + 
l + sovTypes(uint64(l)) + } + return n +} + +func (m *ValidatorSetResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValidatorSet != nil { + l = m.ValidatorSet.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + return n +} + +func (m *SubscribeNewHeightsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *StatusResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NodeInfo != nil { + l = m.NodeInfo.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.SyncInfo != nil { + l = m.SyncInfo.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.ValidatorInfo != nil { + l = m.ValidatorInfo.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *SyncInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LatestBlockHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.LatestAppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.LatestBlockHeight != 0 { + n += 1 + sovTypes(uint64(m.LatestBlockHeight)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.LatestBlockTime) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.EarliestBlockHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.EarliestAppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.EarliestBlockHeight != 0 { + n += 1 + sovTypes(uint64(m.EarliestBlockHeight)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.EarliestBlockTime) + n += 1 + l + sovTypes(uint64(l)) + if m.CatchingUp { + n += 2 + } + return n +} + +func (m *ValidatorInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.PubKey != nil 
{ + l = m.PubKey.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.VotingPower != 0 { + n += 1 + sovTypes(uint64(m.VotingPower)) + } + return n +} + +func (m *DataRootInclusionProofRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Start != 0 { + n += 1 + sovTypes(uint64(m.Start)) + } + if m.End != 0 { + n += 1 + sovTypes(uint64(m.End)) + } + return n +} + +func (m *DataRootInclusionProofResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Proof.Size() + n += 1 + l + sovTypes(uint64(l)) + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *RequestPing) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestPing: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestPing: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestBroadcastTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Tx == nil { + m.Tx = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponsePing) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponsePing: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponsePing: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseBroadcastTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: ResponseBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CheckTx == nil { + m.CheckTx = &types.ResponseCheckTx{} + } + if err := m.CheckTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TxResult", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TxResult == nil { + m.TxResult = &types.ExecTxResult{} + } + if err := m.TxResult.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockByHashRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockByHashRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockByHashRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Prove = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockByHeightRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockByHeightRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockByHeightRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) + } + var v int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Prove = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommitRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommitRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommitRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatorSetRequest) Unmarshal(dAtA 
[]byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatorSetRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatorSetRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubscribeNewHeightsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubscribeNewHeightsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
SubscribeNewHeightsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockByHashResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockByHashResponse: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockByHashResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockPart", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BlockPart == nil { + m.BlockPart = &types1.Part{} + } + if err := m.BlockPart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Commit == nil { + m.Commit = &types1.Commit{} + } + if err := m.Commit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + 
return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValidatorSet == nil { + m.ValidatorSet = &types1.ValidatorSet{} + } + if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsLast", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsLast = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockByHeightResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockByHeightResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockByHeightResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockPart", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BlockPart == nil { + m.BlockPart = &types1.Part{} + } + if err := m.BlockPart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Commit == nil { + m.Commit = &types1.Commit{} + } + if err := m.Commit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValidatorSet == nil { + m.ValidatorSet = &types1.ValidatorSet{} + } + if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsLast", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsLast = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Commit == nil { + m.Commit = &types1.Commit{} + 
} + if err := m.Commit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatorSetResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatorSetResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatorSetResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValidatorSet == nil { + m.ValidatorSet = &types1.ValidatorSet{} + } + if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", 
wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubscribeNewHeightsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubscribeNewHeightsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubscribeNewHeightsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeInfo == nil { + m.NodeInfo = 
&p2p.DefaultNodeInfo{} + } + if err := m.NodeInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SyncInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SyncInfo == nil { + m.SyncInfo = &SyncInfo{} + } + if err := m.SyncInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValidatorInfo == nil { + m.ValidatorInfo = &ValidatorInfo{} + } + if err := m.ValidatorInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SyncInfo) Unmarshal(dAtA []byte) error { + l 
:= len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SyncInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SyncInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LatestBlockHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LatestBlockHash = append(m.LatestBlockHash[:0], dAtA[iNdEx:postIndex]...) + if m.LatestBlockHash == nil { + m.LatestBlockHash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LatestAppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LatestAppHash = append(m.LatestAppHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.LatestAppHash == nil { + m.LatestAppHash = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LatestBlockHeight", wireType) + } + m.LatestBlockHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LatestBlockHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LatestBlockTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.LatestBlockTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EarliestBlockHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes } - if iNdEx >= l { + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.EarliestBlockHash = append(m.EarliestBlockHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.EarliestBlockHash == nil { + m.EarliestBlockHash = []byte{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestPing: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestPing: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EarliestAppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EarliestAppHash = append(m.EarliestAppHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.EarliestAppHash == nil { + m.EarliestAppHash = []byte{} + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EarliestBlockHeight", wireType) + } + m.EarliestBlockHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EarliestBlockHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EarliestBlockTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.EarliestBlockTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CatchingUp", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CatchingUp = bool(v != 0) default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -585,7 +4776,7 @@ func (m *RequestPing) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) error { +func (m *ValidatorInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -608,15 +4799,15 @@ func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) 
error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestBroadcastTx: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatorInfo: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatorInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -643,11 +4834,66 @@ func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) - if m.Tx == nil { - m.Tx = []byte{} + m.Address = append(m.Address[:0], dAtA[iNdEx:postIndex]...) + if m.Address == nil { + m.Address = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PubKey == nil { + m.PubKey = &crypto.PublicKey{} + } + if err := m.PubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field VotingPower", wireType) + } + m.VotingPower = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.VotingPower |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -669,7 +4915,7 @@ func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponsePing) Unmarshal(dAtA []byte) error { +func (m *DataRootInclusionProofRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -692,12 +4938,69 @@ func (m *ResponsePing) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponsePing: wiretype end group for non-group") + return fmt.Errorf("proto: DataRootInclusionProofRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponsePing: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DataRootInclusionProofRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + m.Start = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Start |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + m.End = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.End |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -719,7 +5022,7 @@ func (m *ResponsePing) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { +func (m *DataRootInclusionProofResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -742,51 +5045,15 @@ func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseBroadcastTx: wiretype end group for non-group") + return fmt.Errorf("proto: DataRootInclusionProofResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DataRootInclusionProofResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CheckTx == nil { - m.CheckTx = &types.ResponseCheckTx{} - } - if err := m.CheckTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TxResult", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Proof", 
wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -813,10 +5080,7 @@ func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TxResult == nil { - m.TxResult = &types.ExecTxResult{} - } - if err := m.TxResult.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index 1e67f7b4993..eeb005ba59e 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -29,6 +29,9 @@ type Options struct { suppressStdout bool recreateConfig bool maxReqBatchSize int + + // SpecificConfig will replace the global config if not nil + SpecificConfig *cfg.Config } var ( @@ -157,6 +160,9 @@ func StopTendermint(node *nm.Node) { func NewTendermint(app abci.Application, opts *Options) *nm.Node { // Create & start node config := GetConfig(opts.recreateConfig) + if opts.SpecificConfig != nil { + config = opts.SpecificConfig + } var logger log.Logger if opts.suppressStdout { logger = log.NewNopLogger() @@ -202,3 +208,21 @@ func RecreateConfig(o *Options) { func MaxReqBatchSize(o *Options) { o.maxReqBatchSize = 2 } + +func GetBlockAPIClient() (core_grpc.BlockAPIServiceClient, error) { + grpcAddr := globalConfig.RPC.GRPCListenAddress + client, err := core_grpc.StartBlockAPIGRPCClient(grpcAddr) + if err != nil { + return nil, err + } + return client, nil +} + +func GetBlobstreamAPIClient() (core_grpc.BlobstreamAPIClient, error) { + grpcAddr := globalConfig.RPC.GRPCListenAddress + client, err := core_grpc.StartBlobstreamAPIGRPCClient(grpcAddr) + if err != nil { + return nil, err + } + return client, nil +} diff --git a/state/execution.go b/state/execution.go index 4accf1639d6..a812f5b028b 100644 --- a/state/execution.go +++ b/state/execution.go @@ -125,7 +125,7 @@ func (blockExec *BlockExecutor) CreateProposalBlock( txs := blockExec.mempool.ReapMaxBytesMaxGas(maxReapBytes, maxGas) 
commit := lastExtCommit.ToCommit() - block := state.MakeBlock(height, txs, commit, evidence, proposerAddr) + block := state.MakeBlock(height, types.MakeData(txs), commit, evidence, proposerAddr) rpp, err := blockExec.proxyApp.PrepareProposal( ctx, &abci.RequestPrepareProposal{ @@ -151,12 +151,21 @@ func (blockExec *BlockExecutor) CreateProposalBlock( return nil, err } + rawNewData := rpp.GetTxs() + + rejectedTxs := len(rawNewData) - len(txs) + if rejectedTxs > 0 { + blockExec.metrics.RejectedTransactions.Add(float64(rejectedTxs)) + } + txl := types.ToTxs(rpp.Txs) if err := txl.Validate(maxDataBytes); err != nil { return nil, err } - return state.MakeBlock(height, txl, commit, evidence, proposerAddr), nil + data := types.NewData(txl, rpp.SquareSize, rpp.DataRootHash) + + return state.MakeBlock(height, data, commit, evidence, proposerAddr), nil } func (blockExec *BlockExecutor) ProcessProposal( @@ -168,6 +177,8 @@ func (blockExec *BlockExecutor) ProcessProposal( Height: block.Header.Height, Time: block.Header.Time, Txs: block.Data.Txs.ToSliceOfBytes(), + SquareSize: block.Data.SquareSize, + DataRootHash: block.Data.GetDataRootHash(), ProposedLastCommit: buildLastCommitInfoFromStore(block, blockExec.store, state.InitialHeight), Misbehavior: block.Evidence.Evidence.ToABCI(), ProposerAddress: block.ProposerAddress, @@ -197,9 +208,9 @@ func (blockExec *BlockExecutor) ValidateBlock(state State, block *types.Block) e // ApplyVerifiedBlock does the same as `ApplyBlock`, but skips verification. 
func (blockExec *BlockExecutor) ApplyVerifiedBlock( - state State, blockID types.BlockID, block *types.Block, + state State, blockID types.BlockID, block *types.Block, lastCommit *types.Commit, ) (State, error) { - return blockExec.applyBlock(state, blockID, block) + return blockExec.applyBlock(state, blockID, block, lastCommit) } // ApplyBlock validates the block against the state, executes it against the app, @@ -209,18 +220,29 @@ func (blockExec *BlockExecutor) ApplyVerifiedBlock( // from outside this package to process and commit an entire block. // It takes a blockID to avoid recomputing the parts hash. func (blockExec *BlockExecutor) ApplyBlock( - state State, blockID types.BlockID, block *types.Block, + state State, blockID types.BlockID, block *types.Block, lastCommit *types.Commit, ) (State, error) { if err := validateBlock(state, block); err != nil { return state, ErrInvalidBlock(err) } - return blockExec.applyBlock(state, blockID, block) + return blockExec.applyBlock(state, blockID, block, lastCommit) } -func (blockExec *BlockExecutor) applyBlock(state State, blockID types.BlockID, block *types.Block) (State, error) { +func (blockExec *BlockExecutor) applyBlock(state State, blockID types.BlockID, block *types.Block, lastCommit *types.Commit) (State, error) { startTime := time.Now().UnixNano() + + // Unmarshal blob txs + txs := make([][]byte, len(block.Txs)) + for i, tx := range block.Txs { + blobTx, isBlobTx := types.UnmarshalBlobTx(tx) + if isBlobTx { + tx = blobTx.Tx + } + txs[i] = tx + } + abciResponse, err := blockExec.proxyApp.FinalizeBlock(context.TODO(), &abci.RequestFinalizeBlock{ Hash: block.Hash(), NextValidatorsHash: block.NextValidatorsHash, @@ -229,7 +251,7 @@ func (blockExec *BlockExecutor) applyBlock(state State, blockID types.BlockID, b Time: block.Time, DecidedLastCommit: buildLastCommitInfoFromStore(block, blockExec.store, state.InitialHeight), Misbehavior: block.Evidence.Evidence.ToABCI(), - Txs: block.Txs.ToSliceOfBytes(), + Txs: txs, 
}) endTime := time.Now().UnixNano() blockExec.metrics.BlockProcessingTime.Observe(float64(endTime-startTime) / 1000000) @@ -252,6 +274,16 @@ func (blockExec *BlockExecutor) applyBlock(state State, blockID types.BlockID, b } blockExec.logger.Info("executed block", "height", block.Height, "app_hash", fmt.Sprintf("%X", abciResponse.AppHash)) + // Save indexing info of the transaction. + // This needs to be done prior to saving state + // for correct crash recovery + if blockExec.blockStore != nil { + respCodes := getResponseCodes(abciResponse.TxResults) + logs := getLogs(abciResponse.TxResults) + if err := blockExec.blockStore.SaveTxInfo(block, respCodes, logs); err != nil { + return state, err + } + } fail.Fail() // XXX @@ -317,7 +349,7 @@ func (blockExec *BlockExecutor) applyBlock(state State, blockID types.BlockID, b // Events are fired after everything else. // NOTE: if we crash between Commit and Save, events wont be fired during replay - fireEvents(blockExec.logger, blockExec.eventBus, block, blockID, abciResponse, validatorUpdates) + fireEvents(blockExec.logger, blockExec.eventBus, block, blockID, abciResponse, validatorUpdates, state.Validators, lastCommit) return state, nil } @@ -670,6 +702,8 @@ func fireEvents( blockID types.BlockID, abciResponse *abci.ResponseFinalizeBlock, validatorUpdates []*types.Validator, + currentValidators *types.ValidatorSet, + lastCommit *types.Commit, ) { if err := eventBus.PublishEventNewBlock(types.EventDataNewBlock{ Block: block, @@ -679,6 +713,18 @@ func fireEvents( logger.Error("failed publishing new block", "err", err) } + if lastCommit != nil { + err := eventBus.PublishEventSignedBlock(types.EventDataSignedBlock{ + Header: block.Header, + Commit: *lastCommit, + ValidatorSet: *currentValidators, + Data: block.Data, + }) + if err != nil { + logger.Error("failed publishing new signed block", "err", err) + } + } + if err := eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{ Header: block.Header, }); err != nil { 
@@ -705,6 +751,11 @@ func fireEvents( } for i, tx := range block.Data.Txs { + //TODO: can we decode without decoding the whole blob? + blobTx, isBlobTx := types.UnmarshalBlobTx(tx) + if isBlobTx { + tx = blobTx.Tx + } if err := eventBus.PublishEventTx(types.EventDataTx{TxResult: abci.TxResult{ Height: block.Height, Index: uint32(i), @@ -787,3 +838,21 @@ func (blockExec *BlockExecutor) pruneBlocks(retainHeight int64, state State) (ui } return amountPruned, nil } + +// getResponseCodes gets response codes from a list of ResponseDeliverTx. +func getResponseCodes(responses []*abci.ExecTxResult) []uint32 { + responseCodes := make([]uint32, len(responses)) + for i, response := range responses { + responseCodes[i] = response.Code + } + return responseCodes +} + +// getLogs gets logs from a list of ResponseDeliverTx. +func getLogs(responses []*abci.ExecTxResult) []string { + logs := make([]string, len(responses)) + for i, response := range responses { + logs[i] = response.Log + } + return logs +} diff --git a/state/execution_test.go b/state/execution_test.go index 797bd8a5778..092503f15fb 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -72,7 +72,7 @@ func TestApplyBlock(t *testing.T) { require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} - state, err = blockExec.ApplyBlock(state, blockID, block) + state, err = blockExec.ApplyBlock(state, blockID, block, nil) require.Nil(t, err) // TODO check state and mempool @@ -145,7 +145,7 @@ func TestFinalizeBlockDecidedLastCommit(t *testing.T) { bps, err := block.MakePartSet(testPartSize) require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} - _, err = blockExec.ApplyBlock(state, blockID, block) + _, err = blockExec.ApplyBlock(state, blockID, block, nil) require.NoError(t, err) require.True(t, app.LastTime.After(baseTime)) @@ -354,7 +354,7 @@ func TestFinalizeBlockMisbehavior(t *testing.T) { blockID = types.BlockID{Hash: 
block.Hash(), PartSetHeader: bps.Header()} - _, err = blockExec.ApplyBlock(state, blockID, block) + _, err = blockExec.ApplyBlock(state, blockID, block, nil) require.NoError(t, err) // TODO check state and mempool @@ -636,7 +636,7 @@ func TestFinalizeBlockValidatorUpdates(t *testing.T) { {PubKey: pk, Power: 10}, } - state, err = blockExec.ApplyBlock(state, blockID, block) + state, err = blockExec.ApplyBlock(state, blockID, block, nil) require.NoError(t, err) // test new validator was added to NextValidators if assert.Equal(t, state.Validators.Size()+1, state.NextValidators.Size()) { @@ -698,7 +698,7 @@ func TestFinalizeBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { {PubKey: vp, Power: 0}, } - assert.NotPanics(t, func() { state, err = blockExec.ApplyBlock(state, blockID, block) }) + assert.NotPanics(t, func() { state, err = blockExec.ApplyBlock(state, blockID, block, nil) }) assert.Error(t, err) assert.NotEmpty(t, state.NextValidators.Validators) } diff --git a/state/helpers_test.go b/state/helpers_test.go index f094b79ab85..136b8b8ef5a 100644 --- a/state/helpers_test.go +++ b/state/helpers_test.go @@ -56,7 +56,7 @@ func makeAndCommitGoodBlock( func makeAndApplyGoodBlock(state sm.State, height int64, lastCommit *types.Commit, proposerAddr []byte, blockExec *sm.BlockExecutor, evidence []types.Evidence, ) (sm.State, types.BlockID, error) { - block := state.MakeBlock(height, test.MakeNTxs(height, 10), lastCommit, evidence, proposerAddr) + block := state.MakeBlock(height, types.MakeData(test.MakeNTxs(height, 10)), lastCommit, evidence, proposerAddr) partSet, err := block.MakePartSet(types.BlockPartSizeBytes) if err != nil { return state, types.BlockID{}, err @@ -69,7 +69,7 @@ func makeAndApplyGoodBlock(state sm.State, height int64, lastCommit *types.Commi Hash: block.Hash(), PartSetHeader: partSet.Header(), } - state, err = blockExec.ApplyBlock(state, blockID, block) + state, err = blockExec.ApplyBlock(state, blockID, block, nil) if err != nil { return state, 
types.BlockID{}, err } @@ -79,7 +79,7 @@ func makeAndApplyGoodBlock(state sm.State, height int64, lastCommit *types.Commi func makeBlock(state sm.State, height int64, c *types.Commit) *types.Block { return state.MakeBlock( height, - test.MakeNTxs(state.LastBlockHeight, 10), + types.MakeData(test.MakeNTxs(state.LastBlockHeight, 10)), c, nil, state.Validators.GetProposer().Address, diff --git a/state/metrics.gen.go b/state/metrics.gen.go index 554beefb691..0cccda7af4e 100644 --- a/state/metrics.gen.go +++ b/state/metrics.gen.go @@ -34,6 +34,18 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "validator_set_updates", Help: "Number of validator set updates returned by the application since process start.", }, labels).With(labelsAndValues...), + RejectedTransactions: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "rejected_transactions", + Help: "The number of transactions rejected by the application.", + }, labels).With(labelsAndValues...), + ProcessedTransactions: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "processed_transactions", + Help: "The number of transactions processed by the application.", + }, labels).With(labelsAndValues...), } } @@ -42,5 +54,7 @@ func NopMetrics() *Metrics { BlockProcessingTime: discard.NewHistogram(), ConsensusParamUpdates: discard.NewCounter(), ValidatorSetUpdates: discard.NewCounter(), + RejectedTransactions: discard.NewCounter(), + ProcessedTransactions: discard.NewCounter(), } } diff --git a/state/metrics.go b/state/metrics.go index 9015ed1dd2b..3e62aa20cb0 100644 --- a/state/metrics.go +++ b/state/metrics.go @@ -26,4 +26,10 @@ type Metrics struct { // updated the validator set since process start. //metrics:Number of validator set updates returned by the application since process start. 
ValidatorSetUpdates metrics.Counter + + // The number of transactions rejected by the application. + RejectedTransactions metrics.Counter + + // The number of transactions processed by the application. + ProcessedTransactions metrics.Counter } diff --git a/state/mocks/block_store.go b/state/mocks/block_store.go index b7506482fab..5b8a5060cdd 100644 --- a/state/mocks/block_store.go +++ b/state/mocks/block_store.go @@ -6,6 +6,8 @@ import ( state "github.com/cometbft/cometbft/state" mock "github.com/stretchr/testify/mock" + store "github.com/cometbft/cometbft/proto/tendermint/store" + types "github.com/cometbft/cometbft/types" ) @@ -266,6 +268,26 @@ func (_m *BlockStore) LoadSeenCommit(height int64) *types.Commit { return r0 } +// LoadTxInfo provides a mock function with given fields: hash +func (_m *BlockStore) LoadTxInfo(hash []byte) *store.TxInfo { + ret := _m.Called(hash) + + if len(ret) == 0 { + panic("no return value specified for LoadTxInfo") + } + + var r0 *store.TxInfo + if rf, ok := ret.Get(0).(func([]byte) *store.TxInfo); ok { + r0 = rf(hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*store.TxInfo) + } + } + + return r0 +} + // PruneBlocks provides a mock function with given fields: height, _a1 func (_m *BlockStore) PruneBlocks(height int64, _a1 state.State) (uint64, int64, error) { ret := _m.Called(height, _a1) @@ -311,6 +333,24 @@ func (_m *BlockStore) SaveBlockWithExtendedCommit(block *types.Block, blockParts _m.Called(block, blockParts, seenCommit) } +// SaveTxInfo provides a mock function with given fields: block, txResponseCodes, logs +func (_m *BlockStore) SaveTxInfo(block *types.Block, txResponseCodes []uint32, logs []string) error { + ret := _m.Called(block, txResponseCodes, logs) + + if len(ret) == 0 { + panic("no return value specified for SaveTxInfo") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*types.Block, []uint32, []string) error); ok { + r0 = rf(block, txResponseCodes, logs) + } else { + r0 = ret.Error(0) + } + + 
return r0 +} + // Size provides a mock function with no fields func (_m *BlockStore) Size() int64 { ret := _m.Called() diff --git a/state/services.go b/state/services.go index 280a945668f..d25455058f0 100644 --- a/state/services.go +++ b/state/services.go @@ -2,6 +2,8 @@ package state import ( "github.com/cometbft/cometbft/types" + + cmtstore "github.com/cometbft/cometbft/proto/tendermint/store" ) //------------------------------------------------------ @@ -37,6 +39,9 @@ type BlockStore interface { LoadSeenCommit(height int64) *types.Commit LoadBlockExtendedCommit(height int64) *types.ExtendedCommit + LoadTxInfo(hash []byte) *cmtstore.TxInfo + SaveTxInfo(block *types.Block, txResponseCodes []uint32, logs []string) error + DeleteLatestBlock() error Close() error diff --git a/state/state.go b/state/state.go index 15fb8e5e62b..e3c1e94a48b 100644 --- a/state/state.go +++ b/state/state.go @@ -27,12 +27,14 @@ var ( // but leaves the Consensus.App version blank. // The Consensus.App version will be set during the Handshake, once // we hear from the app what protocol version it is running. -var InitStateVersion = cmtstate.Version{ - Consensus: cmtversion.Consensus{ - Block: version.BlockProtocol, - App: 0, - }, - Software: version.TMCoreSemVer, +func InitStateVersion(appVersion uint64) cmtstate.Version { + return cmtstate.Version{ + Consensus: cmtversion.Consensus{ + Block: version.BlockProtocol, + App: appVersion, + }, + Software: version.TMCoreSemVer, + } } //----------------------------------------------------------------------------- @@ -233,14 +235,14 @@ func FromProto(pb *cmtstate.State) (*State, error) { //nolint:golint // track rounds, and hence does not know the correct proposer. TODO: fix this! func (state State) MakeBlock( height int64, - txs []types.Tx, + data types.Data, lastCommit *types.Commit, evidence []types.Evidence, proposerAddress []byte, ) *types.Block { // Build base block with block data. 
- block := types.MakeBlock(height, txs, lastCommit, evidence) + block := types.MakeBlock(height, data, lastCommit, evidence) // Set time. var timestamp time.Time @@ -333,8 +335,10 @@ func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { nextValidatorSet = types.NewValidatorSet(validators).CopyIncrementProposerPriority(1) } + appVersion := getAppVersion(genDoc) + return State{ - Version: InitStateVersion, + Version: InitStateVersion(appVersion), ChainID: genDoc.ChainID, InitialHeight: genDoc.InitialHeight, @@ -353,3 +357,13 @@ func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { AppHash: genDoc.AppHash, }, nil } + +func getAppVersion(genDoc *types.GenesisDoc) uint64 { + if genDoc.ConsensusParams != nil && + genDoc.ConsensusParams.Version.App != 0 { + return genDoc.ConsensusParams.Version.App + } + // Default to app version 1 because some chains (e.g. mocha-4) did not set + // an explicit app version in genesis.json. + return uint64(1) +} diff --git a/state/state_test.go b/state/state_test.go index 863395dca32..3f29d32150b 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -20,6 +20,7 @@ import ( cmtrand "github.com/cometbft/cometbft/libs/rand" sm "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/types" + "github.com/cometbft/cometbft/version" ) // setupTestCase does setup common to all test cases. 
@@ -1114,3 +1115,30 @@ func TestStateProto(t *testing.T) { } } } + +func TestMakeGenesisStateSetsAppVersion(t *testing.T) { + cp := types.DefaultConsensusParams() + appVersion := uint64(5) + cp.Version.App = appVersion + doc := types.GenesisDoc{ + ChainID: "dummy", + ConsensusParams: cp, + } + require.Nil(t, doc.ValidateAndComplete()) + state, err := sm.MakeGenesisState(&doc) + require.Nil(t, err) + require.Equal(t, appVersion, state.Version.Consensus.App) + require.Equal(t, version.BlockProtocol, state.Version.Consensus.Block) + t.Run("MakeGenesisState defaults to 1 if app version is not set", func(t *testing.T) { + cp := types.DefaultConsensusParams() + cp.Version = types.VersionParams{} // zero value + doc := types.GenesisDoc{ + ChainID: "chain-id", + ConsensusParams: cp, + } + require.NoError(t, doc.ValidateAndComplete()) + state, err := sm.MakeGenesisState(&doc) + require.NoError(t, err) + require.Equal(t, uint64(1), state.Version.Consensus.App) + }) +} diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go index f807735feff..4918a53d97a 100644 --- a/state/txindex/kv/kv.go +++ b/state/txindex/kv/kv.go @@ -65,6 +65,7 @@ func (txi *TxIndex) Get(hash []byte) (*abci.TxResult, error) { panic(err) } if rawBytes == nil { + fmt.Println("rawBytes is nil") return nil, nil } @@ -86,26 +87,7 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { defer storeBatch.Close() for _, result := range b.Ops { - hash := types.Tx(result.Tx).Hash() - - // index tx by events - err := txi.indexEvents(result, hash, storeBatch) - if err != nil { - return err - } - - // index by height (always) - err = storeBatch.Set(keyForHeight(result), hash) - if err != nil { - return err - } - - rawBytes, err := proto.Marshal(result) - if err != nil { - return err - } - // index by hash (always) - err = storeBatch.Set(hash, rawBytes) + err := txi.indexResult(storeBatch, result) if err != nil { return err } @@ -766,3 +748,44 @@ func startKey(fields ...interface{}) []byte { } return b.Bytes() } 
+ +func (txi *TxIndex) indexResult(batch dbm.Batch, result *abci.TxResult) error { + hash := types.Tx(result.Tx).Hash() + + rawBytes, err := proto.Marshal(result) + if err != nil { + return err + } + + if !result.Result.IsOK() { + oldResult, err := txi.Get(hash) + if err != nil { + return err + } + + // if the new transaction failed and it's already indexed in an older block and was successful + // we skip it as we want users to get the older successful transaction when they query. + if oldResult != nil && oldResult.Result.Code == abci.CodeTypeOK { + return nil + } + } + + // index tx by events + err = txi.indexEvents(result, hash, batch) + if err != nil { + return err + } + + // index by height (always) + err = batch.Set(keyForHeight(result), hash) + if err != nil { + return err + } + + // index by hash (always) + err = batch.Set(hash, rawBytes) + if err != nil { + return err + } + return nil +} diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go index ea8d4ea308b..2b4f4b3ee80 100644 --- a/state/txindex/kv/kv_test.go +++ b/state/txindex/kv/kv_test.go @@ -659,6 +659,21 @@ func TestTxSearchMultipleTxs(t *testing.T) { assert.NoError(t, err) require.Len(t, results, 3) + + // since two txs were added at height 1 and 2, we should have two unique transactions + // for both heights + + q, err := query.New("tx.height=1") + assert.NoError(t, err) + results, err = indexer.Search(ctx, q) + assert.NoError(t, err) + require.Len(t, results, 2) + + q, err = query.New("tx.height=2") + assert.NoError(t, err) + results, err = indexer.Search(ctx, q) + assert.NoError(t, err) + require.Len(t, results, 2) } func txResultWithEvents(events []abci.Event) *abci.TxResult { @@ -799,3 +814,31 @@ func BenchmarkTxIndex500(b *testing.B) { benchmarkTxIndex(500, b) } func BenchmarkTxIndex1000(b *testing.B) { benchmarkTxIndex(1000, b) } func BenchmarkTxIndex2000(b *testing.B) { benchmarkTxIndex(2000, b) } func BenchmarkTxIndex10000(b *testing.B) { benchmarkTxIndex(10000, b) } + 
+func TestWrappedTxIndex(t *testing.T) { + indexer := NewTxIndex(db.NewMemDB()) + + tx := types.Tx("HELLO WORLD") + wrappedTx, err := types.MarshalIndexWrapper(tx, 11) + require.NoError(t, err) + txResult := &abci.TxResult{ + Height: 1, + Index: 0, + Tx: wrappedTx, + Result: abci.ExecTxResult{ + Data: []byte{0}, + Code: abci.CodeTypeOK, Log: "", Events: nil, + }, + } + hash := tx.Hash() + + batch := txindex.NewBatch(1) + err = batch.Add(txResult) + require.NoError(t, err) + + err = indexer.AddBatch(batch) + require.NoError(t, err) + loadedTxResult, err := indexer.Get(hash) + require.NoError(t, err) + assert.True(t, proto.Equal(txResult, loadedTxResult)) +} diff --git a/state/validation_test.go b/state/validation_test.go index b4efcd75989..6f7e9c18265 100644 --- a/state/validation_test.go +++ b/state/validation_test.go @@ -326,7 +326,7 @@ func TestValidateBlockEvidence(t *testing.T) { evidence = append(evidence, newEv) currentBytes += int64(len(newEv.Bytes())) } - block := state.MakeBlock(height, test.MakeNTxs(height, 10), lastCommit, evidence, proposerAddr) + block := state.MakeBlock(height, types.MakeData(test.MakeNTxs(height, 10)), lastCommit, evidence, proposerAddr) err := blockExec.ValidateBlock(state, block) if assert.Error(t, err) { diff --git a/statesync/reactor.go b/statesync/reactor.go index a7374a29182..33ba56434ad 100644 --- a/statesync/reactor.go +++ b/statesync/reactor.go @@ -23,6 +23,8 @@ const ( ChunkChannel = byte(0x61) // recentSnapshots is the number of recent snapshots to send and receive per peer. recentSnapshots = 10 + // ReactorIncomingMessageQueueSize the size of the reactor's message queue. 
+ ReactorIncomingMessageQueueSize = 100 ) // Reactor handles state sync, both restoring snapshots for the local node and serving snapshots @@ -55,7 +57,7 @@ func NewReactor( connQuery: connQuery, metrics: metrics, } - r.BaseReactor = *p2p.NewBaseReactor("StateSync", r) + r.BaseReactor = *p2p.NewBaseReactor("StateSync", r, p2p.WithIncomingQueueSize(ReactorIncomingMessageQueueSize)) return r } diff --git a/statesync/snapshots.go b/statesync/snapshots.go index 5d4f9fe4d34..48457cd9058 100644 --- a/statesync/snapshots.go +++ b/statesync/snapshots.go @@ -21,7 +21,8 @@ type snapshot struct { Hash []byte Metadata []byte - trustedAppHash []byte // populated by light client + trustedAppHash []byte // populated by light client + trustedAppVersion uint64 // populated by light client } // Key generates a snapshot key, used for lookups. It takes into account not only the height and diff --git a/statesync/syncer.go b/statesync/syncer.go index 38bdedfd1b6..26c3f561b20 100644 --- a/statesync/syncer.go +++ b/statesync/syncer.go @@ -265,6 +265,19 @@ func (s *syncer) Sync(snapshot *snapshot, chunks *chunkQueue) (sm.State, *types. } snapshot.trustedAppHash = appHash + pctx, pcancel := context.WithTimeout(context.TODO(), 30*time.Second) + defer pcancel() + // Optimistically build new state, so we don't discover any light client failures at the end. + state, err := s.stateProvider.State(pctx, snapshot.Height) + if err != nil { + s.logger.Info("failed to fetch and verify CometBFT state", "err", err) + if err == light.ErrNoWitnesses { + return sm.State{}, nil, err + } + return sm.State{}, nil, errRejectSnapshot + } + snapshot.trustedAppVersion = state.ConsensusParams.Version.App + // Offer snapshot to ABCI app. err = s.offerSnapshot(snapshot) if err != nil { @@ -278,18 +291,6 @@ func (s *syncer) Sync(snapshot *snapshot, chunks *chunkQueue) (sm.State, *types. 
go s.fetchChunks(fetchCtx, snapshot, chunks) } - pctx, pcancel := context.WithTimeout(context.TODO(), 30*time.Second) - defer pcancel() - - // Optimistically build new state, so we don't discover any light client failures at the end. - state, err := s.stateProvider.State(pctx, snapshot.Height) - if err != nil { - s.logger.Info("failed to fetch and verify CometBFT state", "err", err) - if err == light.ErrNoWitnesses { - return sm.State{}, nil, err - } - return sm.State{}, nil, errRejectSnapshot - } commit, err := s.stateProvider.Commit(pctx, snapshot.Height) if err != nil { s.logger.Info("failed to fetch and verify commit", "err", err) @@ -330,7 +331,8 @@ func (s *syncer) offerSnapshot(snapshot *snapshot) error { Hash: snapshot.Hash, Metadata: snapshot.Metadata, }, - AppHash: snapshot.trustedAppHash, + AppHash: snapshot.trustedAppHash, + AppVersion: snapshot.trustedAppVersion, }) if err != nil { return fmt.Errorf("failed to offer snapshot: %w", err) diff --git a/statesync/syncer_test.go b/statesync/syncer_test.go index 4fbb47a2e13..3d5860f1036 100644 --- a/statesync/syncer_test.go +++ b/statesync/syncer_test.go @@ -34,6 +34,9 @@ func setupOfferSyncer() (*syncer, *proxymocks.AppConnSnapshot) { connSnapshot := &proxymocks.AppConnSnapshot{} stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) + stateProvider.On("State", mock.AnythingOfType("*context.timerCtx"), uint64(1)).Return(sm.State{}, nil) + stateProvider.On("State", mock.AnythingOfType("*context.timerCtx"), uint64(2)).Return(sm.State{}, nil) + stateProvider.On("State", mock.AnythingOfType("*context.timerCtx"), uint64(4)).Return(sm.State{}, nil) cfg := config.DefaultStateSyncConfig() syncer := newSyncer(*cfg, log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "") @@ -84,7 +87,8 @@ func TestSyncer_SyncAny(t *testing.T) { stateProvider.On("AppHash", mock.Anything, uint64(1)).Return(state.AppHash, nil) stateProvider.On("AppHash", 
mock.Anything, uint64(2)).Return([]byte("app_hash_2"), nil) stateProvider.On("Commit", mock.Anything, uint64(1)).Return(commit, nil) - stateProvider.On("State", mock.Anything, uint64(1)).Return(state, nil) + stateProvider.On("State", mock.AnythingOfType("*context.timerCtx"), uint64(1)).Return(state, nil) + stateProvider.On("State", mock.AnythingOfType("*context.timerCtx"), uint64(2)).Return(state, nil) connSnapshot := &proxymocks.AppConnSnapshot{} connQuery := &proxymocks.AppConnQuery{} diff --git a/store/bench_test.go b/store/bench_test.go index ceb7a94e50e..953dd5243b3 100644 --- a/store/bench_test.go +++ b/store/bench_test.go @@ -17,7 +17,7 @@ func BenchmarkRepeatedLoadSeenCommitSameBlock(b *testing.B) { state, bs, cleanup := makeStateAndBlockStore() defer cleanup() h := bs.Height() + 1 - block := state.MakeBlock(h, test.MakeNTxs(h, 10), new(types.Commit), nil, state.Validators.GetProposer().Address) + block := state.MakeBlock(h, types.MakeData(test.MakeNTxs(h, 10)), new(types.Commit), nil, state.Validators.GetProposer().Address) seenCommit := makeTestExtCommitWithNumSigs(block.Header.Height, cmttime.Now(), 100).ToCommit() ps, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(b, err) diff --git a/store/store.go b/store/store.go index baffc704d10..3935d1bed51 100644 --- a/store/store.go +++ b/store/store.go @@ -10,6 +10,7 @@ import ( dbm "github.com/cometbft/cometbft-db" + abci "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/evidence" cmtsync "github.com/cometbft/cometbft/libs/sync" cmtstore "github.com/cometbft/cometbft/proto/tendermint/store" @@ -377,10 +378,17 @@ func (bs *BlockStore) PruneBlocks(height int64, state sm.State) (uint64, int64, for h := base; h < height; h++ { meta := bs.LoadBlockMeta(h) + block := bs.LoadBlock(h) if meta == nil { // assume already deleted continue } + for _, tx := range block.Txs { + if err := batch.Delete(calcTxHashKey(tx.Hash())); err != nil { + return 0, -1, err + } + } + // This logic 
is in place to protect data that proves malicious behavior. // If the height is within the evidence age, we continue to persist the header and commit data. @@ -645,6 +653,10 @@ func calcBlockHashKey(hash []byte) []byte { return []byte(fmt.Sprintf("BH:%x", hash)) } +func calcTxHashKey(hash []byte) []byte { + return []byte(fmt.Sprintf("TH:%x", hash)) +} + //----------------------------------------------------------------------------- var blockStoreKey = []byte("blockStore") @@ -755,3 +767,58 @@ func (bs *BlockStore) DeleteLatestBlock() error { bs.height = targetHeight - 1 return bs.saveStateAndWriteDB(batch, "failed to delete the latest block") } + +// SaveTxInfo indexes the txs from the block with the given response codes and logs from execution. +// Only the error logs are saved for failed transactions. +func (bs *BlockStore) SaveTxInfo(block *types.Block, txResponseCodes []uint32, logs []string) error { + if len(txResponseCodes) != len(block.Txs) { + return errors.New("txResponseCodes length mismatch with block txs length") + } + if len(logs) != len(block.Txs) { + return errors.New("logs length mismatch with block txs length") + } + + // Create a new batch + batch := bs.db.NewBatch() + + // Batch and save txs from the block + for i, tx := range block.Txs { + txInfo := cmtstore.TxInfo{ + Height: block.Height, + //nolint:gosec + Index: uint32(i), + Code: txResponseCodes[i], + } + // Set error log for failed txs + if txResponseCodes[i] != abci.CodeTypeOK { + txInfo.Error = logs[i] + } + txInfoBytes, err := proto.Marshal(&txInfo) + if err != nil { + return fmt.Errorf("unable to marshal tx: %w", err) + } + if err := batch.Set(calcTxHashKey(tx.Hash()), txInfoBytes); err != nil { + return err + } + } + + // Write the batch to the db + return batch.WriteSync() +} + +// LoadTxInfo loads the TxInfo from disk given its hash. 
+func (bs *BlockStore) LoadTxInfo(txHash []byte) *cmtstore.TxInfo { + bz, err := bs.db.Get(calcTxHashKey(txHash)) + if err != nil { + panic(err) + } + if len(bz) == 0 { + return nil + } + + var txi cmtstore.TxInfo + if err = proto.Unmarshal(bz, &txi); err != nil { + panic(fmt.Errorf("unmarshal to TxInfo failed: %w", err)) + } + return &txi +} diff --git a/store/store_test.go b/store/store_test.go index cafe7d34ffa..7ece3f074e2 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -15,6 +15,7 @@ import ( dbm "github.com/cometbft/cometbft-db" + abci "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/internal/test" cmtrand "github.com/cometbft/cometbft/libs/rand" @@ -164,7 +165,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { // save a block big enough to have two block parts txs := []types.Tx{make([]byte, types.BlockPartSizeBytes)} // TX taking one block part alone - block := state.MakeBlock(bs.Height()+1, txs, new(types.Commit), nil, state.Validators.GetProposer().Address) + block := state.MakeBlock(bs.Height()+1, types.MakeData(txs), new(types.Commit), nil, state.Validators.GetProposer().Address) validPartSet, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) require.GreaterOrEqual(t, validPartSet.Total(), uint32(2)) @@ -406,7 +407,7 @@ func TestSaveBlockWithExtendedCommitPanicOnAbsentExtension(t *testing.T) { state, bs, cleanup := makeStateAndBlockStore() defer cleanup() h := bs.Height() + 1 - block := state.MakeBlock(h, test.MakeNTxs(h, 10), new(types.Commit), nil, state.Validators.GetProposer().Address) + block := state.MakeBlock(h, types.MakeData(test.MakeNTxs(h, 10)), new(types.Commit), nil, state.Validators.GetProposer().Address) seenCommit := makeTestExtCommit(block.Header.Height, cmttime.Now()) ps, err := block.MakePartSet(types.BlockPartSizeBytes) @@ -447,7 +448,7 @@ func TestLoadBlockExtendedCommit(t *testing.T) { state, bs, cleanup := makeStateAndBlockStore() 
defer cleanup() h := bs.Height() + 1 - block := state.MakeBlock(h, test.MakeNTxs(h, 10), new(types.Commit), nil, state.Validators.GetProposer().Address) + block := state.MakeBlock(h, types.MakeData(test.MakeNTxs(h, 10)), new(types.Commit), nil, state.Validators.GetProposer().Address) seenCommit := makeTestExtCommit(block.Header.Height, cmttime.Now()) ps, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) @@ -477,7 +478,7 @@ func TestLoadBaseMeta(t *testing.T) { bs := NewBlockStore(dbm.NewMemDB()) for h := int64(1); h <= 10; h++ { - block := state.MakeBlock(h, test.MakeNTxs(h, 10), new(types.Commit), nil, state.Validators.GetProposer().Address) + block := state.MakeBlock(h, types.MakeData(test.MakeNTxs(h, 10)), new(types.Commit), nil, state.Validators.GetProposer().Address) partSet, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) seenCommit := makeTestExtCommit(h, cmttime.Now()) @@ -522,7 +523,7 @@ func TestLoadBlockPart(t *testing.T) { require.Contains(t, panicErr.Error(), "unmarshal to cmtproto.Part failed") // 3. 
A good block serialized and saved to the DB should be retrievable - block := state.MakeBlock(height, nil, new(types.Commit), nil, state.Validators.GetProposer().Address) + block := state.MakeBlock(height, types.MakeData(nil), new(types.Commit), nil, state.Validators.GetProposer().Address) partSet, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) part1 := partSet.GetPart(0) @@ -567,7 +568,7 @@ func TestPruneBlocks(t *testing.T) { // make more than 1000 blocks, to test batch deletions for h := int64(1); h <= 1500; h++ { - block := state.MakeBlock(h, test.MakeNTxs(h, 10), new(types.Commit), nil, state.Validators.GetProposer().Address) + block := state.MakeBlock(h, types.MakeData(test.MakeNTxs(h, 10)), new(types.Commit), nil, state.Validators.GetProposer().Address) partSet, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) seenCommit := makeTestExtCommit(h, cmttime.Now()) @@ -696,7 +697,7 @@ func TestLoadBlockMetaByHash(t *testing.T) { require.NoError(t, err) bs := NewBlockStore(dbm.NewMemDB()) - b1 := state.MakeBlock(state.LastBlockHeight+1, test.MakeNTxs(state.LastBlockHeight+1, 10), new(types.Commit), nil, state.Validators.GetProposer().Address) + b1 := state.MakeBlock(state.LastBlockHeight+1, types.MakeData(test.MakeNTxs(state.LastBlockHeight+1, 10)), new(types.Commit), nil, state.Validators.GetProposer().Address) partSet, err := b1.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) seenCommit := makeTestExtCommit(1, cmttime.Now()) @@ -712,7 +713,7 @@ func TestBlockFetchAtHeight(t *testing.T) { state, bs, cleanup := makeStateAndBlockStore() defer cleanup() require.Equal(t, bs.Height(), int64(0), "initially the height should be zero") - block := state.MakeBlock(bs.Height()+1, nil, new(types.Commit), nil, state.Validators.GetProposer().Address) + block := state.MakeBlock(bs.Height()+1, types.MakeData(nil), new(types.Commit), nil, state.Validators.GetProposer().Address) partSet, err := 
block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) @@ -765,3 +766,76 @@ func newBlock(hdr types.Header, lastCommit *types.Commit) *types.Block { LastCommit: lastCommit, } } + +func makeUniqueBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block { + data := types.Data{ + Txs: []types.Tx{types.Tx([]byte{byte(height)})}, + } + block := state.MakeBlock(height, data, lastCommit, nil, state.Validators.GetProposer().Address) + return block +} + +func TestSaveTxInfo(t *testing.T) { + // Create a state and a block store + state, blockStore, cleanup := makeStateAndBlockStore() + defer cleanup() + + var allTxResponseCodes []uint32 + var allTxLogs []string + + // Create 10 blocks each with 1 tx + for h := int64(1); h <= 10; h++ { + block := makeUniqueBlock(h, state, new(types.Commit)) + partSet, err := block.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) + seenCommit := makeTestExtCommit(h, cmttime.Now()) + blockStore.SaveBlockWithExtendedCommit(block, partSet, seenCommit) + + var txResponseCode uint32 + var txLog string + + if h%2 == 0 { + txResponseCode = 0 + txLog = "success" + } else { + txResponseCode = 1 + txLog = "failure" + } + + // Save the tx info + err = blockStore.SaveTxInfo(block, []uint32{txResponseCode}, []string{txLog}) + require.NoError(t, err) + allTxResponseCodes = append(allTxResponseCodes, txResponseCode) + allTxLogs = append(allTxLogs, txLog) + } + + txIndex := 0 + // Get the blocks from blockstore up to the height + for h := int64(1); h <= 10; h++ { + block := blockStore.LoadBlock(h) + // Check that transactions exist in the block + for i, tx := range block.Txs { + txInfo := blockStore.LoadTxInfo(tx.Hash()) + require.Equal(t, block.Height, txInfo.Height) + require.Equal(t, uint32(i), txInfo.Index) + require.Equal(t, allTxResponseCodes[txIndex], txInfo.Code) + // We don't save the logs for successful transactions + if allTxResponseCodes[txIndex] == abci.CodeTypeOK { + require.Equal(t, "", 
txInfo.Error) + } else { + require.Equal(t, allTxLogs[txIndex], txInfo.Error) + } + txIndex++ + } + } + + // Get a random transaction and make sure it's indexed properly + block := blockStore.LoadBlock(7) + tx := block.Txs[0] + txInfo := blockStore.LoadTxInfo(tx.Hash()) + require.Equal(t, block.Height, txInfo.Height) + require.Equal(t, block.Height, int64(7)) + require.Equal(t, txInfo.Height, int64(7)) + require.Equal(t, uint32(1), txInfo.Code) + require.Equal(t, "failure", txInfo.Error) +} diff --git a/test/e2e/docker/Dockerfile b/test/e2e/docker/Dockerfile index eb1125634d2..78b7ca55c0c 100644 --- a/test/e2e/docker/Dockerfile +++ b/test/e2e/docker/Dockerfile @@ -1,7 +1,7 @@ # We need to build in a Linux environment to support C libraries, e.g. RocksDB. # We use Debian instead of Alpine, so that we can use binary database packages # instead of spending time compiling them. -FROM cometbft/cometbft-db-testing:v0.14.2 +FROM cometbft/cometbft-db-testing:latest RUN apt-get -qq update -y && apt-get -qq upgrade -y >/dev/null diff --git a/test/e2e/node/main.go b/test/e2e/node/main.go index b6451e493be..9d5f68f5f4b 100644 --- a/test/e2e/node/main.go +++ b/test/e2e/node/main.go @@ -123,6 +123,8 @@ func startNode(cfg *Config) error { return fmt.Errorf("failed to setup config: %w", err) } + cmtcfg.Instrumentation.TraceType = "local" + var clientCreator proxy.ClientCreator if cfg.Protocol == string(e2e.ProtocolBuiltinConnSync) { clientCreator = proxy.NewConnSyncLocalClientCreator(app) diff --git a/test/e2e/pkg/infrastructure.go b/test/e2e/pkg/infrastructure.go index 22f8e9ae0cf..89066bc1c15 100644 --- a/test/e2e/pkg/infrastructure.go +++ b/test/e2e/pkg/infrastructure.go @@ -34,6 +34,22 @@ type InfrastructureData struct { // Network is the CIDR notation range of IP addresses that all of the instances' // IP addresses are expected to be within. Network string `json:"network"` + + // TracePushConfig is the URL of the server to push trace data to. 
+ TracePushConfig string `json:"trace_push_config,omitempty"` + + // TracePullAddress is the address to listen on for pulling trace data. + TracePullAddress string `json:"trace_pull_address,omitempty"` + + // PyroscopeURL is the URL of the pyroscope instance to use for continuous + // profiling. If not specified, data will not be collected. + PyroscopeURL string `json:"pyroscope_url,omitempty"` + + // PyroscopeTrace enables adding trace data to pyroscope profiling. + PyroscopeTrace bool `json:"pyroscope_trace,omitempty"` + + // PyroscopeProfileTypes is the list of profile types to collect. + PyroscopeProfileTypes []string `json:"pyroscope_profile_types,omitempty"` } // InstanceData contains the relevant information for a machine instance backing diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index 7fd7afaf9d9..03ed9f66eef 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -112,6 +112,13 @@ type Manifest struct { // Maximum number of peers to which the node gossips transactions ExperimentalMaxGossipConnectionsToPersistentPeers uint `toml:"experimental_max_gossip_connections_to_persistent_peers"` ExperimentalMaxGossipConnectionsToNonPersistentPeers uint `toml:"experimental_max_gossip_connections_to_non_persistent_peers"` + + // MaxInboundConnections and MaxOutboundConnection are the maximum number + // of connections a node has. This can be used to throttle the degree of + // connectivity of the network. If not specified, the default is taken + // from config/config.go + MaxInboundConnections int `toml:"max_inbound_connections"` + MaxOutboundConnections int `toml:"max_outbound_connections"` } // ManifestNode represents a node in a testnet manifest. 
diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index 41c5e4ae814..e49771b2814 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -97,6 +97,9 @@ type Testnet struct { VoteExtensionsUpdateHeight int64 ExperimentalMaxGossipConnectionsToPersistentPeers uint ExperimentalMaxGossipConnectionsToNonPersistentPeers uint + + MaxInboundConnections int + MaxOutboundConnections int } // Node represents a CometBFT node in a testnet. @@ -125,6 +128,15 @@ type Node struct { SendNoLoad bool Prometheus bool PrometheusProxyPort uint32 + + MaxInboundConnections int + MaxOutboundConnections int + + TracePushConfig string + TracePullAddress string + PyroscopeURL string + PyroscopeTrace bool + PyroscopeProfileTypes []string } // LoadTestnet loads a testnet from a manifest file, using the filename to @@ -181,6 +193,9 @@ func NewTestnetFromManifest(manifest Manifest, file string, ifd InfrastructureDa VoteExtensionsUpdateHeight: manifest.VoteExtensionsUpdateHeight, ExperimentalMaxGossipConnectionsToPersistentPeers: manifest.ExperimentalMaxGossipConnectionsToPersistentPeers, ExperimentalMaxGossipConnectionsToNonPersistentPeers: manifest.ExperimentalMaxGossipConnectionsToNonPersistentPeers, + + MaxInboundConnections: manifest.MaxInboundConnections, + MaxOutboundConnections: manifest.MaxOutboundConnections, } if len(manifest.KeyType) != 0 { testnet.KeyType = manifest.KeyType @@ -241,6 +256,12 @@ func NewTestnetFromManifest(manifest Manifest, file string, ifd InfrastructureDa Perturbations: []Perturbation{}, SendNoLoad: nodeManifest.SendNoLoad, Prometheus: testnet.Prometheus, + + TracePushConfig: ifd.TracePushConfig, + TracePullAddress: ifd.TracePullAddress, + PyroscopeURL: ifd.PyroscopeURL, + PyroscopeTrace: ifd.PyroscopeTrace, + PyroscopeProfileTypes: ifd.PyroscopeProfileTypes, } if node.StartAt == testnet.InitialHeight { node.StartAt = 0 // normalize to 0 for initial nodes, since code expects this @@ -269,6 +290,12 @@ func NewTestnetFromManifest(manifest 
Manifest, file string, ifd InfrastructureDa for _, p := range nodeManifest.Perturb { node.Perturbations = append(node.Perturbations, Perturbation(p)) } + if node.MaxInboundConnections < 0 { + return nil, errors.New("MaxInboundConnections must not be negative") + } + if node.MaxOutboundConnections < 0 { + return nil, errors.New("MaxOutboundConnections must not be negative") + } testnet.Nodes = append(testnet.Nodes, node) } diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go index 23c14a1833d..efb616ddabf 100644 --- a/test/e2e/runner/main.go +++ b/test/e2e/runner/main.go @@ -11,6 +11,7 @@ import ( "github.com/spf13/cobra" "github.com/cometbft/cometbft/libs/log" + "github.com/cometbft/cometbft/libs/trace" e2e "github.com/cometbft/cometbft/test/e2e/pkg" "github.com/cometbft/cometbft/test/e2e/pkg/infra" "github.com/cometbft/cometbft/test/e2e/pkg/infra/digitalocean" @@ -80,6 +81,32 @@ func NewCLI() *CLI { return fmt.Errorf("unknown infrastructure type '%s'", inft) } + iurl, err := cmd.Flags().GetString(trace.FlagTracePushConfig) + if err != nil { + return err + } + itoken, err := cmd.Flags().GetString(trace.FlagTracePullAddress) + if err != nil { + return err + } + if ifd.TracePushConfig == "" { + ifd.TracePushConfig = iurl + ifd.TracePullAddress = itoken + } + + purl, err := cmd.Flags().GetString(trace.FlagPyroscopeURL) + if err != nil { + return err + } + pTrace, err := cmd.Flags().GetBool(trace.FlagPyroscopeTrace) + if err != nil { + return err + } + if ifd.PyroscopeURL == "" { + ifd.PyroscopeURL = purl + ifd.PyroscopeTrace = pTrace + } + testnet, err := e2e.LoadTestnet(file, ifd) if err != nil { return fmt.Errorf("loading testnet: %s", err) diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 25d9a70b6a3..7735f6ec130 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -181,6 +181,13 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.Mempool.ExperimentalMaxGossipConnectionsToNonPersistentPeers = 
int(node.Testnet.ExperimentalMaxGossipConnectionsToNonPersistentPeers) cfg.Mempool.ExperimentalMaxGossipConnectionsToPersistentPeers = int(node.Testnet.ExperimentalMaxGossipConnectionsToPersistentPeers) + cfg.Instrumentation.TraceType = "celestia" + cfg.Instrumentation.TracePushConfig = node.TracePushConfig + cfg.Instrumentation.TracePullAddress = node.TracePullAddress + cfg.Instrumentation.PyroscopeTrace = node.PyroscopeTrace + cfg.Instrumentation.PyroscopeURL = node.PyroscopeURL + cfg.Instrumentation.PyroscopeProfileTypes = node.PyroscopeProfileTypes + switch node.ABCIProtocol { case e2e.ProtocolUNIX: cfg.ProxyApp = AppAddressUNIX @@ -267,6 +274,13 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.Instrumentation.Prometheus = true } + if node.Testnet.MaxInboundConnections != 0 { + cfg.P2P.MaxNumInboundPeers = node.Testnet.MaxInboundConnections + } + if node.Testnet.MaxOutboundConnections != 0 { + cfg.P2P.MaxNumOutboundPeers = node.Testnet.MaxOutboundConnections + } + return cfg, nil } diff --git a/test/e2e/tests/block_test.go b/test/e2e/tests/block_test.go index e76e622eb11..7f1120c79be 100644 --- a/test/e2e/tests/block_test.go +++ b/test/e2e/tests/block_test.go @@ -1,6 +1,8 @@ package e2e_test import ( + "bytes" + "context" "testing" "github.com/stretchr/testify/assert" @@ -93,3 +95,26 @@ func TestBlock_Range(t *testing.T) { } }) } + +func TestBlock_SignedData(t *testing.T) { + t.Helper() + testNode(t, func(t *testing.T, node e2e.Node) { + client, err := node.Client() + require.NoError(t, err) + + resp, err := client.SignedBlock(context.Background(), nil) + require.NoError(t, err) + require.Equal(t, resp.Header.Height, resp.Commit.Height) + + err = resp.ValidatorSet.VerifyCommit(resp.Header.ChainID, resp.Commit.BlockID, resp.Header.Height, &resp.Commit) + require.NoError(t, err) + + if !bytes.Equal(resp.Commit.BlockID.Hash, resp.Header.Hash()) { + t.Fatal("commit is for a different block") + } + + if !bytes.Equal(resp.Header.DataHash, 
resp.Data.Hash()) { + t.Fatal("data does not match header data hash") + } + }) +} diff --git a/types/block.go b/types/block.go index 643038becc4..fb4072cd46e 100644 --- a/types/block.go +++ b/types/block.go @@ -1295,10 +1295,24 @@ type Data struct { // This means that block.AppHash does not include these txs. Txs Txs `json:"txs"` + // SquareSize is the size of the square after splitting all the block data + // into shares. The erasure data is discarded after generation, and keeping this + // value avoids unnecessarily regenerating all of the shares when returning + // proofs that some element was included in the block + SquareSize uint64 `json:"square_size"` + // Volatile hash cmtbytes.HexBytes } +func NewData(txs Txs, squareSize uint64, hash cmtbytes.HexBytes) Data { + return Data{ + Txs: txs, + SquareSize: squareSize, + hash: hash, + } +} + // Hash returns the hash of the data func (data *Data) Hash() cmtbytes.HexBytes { if data == nil { @@ -1310,6 +1324,11 @@ func (data *Data) Hash() cmtbytes.HexBytes { return data.hash } +// GetDataRootHash returns the hash data which is not equal to calling data.Hash() +func (data *Data) GetDataRootHash() cmtbytes.HexBytes { + return data.hash +} + // StringIndented returns an indented string representation of the transactions. func (data *Data) StringIndented(indent string) string { if data == nil { @@ -1342,6 +1361,9 @@ func (data *Data) ToProto() cmtproto.Data { tp.Txs = txBzs } + tp.SquareSize = data.SquareSize + tp.Hash = data.hash + return *tp } @@ -1363,11 +1385,39 @@ func DataFromProto(dp *cmtproto.Data) (Data, error) { data.Txs = Txs{} } + data.hash = dp.Hash + data.SquareSize = dp.SquareSize + return *data, nil } //----------------------------------------------------------------------------- +type Blob struct { + // NamespaceVersion is the version of the namespace. Used in conjunction + // with NamespaceID to determine the namespace of this blob. 
+ NamespaceVersion uint8 + + // NamespaceID defines the namespace ID of this blob. Used in conjunction + // with NamespaceVersion to determine the namespace of this blob. + NamespaceID []byte + + // Data is the actual data of the blob. + // (e.g. a block of a virtual sidechain). + Data []byte + + // ShareVersion is the version of the share format that this blob should use + // when encoded into shares. + ShareVersion uint8 +} + +// Namespace returns the namespace of this blob encoded as a byte slice. +func (b Blob) Namespace() []byte { + return append([]byte{b.NamespaceVersion}, b.NamespaceID...) +} + +// ----------------------------------------------------------------------------- + // EvidenceData contains any evidence of malicious wrong-doing by validators type EvidenceData struct { Evidence EvidenceList `json:"evidence"` diff --git a/types/block_test.go b/types/block_test.go index f9c97a7e840..3534d668d67 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -4,6 +4,7 @@ import ( // it is ok to use math/rand here: we do not need a cryptographically secure random // number generator here and we can run the tests a bit faster + stdbytes "bytes" "crypto/rand" "encoding/hex" "math" @@ -46,7 +47,7 @@ func TestBlockAddEvidence(t *testing.T) { require.NoError(t, err) evList := []Evidence{ev} - block := MakeBlock(h, txs, extCommit.ToCommit(), evList) + block := MakeBlock(h, Data{Txs: txs}, extCommit.ToCommit(), evList) require.NotNil(t, block) require.Equal(t, 1, len(block.Evidence.Evidence)) require.NotNil(t, block.EvidenceHash) @@ -81,13 +82,6 @@ func TestBlockValidateBasic(t *testing.T) { blk.LastCommit.hash = nil // clear hash or change wont be noticed }, true}, {"Remove LastCommitHash", func(blk *Block) { blk.LastCommitHash = []byte("something else") }, true}, - {"Tampered Data", func(blk *Block) { - blk.Data.Txs[0] = Tx("something else") - blk.Data.hash = nil // clear hash or change wont be noticed - }, true}, - {"Tampered DataHash", func(blk *Block) { - 
blk.DataHash = cmtrand.Bytes(len(blk.DataHash)) - }, true}, {"Tampered EvidenceHash", func(blk *Block) { blk.EvidenceHash = []byte("something else") }, true}, @@ -99,7 +93,7 @@ func TestBlockValidateBasic(t *testing.T) { tc := tc i := i t.Run(tc.testName, func(t *testing.T) { - block := MakeBlock(h, txs, commit, evList) + block := MakeBlock(h, Data{Txs: txs}, commit, evList) block.ProposerAddress = valSet.GetProposer().Address tc.malleateBlock(block) err = block.ValidateBasic() @@ -110,7 +104,7 @@ func TestBlockValidateBasic(t *testing.T) { func TestBlockHash(t *testing.T) { assert.Nil(t, (*Block)(nil).Hash()) - assert.Nil(t, MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil, nil).Hash()) + assert.Nil(t, MakeBlock(int64(3), Data{Txs: []Tx{Tx("Hello World")}}, nil, nil).Hash()) } func TestBlockMakePartSet(t *testing.T) { @@ -118,7 +112,7 @@ func TestBlockMakePartSet(t *testing.T) { assert.Error(t, err) assert.Nil(t, bps) - partSet, err := MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil, nil).MakePartSet(1024) + partSet, err := MakeBlock(int64(3), Data{Txs: []Tx{Tx("Hello World")}}, nil, nil).MakePartSet(1024) require.NoError(t, err) assert.NotNil(t, partSet) assert.EqualValues(t, 1, partSet.Total()) @@ -140,7 +134,7 @@ func TestBlockMakePartSetWithEvidence(t *testing.T) { require.NoError(t, err) evList := []Evidence{ev} - partSet, err := MakeBlock(h, []Tx{Tx("Hello World")}, extCommit.ToCommit(), evList).MakePartSet(512) + partSet, err := MakeBlock(h, Data{Txs: []Tx{Tx("Hello World")}}, extCommit.ToCommit(), evList).MakePartSet(512) require.NoError(t, err) assert.NotNil(t, partSet) @@ -160,7 +154,7 @@ func TestBlockHashesTo(t *testing.T) { require.NoError(t, err) evList := []Evidence{ev} - block := MakeBlock(h, []Tx{Tx("Hello World")}, extCommit.ToCommit(), evList) + block := MakeBlock(h, Data{Txs: []Tx{Tx("Hello World")}}, extCommit.ToCommit(), evList) block.ValidatorsHash = valSet.Hash() assert.False(t, block.HashesTo([]byte{})) assert.False(t, 
block.HashesTo([]byte("something else"))) @@ -168,7 +162,7 @@ func TestBlockHashesTo(t *testing.T) { } func TestBlockSize(t *testing.T) { - size := MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil, nil).Size() + size := MakeBlock(int64(3), Data{Txs: []Tx{Tx("Hello World")}}, nil, nil).Size() if size <= 0 { t.Fatal("Size of the block is zero or negative") } @@ -179,7 +173,7 @@ func TestBlockString(t *testing.T) { assert.Equal(t, "nil-Block", (*Block)(nil).StringIndented("")) assert.Equal(t, "nil-Block", (*Block)(nil).StringShort()) - block := MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil, nil) + block := MakeBlock(int64(3), Data{Txs: []Tx{Tx("Hello World")}}, nil, nil) assert.NotEqual(t, "nil-Block", block.String()) assert.NotEqual(t, "nil-Block", block.StringIndented("")) assert.NotEqual(t, "nil-Block", block.StringShort()) @@ -759,10 +753,10 @@ func TestBlockIDValidateBasic(t *testing.T) { func TestBlockProtoBuf(t *testing.T) { h := cmtrand.Int63() c1 := randCommit(time.Now()) - b1 := MakeBlock(h, []Tx{Tx([]byte{1})}, &Commit{Signatures: []CommitSig{}}, []Evidence{}) + b1 := MakeBlock(h, Data{Txs: []Tx{Tx([]byte{1})}}, &Commit{Signatures: []CommitSig{}}, []Evidence{}) b1.ProposerAddress = cmtrand.Bytes(crypto.AddressSize) - b2 := MakeBlock(h, []Tx{Tx([]byte{1})}, c1, []Evidence{}) + b2 := MakeBlock(h, Data{Txs: []Tx{Tx([]byte{1})}}, c1, []Evidence{}) b2.ProposerAddress = cmtrand.Bytes(crypto.AddressSize) evidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) evi, err := NewMockDuplicateVoteEvidence(h, evidenceTime, "block-test-chain") @@ -770,7 +764,7 @@ func TestBlockProtoBuf(t *testing.T) { b2.Evidence = EvidenceData{Evidence: EvidenceList{evi}} b2.EvidenceHash = b2.Evidence.Hash() - b3 := MakeBlock(h, []Tx{}, c1, []Evidence{}) + b3 := MakeBlock(h, Data{Txs: []Tx{}}, c1, []Evidence{}) b3.ProposerAddress = cmtrand.Bytes(crypto.AddressSize) testCases := []struct { msg string @@ -986,3 +980,27 @@ func TestBlockIDEquals(t *testing.T) { assert.True(t, 
blockIDEmpty.Equals(blockIDEmpty)) assert.False(t, blockIDEmpty.Equals(blockIDDifferent)) } + +func TestBlob(t *testing.T) { + namespaceVersion := uint8(0) + namespaceID := stdbytes.Repeat([]byte{0x01}, 28) + data := []byte("data") + shareVersion := uint8(0) + + blob := Blob{ + NamespaceVersion: namespaceVersion, + NamespaceID: namespaceID, + Data: data, + ShareVersion: shareVersion, + } + + t.Run("blob.Namespace() returns encoded namespace", func(t *testing.T) { + got := blob.Namespace() + want := []byte{ + 0, // namespace version + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // namespace ID + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // namespace ID + } + assert.Equal(t, want, got) + }) +} diff --git a/types/event_bus.go b/types/event_bus.go index be5aee65c66..a942a7da1db 100644 --- a/types/event_bus.go +++ b/types/event_bus.go @@ -140,6 +140,10 @@ func (b *EventBus) PublishEventNewBlock(data EventDataNewBlock) error { return b.pubsub.PublishWithEvents(ctx, data, events) } +func (b *EventBus) PublishEventSignedBlock(data EventDataSignedBlock) error { + return b.Publish(EventSignedBlock, data) +} + func (b *EventBus) PublishEventNewBlockEvents(data EventDataNewBlockEvents) error { // no explicit deadline for publishing events ctx := context.Background() @@ -257,6 +261,10 @@ func (NopEventBus) PublishEventNewBlockEvents(EventDataNewBlockEvents) error { return nil } +func (NopEventBus) PublishEventSignedBlock(EventDataSignedBlock) error { + return nil +} + func (NopEventBus) PublishEventNewEvidence(EventDataNewEvidence) error { return nil } diff --git a/types/event_bus_test.go b/types/event_bus_test.go index de9e61ed285..13d6ab68cc2 100644 --- a/types/event_bus_test.go +++ b/types/event_bus_test.go @@ -74,7 +74,7 @@ func TestEventBusPublishEventNewBlock(t *testing.T) { } }) - block := MakeBlock(0, []Tx{}, nil, []Evidence{}) + block := MakeBlock(0, Data{}, nil, []Evidence{}) resultFinalizeBlock := abci.ResponseFinalizeBlock{ Events: []abci.Event{ {Type: "testType", 
Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}}, @@ -234,7 +234,7 @@ func TestEventBusPublishEventNewBlockHeader(t *testing.T) { } }) - block := MakeBlock(0, []Tx{}, nil, []Evidence{}) + block := MakeBlock(0, Data{}, nil, []Evidence{}) // PublishEventNewBlockHeader adds the tm.event compositeKey, so the query below should work query := "tm.event='NewBlockHeader'" headersSub, err := eventBus.Subscribe(context.Background(), "test", cmtquery.MustCompile(query)) @@ -530,3 +530,56 @@ var queries = []cmtpubsub.Query{ func randQuery(r *rand.Rand) cmtpubsub.Query { return queries[r.Intn(len(queries))] } + +func TestEventBusPublishEventIndexWrapper(t *testing.T) { + eventBus := NewEventBus() + err := eventBus.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := eventBus.Stop(); err != nil { + t.Error(err) + } + }) + + tx := Tx("foo") + require.NoError(t, err) + + result := abci.ExecTxResult{ + Data: []byte("bar"), + Events: []abci.Event{ + {Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}}, + }, + } + + // PublishEventTx adds 3 composite keys, so the query below should work + query := fmt.Sprintf("tm.event='Tx' AND tx.height=1 AND tx.hash='%X' AND testType.baz=1", tx.Hash()) + queryQ, err := cmtquery.New(query) + require.NoError(t, err) + txsSub, err := eventBus.Subscribe(context.Background(), "test", queryQ) + require.NoError(t, err) + + done := make(chan struct{}) + go func() { + msg := <-txsSub.Out() + edt := msg.Data().(EventDataTx) + assert.Equal(t, int64(1), edt.Height) + assert.Equal(t, uint32(0), edt.Index) + assert.EqualValues(t, tx, edt.Tx) + assert.Equal(t, result, edt.Result) + close(done) + }() + + err = eventBus.PublishEventTx(EventDataTx{abci.TxResult{ + Height: 1, + Index: 0, + Tx: tx, + Result: result, + }}) + assert.NoError(t, err) + + select { + case <-done: + case <-time.After(1 * time.Second): + t.Fatal("did not receive a transaction after 1 sec.") + } +} diff --git a/types/events.go 
b/types/events.go index 3dbf9a78db6..bcaed011923 100644 --- a/types/events.go +++ b/types/events.go @@ -17,6 +17,7 @@ const ( // These are also used by the tx indexer for async indexing. // All of this data can be fetched through the rpc. EventNewBlock = "NewBlock" + EventSignedBlock = "NewSignedBlock" EventNewBlockHeader = "NewBlockHeader" EventNewBlockEvents = "NewBlockEvents" EventNewEvidence = "NewEvidence" @@ -48,6 +49,7 @@ type TMEventData interface { func init() { cmtjson.RegisterType(EventDataNewBlock{}, "tendermint/event/NewBlock") + cmtjson.RegisterType(EventDataSignedBlock{}, "tendermint/event/NewSignedBlock") cmtjson.RegisterType(EventDataNewBlockHeader{}, "tendermint/event/NewBlockHeader") cmtjson.RegisterType(EventDataNewBlockEvents{}, "tendermint/event/NewBlockEvents") cmtjson.RegisterType(EventDataNewEvidence{}, "tendermint/event/NewEvidence") @@ -69,6 +71,15 @@ type EventDataNewBlock struct { ResultFinalizeBlock abci.ResponseFinalizeBlock `json:"result_finalize_block"` } +// EventDataSignedBlock contains all the information needed to verify +// the data committed in a block. 
+type EventDataSignedBlock struct { + Header Header `json:"header"` + Commit Commit `json:"commit"` + ValidatorSet ValidatorSet `json:"validator_set"` + Data Data `json:"data"` +} + type EventDataNewBlockHeader struct { Header Header `json:"header"` } @@ -149,6 +160,7 @@ var ( EventQueryCompleteProposal = QueryForEvent(EventCompleteProposal) EventQueryLock = QueryForEvent(EventLock) EventQueryNewBlock = QueryForEvent(EventNewBlock) + EventQueryNewSignedBlock = QueryForEvent(EventSignedBlock) EventQueryNewBlockHeader = QueryForEvent(EventNewBlockHeader) EventQueryNewBlockEvents = QueryForEvent(EventNewBlockEvents) EventQueryNewEvidence = QueryForEvent(EventNewEvidence) @@ -176,6 +188,7 @@ func QueryForEvent(eventType string) cmtpubsub.Query { // BlockEventPublisher publishes all block related events type BlockEventPublisher interface { PublishEventNewBlock(block EventDataNewBlock) error + PublishEventSignedBlock(block EventDataSignedBlock) error PublishEventNewBlockHeader(header EventDataNewBlockHeader) error PublishEventNewBlockEvents(events EventDataNewBlockEvents) error PublishEventNewEvidence(evidence EventDataNewEvidence) error diff --git a/types/part_set.go b/types/part_set.go index 92e54cacb66..3dac0ef0fe8 100644 --- a/types/part_set.go +++ b/types/part_set.go @@ -299,6 +299,24 @@ func (ps *PartSet) AddPart(part *Part) (bool, error) { return false, nil } + // The proof should be compatible with the number of parts. 
+ if part.Proof.Total != int64(ps.total) { + return false, ErrPartSetInvalidProof + } + + // Check hash proof + if part.Proof.Verify(ps.Hash(), part.Bytes) != nil { + return false, ErrPartSetInvalidProof + } + + return ps.AddPartWithoutProof(part) +} + +func (ps *PartSet) AddPartWithoutProof(part *Part) (bool, error) { + if part == nil { + return false, errors.New("nil part") + } + ps.mtx.Lock() defer ps.mtx.Unlock() @@ -312,16 +330,6 @@ func (ps *PartSet) AddPart(part *Part) (bool, error) { return false, nil } - // The proof should be compatible with the number of parts. - if part.Proof.Total != int64(ps.total) { - return false, ErrPartSetInvalidProof - } - - // Check hash proof - if part.Proof.Verify(ps.Hash(), part.Bytes) != nil { - return false, ErrPartSetInvalidProof - } - // Add part ps.parts[part.Index] = part ps.partsBitArray.SetIndex(int(part.Index), true) diff --git a/types/row_proof.go b/types/row_proof.go new file mode 100644 index 00000000000..1ecb6236960 --- /dev/null +++ b/types/row_proof.go @@ -0,0 +1,86 @@ +package types + +import ( + "errors" + "fmt" + + "github.com/cometbft/cometbft/crypto/merkle" + tmbytes "github.com/cometbft/cometbft/libs/bytes" + tmproto "github.com/cometbft/cometbft/proto/tendermint/types" +) + +// RowProof is a Merkle proof that a set of rows exist in a Merkle tree with a +// given data root. +type RowProof struct { + // RowRoots are the roots of the rows being proven. + RowRoots []tmbytes.HexBytes `json:"row_roots"` + // Proofs is a list of Merkle proofs where each proof proves that a row + // exists in a Merkle tree with a given data root. + Proofs []*merkle.Proof `json:"proofs"` + // StartRow the index of the start row. + // Note: currently, StartRow is not validated as part of the proof verification. + // If this field is used downstream, Validate(root) should be called along with + // extra validation depending on how it's used. + StartRow uint32 `json:"start_row"` + // EndRow the index of the end row. 
+ // Note: currently, EndRow is not validated as part of the proof verification. + // If this field is used downstream, Validate(root) should be called along with + // extra validation depending on how it's used. + EndRow uint32 `json:"end_row"` +} + +// Validate performs checks on the fields of this RowProof. Returns an error if +// the proof fails validation. If the proof passes validation, this function +// attempts to verify the proof. It returns nil if the proof is valid. +func (rp RowProof) Validate(root []byte) error { + if rp.EndRow < rp.StartRow { + return fmt.Errorf("end row %d cannot be less than start row %d", rp.EndRow, rp.StartRow) + } + if int(rp.EndRow-rp.StartRow+1) != len(rp.RowRoots) { + return fmt.Errorf("the number of rows %d must equal the number of row roots %d", int(rp.EndRow-rp.StartRow+1), len(rp.RowRoots)) + } + if len(rp.Proofs) != len(rp.RowRoots) { + return fmt.Errorf("the number of proofs %d must equal the number of row roots %d", len(rp.Proofs), len(rp.RowRoots)) + } + if !rp.VerifyProof(root) { + return errors.New("row proof failed to verify") + } + + return nil +} + +// VerifyProof verifies that all the row roots in this RowProof exist in a +// Merkle tree with the given root. Returns true if all proofs are valid. 
+func (rp RowProof) VerifyProof(root []byte) bool { + for i, proof := range rp.Proofs { + err := proof.Verify(root, rp.RowRoots[i]) + if err != nil { + return false + } + } + return true +} + +func RowProofFromProto(p *tmproto.RowProof) RowProof { + if p == nil { + return RowProof{} + } + rowRoots := make([]tmbytes.HexBytes, len(p.RowRoots)) + rowProofs := make([]*merkle.Proof, len(p.Proofs)) + for i := range p.Proofs { + rowRoots[i] = p.RowRoots[i] + rowProofs[i] = &merkle.Proof{ + Total: p.Proofs[i].Total, + Index: p.Proofs[i].Index, + LeafHash: p.Proofs[i].LeafHash, + Aunts: p.Proofs[i].Aunts, + } + } + + return RowProof{ + RowRoots: rowRoots, + Proofs: rowProofs, + StartRow: p.StartRow, + EndRow: p.EndRow, + } +} diff --git a/types/row_proof_test.go b/types/row_proof_test.go new file mode 100644 index 00000000000..23bceb601ef --- /dev/null +++ b/types/row_proof_test.go @@ -0,0 +1,116 @@ +package types + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/cometbft/cometbft/crypto/merkle" + tmbytes "github.com/cometbft/cometbft/libs/bytes" +) + +func TestRowProofValidate(t *testing.T) { + type testCase struct { + name string + rp RowProof + root []byte + wantErr bool + } + testCases := []testCase{ + { + name: "empty row proof returns error", + rp: RowProof{}, + root: root, + wantErr: true, + }, + { + name: "row proof with mismatched number of rows and row roots returns error", + rp: mismatchedRowRoots(), + root: root, + wantErr: true, + }, + { + name: "row proof with mismatched number of proofs returns error", + rp: mismatchedProofs(), + root: root, + wantErr: true, + }, + { + name: "row proof with mismatched number of rows returns error", + rp: mismatchedRows(), + root: root, + wantErr: true, + }, + { + name: "valid row proof returns no error", + rp: validRowProof(), + root: root, + wantErr: false, + }, + { + name: "valid row proof with incorrect root returns error", + rp: validRowProof(), + root: incorrectRoot, + wantErr: 
true, + }, + { + name: "start row greater than end row", + rp: RowProof{StartRow: 10, EndRow: 5}, + root: root, + wantErr: true, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got := tc.rp.Validate(tc.root) + if tc.wantErr { + assert.Error(t, got) + return + } + assert.NoError(t, got) + }) + } +} + +// root is the root hash of the Merkle tree used in validRowProof. +var root = []byte{0x82, 0x37, 0x91, 0xd2, 0x5d, 0x77, 0x7, 0x67, 0x35, 0x3, 0x90, 0x12, 0x10, 0xc4, 0x43, 0x8a, 0x8b, 0x78, 0x4b, 0xbf, 0x5b, 0x8f, 0xa6, 0x40, 0xa9, 0x51, 0xa7, 0xa9, 0xbd, 0x52, 0xd5, 0xf6} + +var incorrectRoot = bytes.Repeat([]byte{0}, 32) + +// validRowProof returns a row proof for one row. This test data was copied from +// celestia-app's pkg/proof/proof_test.go TestNewShareInclusionProof: "1 +// transaction share". +func validRowProof() RowProof { + return RowProof{ + RowRoots: tmbytes.FromBytes([]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9d, 0xe6, 0x38, 0x91, 0xc1, 0x6, 0xaf, 0x81, 0x75, 0x5a, 0x36, 0xf5, 0xb2, 0x62, 0x1e, 0xfa, 0xb9, 0xb8, 0x73, 0x87, 0xef, 0xe3, 0x6b, 0x33, 0xd8, 0xbf, 0xc9, 0x87, 0x1b, 0x8d, 0xfa, 0x8a}), + Proofs: []*merkle.Proof{ + { + Total: 128, + Index: 0, + LeafHash: []uint8{0x0, 0xcc, 0xfb, 0xff, 0x62, 0x10, 0x71, 0x61, 0x2f, 0xb9, 0x5a, 0xb1, 0xc3, 0x83, 0xff, 0x1d, 0x30, 0x31, 0x86, 0x42, 0xe4, 0x8e, 0x59, 0xe8, 0x8b, 0x92, 0x83, 0x11, 0x67, 0xb, 0xfc, 0x9a}, + Aunts: [][]uint8{{0x5c, 0xc6, 0x3b, 0x1e, 0x91, 0xa4, 0xbf, 0x6a, 0xa7, 0xd2, 0x68, 0x1c, 0x44, 0xc1, 0xda, 0xa2, 0x22, 0xed, 0x33, 0xb8, 0xd0, 0x29, 0x48, 0xfc, 0xab, 0x8f, 0x71, 0x50, 0x9c, 0xbb, 0x15, 0xab}, {0xc6, 0x14, 0x2b, 0x33, 0x5d, 0xaa, 0xfa, 0x20, 0xdf, 
0x8a, 0x9b, 0xe9, 0x29, 0x9b, 0x34, 0xcd, 0xeb, 0xe7, 0x35, 0x39, 0x5c, 0x58, 0xb1, 0x13, 0x1f, 0x4, 0xeb, 0xdc, 0x33, 0x99, 0xdf, 0x98}, {0xdb, 0x99, 0xe2, 0xdf, 0x86, 0x84, 0x24, 0x90, 0x44, 0x8e, 0x29, 0x26, 0xe1, 0xb2, 0xb0, 0x52, 0x42, 0xf9, 0x73, 0x7, 0x7f, 0xab, 0x1d, 0xa9, 0xad, 0x56, 0x10, 0xf0, 0x58, 0xdf, 0x8, 0xd7}, {0x48, 0xfd, 0xfc, 0x3b, 0x96, 0xa5, 0x19, 0xf5, 0x14, 0xf, 0x37, 0xfd, 0x95, 0xb3, 0x76, 0xfb, 0x7e, 0x5, 0x5b, 0x4d, 0x8b, 0x68, 0x16, 0x81, 0x51, 0x92, 0x44, 0x0, 0xe5, 0xf6, 0x49, 0x16}, {0xfb, 0x45, 0xdc, 0x2, 0x8b, 0xa9, 0x45, 0xfe, 0xa0, 0x7b, 0xeb, 0x62, 0x81, 0x84, 0x95, 0x19, 0x29, 0xf5, 0x78, 0x16, 0x15, 0xb8, 0xf2, 0xa3, 0x94, 0x96, 0xb1, 0x4c, 0x4c, 0xef, 0xf4, 0xd3}, {0x2c, 0x26, 0x82, 0xb1, 0x8c, 0x9f, 0xff, 0x50, 0xde, 0x67, 0x4e, 0x82, 0x3, 0x3, 0xd6, 0xdc, 0x7c, 0x7a, 0xea, 0x1a, 0xe3, 0x9, 0xf0, 0x1a, 0xc6, 0xcd, 0x19, 0x34, 0xc7, 0x54, 0x6, 0x14}, {0xe9, 0x41, 0x8b, 0x1, 0x9a, 0xd6, 0xd3, 0x13, 0x21, 0x14, 0x89, 0x98, 0xbb, 0x81, 0xda, 0xf7, 0xa, 0x36, 0x14, 0xcf, 0xc5, 0xac, 0xbf, 0xc3, 0x48, 0xb0, 0x88, 0x90, 0x45, 0x29, 0x80, 0x23}}, + }, + }, + StartRow: 0, + EndRow: 0, + } +} + +func mismatchedRowRoots() RowProof { + rp := validRowProof() + rp.RowRoots = []tmbytes.HexBytes{} + return rp +} + +func mismatchedProofs() RowProof { + rp := validRowProof() + rp.Proofs = []*merkle.Proof{} + return rp +} + +func mismatchedRows() RowProof { + rp := validRowProof() + rp.EndRow = 10 + return rp +} diff --git a/types/share_proof.go b/types/share_proof.go new file mode 100644 index 00000000000..5be7cb39945 --- /dev/null +++ b/types/share_proof.go @@ -0,0 +1,135 @@ +package types + +import ( + "errors" + "fmt" + "math" + + "github.com/celestiaorg/nmt" + + "github.com/cometbft/cometbft/libs/consts" + crypto "github.com/cometbft/cometbft/proto/tendermint/crypto" + tmproto "github.com/cometbft/cometbft/proto/tendermint/types" +) + +// ShareProof is an NMT proof that a set of shares exist in a set of rows and a +// Merkle proof that 
those rows exist in a Merkle tree with a given data root. +type ShareProof struct { + // Data are the raw shares that are being proven. + Data [][]byte `json:"data"` + // ShareProofs are NMT proofs that the shares in Data exist in a set of + // rows. There will be one ShareProof per row that the shares occupy. + ShareProofs []*tmproto.NMTProof `json:"share_proofs"` + // NamespaceID is the namespace id of the shares being proven. This + // namespace id is used when verifying the proof. If the namespace id doesn't + // match the namespace of the shares, the proof will fail verification. + NamespaceID []byte `json:"namespace_id"` + RowProof RowProof `json:"row_proof"` + NamespaceVersion uint32 `json:"namespace_version"` +} + +func (sp ShareProof) ToProto() tmproto.ShareProof { + // TODO consider extracting a ToProto function for RowProof + rowRoots := make([][]byte, len(sp.RowProof.RowRoots)) + rowProofs := make([]*crypto.Proof, len(sp.RowProof.Proofs)) + for i := range sp.RowProof.RowRoots { + rowRoots[i] = sp.RowProof.RowRoots[i].Bytes() + rowProofs[i] = sp.RowProof.Proofs[i].ToProto() + } + pbtp := tmproto.ShareProof{ + Data: sp.Data, + ShareProofs: sp.ShareProofs, + NamespaceId: sp.NamespaceID, + RowProof: &tmproto.RowProof{ + RowRoots: rowRoots, + Proofs: rowProofs, + StartRow: sp.RowProof.StartRow, + EndRow: sp.RowProof.EndRow, + }, + NamespaceVersion: sp.NamespaceVersion, + } + + return pbtp +} + +// ShareProofFromProto creates a ShareProof from a proto message. +// Expects the proof to be pre-validated. +func ShareProofFromProto(pb tmproto.ShareProof) (ShareProof, error) { + return ShareProof{ + RowProof: RowProofFromProto(pb.RowProof), + Data: pb.Data, + ShareProofs: pb.ShareProofs, + NamespaceID: pb.NamespaceId, + NamespaceVersion: pb.NamespaceVersion, + }, nil +} + +// Validate runs basic validations on the proof then verifies if it is consistent. +// It returns nil if the proof is valid. Otherwise, it returns a sensible error. 
+// The `root` is the block data root that the shares to be proven belong to. +// Note: these proofs are tested on the app side. +func (sp ShareProof) Validate(root []byte) error { + numberOfSharesInProofs := int32(0) + for _, proof := range sp.ShareProofs { + // the range is not inclusive from the left. + numberOfSharesInProofs += proof.End - proof.Start + } + + if len(sp.ShareProofs) != len(sp.RowProof.RowRoots) { + return fmt.Errorf("the number of share proofs %d must equal the number of row roots %d", len(sp.ShareProofs), len(sp.RowProof.RowRoots)) + + } + if len(sp.Data) != int(numberOfSharesInProofs) { + return fmt.Errorf("the number of shares %d must equal the number of shares in share proofs %d", len(sp.Data), numberOfSharesInProofs) + } + + for _, proof := range sp.ShareProofs { + if proof.Start < 0 { + return errors.New("proof index cannot be negative") + } + if (proof.End - proof.Start) <= 0 { + return errors.New("proof total must be positive") + } + } + + if err := sp.RowProof.Validate(root); err != nil { + return err + } + + if ok := sp.VerifyProof(); !ok { + return errors.New("share proof failed to verify") + } + + return nil +} + +func (sp ShareProof) VerifyProof() bool { + cursor := int32(0) + for i, proof := range sp.ShareProofs { + nmtProof := nmt.NewInclusionProof( + int(proof.Start), + int(proof.End), + proof.Nodes, + true, + ) + sharesUsed := proof.End - proof.Start + if sp.NamespaceVersion > math.MaxUint8 { + return false + } + // Consider extracting celestia-app's namespace package. We can't use it + here because that would introduce a circular import. + //nolint:gosec + namespace := append([]byte{uint8(sp.NamespaceVersion)}, sp.NamespaceID...) 
+ valid := nmtProof.VerifyInclusion( + consts.NewBaseHashFunc(), + namespace, + sp.Data[cursor:sharesUsed+cursor], + sp.RowProof.RowRoots[i], + ) + if !valid { + return false + } + cursor += sharesUsed + } + return true +} diff --git a/types/share_proof_test.go b/types/share_proof_test.go new file mode 100644 index 00000000000..b09e45905d2 --- /dev/null +++ b/types/share_proof_test.go @@ -0,0 +1,95 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/cometbft/cometbft/libs/consts" + types "github.com/cometbft/cometbft/proto/tendermint/types" +) + +func TestShareProofValidate(t *testing.T) { + type testCase struct { + name string + sp ShareProof + root []byte + wantErr bool + } + + testCases := []testCase{ + { + name: "empty share proof returns error", + sp: ShareProof{}, + root: root, + wantErr: true, + }, + { + name: "valid share proof returns no error", + sp: validShareProof(), + root: root, + wantErr: false, + }, + { + name: "share proof with mismatched number of share proofs returns error", + sp: mismatchedShareProofs(), + root: root, + wantErr: true, + }, + { + name: "share proof with mismatched number of shares returns error", + sp: mismatchedShares(), + root: root, + wantErr: true, + }, + { + name: "valid share proof with incorrect root returns error", + sp: validShareProof(), + root: incorrectRoot, + wantErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got := tc.sp.Validate(tc.root) + if tc.wantErr { + assert.Error(t, got) + return + } + assert.NoError(t, got) + }) + } +} + +func mismatchedShareProofs() ShareProof { + sp := validShareProof() + sp.ShareProofs = []*types.NMTProof{} + return sp +} + +func mismatchedShares() ShareProof { + sp := validShareProof() + sp.Data = [][]byte{} + return sp +} + +// validShareProof returns a valid ShareProof for a single share. 
This test data +// was copied from celestia-app's pkg/proof/proof_test.go +// TestNewShareInclusionProof: "1 transaction share" +func validShareProof() ShareProof { + return ShareProof{ + Data: [][]uint8{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x0, 0x0, 0x62, 0xc, 0x0, 0x0, 0x0, 0x2a, 0xf4, 0x3, 0xff, 0xe8, 0x78, 0x6c, 0x48, 0x84, 0x9, 0x5, 0x5, 0x79, 0x8f, 0x29, 0x67, 0xa2, 0xe1, 0x8d, 0x2f, 0xdc, 0xf2, 0x60, 0xe4, 0x62, 0x71, 0xf9, 0xae, 0x92, 0x83, 0x3a, 0x7f, 0xf3, 0xc6, 0x14, 0xb4, 0x17, 0xfc, 0x64, 0x4b, 0x89, 0x18, 0x5e, 0x22, 0x4b, 0x0, 0x82, 0xeb, 0x67, 0x5b, 0x51, 0x43, 0x4e, 0xc3, 0x42, 0x48, 0xc1, 0xfd, 0x88, 0x71, 0xcb, 0xee, 0xf3, 0x92, 0x20, 0x9c, 0x15, 0xc0, 0x4f, 0x11, 0xa4, 0x5, 0xd0, 0xdf, 0xb8, 0x25, 0x60, 0x58, 0xae, 0x2, 0x2d, 0x78, 0xf8, 0x1f, 0x67, 0xeb, 0x88, 0x58, 0x5d, 0x5a, 0x4a, 0x74, 0xe7, 0xdf, 0x38, 0x6a, 0xa4, 0x3f, 0x62, 0xd6, 0x3d, 0x17, 0xd2, 0x7e, 0x92, 0x9c, 0x4a, 0xd0, 0x2b, 0x55, 0x49, 0x3b, 0xa7, 0x5a, 0x29, 0xd5, 0x6b, 0x91, 0xde, 0xfe, 0x5b, 0x39, 0x88, 0xc5, 0xbb, 0x91, 0x16, 0xf6, 0x47, 0xec, 0x8, 0x3, 0x2a, 0x1e, 0x6e, 0x4b, 0x27, 0x34, 0x90, 0x38, 0x46, 0x6e, 0xce, 0x35, 0xdf, 0xd6, 0x1e, 0x1a, 0xf2, 0xf0, 0x6e, 0xa0, 0xfe, 0x84, 0x51, 0xf2, 0xc1, 0x32, 0xd, 0x89, 0x17, 0x5f, 0x4c, 0xab, 0x81, 0xd4, 0x44, 0x5a, 0x55, 0xdb, 0xe5, 0xa7, 0x3c, 0x42, 0xb6, 0xb3, 0x20, 0xc4, 0x81, 0x75, 0x8, 0x5e, 0x39, 0x21, 0x51, 0x4c, 0x93, 0x2c, 0x7c, 0xb3, 0xd0, 0x37, 0xf9, 0x6a, 0xab, 0x93, 0xf0, 0x3f, 0xa2, 0x44, 0x1f, 0x63, 0xae, 0x96, 0x4e, 0x26, 0x7a, 0x1f, 0x18, 0x5b, 0x28, 0x4d, 0x24, 0xe8, 0x98, 0x56, 0xbf, 0x98, 0x44, 0x23, 0x17, 0x85, 0x22, 0x38, 0x56, 0xeb, 0xf3, 0x4e, 0x87, 0x1e, 0xc1, 0x51, 0x6, 0x71, 0xa7, 0xa9, 0x45, 0xef, 0xc7, 0x89, 0x5c, 0xed, 0x68, 0xbd, 0x43, 0x2f, 0xe6, 0xf1, 0x56, 0xef, 0xf, 0x4f, 0x57, 0xaa, 0x8c, 0x5c, 0xbd, 0x21, 0xb4, 0xaa, 0x15, 0x71, 0x6a, 0xdc, 0x12, 0xda, 0xee, 
0xd9, 0x19, 0xbc, 0x17, 0xa2, 0x49, 0xd6, 0xbe, 0xd2, 0xc6, 0x6a, 0xbc, 0x53, 0xe4, 0x28, 0xd4, 0xeb, 0xe9, 0x9b, 0xd6, 0x85, 0x89, 0xb9, 0xe8, 0xa2, 0x70, 0x40, 0xad, 0xb1, 0x1a, 0xa0, 0xb1, 0xb5, 0xee, 0xde, 0x6d, 0xa9, 0x2a, 0x4b, 0x6, 0xd1, 0xfa, 0x67, 0x13, 0xac, 0x7d, 0x9a, 0x81, 0xc6, 0xef, 0x78, 0x42, 0x18, 0xf, 0x7b, 0xaf, 0x50, 0xa7, 0xdb, 0xb6, 0xde, 0xab, 0x3, 0xdc, 0x5, 0x14, 0x5f, 0x9, 0xdb, 0x81, 0xe3, 0x72, 0x2, 0x61, 0x23, 0x77, 0x12, 0x82, 0xfc, 0x9, 0x43, 0xfb, 0xd6, 0x38, 0x53, 0xfd, 0x77, 0xe, 0x17, 0xcc, 0x93, 0x5e, 0x4e, 0x60, 0x87, 0xda, 0xbd, 0xfc, 0x86, 0xdd, 0xb1, 0xd6, 0x74, 0x41, 0x71, 0x24, 0xda, 0x1, 0x3f, 0x11, 0x17, 0x9e, 0x54, 0x66, 0xb6, 0xc4, 0x9a, 0xb8, 0x59, 0xb9, 0x13, 0x4e, 0xed, 0x8, 0xe5, 0x99, 0x27, 0xa0, 0x6b, 0x1, 0x6c, 0x8a, 0xbf, 0x20, 0x3d, 0x75, 0xd5, 0x7e, 0xea, 0xe0, 0xef, 0x7f, 0xfe, 0xa8, 0xaf, 0x76, 0xad, 0x30, 0x55, 0x65, 0x9d, 0xbe, 0x30, 0x32, 0x9f, 0x3b, 0xb7, 0xa1, 0x5c, 0x98, 0xef, 0xe1, 0xe4, 0x33, 0x1a, 0x56, 0x5a, 0x22, 0xd1, 0x38, 0x9b, 0xee, 0xfa, 0x11, 0x6f, 0xa7, 0xd7, 0x6, 0x17, 0xdc, 0xc6, 0x4d, 0xbd, 0x3f, 0x3c, 0xe6, 0xac, 0x54, 0x70, 0xda, 0x11, 0xdb, 0x87, 0xe2, 0xc2, 0x26, 0x7e, 0x48, 0x3b, 0xda, 0xf4, 0x98, 0x3c, 0x51}}, + ShareProofs: []*types.NMTProof{ + { + Start: 0, + End: 1, + Nodes: [][]uint8{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x27, 0x3a, 0x5f, 0x16, 0x36, 0xa3, 0xce, 0x1c, 0x17, 0x58, 0x7e, 0xb8, 0xaa, 0xc8, 0x5e, 0x58, 0x9e, 0xa9, 0x36, 0x3c, 0x3d, 0x5c, 0xb5, 0xc2, 0xf0, 0x26, 0x1a, 0x9a, 0x13, 0xcd, 0x59, 0xb2}, {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x55, 0xe5, 0x43, 0x2e, 0xa2, 0x32, 0x84, 0x75, 0x8a, 0x88, 0x8d, 0x7c, 0x27, 0xdc, 0x2e, 0x13, 0x1e, 0x44, 0xc4, 0xe7, 0x51, 0x64, 0xe5, 0xe4, 0xf4, 0x7d, 0x4, 0xb8, 0x10, 0x3b, 0x72, 0xa5}, {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x4d, 0xeb, 0x2a, 0x3c, 0x56, 0x98, 0x49, 0xdb, 0x61, 0x54, 0x12, 0xee, 0xb, 0xeb, 0x29, 0xf8, 0xc9, 0x71, 0x9c, 0xf7, 0x28, 0xbb, 0x7a, 0x85, 0x70, 0xa1, 0x81, 0xc8, 0x5f, 0x6a, 0x63, 0x59}, {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xf0, 0xb5, 0x59, 0x71, 0xba, 0x6a, 0xf, 0xd1, 0xf, 0x2e, 0x79, 0xd4, 0xdc, 0xfb, 0x93, 0x94, 0x58, 0x3d, 0xd9, 0xef, 0xe2, 0x2b, 0xd4, 0xe3, 0x71, 0xbd, 0xd4, 0xd9, 0xc2, 0xc4, 0xef, 0xd1}, {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x5, 0x8f, 0xf0, 0x4e, 0x81, 0x8e, 0xc7, 0x2f, 0x35, 0xec, 0x9, 0xdf, 0xf1, 0x41, 0xd5, 0x5a, 0x2f, 0xa3, 0xa0, 0xe5, 0x8d, 0x83, 0x70, 0xf2, 0x11, 0xea, 0xc2, 0xa3, 0x4a, 0x7a, 0xc5, 0x17}, {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x96, 0x6d, 0x3f, 0x7b, 0xf5, 0xef, 0x38, 0x4b, 0xa5, 0x38, 0x98, 0x7e, 0x3b, 0x4e, 0x12, 0x21, 0xcb, 0xd7, 0xff, 0xd6, 0xf3, 0x7d, 0xf, 0x8a, 0x57, 0xfe, 0x5, 0x5, 0xb6, 0x62, 0xa6, 0xae}}, + LeafHash: []uint8(nil), + }, + }, + NamespaceID: consts.TxNamespaceID, + RowProof: validRowProof(), + NamespaceVersion: uint32(0), + } +} diff --git a/types/test_util.go b/types/test_util.go index 8f94e6d91be..e6848844ffc 100644 --- a/types/test_util.go +++ b/types/test_util.go @@ -106,18 +106,35 @@ func MakeVoteNoError( // MakeBlock returns a new block with an empty header, except what can be // computed from itself. // It populates the same set of fields validated by ValidateBasic. -func MakeBlock(height int64, txs []Tx, lastCommit *Commit, evidence []Evidence) *Block { +func MakeBlock(height int64, data Data, lastCommit *Commit, evidence []Evidence) *Block { block := &Block{ Header: Header{ Version: cmtversion.Consensus{Block: version.BlockProtocol, App: 0}, Height: height, }, - Data: Data{ - Txs: txs, - }, + Data: data, Evidence: EvidenceData{Evidence: evidence}, LastCommit: lastCommit, } block.fillHeader() return block } + +// MakeTxs is a helper function to generate mock transactions by given the block height +// and the transaction numbers. 
+func MakeTxs(height int64, num int) (txs []Tx) { + for i := 0; i < num; i++ { + txs = append(txs, Tx([]byte{byte(height), byte(i)})) + } + return txs +} + +func MakeTenTxs(height int64) (txs []Tx) { + return MakeTxs(height, 10) +} + +func MakeData(txs []Tx) Data { + return Data{ + Txs: txs, + } +} diff --git a/types/tx.go b/types/tx.go index 5cbb2cc40df..de84b96cefa 100644 --- a/types/tx.go +++ b/types/tx.go @@ -6,9 +6,12 @@ import ( "errors" "fmt" + "github.com/cosmos/gogoproto/proto" + "github.com/cometbft/cometbft/crypto/merkle" "github.com/cometbft/cometbft/crypto/tmhash" cmtbytes "github.com/cometbft/cometbft/libs/bytes" + "github.com/cometbft/cometbft/libs/consts" cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" ) @@ -27,10 +30,22 @@ type ( // Hash computes the TMHASH hash of the wire encoded transaction. func (tx Tx) Hash() []byte { + if indexWrapper, isIndexWrapper := UnmarshalIndexWrapper(tx); isIndexWrapper { + return tmhash.Sum(indexWrapper.Tx) + } + if blobTx, isBlobTx := UnmarshalBlobTx(tx); isBlobTx { + return tmhash.Sum(blobTx.Tx) + } return tmhash.Sum(tx) } func (tx Tx) Key() TxKey { + if blobTx, isBlobTx := UnmarshalBlobTx(tx); isBlobTx { + return sha256.Sum256(blobTx.Tx) + } + if indexWrapper, isIndexWrapper := UnmarshalIndexWrapper(tx); isIndexWrapper { + return sha256.Sum256(indexWrapper.Tx) + } return sha256.Sum256(tx) } @@ -39,6 +54,15 @@ func (tx Tx) String() string { return fmt.Sprintf("Tx{%X}", []byte(tx)) } +func TxKeyFromBytes(bytes []byte) (TxKey, error) { + if len(bytes) != TxKeySize { + return TxKey{}, fmt.Errorf("incorrect tx key size. Expected %d bytes, got %d", TxKeySize, len(bytes)) + } + var key TxKey + copy(key[:], bytes) + return key, nil +} + // Txs is a slice of Tx. type Txs []Tx @@ -190,3 +214,73 @@ func ComputeProtoSizeForTxs(txs []Tx) int64 { pdData := data.ToProto() return int64(pdData.Size()) } + +// UnmarshalIndexWrapper attempts to unmarshal the provided transaction into an +// IndexWrapper transaction. 
It returns true if the provided transaction is an +// IndexWrapper transaction. An IndexWrapper transaction is a transaction that contains +// a MsgPayForBlob that has been wrapped with a share index. +// +// NOTE: protobuf sometimes does not throw an error if the transaction passed is +// not a tmproto.IndexWrapper, since the protobuf definition for MsgPayForBlob is +// kept in the app, we cannot perform further checks without creating an import +// cycle. +func UnmarshalIndexWrapper(tx Tx) (indexWrapper cmtproto.IndexWrapper, isIndexWrapper bool) { + // attempt to unmarshal into an IndexWrapper transaction + err := proto.Unmarshal(tx, &indexWrapper) + if err != nil { + return indexWrapper, false + } + if indexWrapper.TypeId != consts.ProtoIndexWrapperTypeID { + return indexWrapper, false + } + return indexWrapper, true +} + +// MarshalIndexWrapper creates a wrapped Tx that includes the original transaction +// and the share index of the start of its blob. +// +// NOTE: must be unwrapped to be a viable sdk.Tx. +func MarshalIndexWrapper(tx Tx, shareIndexes ...uint32) (Tx, error) { + wTx := cmtproto.IndexWrapper{ + Tx: tx, + ShareIndexes: shareIndexes, + TypeId: consts.ProtoIndexWrapperTypeID, + } + return proto.Marshal(&wTx) +} + +// UnmarshalBlobTx attempts to unmarshal a transaction into blob transaction. If an +// error is thrown, false is returned. +func UnmarshalBlobTx(tx Tx) (bTx cmtproto.BlobTx, isBlob bool) { + err := bTx.Unmarshal(tx) + if err != nil { + return cmtproto.BlobTx{}, false + } + // perform some quick basic checks to prevent false positives + if bTx.TypeId != consts.ProtoBlobTxTypeID { + return bTx, false + } + if len(bTx.Blobs) == 0 { + return bTx, false + } + for _, b := range bTx.Blobs { + if len(b.NamespaceId) != consts.NamespaceIDSize { + return bTx, false + } + } + return bTx, true +} + +// MarshalBlobTx creates a BlobTx using a normal transaction and some number of +// blobs. 
+// +// NOTE: Any checks on the blobs or the transaction must be performed in the +// application. +func MarshalBlobTx(tx []byte, blobs ...*cmtproto.Blob) (Tx, error) { + bTx := cmtproto.BlobTx{ + Tx: tx, + Blobs: blobs, + TypeId: consts.ProtoBlobTxTypeID, + } + return bTx.Marshal() +} diff --git a/types/tx_test.go b/types/tx_test.go index f5de93ae276..2f02c6d5c27 100644 --- a/types/tx_test.go +++ b/types/tx_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/cometbft/cometbft/libs/consts" cmtrand "github.com/cometbft/cometbft/libs/rand" ctest "github.com/cometbft/cometbft/libs/test" cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" @@ -149,3 +150,66 @@ func assertBadProof(t *testing.T, root []byte, bad []byte, good TxProof) { func randInt(low, high int) int { return rand.Intn(high-low) + low } + +func TestUnmarshalIndexWrapper(t *testing.T) { + // perform a simple test for being unable to decode a non + // IndexWrapper transaction + tx := Tx{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0} + _, ok := UnmarshalIndexWrapper(tx) + require.False(t, ok) + + data := Data{Txs: []Tx{tx}} + + // create a proto message that used to be decoded when it shouldn't have + randomBlock := MakeBlock( + 1, + data, + &Commit{}, + []Evidence{}, + ) + protoB, err := randomBlock.ToProto() + require.NoError(t, err) + + rawBlock, err := protoB.Marshal() + require.NoError(t, err) + + // due to protobuf not actually requiring type compatibility + // we need to make sure that there is some check + _, ok = UnmarshalIndexWrapper(rawBlock) + require.False(t, ok) + + IndexWrapper, err := MarshalIndexWrapper(rawBlock, 0) + require.NoError(t, err) + + // finally, ensure that the unwrapped bytes are identical to the input + indexWrapper, ok := UnmarshalIndexWrapper(IndexWrapper) + require.True(t, ok) + require.Equal(t, rawBlock, indexWrapper.Tx) +} + +func TestUnmarshalBlobTx(t *testing.T) { + tx := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 
1, 2, 3, 4, 5, 6, 7, 8, 9} + namespaceOne := bytes.Repeat([]byte{1}, consts.NamespaceIDSize) + blob := cmtproto.Blob{ + NamespaceId: namespaceOne, + Data: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + ShareVersion: 0, + NamespaceVersion: 0, + } + bTx, err := MarshalBlobTx(tx, &blob) + require.NoError(t, err) + + resTx, isBlob := UnmarshalBlobTx(bTx) + require.True(t, isBlob) + + assert.Equal(t, tx, resTx.Tx) + require.Len(t, resTx.Blobs, 1) + assert.Equal(t, blob, *resTx.Blobs[0]) +} + +// todo: add fuzzing +func TestUnmarshalBlobTxFalsePositive(t *testing.T) { + tx := []byte("sender-193-0=D16B687628035716B1DA53BE1491A1B3D4CEA3AB=1025") + _, isBlob := UnmarshalBlobTx(tx) + require.False(t, isBlob) +}