diff --git a/cmd/cometbft/commands/run_node.go b/cmd/cometbft/commands/run_node.go index f11c3131f38..b1b024a91f7 100644 --- a/cmd/cometbft/commands/run_node.go +++ b/cmd/cometbft/commands/run_node.go @@ -9,6 +9,7 @@ import ( "github.com/cometbft/cometbft/crypto/ed25519" kt "github.com/cometbft/cometbft/internal/keytypes" cmtos "github.com/cometbft/cometbft/internal/os" + "github.com/cometbft/cometbft/internal/trace" nm "github.com/cometbft/cometbft/node" ) @@ -90,6 +91,31 @@ func AddNodeFlags(cmd *cobra.Command) { config.DBPath, "database directory") cmd.Flags().StringVarP(&keyType, "key-type", "k", ed25519.KeyType, fmt.Sprintf("private key type (one of %s)", kt.SupportedKeyTypesStr())) + + // trace flags + cmd.PersistentFlags().String( + trace.FlagTracePushConfig, + config.Instrumentation.TracePushConfig, + trace.FlagTracePushConfigDescription, + ) + + cmd.PersistentFlags().String( + trace.FlagTracePullAddress, + config.Instrumentation.TracePullAddress, + trace.FlagTracePullAddressDescription, + ) + + cmd.PersistentFlags().String( + trace.FlagPyroscopeURL, + config.Instrumentation.PyroscopeURL, + trace.FlagPyroscopeURLDescription, + ) + + cmd.PersistentFlags().Bool( + trace.FlagPyroscopeTrace, + config.Instrumentation.PyroscopeTrace, + trace.FlagPyroscopeTraceDescription, + ) } // NewRunNodeCmd returns the command that allows the CLI to start a node. diff --git a/config/config.go b/config/config.go index 7c1eda0831c..5e35a554d5f 100644 --- a/config/config.go +++ b/config/config.go @@ -51,9 +51,18 @@ const ( MempoolTypeFlood = "flood" MempoolTypeNop = "nop" + MempoolTypeCAT = "cat" ) -// NOTE: Most of the structs & relevant comments + the +var ( + // DefaultTracingTables is a list of tables that are used for storing traces. + // This global var is filled by an init function in the schema package. This + // allows for the schema package to contain all the relevant logic while + // avoiding import cycles. 
+ DefaultTracingTables = "" +) + +// NOTE: Most of the structs & relevant comments the // default configuration options were used to manually // generate the config.toml. Please reflect any changes // made here in the defaultConfigTemplate constant in @@ -934,6 +943,7 @@ type MempoolConfig struct { // (default) // - "nop" : nop-mempool (short for no operation; the ABCI app is // responsible for storing, disseminating and proposing txs). + // - "cat" : content addressable transaction pool // "create_empty_blocks=false" is not supported. Type string `mapstructure:"type"` // RootDir is the root directory for all data. This should be configured via @@ -1007,6 +1017,12 @@ type MempoolConfig struct { // redundancy level. The higher the value, the longer it will take the node // to reduce bandwidth and converge to a stable redundancy level. DOGAdjustInterval time.Duration `mapstructure:"dog_adjust_interval"` + + // MaxGossipDelay is the maximum allotted time that the reactor expects a transaction to + // arrive before issuing a new request to a different peer + // Only applicable to the v2 / CAT mempool + // Default is 200ms + MaxGossipDelay time.Duration `mapstructure:"max-gossip-delay"` } // DefaultMempoolConfig returns a default configuration for the CometBFT mempool. @@ -1241,8 +1257,13 @@ func (cfg *BlockSyncConfig) ValidateBasic() error { // including timeouts and details about the WAL and the block structure. type ConsensusConfig struct { RootDir string `mapstructure:"home"` - WalPath string `mapstructure:"wal_file"` - walFile string // overrides WalPath if set + // If set to true, only internal messages will be written + // to the WAL. 
External messages like votes, proposals + // block parts, will not be written + // Default: true + OnlyInternalWal bool `mapstructure:"only_internal_wal"` + WalPath string `mapstructure:"wal_file"` + walFile string // overrides WalPath if set // How long we wait for a proposal block before prevoting nil TimeoutPropose time.Duration `mapstructure:"timeout_propose"` @@ -1270,6 +1291,7 @@ type ConsensusConfig struct { // DefaultConsensusConfig returns a default configuration for the consensus service. func DefaultConsensusConfig() *ConsensusConfig { return &ConsensusConfig{ + OnlyInternalWal: true, WalPath: filepath.Join(DefaultDataDir, "cs.wal", "wal"), TimeoutPropose: 3000 * time.Millisecond, TimeoutProposeDelta: 500 * time.Millisecond, @@ -1514,6 +1536,38 @@ type InstrumentationConfig struct { // Instrumentation namespace. Namespace string `mapstructure:"namespace"` + + // TracePushConfig is the relative path of the push config. This second + // config contains credentials for where and how often to. + TracePushConfig string `mapstructure:"trace_push_config"` + + // TracePullAddress is the address that the trace server will listen on for + // pulling data. + TracePullAddress string `mapstructure:"trace_pull_address"` + + // TraceType is the type of tracer used. Options are "local" and "noop". + TraceType string `mapstructure:"trace_type"` + + // TraceBufferSize is the number of traces to write in a single batch. + TraceBufferSize int `mapstructure:"trace_push_batch_size"` + + // TracingTables is the list of tables that will be traced. See the + // pkg/trace/schema for a complete list of tables. It is represented as a + // comma separate string. For example: "consensus_round_state,mempool_tx". + TracingTables string `mapstructure:"tracing_tables"` + + // PyroscopeURL is the pyroscope url used to establish a connection with a + // pyroscope continuous profiling server. 
+ PyroscopeURL string `mapstructure:"pyroscope_url"` + + // PyroscopeProfile is a flag that enables tracing with pyroscope. + PyroscopeTrace bool `mapstructure:"pyroscope_trace"` + + // PyroscopeProfileTypes is a list of profile types to be traced with + // pyroscope. Available profile types are: cpu, alloc_objects, alloc_space, + // inuse_objects, inuse_space, goroutines, mutex_count, mutex_duration, + // block_count, block_duration. + PyroscopeProfileTypes []string `mapstructure:"pyroscope_profile_types"` } // DefaultInstrumentationConfig returns a default configuration for metrics @@ -1524,6 +1578,23 @@ func DefaultInstrumentationConfig() *InstrumentationConfig { PrometheusListenAddr: ":26660", MaxOpenConnections: 3, Namespace: "cometbft", + TracePushConfig: "", + TracePullAddress: "", + TraceType: "noop", + TraceBufferSize: 1000, + TracingTables: DefaultTracingTables, + PyroscopeURL: "", + PyroscopeTrace: false, + PyroscopeProfileTypes: []string{ + "cpu", + "alloc_objects", + "inuse_objects", + "goroutines", + "mutex_count", + "mutex_duration", + "block_count", + "block_duration", + }, } } @@ -1539,6 +1610,23 @@ func (cfg *InstrumentationConfig) ValidateBasic() error { if cfg.MaxOpenConnections < 0 { return cmterrors.ErrNegativeField{Field: "max_open_connections"} } + if cfg.PyroscopeTrace && cfg.PyroscopeURL == "" { + return errors.New("pyroscope_trace can't be enabled if profiling is disabled") + } + // if there is not TracePushConfig configured, then we do not need to validate the rest + // of the config because we are not connecting. 
+ if cfg.TracePushConfig == "" { + return nil + } + if cfg.TracePullAddress == "" { + return fmt.Errorf("token is required") + } + if cfg.TraceType == "" { + return fmt.Errorf("org is required") + } + if cfg.TraceBufferSize <= 0 { + return fmt.Errorf("batch size must be greater than 0") + } return nil } diff --git a/config/config.toml.tpl b/config/config.toml.tpl index a0e483ddb2f..657e0b5965e 100644 --- a/config/config.toml.tpl +++ b/config/config.toml.tpl @@ -592,3 +592,39 @@ max_open_connections = {{ .Instrumentation.MaxOpenConnections }} # Instrumentation namespace namespace = "{{ .Instrumentation.Namespace }}" + +# TracePushConfig is the relative path of the push config. +# This second config contains credentials for where and how often to +# push trace data to. For example, if the config is next to this config, +# it would be "push_config.json". +trace_push_config = "{{ .Instrumentation.TracePushConfig }}" + +# The tracer pull address specifies which address will be used for pull based +# event collection. If empty, the pull based server will not be started. +trace_pull_address = "{{ .Instrumentation.TracePullAddress }}" + +# The tracer to use for collecting trace data. +trace_type = "{{ .Instrumentation.TraceType }}" + +# The size of the batches that are sent to the database. +trace_push_batch_size = {{ .Instrumentation.TraceBufferSize }} + +# The list of tables that are updated when tracing. All available tables and +# their schema can be found in the pkg/trace/schema package. It is represented as a +# comma separate string. For example: "consensus_round_state,mempool_tx". +tracing_tables = "{{ .Instrumentation.TracingTables }}" + +# The URL of the pyroscope instance to use for continuous profiling. +# If empty, continuous profiling is disabled. +pyroscope_url = "{{ .Instrumentation.PyroscopeURL }}" + +# When true, tracing data is added to the continuous profiling +# performed by pyroscope. 
+pyroscope_trace = {{ .Instrumentation.PyroscopeTrace }} + +# pyroscope_profile_types is a list of profile types to be traced with +# pyroscope. Available profile types are: cpu, alloc_objects, alloc_space, +# inuse_objects, inuse_space, goroutines, mutex_count, mutex_duration, +# block_count, block_duration. +pyroscope_profile_types = [{{ range .Instrumentation.PyroscopeProfileTypes }}{{ printf "%q, " . }}{{end}}] + diff --git a/crypto/merkle/proof_op.go b/crypto/merkle/proof_op.go index 579fae27397..6bac142fb53 100644 --- a/crypto/merkle/proof_op.go +++ b/crypto/merkle/proof_op.go @@ -77,6 +77,36 @@ func (poz ProofOperators) Verify(root []byte, keypath string, args [][]byte) err return nil } +// VerifyFromKeys performs the same verification logic as the normal Verify +// method, except it does not perform any processing on the keypath. This is +// useful when using keys that have split or escape points as a part of the key. +func (poz ProofOperators) VerifyFromKeys(root []byte, keys [][]byte, args [][]byte) (err error) { + for i, op := range poz { + key := op.GetKey() + if len(key) != 0 { + if len(keys) == 0 { + return fmt.Errorf("key path has insufficient # of parts: expected no more keys but got %v", string(key)) + } + lastKey := keys[len(keys)-1] + if !bytes.Equal(lastKey, key) { + return fmt.Errorf("key mismatch on operation #%d: expected %v but got %v", i, string(lastKey), string(key)) + } + keys = keys[:len(keys)-1] + } + args, err = op.Run(args) + if err != nil { + return + } + } + if !bytes.Equal(root, args[0]) { + return fmt.Errorf("calculated root hash is invalid: expected %X but got %X", root, args[0]) + } + if len(keys) != 0 { + return fmt.Errorf("keypath not consumed all: %s", string(bytes.Join(keys, []byte("/")))) + } + return nil +} + // ---------------------------------------- // ProofRuntime - main entrypoint @@ -131,6 +161,10 @@ func (prt *ProofRuntime) VerifyValue(proof *cmtcrypto.ProofOps, root []byte, key return prt.Verify(proof, root, 
keypath, [][]byte{value}) } +func (prt *ProofRuntime) VerifyValueFromKeys(proof *cmtcrypto.ProofOps, root []byte, keys [][]byte, value []byte) (err error) { + return prt.VerifyFromKeys(proof, root, keys, [][]byte{value}) +} + // TODO In the long run we'll need a method of classification of ops, // whether existence or absence or perhaps a third? func (prt *ProofRuntime) VerifyAbsence(proof *cmtcrypto.ProofOps, root []byte, keypath string) (err error) { @@ -148,6 +182,17 @@ func (prt *ProofRuntime) Verify(proof *cmtcrypto.ProofOps, root []byte, keypath return poz.Verify(root, keypath, args) } +// VerifyFromKeys performs the same verification logic as the normal Verify +// method, except it does not perform any processing on the keypath. This is +// useful when using keys that have split or escape points as a part of the key. +func (prt *ProofRuntime) VerifyFromKeys(proof *cmtcrypto.ProofOps, root []byte, keys [][]byte, args [][]byte) (err error) { + poz, err := prt.DecodeProof(proof) + if err != nil { + return fmt.Errorf("decoding proof: %w", err) + } + return poz.VerifyFromKeys(root, keys, args) +} + // DefaultProofRuntime only knows about value proofs. // To use e.g. IAVL proofs, register op-decoders as // defined in the IAVL package. diff --git a/crypto/merkle/proof_op_test.go b/crypto/merkle/proof_op_test.go index 4a4df34801d..96971f02dba 100644 --- a/crypto/merkle/proof_op_test.go +++ b/crypto/merkle/proof_op_test.go @@ -78,3 +78,71 @@ func TestProofOperators_Verify(t *testing.T) { require.NoError(t, err, "Verify Failed") } + +func TestProofOperatorsFromKeys(t *testing.T) { + var err error + + // ProofRuntime setup + // TODO test this somehow. 
+ + // ProofOperators setup + op1 := merkle.NewDominoOp("KEY1", "INPUT1", "INPUT2") + op2 := merkle.NewDominoOp("KEY%2", "INPUT2", "INPUT3") + op3 := merkle.NewDominoOp("", "INPUT3", "INPUT4") + op4 := merkle.NewDominoOp("KEY/4", "INPUT4", "OUTPUT4") + + // add characters to the keys that would otherwise result in bad keypath if + // processed + keys1 := [][]byte{bz("KEY/4"), bz("KEY%2"), bz("KEY1")} + badkeys1 := [][]byte{bz("WrongKey"), bz("KEY%2"), bz("KEY1")} + keys2 := [][]byte{bz("KEY3"), bz("KEY%2"), bz("KEY1")} + keys3 := [][]byte{bz("KEY2"), bz("KEY1")} + + // Good + popz := merkle.ProofOperators([]merkle.ProofOperator{op1, op2, op3, op4}) + err = popz.VerifyFromKeys(bz("OUTPUT4"), keys1, [][]byte{bz("INPUT1")}) + require.NoError(t, err) + + // BAD INPUT + err = popz.VerifyFromKeys(bz("OUTPUT4"), keys1, [][]byte{bz("INPUT1_WRONG")}) + require.Error(t, err) + + // BAD KEY 1 + err = popz.VerifyFromKeys(bz("OUTPUT4"), keys2, [][]byte{bz("INPUT1")}) + require.Error(t, err) + + // BAD KEY 2 + err = popz.VerifyFromKeys(bz("OUTPUT4"), badkeys1, [][]byte{bz("INPUT1")}) + require.Error(t, err) + + // BAD KEY 5 + err = popz.VerifyFromKeys(bz("OUTPUT4"), keys3, [][]byte{bz("INPUT1")}) + require.Error(t, err) + + // BAD OUTPUT 1 + err = popz.VerifyFromKeys(bz("OUTPUT4_WRONG"), keys1, [][]byte{bz("INPUT1")}) + require.Error(t, err) + + // BAD OUTPUT 2 + err = popz.VerifyFromKeys(bz(""), keys1, [][]byte{bz("INPUT1")}) + require.Error(t, err) + + // BAD POPZ 1 + popz = merkle.ProofOperators([]merkle.ProofOperator{op1, op2, op4}) + err = popz.VerifyFromKeys(bz("OUTPUT4"), keys1, [][]byte{bz("INPUT1")}) + require.Error(t, err) + + // BAD POPZ 2 + popz = merkle.ProofOperators([]merkle.ProofOperator{op4, op3, op2, op1}) + err = popz.VerifyFromKeys(bz("OUTPUT4"), keys1, [][]byte{bz("INPUT1")}) + require.Error(t, err) + + // BAD POPZ 3 + popz = merkle.ProofOperators([]merkle.ProofOperator{}) + err = popz.VerifyFromKeys(bz("OUTPUT4"), keys1, [][]byte{bz("INPUT1")}) + 
require.Error(t, err) +} + +func bz(s string) []byte { + return []byte(s) +} diff --git a/internal/blocksync/msgs_test.go b/internal/blocksync/msgs_test.go index 6e85ed1c25b..435cf616855 100644 --- a/internal/blocksync/msgs_test.go +++ b/internal/blocksync/msgs_test.go @@ -11,6 +11,7 @@ import ( bcproto "github.com/cometbft/cometbft/api/cometbft/blocksync/v2" "github.com/cometbft/cometbft/internal/blocksync" + "github.com/cometbft/cometbft/internal/test" "github.com/cometbft/cometbft/types" ) @@ -78,7 +79,7 @@ func TestBcStatusResponseMessageValidateBasic(t *testing.T) { //nolint:lll // ignore line length in tests func TestBlocksyncMessageVectors(t *testing.T) { - block := types.MakeBlock(int64(3), []types.Tx{types.Tx("Hello World")}, nil, nil) + block := types.MakeBlock(int64(3), test.MakeData([]types.Tx{types.Tx("Hello World")}), nil, nil) block.Version.Block = 11 // overwrite updated protocol version bpb, err := block.ToProto() diff --git a/internal/blocksync/reactor.go b/internal/blocksync/reactor.go index e4afc944a4c..ce95b1d5124 100644 --- a/internal/blocksync/reactor.go +++ b/internal/blocksync/reactor.go @@ -548,7 +548,16 @@ func (bcR *Reactor) processBlock(first, second *types.Block, firstParts *types.P chainID, firstID, first.Height, second.LastCommit) if err == nil { + var stateMachineValid bool // validate the block before we persist it + stateMachineValid, err = bcR.blockExec.ProcessProposal(first, state) + if !stateMachineValid { + err = fmt.Errorf("application has rejected syncing block (%X) at height %d", first.Hash(), first.Height) + } + if err != nil { //TODO: keep this? + return state, err + } + //TODO: keep this? 
err = bcR.blockExec.ValidateBlock(state, first) } @@ -600,6 +609,8 @@ func (bcR *Reactor) processBlock(first, second *types.Block, firstParts *types.P // TODO: same thing for app - but we would need a way to // get the hash without persisting the state state, err = bcR.blockExec.ApplyVerifiedBlock(state, firstID, first, bcR.pool.MaxPeerHeight()) + // TODO: same thing for app - but we would need a way to get the hash without persisting the state + // state, err = bcR.blockExec.ApplyBlock(state, firstID, first, second.LastCommit)//TODO: do we need second.LastCommit? if err != nil { // TODO This is bad, are we zombie? panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) diff --git a/internal/blocksync/reactor_test.go b/internal/blocksync/reactor_test.go index 241e972358d..27276c123b1 100644 --- a/internal/blocksync/reactor_test.go +++ b/internal/blocksync/reactor_test.go @@ -166,7 +166,7 @@ func newReactor( ExtendedSignatures: []types.ExtendedCommitSig{vote.ExtendedCommitSig()}, } - state, err = blockExec.ApplyBlock(state, blockID, thisBlock, maxBlockHeight) + state, err = blockExec.ApplyBlock(state, blockID, thisBlock, maxBlockHeight) //TODO: may need last commit if err != nil { panic(fmt.Errorf("error apply block: %w", err)) } diff --git a/internal/consts/consts.go b/internal/consts/consts.go new file mode 100644 index 00000000000..1c309b6fdf2 --- /dev/null +++ b/internal/consts/consts.go @@ -0,0 +1,47 @@ +package consts + +import ( + "crypto/sha256" +) + +const ( + // TxInclusionProofQueryPath is the path used to query the application for a + // tx inclusion proof via the ABCI Query method. The desired transaction + // index must be formatted into the path. + TxInclusionProofQueryPath = "custom/txInclusionProof/%d" + + // ShareInclusionProofQueryPath is the path used to query the application for the + // shares to data root inclusion proofs via the ABCI query method. 
+ ShareInclusionProofQueryPath = "custom/shareInclusionProof/%d/%d" + + // ProtoBlobTxTypeID is included in each encoded BlobTx to help prevent + // decoding binaries that are not actually BlobTxs. + ProtoBlobTxTypeID = "BLOB" + + // ProtoIndexWrapperTypeID is included in each encoded IndexWrapper to help prevent + // decoding binaries that are not actually IndexWrappers. + ProtoIndexWrapperTypeID = "INDX" + + // NamespaveVersionSize is the size of a namespace version in bytes. + NamespaceVersionSize = 1 + + // NamespaceIDSize is the size of a namespace ID in bytes. + NamespaceIDSize = 28 + + // NamespaceSize is the size of a namespace in bytes. + NamespaceSize = NamespaceIDSize + NamespaceVersionSize +) + +var ( + // TxNamespaceID is the namespace ID reserved for transaction data. It does + // not contain a leading version byte. + TxNamespaceID = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1} + + // NewBaseHashFunc change accordingly if another hash.Hash should be used as a base hasher in the NMT: + NewBaseHashFunc = sha256.New + + // DataCommitmentBlocksLimit is the limit to the number of blocks we can generate a data commitment for. + // Deprecated: this is no longer used as we're moving towards Blobstream X. However, we're leaving it + // here for backwards compatibility purpose until it's removed in the next breaking release. 
+ DataCommitmentBlocksLimit = 1000 +) diff --git a/internal/evidence/pool_test.go b/internal/evidence/pool_test.go index 3b7d8fece28..0a8cadbee69 100644 --- a/internal/evidence/pool_test.go +++ b/internal/evidence/pool_test.go @@ -211,7 +211,7 @@ func TestEvidencePoolUpdate(t *testing.T) { val, evidenceChainID) require.NoError(t, err) lastExtCommit := makeExtCommit(height, val.PrivKey.PubKey().Address()) - block := types.MakeBlock(height+1, []types.Tx{}, lastExtCommit.ToCommit(), []types.Evidence{ev}) + block := types.MakeBlock(height+1, test.MakeData([]types.Tx{}), lastExtCommit.ToCommit(), []types.Evidence{ev}) // update state (partially) state.LastBlockHeight = height + 1 state.LastBlockTime = defaultEvidenceTime.Add(22 * time.Minute) diff --git a/internal/test/tx.go b/internal/test/tx.go index f5175a00cbd..f94fb47b882 100644 --- a/internal/test/tx.go +++ b/internal/test/tx.go @@ -9,3 +9,13 @@ func MakeNTxs(height, n int64) types.Txs { } return txs } + +func MakeTenTxs(height int64) (txs []types.Tx) { + return MakeNTxs(height, 10) +} + +func MakeData(txs []types.Tx) types.Data { + return types.Data{ + Txs: txs, + } +} diff --git a/internal/trace/buffered_file.go b/internal/trace/buffered_file.go new file mode 100644 index 00000000000..9b228e3f9e6 --- /dev/null +++ b/internal/trace/buffered_file.go @@ -0,0 +1,101 @@ +package trace + +import ( + "bufio" + "errors" + "io" + "os" + "sync" + "sync/atomic" +) + +// bufferedFile is a file that is being written to and read from. It is thread +// safe, however, when reading from the file, writes will be ignored. +type bufferedFile struct { + // reading protects the file from being written to while it is being read + // from. This is needed beyond in addition to the mutex so that writes can + // be ignored while reading. + reading atomic.Bool + + // mut protects the buffered writer. + mut *sync.Mutex + + // file is the file that is being written to. 
+ file *os.File + + // writer is the buffered writer that is writing to the file. + wr *bufio.Writer +} + +// newbufferedFile creates a new buffered file that writes to the given file. +func newbufferedFile(file *os.File) *bufferedFile { + return &bufferedFile{ + file: file, + wr: bufio.NewWriter(file), + reading: atomic.Bool{}, + mut: &sync.Mutex{}, + } +} + +// Write writes the given bytes to the file. If the file is currently being read +// from, the write will be lost. +func (f *bufferedFile) Write(b []byte) (int, error) { + if f.reading.Load() { + return 0, nil + } + f.mut.Lock() + defer f.mut.Unlock() + return f.wr.Write(b) +} + +func (f *bufferedFile) startReading() error { + f.reading.Store(true) + f.mut.Lock() + defer f.mut.Unlock() + + err := f.wr.Flush() + if err != nil { + f.reading.Store(false) + return err + } + + _, err = f.file.Seek(0, io.SeekStart) + if err != nil { + f.reading.Store(false) + return err + } + + return nil +} + +func (f *bufferedFile) stopReading() error { + f.mut.Lock() + defer f.mut.Unlock() + _, err := f.file.Seek(0, io.SeekEnd) + f.reading.Store(false) + return err +} + +// File returns the underlying file with the seek point reset. The caller should +// not close the file. The caller must call the returned function when they are +// done reading from the file. This function resets the seek point to where it +// was being written to. +func (f *bufferedFile) File() (*os.File, func() error, error) { + if f.reading.Load() { + return nil, func() error { return nil }, errors.New("file is currently being read from") + } + err := f.startReading() + if err != nil { + return nil, func() error { return nil }, err + } + return f.file, f.stopReading, nil +} + +// Close closes the file. +func (f *bufferedFile) Close() error { + // set reading to true to prevent writes while closing the file. 
+ f.mut.Lock() + defer f.mut.Unlock() + f.reading.Store(true) + return f.file.Close() +} diff --git a/internal/trace/decoder.go b/internal/trace/decoder.go new file mode 100644 index 00000000000..abf24f40061 --- /dev/null +++ b/internal/trace/decoder.go @@ -0,0 +1,34 @@ +package trace + +import ( + "bufio" + "encoding/json" + "io" + "os" +) + +// DecodeFile reads a file and decodes it into a slice of events via +// scanning. The table parameter is used to determine the type of the events. +// The file should be a jsonl file. The generic here are passed to the event +// type. +func DecodeFile[T any](f *os.File) ([]Event[T], error) { + var out []Event[T] + r := bufio.NewReader(f) + for { + line, err := r.ReadString('\n') + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + + var e Event[T] + if err := json.Unmarshal([]byte(line), &e); err != nil { + return nil, err + } + + out = append(out, e) + } + + return out, nil +} diff --git a/internal/trace/doc.go b/internal/trace/doc.go new file mode 100644 index 00000000000..27cf777c20b --- /dev/null +++ b/internal/trace/doc.go @@ -0,0 +1,2 @@ +/**/ +package trace diff --git a/internal/trace/fileserver.go b/internal/trace/fileserver.go new file mode 100644 index 00000000000..ce9a1daae82 --- /dev/null +++ b/internal/trace/fileserver.go @@ -0,0 +1,349 @@ +package trace + +import ( + "bufio" + "encoding/json" + "errors" + "fmt" + "io" + "math/rand" + "mime" + "mime/multipart" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" +) + +const jsonL = ".jsonl" + +func (lt *LocalTracer) getTableHandler() http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // Parse the request to get the data + if err := r.ParseForm(); err != nil { + http.Error(w, "Failed to parse form", http.StatusBadRequest) + 
return + } + + inputString := r.FormValue("table") + if inputString == "" { + http.Error(w, "No data provided", http.StatusBadRequest) + return + } + + f, done, err := lt.readTable(inputString) + if err != nil { + http.Error(w, fmt.Sprintf("failed to read table: %v", err), http.StatusInternalServerError) + return + } + defer done() //nolint:errcheck + + // Use the pump function to continuously read from the file and write to + // the response writer + reader, writer := pump(inputString, bufio.NewReader(f)) + defer reader.Close() + + // Set the content type to the writer's form data content type + w.Header().Set("Content-Type", writer.FormDataContentType()) + + // Copy the data from the reader to the response writer + if _, err := io.Copy(w, reader); err != nil { + http.Error(w, "Failed to send data", http.StatusInternalServerError) + return + } + } +} + +// pump continuously reads from a bufio.Reader and writes to a multipart.Writer. +// It returns the reader end of the pipe and the writer for consumption by the +// server. +func pump(table string, br *bufio.Reader) (*io.PipeReader, *multipart.Writer) { + r, w := io.Pipe() + m := multipart.NewWriter(w) + + go func( + table string, + m *multipart.Writer, + w *io.PipeWriter, + br *bufio.Reader, + ) { + defer w.Close() + defer m.Close() + + part, err := m.CreateFormFile("filename", table+jsonL) + if err != nil { + return + } + + if _, err = io.Copy(part, br); err != nil { + return + } + + }(table, m, w, br) + + return r, m +} + +func (lt *LocalTracer) servePullData() { + mux := http.NewServeMux() + mux.HandleFunc("/get_table", lt.getTableHandler()) + err := http.ListenAndServe(lt.cfg.Instrumentation.TracePullAddress, mux) //nolint:gosec + if err != nil { + lt.logger.Error("trace pull server failure", "err", err) + } + lt.logger.Info("trace pull server started", "address", lt.cfg.Instrumentation.TracePullAddress) +} + +// GetTable downloads a table from the server and saves it to the given directory. 
It uses a multipart +// response to download the file. +func GetTable(serverURL, table, dirPath string) error { + data := url.Values{} + data.Set("table", table) + + serverURL = serverURL + "/get_table" + + resp, err := http.PostForm(serverURL, data) //nolint:gosec + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("unexpected status code: %d", resp.StatusCode) + } + + _, params, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) + if err != nil { + return err + } + + boundary, ok := params["boundary"] + if !ok { + panic("Not a multipart response") + } + + err = os.MkdirAll(dirPath, 0755) + if err != nil { + return err + } + + outputFile, err := os.Create(path.Join(dirPath, table+jsonL)) + if err != nil { + return err + } + defer outputFile.Close() + + reader := multipart.NewReader(resp.Body, boundary) + + for { + part, err := reader.NextPart() + if err == io.EOF { + break // End of multipart + } + if err != nil { + return err + } + + contentDisposition, params, err := mime.ParseMediaType(part.Header.Get("Content-Disposition")) + if err != nil { + return err + } + + if contentDisposition == "form-data" && params["filename"] != "" { + _, err = io.Copy(outputFile, part) + if err != nil { + return err + } + } + + part.Close() + } + + return nil +} + +// S3Config is a struct that holds the configuration for an S3 bucket. +type S3Config struct { + BucketName string `json:"bucket_name"` + Region string `json:"region"` + AccessKey string `json:"access_key"` + SecretKey string `json:"secret_key"` + // PushDelay is the time in seconds to wait before pushing the file to S3. + // If this is 0, it defaults is used. + PushDelay int64 `json:"push_delay"` +} + +// readS3Config reads an S3Config from a file in the given directory. 
+func readS3Config(dir string) (S3Config, error) { + cfg := S3Config{} + f, err := os.Open(filepath.Join(dir, "s3.json")) + if errors.Is(err, os.ErrNotExist) { + return cfg, nil + } + if err != nil { + return cfg, err + } + defer f.Close() + err = json.NewDecoder(f).Decode(&cfg) + if cfg.PushDelay == 0 { + cfg.PushDelay = 60 + } + return cfg, err +} + +// PushS3 pushes a file to an S3 bucket using the given S3Config. It uses the +// chainID and the nodeID to organize the files in the bucket. The directory +// structure is chainID/nodeID/table.jsonl . +func PushS3(chainID, nodeID string, s3cfg S3Config, f *os.File) error { + sess, err := session.NewSession(&aws.Config{ + Region: aws.String(s3cfg.Region), + Credentials: credentials.NewStaticCredentials( + s3cfg.AccessKey, + s3cfg.SecretKey, + "", + ), + HTTPClient: &http.Client{ + Timeout: time.Duration(15) * time.Second, + }, + }, + ) + if err != nil { + return err + } + + s3Svc := s3.New(sess) + + key := fmt.Sprintf("%s/%s/%s", chainID, nodeID, filepath.Base(f.Name())) + + _, err = s3Svc.PutObject(&s3.PutObjectInput{ + Bucket: aws.String(s3cfg.BucketName), + Key: aws.String(key), + Body: f, + }) + + return err +} + +func (lt *LocalTracer) pushLoop() { + for { + time.Sleep(time.Second * time.Duration(lt.s3Config.PushDelay)) + err := lt.PushAll() + if err != nil { + lt.logger.Error("failed to push tables", "error", err) + } + } +} + +func (lt *LocalTracer) PushAll() error { + for table := range lt.fileMap { + f, done, err := lt.readTable(table) + if err != nil { + return err + } + for i := 0; i < 3; i++ { + err = PushS3(lt.chainID, lt.nodeID, lt.s3Config, f) + if err == nil { + break + } + lt.logger.Error("failed to push table", "table", table, "error", err) + time.Sleep(time.Second * time.Duration(rand.Intn(3))) //nolint:gosec + } + err = done() + if err != nil { + return err + } + } + return nil +} + +// S3Download downloads files that match some prefix from an S3 bucket to a +// local directory dst. 
+// fileNames is a list of traced jsonl file names to download. If it is empty, all traces are downloaded. +// fileNames should not have .jsonl suffix. +func S3Download(dst, prefix string, cfg S3Config, fileNames ...string) error { + // Ensure local directory structure exists + err := os.MkdirAll(dst, os.ModePerm) + if err != nil { + return err + } + + sess, err := session.NewSession(&aws.Config{ + Region: aws.String(cfg.Region), + Credentials: credentials.NewStaticCredentials( + cfg.AccessKey, + cfg.SecretKey, + "", + ), + }, + ) + if err != nil { + return err + } + + s3Svc := s3.New(sess) + input := &s3.ListObjectsV2Input{ + Bucket: aws.String(cfg.BucketName), + Prefix: aws.String(prefix), + Delimiter: aws.String(""), + } + + err = s3Svc.ListObjectsV2Pages(input, func(page *s3.ListObjectsV2Output, lastPage bool) bool { + for _, content := range page.Contents { + key := *content.Key + + // If no fileNames are specified, download all files + if len(fileNames) == 0 { + fileNames = append(fileNames, strings.TrimPrefix(key, prefix)) + } + + for _, filename := range fileNames { + // Add .jsonl suffix to the fileNames + fullFilename := filename + jsonL + if strings.HasSuffix(key, fullFilename) { + localFilePath := filepath.Join(dst, prefix, strings.TrimPrefix(key, prefix)) + fmt.Printf("Downloading %s to %s\n", key, localFilePath) + + // Create the directories in the path + if err := os.MkdirAll(filepath.Dir(localFilePath), os.ModePerm); err != nil { + return false + } + + // Create a file to write the S3 Object contents to. 
+ f, err := os.Create(localFilePath) + if err != nil { + return false + } + + resp, err := s3Svc.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(cfg.BucketName), + Key: aws.String(key), + }) + if err != nil { + f.Close() + continue + } + defer resp.Body.Close() + + // Copy the contents of the S3 object to the local file + if _, err := io.Copy(f, resp.Body); err != nil { + f.Close() + return false + } + + fmt.Printf("Successfully downloaded %s to %s\n", key, localFilePath) + f.Close() + } + } + } + return !lastPage // continue paging + }) + return err +} diff --git a/internal/trace/flags.go b/internal/trace/flags.go new file mode 100644 index 00000000000..6f17eebd27e --- /dev/null +++ b/internal/trace/flags.go @@ -0,0 +1,13 @@ +package trace + +const ( + FlagTracePushConfig = "trace-push-url" + FlagTracePullAddress = "trace-pull-address" + FlagTracePushConfigDescription = "URL of the trace push server" + FlagTracePullAddressDescription = "address to listen on for pulling trace data" + + FlagPyroscopeURL = "pyroscope-url" + FlagPyroscopeURLDescription = "URL of the Pyroscope instance to use for continuous profiling. 
If not specified, profiling will not be enabled" + FlagPyroscopeTrace = "pyroscope-trace" + FlagPyroscopeTraceDescription = "enable adding trace data to pyroscope profiling" +) diff --git a/internal/trace/local_tracer.go b/internal/trace/local_tracer.go new file mode 100644 index 00000000000..ae064ea5e17 --- /dev/null +++ b/internal/trace/local_tracer.go @@ -0,0 +1,241 @@ +package trace + +import ( + "encoding/json" + "fmt" + "os" + "path" + "strconv" + "strings" + "time" + + "github.com/cometbft/cometbft/config" + "github.com/cometbft/cometbft/libs/log" +) + +const ( + PushBucketName = "TRACE_PUSH_BUCKET_NAME" + PushRegion = "TRACE_PUSH_REGION" + PushAccessKey = "TRACE_PUSH_ACCESS_KEY" + PushKey = "TRACE_PUSH_SECRET_KEY" + PushDelay = "TRACE_PUSH_DELAY" +) + +// Event wraps some trace data with metadata that dictates the table and things +// like the chainID and nodeID. +type Event[T any] struct { + ChainID string `json:"chain_id"` + NodeID string `json:"node_id"` + Table string `json:"table"` + Timestamp time.Time `json:"timestamp"` + Msg T `json:"msg"` +} + +// NewEvent creates a new Event with the given chainID, nodeID, table, and msg. +// It adds the current time as the timestamp. +func NewEvent[T any](chainID, nodeID, table string, msg T) Event[T] { + return Event[T]{ + ChainID: chainID, + NodeID: nodeID, + Table: table, + Msg: msg, + Timestamp: time.Now(), + } +} + +// LocalTracer saves all of the events passed to the retuen channel to files +// based on their "type" (a string field in the event). Each type gets its own +// file. The internals are purposefully not *explicitly* thread safe to avoid the +// overhead of locking with each event save. Only pass events to the returned +// channel. Call CloseAll to close all open files. +type LocalTracer struct { + chainID, nodeID string + logger log.Logger + cfg *config.Config + s3Config S3Config + + // fileMap maps tables to their open files files are threadsafe, but the map + // is not. 
Therefore don't create new files after initialization to remain
+	// threadsafe.
+	fileMap map[string]*bufferedFile
+	// canal is a channel for all events that are being written. It acts as an
+	// extra buffer to avoid blocking the caller when writing to files.
+	canal chan Event[Entry]
+}
+
+// NewLocalTracer creates a struct that will save all of the events passed to
+// the returned channel to files based on their "table" (a string field in the
+// event). Each type gets its own file. The internals are purposefully not thread
+// safe to avoid the overhead of locking with each event save. Only pass events
+// to the returned channel. Call Stop to close all open files. Goroutine to
+// save events is started in this function.
+func NewLocalTracer(cfg *config.Config, logger log.Logger, chainID, nodeID string) (*LocalTracer, error) {
+	fm := make(map[string]*bufferedFile)
+	p := path.Join(cfg.RootDir, "data", "traces")
+	for _, table := range splitAndTrimEmpty(cfg.Instrumentation.TracingTables, ",", " ") {
+		fileName := fmt.Sprintf("%s/%s.jsonl", p, table)
+		err := os.MkdirAll(p, 0700)
+		if err != nil {
+			return nil, fmt.Errorf("failed to create directory %s: %w", p, err)
+		}
+		file, err := os.OpenFile(fileName, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0644)
+		if err != nil {
+			return nil, fmt.Errorf("failed to open or create file %s: %w", fileName, err)
+		}
+		fm[table] = newbufferedFile(file)
+	}
+
+	lt := &LocalTracer{
+		fileMap: fm,
+		cfg:     cfg,
+		canal:   make(chan Event[Entry], cfg.Instrumentation.TraceBufferSize),
+		chainID: chainID,
+		nodeID:  nodeID,
+		logger:  logger,
+	}
+
+	go lt.drainCanal()
+	if cfg.Instrumentation.TracePullAddress != "" {
+		logger.Info("starting pull server", "address", cfg.Instrumentation.TracePullAddress)
+		go lt.servePullData()
+	}
+
+	if cfg.Instrumentation.TracePushConfig != "" {
+		s3Config, err := readS3Config(path.Join(cfg.RootDir, "config", cfg.Instrumentation.TracePushConfig))
+		if err != nil {
+			return nil, fmt.Errorf("failed to read 
s3 config: %w", err)
+		}
+		lt.s3Config = s3Config
+		go lt.pushLoop()
+	} else if s3Config, err := GetPushConfigFromEnv(); err == nil {
+		lt.s3Config = s3Config
+		go lt.pushLoop()
+	}
+
+	return lt, nil
+}
+
+// GetPushConfigFromEnv reads the environment variables required to push trace
+// data to an S3 bucket.
+func GetPushConfigFromEnv() (S3Config, error) {
+	bucketName := os.Getenv(PushBucketName)
+	region := os.Getenv(PushRegion)
+	accessKey := os.Getenv(PushAccessKey)
+	secretKey := os.Getenv(PushKey)
+	pushDelay, err := strconv.ParseInt(os.Getenv(PushDelay), 10, 64)
+	if err != nil {
+		return S3Config{}, err
+	}
+	if bucketName == "" || region == "" || accessKey == "" || secretKey == "" {
+		return S3Config{}, fmt.Errorf("missing required environment variables")
+	}
+	var s3Config = S3Config{
+		BucketName: bucketName,
+		Region:     region,
+		AccessKey:  accessKey,
+		SecretKey:  secretKey,
+		PushDelay:  pushDelay,
+	}
+	return s3Config, nil
+}
+
+func (lt *LocalTracer) Write(e Entry) {
+	if !lt.IsCollecting(e.Table()) {
+		return
+	}
+	lt.canal <- NewEvent(lt.chainID, lt.nodeID, e.Table(), e)
+}
+
+// readTable returns a file for the given table along with a done function to
+// call when finished reading. If the table is not being collected, an error is
+// returned. The caller should not close the file.
+func (lt *LocalTracer) readTable(table string) (*os.File, func() error, error) {
+	bf, has := lt.getFile(table)
+	if !has {
+		return nil, func() error { return nil }, fmt.Errorf("table %s not found", table)
+	}
+
+	return bf.File()
+}
+
+func (lt *LocalTracer) IsCollecting(table string) bool {
+	_, has := lt.getFile(table)
+	return has
+}
+
+// getFile gets a file for the given type. This method is purposely
+// not thread-safe to avoid the overhead of locking with each event save.
+func (lt *LocalTracer) getFile(table string) (*bufferedFile, bool) {
+	f, has := lt.fileMap[table]
+	return f, has
+}
+
+// saveEventToFile marshals an Event into JSON and appends it to a file named after the event's Type. 
+func (lt *LocalTracer) saveEventToFile(event Event[Entry]) error {
+	file, has := lt.getFile(event.Table)
+	if !has {
+		return fmt.Errorf("table %s not found", event.Table)
+	}
+
+	eventJSON, err := json.Marshal(event)
+	if err != nil {
+		return fmt.Errorf("failed to marshal event: %v", err)
+	}
+
+	if _, err := file.Write(append(eventJSON, '\n')); err != nil {
+		return fmt.Errorf("failed to write event to file: %v", err)
+	}
+
+	return nil
+}
+
+// drainCanal drains the canal channel, writing each received event to the file
+// for its table.
+func (lt *LocalTracer) drainCanal() {
+	// purposefully do not lock, and rely on the channel to provide sync
+	// actions, to avoid overhead of locking with each event save.
+	for ev := range lt.canal {
+		if err := lt.saveEventToFile(ev); err != nil {
+			lt.logger.Error("failed to save event to file", "error", err)
+		}
+	}
+}
+
+// Stop optionally uploads and closes all open files.
+func (lt *LocalTracer) Stop() {
+	if lt.s3Config.SecretKey != "" {
+		lt.logger.Info("pushing all tables before stopping")
+		err := lt.PushAll()
+		if err != nil {
+			lt.logger.Error("failed to push tables", "error", err)
+		}
+	}
+
+	for _, file := range lt.fileMap {
+		err := file.Close()
+		if err != nil {
+			lt.logger.Error("failed to close file", "error", err)
+		}
+	}
+}
+
+// splitAndTrimEmpty slices s into all subslices separated by sep and returns a
+// slice of the string s with all leading and trailing Unicode code points
+// contained in cutset removed. If sep is empty, SplitAndTrim splits after each
+// UTF-8 sequence. First part is equivalent to strings.SplitN with a count of
+// -1. Also filters out empty strings, returning only non-empty strings.
+//
+// NOTE: this is copy pasted from the config package to avoid a circular
+// dependency. See the function of the same name for tests. 
+func splitAndTrimEmpty(s, sep, cutset string) []string { + if s == "" { + return []string{} + } + + spl := strings.Split(s, sep) + nonEmptyStrings := make([]string, 0, len(spl)) + for i := 0; i < len(spl); i++ { + element := strings.Trim(spl[i], cutset) + if element != "" { + nonEmptyStrings = append(nonEmptyStrings, element) + } + } + return nonEmptyStrings +} diff --git a/internal/trace/local_tracer_test.go b/internal/trace/local_tracer_test.go new file mode 100644 index 00000000000..782ef594309 --- /dev/null +++ b/internal/trace/local_tracer_test.go @@ -0,0 +1,183 @@ +package trace + +import ( + "fmt" + "io" + "net" + "os" + "path" + "testing" + "time" + + "github.com/cometbft/cometbft/config" + "github.com/cometbft/cometbft/libs/log" + "github.com/stretchr/testify/require" +) + +const ( + // testEventTable is the table name for the testEvent struct. + testEventTable = "testEvent" +) + +type testEvent struct { + City string `json:"city"` + Length int `json:"length"` +} + +func (c testEvent) Table() string { + return testEventTable +} + +// TestLocalTracerReadWrite tests the local client by writing some events, +// reading them back and comparing them, writing at the same time as reading. +func TestLocalTracerReadWrite(t *testing.T) { + port, err := getFreePort() + require.NoError(t, err) + client := setupLocalTracer(t, port) + + annecy := testEvent{"Annecy", 420} + paris := testEvent{"Paris", 420} + client.Write(annecy) + client.Write(paris) + + time.Sleep(100 * time.Millisecond) + + f, done, err := client.readTable(testEventTable) + require.NoError(t, err) + + // write at the same time as reading to test thread safety this test will be + // flakey if this is not being handled correctly. Since we're reading from + // the file, we expect these write to be ignored. 
+ migenees := testEvent{"Migennes", 620} + pontivy := testEvent{"Pontivy", 720} + client.Write(migenees) + client.Write(pontivy) + + // wait to ensure that the write have been processed (and ignored in this case) + time.Sleep(100 * time.Millisecond) + + events, err := DecodeFile[testEvent](f) + require.NoError(t, err) + err = done() + require.NoError(t, err) + + // even though we've written twice, we expect only the first two events to be + // be written to the file. When reading the file, all writes are ignored. + require.GreaterOrEqual(t, len(events), 2) + require.Equal(t, annecy, events[0].Msg) + require.Equal(t, paris, events[1].Msg) + + // write again to the file and read it back this time, we expect the writes + // to be written since we've called the done() function. + client.Write(migenees) + client.Write(pontivy) + + time.Sleep(100 * time.Millisecond) + + f, done, err = client.readTable(testEventTable) + require.NoError(t, err) + events, err = DecodeFile[testEvent](f) + require.NoError(t, err) + err = done() + require.NoError(t, err) + require.Len(t, events, 4) + require.Equal(t, migenees, events[2].Msg) + require.Equal(t, pontivy, events[3].Msg) +} + +// TestLocalTracerServerPull tests the pull portion of the server. +func TestLocalTracerServerPull(t *testing.T) { + port, err := getFreePort() + require.NoError(t, err) + client := setupLocalTracer(t, port) + + for i := 0; i < 5; i++ { + client.Write(testEvent{"Annecy", i}) + } + + // Wait for the server to start + time.Sleep(100 * time.Millisecond) + + // Test the server + newDir := t.TempDir() + + url := fmt.Sprintf("http://localhost:%d", port) + + // try to read a table that is not being collected. error expected. 
+ err = GetTable(url, "canal", newDir) + require.Error(t, err) + + err = GetTable(url, testEventTable, newDir) + require.NoError(t, err) + + originalFile, done, err := client.readTable(testEventTable) + require.NoError(t, err) + originalBz, err := io.ReadAll(originalFile) + require.NoError(t, err) + err = done() + require.NoError(t, err) + + path := path.Join(newDir, testEventTable+".jsonl") + downloadedFile, err := os.Open(path) + require.NoError(t, err) + defer downloadedFile.Close() + + downloadedBz, err := io.ReadAll(downloadedFile) + require.NoError(t, err) + require.Equal(t, originalBz, downloadedBz) + + _, err = downloadedFile.Seek(0, 0) // reset the seek on the file to read it again + require.NoError(t, err) + events, err := DecodeFile[testEvent](downloadedFile) + require.NoError(t, err) + require.Len(t, events, 5) + for i := 0; i < 5; i++ { + require.Equal(t, i, events[i].Msg.Length) + } +} + +// TestReadPushConfigFromConfigFile tests reading the push config from the environment variables. 
+func TestReadPushConfigFromEnvVars(t *testing.T) { + os.Setenv(PushBucketName, "bucket") + os.Setenv(PushRegion, "region") + os.Setenv(PushAccessKey, "access") + os.Setenv(PushKey, "secret") + os.Setenv(PushDelay, "10") + + lt := setupLocalTracer(t, 0) + require.Equal(t, "bucket", lt.s3Config.BucketName) + require.Equal(t, "region", lt.s3Config.Region) + require.Equal(t, "access", lt.s3Config.AccessKey) + require.Equal(t, "secret", lt.s3Config.SecretKey) + require.Equal(t, int64(10), lt.s3Config.PushDelay) +} +func setupLocalTracer(t *testing.T, port int) *LocalTracer { + logger := log.NewNopLogger() + cfg := config.DefaultConfig() + cfg.SetRoot(t.TempDir()) + cfg.Instrumentation.TraceBufferSize = 100 + cfg.Instrumentation.TracingTables = testEventTable + cfg.Instrumentation.TracePullAddress = fmt.Sprintf(":%d", port) + + client, err := NewLocalTracer(cfg, logger, "test_chain", "test_node") + if err != nil { + t.Fatalf("failed to create local client: %v", err) + } + + return client +} + +// getFreePort returns a free port and optionally an error. +func getFreePort() (int, error) { + a, err := net.ResolveTCPAddr("tcp", "localhost:0") + if err != nil { + return 0, err + } + + l, err := net.ListenTCP("tcp", a) + if err != nil { + return 0, err + } + defer l.Close() + return l.Addr().(*net.TCPAddr).Port, nil +} diff --git a/internal/trace/schema/consensus.go b/internal/trace/schema/consensus.go new file mode 100644 index 00000000000..f47f86f5b82 --- /dev/null +++ b/internal/trace/schema/consensus.go @@ -0,0 +1,250 @@ +package schema + +import ( + "github.com/cometbft/cometbft/internal/trace" + "github.com/cometbft/cometbft/types" +) + +// ConsensusTables returns the list of tables that are used for consensus +// tracing. +func ConsensusTables() []string { + return []string{ + RoundStateTable, + BlockPartsTable, + BlockTable, + VoteTable, + ConsensusStateTable, + ProposalTable, + } +} + +// Schema constants for the consensus round state tracing database. 
+const ( + // RoundStateTable is the name of the table that stores the consensus + // state traces. + RoundStateTable = "consensus_round_state" +) + +// RoundState describes schema for the "consensus_round_state" table. +type RoundState struct { + Height int64 `json:"height"` + Round int32 `json:"round"` + Step uint8 `json:"step"` +} + +// Table returns the table name for the RoundState struct. +func (r RoundState) Table() string { + return RoundStateTable +} + +// WriteRoundState writes a tracing point for a tx using the predetermined +// schema for consensus state tracing. +func WriteRoundState(client trace.Tracer, height int64, round int32, step uint8) { + client.Write(RoundState{Height: height, Round: round, Step: step}) +} + +// Schema constants for the "consensus_block_parts" table. +const ( + // BlockPartsTable is the name of the table that stores the consensus block + // parts. + BlockPartsTable = "consensus_block_parts" +) + +// BlockPart describes schema for the "consensus_block_parts" table. +type BlockPart struct { + Height int64 `json:"height"` + Round int32 `json:"round"` + Index int32 `json:"index"` + Catchup bool `json:"catchup"` + Peer string `json:"peer"` + TransferType TransferType `json:"transfer_type"` +} + +// Table returns the table name for the BlockPart struct. +func (b BlockPart) Table() string { + return BlockPartsTable +} + +// WriteBlockPart writes a tracing point for a BlockPart using the predetermined +// schema for consensus state tracing. +func WriteBlockPart( + client trace.Tracer, + height int64, + round int32, + index uint32, + catchup bool, + peer string, + transferType TransferType, +) { + // this check is redundant to what is checked during client.Write, although it + // is an optimization to avoid allocations from the map of fields. 
+ if !client.IsCollecting(BlockPartsTable) { + return + } + client.Write(BlockPart{ + Height: height, + Round: round, + //nolint:gosec + Index: int32(index), + Catchup: catchup, + Peer: peer, + TransferType: transferType, + }) +} + +// Schema constants for the consensus votes tracing database. +const ( + // VoteTable is the name of the table that stores the consensus + // voting traces. + VoteTable = "consensus_vote" +) + +// Vote describes schema for the "consensus_vote" table. +type Vote struct { + Height int64 `json:"height"` + Round int32 `json:"round"` + VoteType string `json:"vote_type"` + VoteHeight int64 `json:"vote_height"` + VoteRound int32 `json:"vote_round"` + VoteMillisecondTimestamp int64 `json:"vote_unix_millisecond_timestamp"` + ValidatorAddress string `json:"vote_validator_address"` + Peer string `json:"peer"` + TransferType TransferType `json:"transfer_type"` +} + +func (v Vote) Table() string { + return VoteTable +} + +// WriteVote writes a tracing point for a vote using the predetermined +// schema for consensus vote tracing. +func WriteVote(client trace.Tracer, + height int64, // height of the current peer when it received/sent the vote + round int32, // round of the current peer when it received/sent the vote + vote *types.Vote, // vote received by the current peer + peer string, // the peer from which it received the vote or the peer to which it sent the vote + transferType TransferType, // download (received) or upload(sent) +) { + client.Write(Vote{ + Height: height, + Round: round, + VoteType: vote.Type.String(), + VoteHeight: vote.Height, + VoteRound: vote.Round, + VoteMillisecondTimestamp: vote.Timestamp.UnixMilli(), + ValidatorAddress: vote.ValidatorAddress.String(), + Peer: peer, + TransferType: transferType, + }) +} + +const ( + // BlockTable is the name of the table that stores metadata about consensus blocks. + BlockTable = "consensus_block" +) + +// BlockSummary describes schema for the "consensus_block" table. 
+type BlockSummary struct { + Height int64 `json:"height"` + UnixMillisecondTimestamp int64 `json:"unix_millisecond_timestamp"` + TxCount int `json:"tx_count"` + SquareSize uint64 `json:"square_size"` + BlockSize int `json:"block_size"` + Proposer string `json:"proposer"` + LastCommitRound int32 `json:"last_commit_round"` +} + +func (b BlockSummary) Table() string { + return BlockTable +} + +// WriteBlockSummary writes a tracing point for a block using the predetermined +func WriteBlockSummary(client trace.Tracer, block *types.Block, size int) { + client.Write(BlockSummary{ + Height: block.Height, + UnixMillisecondTimestamp: block.Time.UnixMilli(), + TxCount: len(block.Data.Txs), + SquareSize: block.SquareSize, + BlockSize: size, + Proposer: block.ProposerAddress.String(), + LastCommitRound: block.LastCommit.Round, + }) +} + +const ( + ConsensusStateTable = "consensus_state" +) + +type ConsensusStateUpdateType string + +const ( + ConsensusNewValidBlock ConsensusStateUpdateType = "new_valid_block" + ConsensusNewRoundStep ConsensusStateUpdateType = "new_round_step" + ConsensusVoteSetBits ConsensusStateUpdateType = "vote_set_bits" + ConsensusVoteSet23Prevote ConsensusStateUpdateType = "vote_set_23_prevote" + ConsensusVoteSet23Precommit ConsensusStateUpdateType = "vote_set_23_precommit" + ConsensusHasVote ConsensusStateUpdateType = "has_vote" + ConsensusPOL ConsensusStateUpdateType = "pol" +) + +type ConsensusState struct { + Height int64 `json:"height"` + Round int32 `json:"round"` + UpdateType string `json:"update_type"` + Peer string `json:"peer"` + TransferType TransferType `json:"transfer_type"` + Data []string `json:"data,omitempty"` +} + +func (c ConsensusState) Table() string { + return ConsensusStateTable +} + +func WriteConsensusState( + client trace.Tracer, + height int64, + round int32, + peer string, + updateType ConsensusStateUpdateType, + transferType TransferType, + data ...string, +) { + client.Write(ConsensusState{ + Height: height, + Round: round, + 
Peer: peer, + UpdateType: string(updateType), + TransferType: transferType, + Data: data, + }) +} + +const ( + ProposalTable = "consensus_proposal" +) + +type Proposal struct { + Height int64 `json:"height"` + Round int32 `json:"round"` + PeerID string `json:"peer_id"` + TransferType TransferType `json:"transfer_type"` +} + +func (p Proposal) Table() string { + return ProposalTable +} + +func WriteProposal( + client trace.Tracer, + height int64, + round int32, + peerID string, + transferType TransferType, +) { + client.Write(Proposal{ + Height: height, + Round: round, + PeerID: peerID, + TransferType: transferType, + }) +} diff --git a/internal/trace/schema/mempool.go b/internal/trace/schema/mempool.go new file mode 100644 index 00000000000..4c7ea47b00a --- /dev/null +++ b/internal/trace/schema/mempool.go @@ -0,0 +1,100 @@ +package schema + +import ( + "github.com/cometbft/cometbft/internal/trace" + "github.com/cometbft/cometbft/libs/bytes" +) + +// MempoolTables returns the list of tables for mempool tracing. +func MempoolTables() []string { + return []string{ + MempoolTxTable, + MempoolPeerStateTable, + } +} + +// Schema constants for the mempool_tx table +const ( + // MempoolTxTable is the tracing "measurement" (aka table) for the mempool + // that stores tracing data related to gossiping transactions. + MempoolTxTable = "mempool_tx" +) + +// MemPoolTx describes the schema for the "mempool_tx" table. +type MempoolTx struct { + TxHash string `json:"tx_hash"` + Peer string `json:"peer"` + Size int `json:"size"` + TransferType TransferType `json:"transfer_type"` +} + +// Table returns the table name for the MempoolTx struct. +func (m MempoolTx) Table() string { + return MempoolTxTable +} + +// WriteMempoolTx writes a tracing point for a tx using the predetermined +// schema for mempool tracing. 
+func WriteMempoolTx(client trace.Tracer, peer string, txHash []byte, size int, transferType TransferType) { + // this check is redundant to what is checked during client.Write, although it + // is an optimization to avoid allocations from the map of fields. + if !client.IsCollecting(MempoolTxTable) { + return + } + client.Write(MempoolTx{ + TxHash: bytes.HexBytes(txHash).String(), + Peer: peer, + Size: size, + TransferType: transferType, + }) +} + +const ( + // MempoolPeerState is the tracing "measurement" (aka table) for the mempool + // that stores tracing data related to mempool state, specifically + // the gossipping of "SeenTx" and "WantTx". + MempoolPeerStateTable = "mempool_peer_state" +) + +type MempoolStateUpdateType string + +const ( + SeenTx MempoolStateUpdateType = "SeenTx" + WantTx MempoolStateUpdateType = "WantTx" + Unknown MempoolStateUpdateType = "Unknown" +) + +// MempoolPeerState describes the schema for the "mempool_peer_state" table. +type MempoolPeerState struct { + Peer string `json:"peer"` + StateUpdate MempoolStateUpdateType `json:"state_update"` + TxHash string `json:"tx_hash"` + TransferType TransferType `json:"transfer_type"` +} + +// Table returns the table name for the MempoolPeerState struct. +func (m MempoolPeerState) Table() string { + return MempoolPeerStateTable +} + +// WriteMempoolPeerState writes a tracing point for the mempool state using +// the predetermined schema for mempool tracing. +func WriteMempoolPeerState( + client trace.Tracer, + peer string, + stateUpdate MempoolStateUpdateType, + txHash []byte, + transferType TransferType, +) { + // this check is redundant to what is checked during client.Write, although it + // is an optimization to avoid allocations from creating the map of fields. 
+ if !client.IsCollecting(MempoolPeerStateTable) { + return + } + client.Write(MempoolPeerState{ + Peer: peer, + StateUpdate: stateUpdate, + TransferType: transferType, + TxHash: bytes.HexBytes(txHash).String(), + }) +} diff --git a/internal/trace/schema/misc.go b/internal/trace/schema/misc.go new file mode 100644 index 00000000000..d8d13f17c4d --- /dev/null +++ b/internal/trace/schema/misc.go @@ -0,0 +1,42 @@ +package schema + +import "github.com/cometbft/cometbft/internal/trace" + +const ( + ABCITable = "abci" +) + +// ABCIUpdate is an enum that represents the different types of ABCI +// trace data. +type ABCIUpdate string + +const ( + PrepareProposalStart ABCIUpdate = "prepare_proposal_start" + PrepareProposalEnd ABCIUpdate = "prepare_proposal_end" + ProcessProposalStart ABCIUpdate = "process_proposal_start" + ProcessProposalEnd ABCIUpdate = "process_proposal_end" + CommitStart ABCIUpdate = "commit_start" + CommitEnd ABCIUpdate = "commit_end" +) + +// ABCI describes schema for the "abci" table. +type ABCI struct { + TraceType string `json:"trace"` + Height int64 `json:"height"` + Round int32 `json:"round"` +} + +// Table returns the table name for the ABCI struct and fulfills the +// trace.Entry interface. +func (m ABCI) Table() string { + return ABCITable +} + +// WriteABCI writes a trace for an ABCI method. 
+func WriteABCI(client trace.Tracer, traceType ABCIUpdate, height int64, round int32) {
+	client.Write(ABCI{
+		TraceType: string(traceType),
+		Height:    height,
+		Round:     round,
+	})
+}
diff --git a/internal/trace/schema/p2p.go b/internal/trace/schema/p2p.go
new file mode 100644
index 00000000000..c5d57ed97ae
--- /dev/null
+++ b/internal/trace/schema/p2p.go
@@ -0,0 +1,82 @@
+package schema
+
+import "github.com/cometbft/cometbft/internal/trace"
+
+// P2PTables returns the list of tables that are used for p2p tracing.
+func P2PTables() []string {
+	return []string{
+		PeersTable,
+		PendingBytesTable,
+		ReceivedBytesTable,
+	}
+}
+
+const (
+	// PeersTable is the name of the table that stores the p2p peer
+	// updates.
+	PeersTable = "peers"
+)
+
+// P2PPeerUpdate is an enum that represents the different types of p2p
+// trace data.
+type P2PPeerUpdate string
+
+const (
+	// PeerJoin is the action for when a peer is connected.
+	PeerJoin P2PPeerUpdate = "connect"
+	// PeerDisconnect is the action for when a peer is disconnected.
+	PeerDisconnect P2PPeerUpdate = "disconnect"
+)
+
+// PeerUpdate describes schema for the "peers" table.
+type PeerUpdate struct {
+	PeerID string `json:"peer_id"`
+	Action string `json:"action"`
+	Reason string `json:"reason"`
+}
+
+// Table returns the table name for the PeerUpdate struct.
+func (p PeerUpdate) Table() string {
+	return PeersTable
+}
+
+// WritePeerUpdate writes a tracing point for a peer update using the predetermined
+// schema for p2p tracing.
+func WritePeerUpdate(client trace.Tracer, peerID string, action P2PPeerUpdate, reason string) {
+	client.Write(PeerUpdate{PeerID: peerID, Action: string(action), Reason: reason})
+}
+
+const (
+	PendingBytesTable = "pending_bytes"
+)
+
+type PendingBytes struct {
+	PeerID string       `json:"peer_id"`
+	Bytes  map[byte]int `json:"bytes"`
+}
+
+func (s PendingBytes) Table() string {
+	return PendingBytesTable
+}
+
+func WritePendingBytes(client trace.Tracer, peerID string, bytes map[byte]int) {
+	client.Write(PendingBytes{PeerID: peerID, Bytes: bytes})
+}
+
+const (
+	ReceivedBytesTable = "received_bytes"
+)
+
+type ReceivedBytes struct {
+	PeerID  string `json:"peer_id"`
+	Channel byte   `json:"channel"`
+	Bytes   int    `json:"bytes"`
+}
+
+func (s ReceivedBytes) Table() string {
+	return ReceivedBytesTable
+}
+
+func WriteReceivedBytes(client trace.Tracer, peerID string, channel byte, bytes int) {
+	client.Write(ReceivedBytes{PeerID: peerID, Channel: channel, Bytes: bytes})
+}
diff --git a/internal/trace/schema/schema.go b/internal/trace/schema/schema.go
new file mode 100644
index 00000000000..78c2fb81407
--- /dev/null
+++ b/internal/trace/schema/schema.go
@@ -0,0 +1,42 @@
+package schema
+
+import (
+	"strings"
+
+	"github.com/cometbft/cometbft/config"
+)
+
+func init() {
+	config.DefaultTracingTables = strings.Join(AllTables(), ",")
+}
+
+func AllTables() []string {
+	tables := []string{}
+	tables = append(tables, MempoolTables()...)
+	tables = append(tables, ConsensusTables()...)
+	tables = append(tables, P2PTables()...) 
+ tables = append(tables, ABCITable) + return tables +} + +const ( + Broadcast = "broadcast" +) + +type TransferType int + +const ( + Download TransferType = iota + Upload +) + +func (t TransferType) String() string { + switch t { + case Download: + return "download" + case Upload: + return "upload" + default: + return "unknown" + } +} diff --git a/internal/trace/schema/schema_test.go b/internal/trace/schema/schema_test.go new file mode 100644 index 00000000000..e93260d74d7 --- /dev/null +++ b/internal/trace/schema/schema_test.go @@ -0,0 +1,17 @@ +package schema + +// Define a test struct with various field types and json tags +type TestStruct struct { + Name string `json:"name"` + Age int `json:"age"` + Email string `json:"email"` +} + +// Mock for a custom type with String method +type CustomType int + +// TestStructWithCustomType includes a field with a custom type having a String method +type TestStructWithCustomType struct { + ID int `json:"id"` + Type CustomType `json:"type"` +} diff --git a/internal/trace/tracer.go b/internal/trace/tracer.go new file mode 100644 index 00000000000..2820612e403 --- /dev/null +++ b/internal/trace/tracer.go @@ -0,0 +1,48 @@ +package trace + +import ( + "errors" + "os" + + "github.com/cometbft/cometbft/config" + "github.com/cometbft/cometbft/libs/log" +) + +// Entry is an interface for all structs that are used to define the schema for +// traces. +type Entry interface { + // Table defines which table the struct belongs to. + Table() string +} + +// Tracer defines the methods for a client that can write and read trace data. 
+type Tracer interface { + Write(Entry) + IsCollecting(table string) bool + Stop() +} + +func NewTracer(cfg *config.Config, logger log.Logger, chainID, nodeID string) (Tracer, error) { + switch cfg.Instrumentation.TraceType { + case "local": + return NewLocalTracer(cfg, logger, chainID, nodeID) + case "noop": + return NoOpTracer(), nil + default: + logger.Error("unknown tracer type, using noop", "type", cfg.Instrumentation.TraceType) + return NoOpTracer(), nil + } +} + +func NoOpTracer() Tracer { + return &noOpTracer{} +} + +type noOpTracer struct{} + +func (n *noOpTracer) Write(_ Entry) {} +func (n *noOpTracer) ReadTable(_ string) (*os.File, error) { + return nil, errors.New("no-op tracer does not support reading") +} +func (n *noOpTracer) IsCollecting(_ string) bool { return false } +func (n *noOpTracer) Stop() {} diff --git a/libs/bytes/bytes.go b/libs/bytes/bytes.go index 7c5037eaab6..1d31e17f664 100644 --- a/libs/bytes/bytes.go +++ b/libs/bytes/bytes.go @@ -1,6 +1,7 @@ package bytes import ( + "encoding/binary" "encoding/hex" "fmt" "strings" @@ -43,6 +44,15 @@ func (bz *HexBytes) UnmarshalJSON(data []byte) error { return nil } +// TODO: is this used anywhere? +func (bz HexBytes) MarshalDelimited() ([]byte, error) { + lenBuf := make([]byte, binary.MaxVarintLen64) + length := uint64(len(bz)) + n := binary.PutUvarint(lenBuf, length) + + return append(lenBuf[:n], bz...), nil +} + // Bytes fulfills various interfaces in light-client, etc... 
func (bz HexBytes) Bytes() []byte { return bz @@ -67,3 +77,7 @@ func (bz HexBytes) Format(s fmt.State, verb rune) { } } } + +func FromBytes(b []byte) []HexBytes { + return []HexBytes{b} +} diff --git a/node/setup.go b/node/setup.go index ec9ec61d9bd..41002e47620 100644 --- a/node/setup.go +++ b/node/setup.go @@ -324,6 +324,11 @@ func createMempoolAndMempoolReactor( reactor.SetLogger(logger) return mp, reactor + case cfg.MempoolTypeCAT: + //TODO: create CAT mempool + // mp := mempl.NewCATMempool(config.Mempool, proxyApp.Mempool(), state.LastBlockHeight) + // reactor := mempl.NewReactor(config.Mempool, mp, waitSync) + // return mp, reactor case cfg.MempoolTypeNop: // Strictly speaking, there's no need to have a `mempl.NopMempoolReactor`, but // adding it leads to a cleaner code. diff --git a/patches/blockchain/v0/reactor.go.patch b/patches/blockchain/v0/reactor.go.patch new file mode 100644 index 00000000000..e13bcb6ee6c --- /dev/null +++ b/patches/blockchain/v0/reactor.go.patch @@ -0,0 +1,38 @@ +diff --git a/blockchain/v0/reactor.go b/blockchain/v0/reactor.go +index 0ac278041..b7bef9095 100644 +--- a/blockchain/v0/reactor.go ++++ b/blockchain/v0/reactor.go +@@ -366,6 +366,21 @@ FOR_LOOP: + err := state.Validators.VerifyCommitLight( + chainID, firstID, first.Height, second.LastCommit) + ++ if err == nil { ++ var stateMachineValid bool ++ // Block sync doesn't check that the `Data` in a block is valid. ++ // Since celestia-core can't determine if the `Data` in a block ++ // is valid, the next line asks celestia-app to check if the ++ // block is valid via ProcessProposal. If this step wasn't ++ // performed, a malicious node could fabricate an alternative ++ // set of transactions that would cause a different app hash and ++ // thus cause this node to panic. 
++ stateMachineValid, err = bcR.blockExec.ProcessProposal(first) ++ if !stateMachineValid { ++ err = fmt.Errorf("application has rejected syncing block (%X) at height %d", first.Hash(), first.Height) ++ } ++ } ++ + if err == nil { + // validate the block before we persist it + err = bcR.blockExec.ValidateBlock(state, first) +@@ -395,9 +410,8 @@ FOR_LOOP: + // TODO: batch saves so we dont persist to disk every block + bcR.store.SaveBlock(first, firstParts, second.LastCommit) + +- // TODO: same thing for app - but we would need a way to +- // get the hash without persisting the state +- state, _, err = bcR.blockExec.ApplyBlock(state, firstID, first) ++ // TODO: same thing for app - but we would need a way to get the hash without persisting the state ++ state, _, err = bcR.blockExec.ApplyBlock(state, firstID, first, second.LastCommit) + if err != nil { + // TODO This is bad, are we zombie? + panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) diff --git a/patches/blockchain/v0/reactor_test.go.patch b/patches/blockchain/v0/reactor_test.go.patch new file mode 100644 index 00000000000..cec358ef85d --- /dev/null +++ b/patches/blockchain/v0/reactor_test.go.patch @@ -0,0 +1,36 @@ +diff --git a/blockchain/v0/reactor_test.go b/blockchain/v0/reactor_test.go +index 0bb02b440..e2cd58e62 100644 +--- a/blockchain/v0/reactor_test.go ++++ b/blockchain/v0/reactor_test.go +@@ -22,6 +22,7 @@ import ( + "github.com/tendermint/tendermint/proxy" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/store" ++ "github.com/tendermint/tendermint/test/factory" + "github.com/tendermint/tendermint/types" + cmttime "github.com/tendermint/tendermint/types/time" + ) +@@ -123,7 +124,7 @@ func newBlockchainReactor( + thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes) + blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()} + +- state, _, err = blockExec.ApplyBlock(state, blockID, 
thisBlock) ++ state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock, lastCommit) + if err != nil { + panic(fmt.Errorf("error apply block: %w", err)) + } +@@ -314,7 +315,13 @@ func makeTxs(height int64) (txs []types.Tx) { + } + + func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block { +- block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address) ++ block, _ := state.MakeBlock( ++ height, ++ factory.MakeData(makeTxs(height)), ++ lastCommit, ++ nil, ++ state.Validators.GetProposer().Address, ++ ) + return block + } + diff --git a/patches/cmd/cometbft/commands/run_node.go.patch b/patches/cmd/cometbft/commands/run_node.go.patch new file mode 100644 index 00000000000..d05b0afaef8 --- /dev/null +++ b/patches/cmd/cometbft/commands/run_node.go.patch @@ -0,0 +1,43 @@ +diff --git a/cmd/cometbft/commands/run_node.go b/cmd/cometbft/commands/run_node.go +index acfc6f15b..0bef6485b 100644 +--- a/cmd/cometbft/commands/run_node.go ++++ b/cmd/cometbft/commands/run_node.go +@@ -12,6 +12,7 @@ import ( + cfg "github.com/tendermint/tendermint/config" + cmtos "github.com/tendermint/tendermint/libs/os" + nm "github.com/tendermint/tendermint/node" ++ "github.com/tendermint/tendermint/pkg/trace" + ) + + var ( +@@ -93,6 +94,30 @@ func AddNodeFlags(cmd *cobra.Command) { + "db_dir", + config.DBPath, + "database directory") ++ ++ cmd.PersistentFlags().String( ++ trace.FlagTracePushConfig, ++ config.Instrumentation.TracePushConfig, ++ trace.FlagTracePushConfigDescription, ++ ) ++ ++ cmd.PersistentFlags().String( ++ trace.FlagTracePullAddress, ++ config.Instrumentation.TracePullAddress, ++ trace.FlagTracePullAddressDescription, ++ ) ++ ++ cmd.PersistentFlags().String( ++ trace.FlagPyroscopeURL, ++ config.Instrumentation.PyroscopeURL, ++ trace.FlagPyroscopeURLDescription, ++ ) ++ ++ cmd.PersistentFlags().Bool( ++ trace.FlagPyroscopeTrace, ++ config.Instrumentation.PyroscopeTrace, ++ 
trace.FlagPyroscopeTraceDescription, ++ ) + } + + // NewRunNodeCmd returns the command that allows the CLI to start a node. diff --git a/patches/consensus/byzantine_test.go.patch b/patches/consensus/byzantine_test.go.patch new file mode 100644 index 00000000000..3e3420f381a --- /dev/null +++ b/patches/consensus/byzantine_test.go.patch @@ -0,0 +1,68 @@ +diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go +index 53d262444..e060601e4 100644 +--- a/consensus/byzantine_test.go ++++ b/consensus/byzantine_test.go +@@ -23,6 +23,7 @@ import ( + mempl "github.com/tendermint/tendermint/mempool" + + cfg "github.com/tendermint/tendermint/config" ++ mempoolv2 "github.com/tendermint/tendermint/mempool/cat" + mempoolv0 "github.com/tendermint/tendermint/mempool/v0" + mempoolv1 "github.com/tendermint/tendermint/mempool/v1" + "github.com/tendermint/tendermint/p2p" +@@ -88,6 +89,15 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { + mempoolv1.WithPreCheck(sm.TxPreCheck(state)), + mempoolv1.WithPostCheck(sm.TxPostCheck(state)), + ) ++ case cfg.MempoolV2: ++ mempool = mempoolv2.NewTxPool( ++ logger, ++ config.Mempool, ++ proxyAppConnConMem, ++ state.LastBlockHeight, ++ mempoolv2.WithPreCheck(sm.TxPreCheck(state)), ++ mempoolv2.WithPostCheck(sm.TxPostCheck(state)), ++ ) + } + + if thisConfig.Consensus.WaitForTxs() { +@@ -232,7 +242,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { + + // Make proposal + propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} +- proposal := types.NewProposal(height, round, lazyProposer.ValidRound, propBlockID) ++ proposal := types.NewProposal(height, round, lazyProposer.TwoThirdPrevoteRound, propBlockID) + p := proposal.ToProto() + if err := lazyProposer.privValidator.SignProposal(lazyProposer.state.ChainID, p); err == nil { + proposal.Signature = p.Signature +@@ -455,8 +465,8 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) { + case <-done: + case <-tick.C: + for i, 
reactor := range reactors { +- t.Logf(fmt.Sprintf("Consensus Reactor %v", i)) +- t.Logf(fmt.Sprintf("%v", reactor)) ++ t.Logf("Consensus Reactor %v", i) ++ t.Logf("%v", reactor) + } + t.Fatalf("Timed out waiting for all validators to commit first block") + } +@@ -471,7 +481,8 @@ func byzantineDecideProposalFunc(t *testing.T, height int64, round int32, cs *St + + // Create a new proposal block from state/txs from the mempool. + block1, blockParts1 := cs.createProposalBlock() +- polRound, propBlockID := cs.ValidRound, types.BlockID{Hash: block1.Hash(), PartSetHeader: blockParts1.Header()} ++ polRound := cs.TwoThirdPrevoteRound ++ propBlockID := types.BlockID{Hash: block1.Hash(), PartSetHeader: blockParts1.Header()} + proposal1 := types.NewProposal(height, round, polRound, propBlockID) + p1 := proposal1.ToProto() + if err := cs.privValidator.SignProposal(cs.state.ChainID, p1); err != nil { +@@ -485,7 +496,8 @@ func byzantineDecideProposalFunc(t *testing.T, height int64, round int32, cs *St + + // Create a new proposal block from state/txs from the mempool. 
+ block2, blockParts2 := cs.createProposalBlock() +- polRound, propBlockID = cs.ValidRound, types.BlockID{Hash: block2.Hash(), PartSetHeader: blockParts2.Header()} ++ polRound = cs.TwoThirdPrevoteRound ++ propBlockID = types.BlockID{Hash: block2.Hash(), PartSetHeader: blockParts2.Header()} + proposal2 := types.NewProposal(height, round, polRound, propBlockID) + p2 := proposal2.ToProto() + if err := cs.privValidator.SignProposal(cs.state.ChainID, p2); err != nil { diff --git a/patches/consensus/common_test.go.patch b/patches/consensus/common_test.go.patch new file mode 100644 index 00000000000..8f556930845 --- /dev/null +++ b/patches/consensus/common_test.go.patch @@ -0,0 +1,51 @@ +diff --git a/consensus/common_test.go b/consensus/common_test.go +index ddf487cdd..511b004ea 100644 +--- a/consensus/common_test.go ++++ b/consensus/common_test.go +@@ -29,6 +29,7 @@ import ( + cmtpubsub "github.com/tendermint/tendermint/libs/pubsub" + cmtsync "github.com/tendermint/tendermint/libs/sync" + mempl "github.com/tendermint/tendermint/mempool" ++ mempoolv2 "github.com/tendermint/tendermint/mempool/cat" + mempoolv0 "github.com/tendermint/tendermint/mempool/v0" + mempoolv1 "github.com/tendermint/tendermint/mempool/v1" + "github.com/tendermint/tendermint/p2p" +@@ -207,7 +208,7 @@ func decideProposal( + ) (proposal *types.Proposal, block *types.Block) { + cs1.mtx.Lock() + block, blockParts := cs1.createProposalBlock() +- validRound := cs1.ValidRound ++ validRound := cs1.TwoThirdPrevoteRound + chainID := cs1.state.ChainID + cs1.mtx.Unlock() + if block == nil { +@@ -418,6 +419,17 @@ func newStateWithConfigAndBlockStore( + mempoolv1.WithPreCheck(sm.TxPreCheck(state)), + mempoolv1.WithPostCheck(sm.TxPostCheck(state)), + ) ++ case cfg.MempoolV2: ++ logger := consensusLogger() ++ mempool = mempoolv2.NewTxPool( ++ logger, ++ config.Mempool, ++ proxyAppConnConMem, ++ state.LastBlockHeight, ++ mempoolv2.WithMetrics(memplMetrics), ++ mempoolv2.WithPreCheck(sm.TxPreCheck(state)), ++ 
mempoolv2.WithPostCheck(sm.TxPostCheck(state)), ++ ) + } + if thisConfig.Consensus.WaitForTxs() { + mempool.EnableTxsAvailable() +@@ -706,7 +718,10 @@ func consensusLogger() log.Logger { + return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor { + for i := 0; i < len(keyvals)-1; i += 2 { + if keyvals[i] == "validator" { +- return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))} ++ index, ok := keyvals[i+1].(int) ++ if ok { ++ return term.FgBgColor{Fg: term.Color(uint8(index + 1))} ++ } + } + } + return term.FgBgColor{} diff --git a/patches/consensus/mempool_test.go.patch b/patches/consensus/mempool_test.go.patch new file mode 100644 index 00000000000..37116dd0eb9 --- /dev/null +++ b/patches/consensus/mempool_test.go.patch @@ -0,0 +1,139 @@ +diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go +index 678091bcf..d6c4e09e5 100644 +--- a/consensus/mempool_test.go ++++ b/consensus/mempool_test.go +@@ -14,6 +14,7 @@ import ( + + "github.com/tendermint/tendermint/abci/example/code" + abci "github.com/tendermint/tendermint/abci/types" ++ cfg "github.com/tendermint/tendermint/config" + mempl "github.com/tendermint/tendermint/mempool" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" +@@ -74,7 +75,7 @@ func TestMempoolProgressInHigherRound(t *testing.T) { + timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) + cs.setProposal = func(proposal *types.Proposal) error { + if cs.Height == 2 && cs.Round == 0 { +- // dont set the proposal in round 0 so we timeout and ++ // don't set the proposal in round 0 so we timeout and + // go to next round + cs.Logger.Info("Ignoring set proposal at height 2, round 0") + return nil +@@ -90,7 +91,11 @@ func TestMempoolProgressInHigherRound(t *testing.T) { + round = 0 + + ensureNewRound(newRoundCh, height, round) // first round at next height +- deliverTxsRange(cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round ++ 
deliverTxsRange(cs, 0, 1) // we deliver txs, but don't set a proposal so we get the next round ++ // The use of cs.config.TimeoutPropose.Nanoseconds() as the timeout propose is acceptable in this test case, the following line. ++ // Even though timeouts are version-dependent, cs is created with an empty previous state in this scenario. ++ // As there's no timeout propose in the previous state, we default to the timeout propose in the config. ++ // This makes the test case valid. + ensureNewTimeout(timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds()) + + round++ // moving to the next round +@@ -136,6 +141,8 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) { + + func TestMempoolRmBadTx(t *testing.T) { + state, privVals := randGenesisState(1, false, 10) ++ config = ResetConfig("consensus_mempool_rm_bad_tx") ++ config.Mempool.Version = cfg.MempoolV2 + app := NewCounterApplication() + blockDB := dbm.NewMemDB() + stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardABCIResponses: false}) +@@ -154,22 +161,12 @@ func TestMempoolRmBadTx(t *testing.T) { + assert.True(t, len(resCommit.Data) > 0) + + emptyMempoolCh := make(chan struct{}) +- checkTxRespCh := make(chan struct{}) + go func() { + // Try to send the tx through the mempool. 
+ // CheckTx should not err, but the app should return a bad abci code + // and the tx should get removed from the pool +- err := assertMempool(cs.txNotifier).CheckTx(txBytes, func(r *abci.Response) { +- if r.GetCheckTx().Code != code.CodeTypeBadNonce { +- t.Errorf("expected checktx to return bad nonce, got %v", r) +- return +- } +- checkTxRespCh <- struct{}{} +- }, mempl.TxInfo{}) +- if err != nil { +- t.Errorf("error after CheckTx: %v", err) +- return +- } ++ err := assertMempool(cs.txNotifier).CheckTx(txBytes, nil, mempl.TxInfo{}) ++ require.Error(t, err) + + // check for the tx + for { +@@ -182,18 +179,8 @@ func TestMempoolRmBadTx(t *testing.T) { + } + }() + +- // Wait until the tx returns +- ticker := time.After(time.Second * 5) +- select { +- case <-checkTxRespCh: +- // success +- case <-ticker: +- t.Errorf("timed out waiting for tx to return") +- return +- } +- + // Wait until the tx is removed +- ticker = time.After(time.Second * 5) ++ ticker := time.After(time.Second * 5) + select { + case <-emptyMempoolCh: + // success +@@ -221,6 +208,7 @@ func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo { + + func (app *CounterApplication) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { + txValue := txAsUint64(req.Tx) ++ //nolint:gosec + if txValue != uint64(app.txCount) { + return abci.ResponseDeliverTx{ + Code: code.CodeTypeBadNonce, +@@ -232,10 +220,20 @@ func (app *CounterApplication) DeliverTx(req abci.RequestDeliverTx) abci.Respons + + func (app *CounterApplication) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx { + txValue := txAsUint64(req.Tx) ++ if req.Type == abci.CheckTxType_Recheck { ++ if txValue >= uint64(app.txCount) { ++ return abci.ResponseCheckTx{Code: code.CodeTypeOK} ++ } ++ return abci.ResponseCheckTx{ ++ Code: code.CodeTypeBadNonce, ++ Log: fmt.Sprintf("Invalid nonce. 
Expected %v, got %v", app.mempoolTxCount, txValue), ++ } ++ } + if txValue != uint64(app.mempoolTxCount) { + return abci.ResponseCheckTx{ + Code: code.CodeTypeBadNonce, +- Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.mempoolTxCount, txValue)} ++ Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.mempoolTxCount, txValue), ++ } + } + app.mempoolTxCount++ + return abci.ResponseCheckTx{Code: code.CodeTypeOK} +@@ -248,11 +246,19 @@ func txAsUint64(tx []byte) uint64 { + } + + func (app *CounterApplication) Commit() abci.ResponseCommit { +- app.mempoolTxCount = app.txCount ++ if app.mempoolTxCount < app.txCount { ++ app.mempoolTxCount = app.txCount ++ } + if app.txCount == 0 { + return abci.ResponseCommit{} + } + hash := make([]byte, 8) ++ //nolint:gosec + binary.BigEndian.PutUint64(hash, uint64(app.txCount)) + return abci.ResponseCommit{Data: hash} + } ++ ++func (app *CounterApplication) PrepareProposal( ++ req abci.RequestPrepareProposal) abci.ResponsePrepareProposal { ++ return abci.ResponsePrepareProposal{BlockData: req.BlockData} ++} diff --git a/patches/consensus/metrics.go.patch b/patches/consensus/metrics.go.patch new file mode 100644 index 00000000000..d27bc209688 --- /dev/null +++ b/patches/consensus/metrics.go.patch @@ -0,0 +1,132 @@ +diff --git a/consensus/metrics.go b/consensus/metrics.go +index dcaaae939..e76ef2d3a 100644 +--- a/consensus/metrics.go ++++ b/consensus/metrics.go +@@ -22,6 +22,8 @@ const ( + type Metrics struct { + // Height of the chain. + Height metrics.Gauge ++ // The height when the metrics started from ++ StartHeight metrics.Gauge + + // ValidatorLastSignedHeight of a validator. + ValidatorLastSignedHeight metrics.Gauge +@@ -48,6 +50,8 @@ type Metrics struct { + + // Time between this and the last block. + BlockIntervalSeconds metrics.Histogram ++ // Block time in seconds. ++ BlockTimeSeconds metrics.Gauge + + // Number of transactions. 
+ NumTxs metrics.Gauge +@@ -57,9 +61,9 @@ type Metrics struct { + TotalTxs metrics.Gauge + // The latest block height. + CommittedHeight metrics.Gauge +- // Whether or not a node is fast syncing. 1 if yes, 0 if no. ++ // Whether a node is fast syncing. 1 if yes, 0 if no. + FastSyncing metrics.Gauge +- // Whether or not a node is state syncing. 1 if yes, 0 if no. ++ // Whether a node is state syncing. 1 if yes, 0 if no. + StateSyncing metrics.Gauge + + // Number of blockparts transmitted by peer. +@@ -88,6 +92,12 @@ type Metrics struct { + // timestamp and the timestamp of the latest prevote in a round where 100% + // of the voting power on the network issued prevotes. + FullPrevoteMessageDelay metrics.Gauge ++ ++ // The amount of proposals that were rejected by the application. ++ ApplicationRejectedProposals metrics.Counter ++ ++ // The amount of proposals that failed to be received in time ++ TimedOutProposals metrics.Counter + } + + // PrometheusMetrics returns Metrics build using Prometheus client library. 
+@@ -105,6 +115,12 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + Name: "height", + Help: "Height of the chain.", + }, labels).With(labelsAndValues...), ++ StartHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ ++ Namespace: namespace, ++ Subsystem: MetricsSubsystem, ++ Name: "start_height", ++ Help: "Height that metrics began", ++ }, labels).With(labelsAndValues...), + Rounds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, +@@ -171,6 +187,12 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + Name: "block_interval_seconds", + Help: "Time between this and the last block.", + }, labels).With(labelsAndValues...), ++ BlockTimeSeconds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ ++ Namespace: namespace, ++ Subsystem: MetricsSubsystem, ++ Name: "block_time_seconds", ++ Help: "Duration between this block and the preceding one.", ++ }, labels).With(labelsAndValues...), + NumTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, +@@ -241,13 +263,26 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + Help: "Difference in seconds between the proposal timestamp and the timestamp " + + "of the latest prevote that achieved 100% of the voting power in the prevote step.", + }, labels).With(labelsAndValues...), ++ ApplicationRejectedProposals: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ ++ Namespace: namespace, ++ Subsystem: MetricsSubsystem, ++ Name: "application_rejected_proposals", ++ Help: "Number of proposals rejected by the application", ++ }, labels).With(labelsAndValues...), ++ TimedOutProposals: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ ++ Namespace: namespace, ++ Subsystem: MetricsSubsystem, ++ Name: "timed_out_proposals", ++ Help: "Number of proposals that failed to be received in time", ++ }, labels).With(labelsAndValues...), + } 
+ } + + // NopMetrics returns no-op Metrics. + func NopMetrics() *Metrics { + return &Metrics{ +- Height: discard.NewGauge(), ++ Height: discard.NewGauge(), ++ StartHeight: discard.NewGauge(), + + ValidatorLastSignedHeight: discard.NewGauge(), + +@@ -264,17 +299,20 @@ func NopMetrics() *Metrics { + ByzantineValidatorsPower: discard.NewGauge(), + + BlockIntervalSeconds: discard.NewHistogram(), ++ BlockTimeSeconds: discard.NewGauge(), + +- NumTxs: discard.NewGauge(), +- BlockSizeBytes: discard.NewGauge(), +- TotalTxs: discard.NewGauge(), +- CommittedHeight: discard.NewGauge(), +- FastSyncing: discard.NewGauge(), +- StateSyncing: discard.NewGauge(), +- BlockParts: discard.NewCounter(), +- BlockGossipPartsReceived: discard.NewCounter(), +- QuorumPrevoteMessageDelay: discard.NewGauge(), +- FullPrevoteMessageDelay: discard.NewGauge(), ++ NumTxs: discard.NewGauge(), ++ BlockSizeBytes: discard.NewGauge(), ++ TotalTxs: discard.NewGauge(), ++ CommittedHeight: discard.NewGauge(), ++ FastSyncing: discard.NewGauge(), ++ StateSyncing: discard.NewGauge(), ++ BlockParts: discard.NewCounter(), ++ BlockGossipPartsReceived: discard.NewCounter(), ++ QuorumPrevoteMessageDelay: discard.NewGauge(), ++ FullPrevoteMessageDelay: discard.NewGauge(), ++ ApplicationRejectedProposals: discard.NewCounter(), ++ TimedOutProposals: discard.NewCounter(), + } + } + diff --git a/patches/consensus/reactor.go.patch b/patches/consensus/reactor.go.patch new file mode 100644 index 00000000000..6e9106da294 --- /dev/null +++ b/patches/consensus/reactor.go.patch @@ -0,0 +1,541 @@ +diff --git a/consensus/reactor.go b/consensus/reactor.go +index 7dda1c73d..2e09c3ced 100644 +--- a/consensus/reactor.go ++++ b/consensus/reactor.go +@@ -15,6 +15,8 @@ import ( + "github.com/tendermint/tendermint/libs/log" + cmtsync "github.com/tendermint/tendermint/libs/sync" + "github.com/tendermint/tendermint/p2p" ++ "github.com/tendermint/tendermint/pkg/trace" ++ "github.com/tendermint/tendermint/pkg/trace/schema" + cmtcons 
"github.com/tendermint/tendermint/proto/tendermint/consensus" + cmtproto "github.com/tendermint/tendermint/proto/tendermint/types" + sm "github.com/tendermint/tendermint/state" +@@ -47,7 +49,8 @@ type Reactor struct { + eventBus *types.EventBus + rs *cstypes.RoundState + +- Metrics *Metrics ++ Metrics *Metrics ++ traceClient trace.Tracer + } + + type ReactorOption func(*Reactor) +@@ -56,10 +59,11 @@ type ReactorOption func(*Reactor) + // consensusState. + func NewReactor(consensusState *State, waitSync bool, options ...ReactorOption) *Reactor { + conR := &Reactor{ +- conS: consensusState, +- waitSync: waitSync, +- rs: consensusState.GetRoundState(), +- Metrics: NopMetrics(), ++ conS: consensusState, ++ waitSync: waitSync, ++ rs: consensusState.GetRoundState(), ++ Metrics: NopMetrics(), ++ traceClient: trace.NoOpTracer(), + } + conR.BaseReactor = *p2p.NewBaseReactor("Consensus", conR) + +@@ -268,6 +272,15 @@ func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) { + conR.conS.mtx.Lock() + initialHeight := conR.conS.state.InitialHeight + conR.conS.mtx.Unlock() ++ schema.WriteConsensusState( ++ conR.traceClient, ++ msg.Height, ++ msg.Round, ++ string(e.Src.ID()), ++ schema.ConsensusNewRoundStep, ++ schema.Download, ++ fmt.Sprintf("%d", msg.Step), ++ ) + if err = msg.ValidateHeight(initialHeight); err != nil { + conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", msg, "err", err) + conR.Switch.StopPeerForError(e.Src, err) +@@ -275,14 +288,39 @@ func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) { + } + ps.ApplyNewRoundStepMessage(msg) + case *NewValidBlockMessage: ++ schema.WriteConsensusState( ++ conR.traceClient, ++ msg.Height, ++ msg.Round, ++ string(e.Src.ID()), ++ schema.ConsensusNewValidBlock, ++ schema.Download, ++ ) + ps.ApplyNewValidBlockMessage(msg) + case *HasVoteMessage: + ps.ApplyHasVoteMessage(msg) ++ schema.WriteConsensusState( ++ conR.traceClient, ++ msg.Height, ++ msg.Round, ++ string(e.Src.ID()), ++ schema.ConsensusHasVote, ++ 
schema.Download, ++ msg.Type.String(), ++ ) + case *VoteSetMaj23Message: + cs := conR.conS + cs.mtx.Lock() + height, votes := cs.Height, cs.Votes + cs.mtx.Unlock() ++ schema.WriteConsensusState( ++ conR.traceClient, ++ msg.Height, ++ msg.Round, ++ string(e.Src.ID()), ++ schema.ConsensusVoteSet23Precommit, ++ schema.Download, ++ ) + if height != msg.Height { + return + } +@@ -312,10 +350,20 @@ func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) { + if votes := ourVotes.ToProto(); votes != nil { + eMsg.Votes = *votes + } +- p2p.TrySendEnvelopeShim(e.Src, p2p.Envelope{ //nolint: staticcheck ++ if p2p.TrySendEnvelopeShim(e.Src, p2p.Envelope{ //nolint: staticcheck + ChannelID: VoteSetBitsChannel, + Message: eMsg, +- }, conR.Logger) ++ }, conR.Logger) { ++ schema.WriteConsensusState( ++ conR.traceClient, ++ msg.Height, ++ msg.Round, ++ string(e.Src.ID()), ++ schema.ConsensusVoteSetBits, ++ schema.Upload, ++ msg.Type.String(), ++ ) ++ } + default: + conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) + } +@@ -329,11 +377,27 @@ func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) { + case *ProposalMessage: + ps.SetHasProposal(msg.Proposal) + conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()} ++ schema.WriteProposal( ++ conR.traceClient, ++ msg.Proposal.Height, ++ msg.Proposal.Round, ++ string(e.Src.ID()), ++ schema.Download, ++ ) + case *ProposalPOLMessage: + ps.ApplyProposalPOLMessage(msg) ++ schema.WriteConsensusState( ++ conR.traceClient, ++ msg.Height, ++ msg.ProposalPOLRound, ++ string(e.Src.ID()), ++ schema.ConsensusPOL, ++ schema.Download, ++ ) + case *BlockPartMessage: + ps.SetHasProposalBlockPart(msg.Height, msg.Round, int(msg.Part.Index)) + conR.Metrics.BlockParts.With("peer_id", string(e.Src.ID())).Add(1) ++ schema.WriteBlockPart(conR.traceClient, msg.Height, msg.Round, msg.Part.Index, false, string(e.Src.ID()), schema.Download) + conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()} + default: + conR.Logger.Error(fmt.Sprintf("Unknown 
message type %v", reflect.TypeOf(msg))) +@@ -348,8 +412,12 @@ func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) { + case *VoteMessage: + cs := conR.conS + cs.mtx.RLock() +- height, valSize, lastCommitSize := cs.Height, cs.Validators.Size(), cs.LastCommit.Size() ++ height, round, valSize, lastCommitSize := cs.Height, cs.Round, ++ cs.Validators.Size(), cs.LastCommit.Size() + cs.mtx.RUnlock() ++ ++ schema.WriteVote(conR.traceClient, height, round, msg.Vote, string(e.Src.ID()), schema.Download) ++ + ps.EnsureVoteBitArrays(height, valSize) + ps.EnsureVoteBitArrays(height-1, lastCommitSize) + ps.SetHasVote(msg.Vote) +@@ -468,6 +536,15 @@ func (conR *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) { + ChannelID: StateChannel, + Message: nrsMsg, + }) ++ schema.WriteConsensusState( ++ conR.traceClient, ++ nrsMsg.Height, ++ nrsMsg.Round, ++ schema.Broadcast, ++ schema.ConsensusNewRoundStep, ++ schema.Upload, ++ fmt.Sprintf("%d", nrsMsg.Step), ++ ) + } + + func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) { +@@ -483,6 +560,14 @@ func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) { + ChannelID: StateChannel, + Message: csMsg, + }) ++ schema.WriteConsensusState( ++ conR.traceClient, ++ rs.Height, ++ rs.Round, ++ schema.Broadcast, ++ schema.ConsensusNewValidBlock, ++ schema.Upload, ++ ) + } + + // Broadcasts HasVoteMessage to peers that care. +@@ -497,6 +582,15 @@ func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) { + ChannelID: StateChannel, + Message: msg, + }) ++ schema.WriteConsensusState( ++ conR.traceClient, ++ vote.Height, ++ vote.Round, ++ schema.Broadcast, ++ schema.ConsensusHasVote, ++ schema.Upload, ++ vote.Type.String(), ++ ) + /* + // TODO: Make this broadcast more selective. 
+ for _, peer := range conR.Switch.Peers().List() { +@@ -535,10 +629,20 @@ func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *cmtcons.NewRoundStep) + func (conR *Reactor) sendNewRoundStepMessage(peer p2p.Peer) { + rs := conR.getRoundState() + nrsMsg := makeRoundStepMessage(rs) +- p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck ++ if p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: StateChannel, + Message: nrsMsg, +- }, conR.Logger) ++ }, conR.Logger) { ++ schema.WriteConsensusState( ++ conR.traceClient, ++ nrsMsg.Height, ++ nrsMsg.Round, ++ string(peer.ID()), ++ schema.ConsensusNewRoundStep, ++ schema.Upload, ++ fmt.Sprintf("%d", nrsMsg.Step), ++ ) ++ } + } + + func (conR *Reactor) updateRoundStateRoutine() { +@@ -590,6 +694,7 @@ OUTER_LOOP: + Part: *parts, + }, + }, logger) { ++ schema.WriteBlockPart(conR.traceClient, rs.Height, rs.Round, part.Index, false, string(peer.ID()), schema.Upload) + ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) + } + continue OUTER_LOOP +@@ -642,6 +747,13 @@ OUTER_LOOP: + }, logger) { + // NOTE[ZM]: A peer might have received different proposal msg so this Proposal msg will be rejected! + ps.SetHasProposal(rs.Proposal) ++ schema.WriteProposal( ++ conR.traceClient, ++ rs.Height, ++ rs.Round, ++ string(peer.ID()), ++ schema.Upload, ++ ) + } + } + // ProposalPOL: lets peer know which POL votes we have so far. +@@ -650,14 +762,23 @@ OUTER_LOOP: + // so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound). 
+ if 0 <= rs.Proposal.POLRound { + logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round) +- p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck ++ if p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: DataChannel, + Message: &cmtcons.ProposalPOL{ + Height: rs.Height, + ProposalPolRound: rs.Proposal.POLRound, + ProposalPol: *rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray().ToProto(), + }, +- }, logger) ++ }, logger) { ++ schema.WriteConsensusState( ++ conR.traceClient, ++ rs.Height, ++ rs.Round, ++ string(peer.ID()), ++ schema.ConsensusPOL, ++ schema.Upload, ++ ) ++ } + } + continue OUTER_LOOP + } +@@ -709,6 +830,16 @@ func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundSt + }, + }, logger) { + ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) ++ schema.WriteBlockPart( ++ conR.traceClient, ++ prs.Height, ++ prs.Round, ++ //nolint:gosec ++ uint32(index), ++ true, ++ string(peer.ID()), ++ schema.Upload, ++ ) + } else { + logger.Debug("Sending block part for catchup failed") + // sleep to avoid retrying too fast +@@ -756,7 +887,7 @@ OUTER_LOOP: + // Special catchup logic. + // If peer is lagging by height 1, send LastCommit. + if prs.Height != 0 && rs.Height == prs.Height+1 { +- if ps.PickSendVote(rs.LastCommit) { ++ if conR.pickSendVoteAndTrace(rs.LastCommit, rs, ps) { + logger.Debug("Picked rs.LastCommit to send", "height", prs.Height) + continue OUTER_LOOP + } +@@ -769,8 +900,11 @@ OUTER_LOOP: + // Load the block commit for prs.Height, + // which contains precommit signatures for prs.Height. 
+ if commit := conR.conS.blockStore.LoadBlockCommit(prs.Height); commit != nil { +- if ps.PickSendVote(commit) { ++ vote := ps.PickSendVote(commit) ++ if vote != nil { + logger.Debug("Picked Catchup commit to send", "height", prs.Height) ++ schema.WriteVote(conR.traceClient, rs.Height, rs.Round, vote, ++ string(ps.peer.ID()), schema.Upload) + continue OUTER_LOOP + } + } +@@ -792,6 +926,18 @@ OUTER_LOOP: + } + } + ++// pickSendVoteAndTrace picks a vote to send and traces it. ++// It returns true if a vote is sent. ++// Note that it is a wrapper around PickSendVote with the addition of tracing the vote. ++func (conR *Reactor) pickSendVoteAndTrace(votes types.VoteSetReader, rs *cstypes.RoundState, ps *PeerState) bool { ++ vote := ps.PickSendVote(votes) ++ if vote != nil { // if a vote is sent, trace it ++ schema.WriteVote(conR.traceClient, rs.Height, rs.Round, vote, ++ string(ps.peer.ID()), schema.Upload) ++ return true ++ } ++ return false ++} + func (conR *Reactor) gossipVotesForHeight( + logger log.Logger, + rs *cstypes.RoundState, +@@ -801,7 +947,7 @@ func (conR *Reactor) gossipVotesForHeight( + + // If there are lastCommits to send... + if prs.Step == cstypes.RoundStepNewHeight { +- if ps.PickSendVote(rs.LastCommit) { ++ if conR.pickSendVoteAndTrace(rs.LastCommit, rs, ps) { + logger.Debug("Picked rs.LastCommit to send") + return true + } +@@ -809,7 +955,7 @@ func (conR *Reactor) gossipVotesForHeight( + // If there are POL prevotes to send... + if prs.Step <= cstypes.RoundStepPropose && prs.Round != -1 && prs.Round <= rs.Round && prs.ProposalPOLRound != -1 { + if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil { +- if ps.PickSendVote(polPrevotes) { ++ if conR.pickSendVoteAndTrace(polPrevotes, rs, ps) { + logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send", + "round", prs.ProposalPOLRound) + return true +@@ -818,21 +964,21 @@ func (conR *Reactor) gossipVotesForHeight( + } + // If there are prevotes to send... 
+ if prs.Step <= cstypes.RoundStepPrevoteWait && prs.Round != -1 && prs.Round <= rs.Round { +- if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) { ++ if conR.pickSendVoteAndTrace(rs.Votes.Prevotes(prs.Round), rs, ps) { + logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round) + return true + } + } + // If there are precommits to send... + if prs.Step <= cstypes.RoundStepPrecommitWait && prs.Round != -1 && prs.Round <= rs.Round { +- if ps.PickSendVote(rs.Votes.Precommits(prs.Round)) { ++ if conR.pickSendVoteAndTrace(rs.Votes.Precommits(prs.Round), rs, ps) { + logger.Debug("Picked rs.Precommits(prs.Round) to send", "round", prs.Round) + return true + } + } + // If there are prevotes to send...Needed because of validBlock mechanism + if prs.Round != -1 && prs.Round <= rs.Round { +- if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) { ++ if conR.pickSendVoteAndTrace(rs.Votes.Prevotes(prs.Round), rs, ps) { + logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round) + return true + } +@@ -840,7 +986,7 @@ func (conR *Reactor) gossipVotesForHeight( + // If there are POLPrevotes to send... 
+ if prs.ProposalPOLRound != -1 { + if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil { +- if ps.PickSendVote(polPrevotes) { ++ if conR.pickSendVoteAndTrace(polPrevotes, rs, ps) { + logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send", + "round", prs.ProposalPOLRound) + return true +@@ -869,7 +1015,7 @@ OUTER_LOOP: + if rs.Height == prs.Height { + if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok { + +- p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck ++ if p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: StateChannel, + Message: &cmtcons.VoteSetMaj23{ + Height: prs.Height, +@@ -877,7 +1023,16 @@ OUTER_LOOP: + Type: cmtproto.PrevoteType, + BlockID: maj23.ToProto(), + }, +- }, ps.logger) ++ }, ps.logger) { ++ schema.WriteConsensusState( ++ conR.traceClient, ++ rs.Height, ++ rs.Round, ++ string(peer.ID()), ++ schema.ConsensusVoteSet23Prevote, ++ schema.Upload, ++ ) ++ } + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) + } + } +@@ -889,7 +1044,7 @@ OUTER_LOOP: + prs := ps.GetRoundState() + if rs.Height == prs.Height { + if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok { +- p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck ++ if p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: StateChannel, + Message: &cmtcons.VoteSetMaj23{ + Height: prs.Height, +@@ -897,7 +1052,16 @@ OUTER_LOOP: + Type: cmtproto.PrecommitType, + BlockID: maj23.ToProto(), + }, +- }, ps.logger) ++ }, ps.logger) { ++ schema.WriteConsensusState( ++ conR.traceClient, ++ rs.Height, ++ rs.Round, ++ string(peer.ID()), ++ schema.ConsensusVoteSet23Precommit, ++ schema.Upload, ++ ) ++ } + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) + } + } +@@ -910,7 +1074,7 @@ OUTER_LOOP: + if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 { + if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok { + +- 
p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck ++ if p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: StateChannel, + Message: &cmtcons.VoteSetMaj23{ + Height: prs.Height, +@@ -918,7 +1082,16 @@ OUTER_LOOP: + Type: cmtproto.PrevoteType, + BlockID: maj23.ToProto(), + }, +- }, ps.logger) ++ }, ps.logger) { ++ schema.WriteConsensusState( ++ conR.traceClient, ++ rs.Height, ++ rs.Round, ++ string(peer.ID()), ++ schema.ConsensusPOL, ++ schema.Upload, ++ ) ++ } + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) + } + } +@@ -933,7 +1106,7 @@ OUTER_LOOP: + if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= conR.conS.blockStore.Height() && + prs.Height >= conR.conS.blockStore.Base() { + if commit := conR.conS.LoadCommit(prs.Height); commit != nil { +- p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck ++ if p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + ChannelID: StateChannel, + Message: &cmtcons.VoteSetMaj23{ + Height: prs.Height, +@@ -941,7 +1114,16 @@ OUTER_LOOP: + Type: cmtproto.PrecommitType, + BlockID: commit.BlockID.ToProto(), + }, +- }, ps.logger) ++ }, ps.logger) { ++ schema.WriteConsensusState( ++ conR.traceClient, ++ prs.Height, ++ prs.Round, ++ string(peer.ID()), ++ schema.ConsensusVoteSet23Precommit, ++ schema.Upload, ++ ) ++ } + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) + } + } +@@ -1021,6 +1203,10 @@ func ReactorMetrics(metrics *Metrics) ReactorOption { + return func(conR *Reactor) { conR.Metrics = metrics } + } + ++func ReactorTracing(traceClient trace.Tracer) ReactorOption { ++ return func(conR *Reactor) { conR.traceClient = traceClient } ++} ++ + //----------------------------------------------------------------------------- + + var ( +@@ -1153,8 +1339,8 @@ func (ps *PeerState) SetHasProposalBlockPart(height int64, round int32, index in + } + + // PickSendVote picks a vote and sends it to the peer. +-// Returns true if vote was sent. 
+-func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool { ++// Returns the vote if vote was sent. Otherwise, returns nil. ++func (ps *PeerState) PickSendVote(votes types.VoteSetReader) *types.Vote { + if vote, ok := ps.PickVoteToSend(votes); ok { + ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote) + if p2p.SendEnvelopeShim(ps.peer, p2p.Envelope{ //nolint: staticcheck +@@ -1164,11 +1350,11 @@ func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool { + }, + }, ps.logger) { + ps.SetHasVote(vote) +- return true ++ return vote + } +- return false ++ return nil + } +- return false ++ return nil + } + + // PickVoteToSend picks a vote to send to the peer. +@@ -1196,6 +1382,7 @@ func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote + return nil, false // Not something worth sending + } + if index, ok := votes.BitArray().Sub(psVotes).PickRandom(); ok { ++ //nolint:gosec + return votes.GetByIndex(int32(index)), true + } + return nil, false diff --git a/patches/consensus/reactor_test.go.patch b/patches/consensus/reactor_test.go.patch new file mode 100644 index 00000000000..098c4549d56 --- /dev/null +++ b/patches/consensus/reactor_test.go.patch @@ -0,0 +1,38 @@ +diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go +index d5dae78dc..7b9381898 100644 +--- a/consensus/reactor_test.go ++++ b/consensus/reactor_test.go +@@ -31,6 +31,7 @@ import ( + "github.com/tendermint/tendermint/libs/log" + cmtsync "github.com/tendermint/tendermint/libs/sync" + mempl "github.com/tendermint/tendermint/mempool" ++ mempoolv2 "github.com/tendermint/tendermint/mempool/cat" + mempoolv0 "github.com/tendermint/tendermint/mempool/v0" + mempoolv1 "github.com/tendermint/tendermint/mempool/v1" + "github.com/tendermint/tendermint/p2p" +@@ -185,6 +186,16 @@ func TestReactorWithEvidence(t *testing.T) { + mempoolv1.WithPreCheck(sm.TxPreCheck(state)), + mempoolv1.WithPostCheck(sm.TxPostCheck(state)), + ) ++ case cfg.MempoolV2: ++ mempool 
= mempoolv2.NewTxPool( ++ logger, ++ config.Mempool, ++ proxyAppConnConMem, ++ state.LastBlockHeight, ++ mempoolv2.WithMetrics(memplMetrics), ++ mempoolv2.WithPreCheck(sm.TxPreCheck(state)), ++ mempoolv2.WithPostCheck(sm.TxPostCheck(state)), ++ ) + } + if thisConfig.Consensus.WaitForTxs() { + mempool.EnableTxsAvailable() +@@ -698,7 +709,7 @@ func timeoutWaitGroup(t *testing.T, n int, f func(int), css []*State) { + close(done) + }() + +- // we're running many nodes in-process, possibly in in a virtual machine, ++ // we're running many nodes in-process, possibly in a virtual machine, + // and spewing debug messages - making a block could take a while, + timeout := time.Second * 120 + diff --git a/patches/consensus/replay.go.patch b/patches/consensus/replay.go.patch new file mode 100644 index 00000000000..a662c865d46 --- /dev/null +++ b/patches/consensus/replay.go.patch @@ -0,0 +1,110 @@ +diff --git a/consensus/replay.go b/consensus/replay.go +index 7c6d55e1b..bb261c3b0 100644 +--- a/consensus/replay.go ++++ b/consensus/replay.go +@@ -156,8 +156,8 @@ LOOP: + } + + // NOTE: since the priv key is set when the msgs are received +- // it will attempt to eg double sign but we can just ignore it +- // since the votes will be replayed and we'll get to the next step ++ // it will attempt to e.g., double sign, but we can just ignore it ++ // since the votes will be replayed, and we'll get to the next step + if err := cs.readReplayMessage(msg, nil); err != nil { + return err + } +@@ -239,49 +239,47 @@ func (h *Handshaker) NBlocks() int { + } + + // TODO: retry the handshake/replay if it fails ? 
+-func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { ++func (h *Handshaker) Handshake(proxyApp proxy.AppConns) (string, error) { + return h.HandshakeWithContext(context.TODO(), proxyApp) + } + + // HandshakeWithContext is cancellable version of Handshake +-func (h *Handshaker) HandshakeWithContext(ctx context.Context, proxyApp proxy.AppConns) error { ++func (h *Handshaker) HandshakeWithContext(ctx context.Context, proxyApp proxy.AppConns) (string, error) { + + // Handshake is done via ABCI Info on the query conn. + res, err := proxyApp.Query().InfoSync(proxy.RequestInfo) + if err != nil { +- return fmt.Errorf("error calling Info: %v", err) ++ return "", fmt.Errorf("error calling Info: %v", err) + } + + blockHeight := res.LastBlockHeight + if blockHeight < 0 { +- return fmt.Errorf("got a negative last block height (%d) from the app", blockHeight) ++ return "", fmt.Errorf("got a negative last block height (%d) from the app", blockHeight) + } + appHash := res.LastBlockAppHash + +- h.logger.Info("ABCI Handshake App Info", +- "height", blockHeight, +- "hash", appHash, +- "software-version", res.Version, +- "protocol-version", res.AppVersion, +- ) +- +- // Only set the version if there is no existing state. +- if h.initialState.LastBlockHeight == 0 { ++ appVersion := h.initialState.Version.Consensus.App ++ // set app version if it's not set via genesis ++ if h.initialState.LastBlockHeight == 0 && appVersion == 0 && res.AppVersion != 0 { ++ appVersion = res.AppVersion + h.initialState.Version.Consensus.App = res.AppVersion + } + + // Replay blocks up to the latest in the blockstore. 
+ appHash, err = h.ReplayBlocksWithContext(ctx, h.initialState, appHash, blockHeight, proxyApp) + if err != nil { +- return fmt.Errorf("error on replay: %v", err) ++ return "", fmt.Errorf("error on replay: %v", err) + } + + h.logger.Info("Completed ABCI Handshake - CometBFT and App are synced", +- "appHeight", blockHeight, "appHash", appHash) ++ "appHeight", blockHeight, ++ "appHash", appHash, ++ "appVersion", appVersion, ++ ) + + // TODO: (on restart) replay mempool + +- return nil ++ return res.Version, nil + } + + // ReplayBlocks replays all blocks since appBlockHeight and ensures the result +@@ -364,6 +362,11 @@ func (h *Handshaker) ReplayBlocksWithContext( + state.ConsensusParams = types.UpdateConsensusParams(state.ConsensusParams, res.ConsensusParams) + state.Version.Consensus.App = state.ConsensusParams.Version.AppVersion + } ++ ++ // update timeouts based on the InitChainSync response ++ state.TimeoutCommit = res.Timeouts.TimeoutCommit ++ state.TimeoutPropose = res.Timeouts.TimeoutPropose ++ + // We update the last results hash with the empty hash, to conform with RFC-6962. + state.LastResultsHash = merkle.HashFromByteSlices(nil) + if err := h.stateStore.Save(state); err != nil { +@@ -516,15 +519,16 @@ func (h *Handshaker) replayBlocks( + // ApplyBlock on the proxyApp with the last block. + func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.AppConnConsensus) (sm.State, error) { + block := h.store.LoadBlock(height) ++ seenCommit := h.store.LoadSeenCommit(height) + meta := h.store.LoadBlockMeta(height) + + // Use stubs for both mempool and evidence pool since no transactions nor + // evidence are needed here - block already exists. 
+- blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, proxyApp, emptyMempool{}, sm.EmptyEvidencePool{}) ++ blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, proxyApp, emptyMempool{}, sm.EmptyEvidencePool{}, sm.WithBlockStore(h.store)) + blockExec.SetEventBus(h.eventBus) + + var err error +- state, _, err = blockExec.ApplyBlock(state, meta.BlockID, block) ++ state, _, err = blockExec.ApplyBlock(state, meta.BlockID, block, seenCommit) + if err != nil { + return sm.State{}, err + } diff --git a/patches/consensus/replay_file.go.patch b/patches/consensus/replay_file.go.patch new file mode 100644 index 00000000000..4fbf6a05eaf --- /dev/null +++ b/patches/consensus/replay_file.go.patch @@ -0,0 +1,38 @@ +diff --git a/consensus/replay_file.go b/consensus/replay_file.go +index 03096fd74..44cee2187 100644 +--- a/consensus/replay_file.go ++++ b/consensus/replay_file.go +@@ -29,7 +29,7 @@ const ( + //-------------------------------------------------------- + // replay messages interactively or all at once + +-// replay the wal file ++// RunReplayFile replays the wal file + func RunReplayFile(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig, console bool) { + consensusState := newConsensusStateForReplay(config, csConfig) + +@@ -38,7 +38,7 @@ func RunReplayFile(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig, console + } + } + +-// Replay msgs in file or start the console ++// ReplayFile replays msgs in file or start the console + func (cs *State) ReplayFile(file string, console bool) error { + + if cs.IsRunning() { +@@ -324,13 +324,13 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo + + handshaker := NewHandshaker(stateStore, state, blockStore, gdoc) + handshaker.SetEventBus(eventBus) +- err = handshaker.Handshake(proxyApp) ++ _, err = handshaker.Handshake(proxyApp) + if err != nil { + cmtos.Exit(fmt.Sprintf("Error on handshake: %v", err)) + } + + mempool, evpool := emptyMempool{}, sm.EmptyEvidencePool{} +- blockExec := 
sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) ++ blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, sm.WithBlockStore(blockStore)) + + consensusState := NewState(csConfig, state.Copy(), blockExec, + blockStore, mempool, evpool) diff --git a/patches/consensus/replay_stubs.go.patch b/patches/consensus/replay_stubs.go.patch new file mode 100644 index 00000000000..dd6438a8a87 --- /dev/null +++ b/patches/consensus/replay_stubs.go.patch @@ -0,0 +1,17 @@ +diff --git a/consensus/replay_stubs.go b/consensus/replay_stubs.go +index a53ef49e8..f05d7eeed 100644 +--- a/consensus/replay_stubs.go ++++ b/consensus/replay_stubs.go +@@ -47,8 +47,10 @@ func (emptyMempool) TxsBytes() int64 { return 0 } + func (emptyMempool) TxsFront() *clist.CElement { return nil } + func (emptyMempool) TxsWaitChan() <-chan struct{} { return nil } + +-func (emptyMempool) InitWAL() error { return nil } +-func (emptyMempool) CloseWAL() {} ++func (emptyMempool) InitWAL() error { return nil } ++func (emptyMempool) CloseWAL() {} ++func (emptyMempool) GetTxByKey(types.TxKey) (types.Tx, bool) { return nil, false } ++func (emptyMempool) WasRecentlyEvicted(types.TxKey) bool { return false } + + //----------------------------------------------------------------------------- + // mockProxyApp uses ABCIResponses to give the right results. 
diff --git a/patches/consensus/replay_test.go.patch b/patches/consensus/replay_test.go.patch new file mode 100644 index 00000000000..b20561b0473 --- /dev/null +++ b/patches/consensus/replay_test.go.patch @@ -0,0 +1,178 @@ +diff --git a/consensus/replay_test.go b/consensus/replay_test.go +index 30fdc38f7..90a7710ab 100644 +--- a/consensus/replay_test.go ++++ b/consensus/replay_test.go +@@ -27,10 +27,13 @@ import ( + mempl "github.com/tendermint/tendermint/mempool" + "github.com/tendermint/tendermint/privval" + cmtstate "github.com/tendermint/tendermint/proto/tendermint/state" ++ cmtstore "github.com/tendermint/tendermint/proto/tendermint/store" + cmtproto "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/proxy" + sm "github.com/tendermint/tendermint/state" ++ "github.com/tendermint/tendermint/state/test/factory" + "github.com/tendermint/tendermint/types" ++ "github.com/tendermint/tendermint/version" + ) + + func TestMain(m *testing.M) { +@@ -56,7 +59,7 @@ func TestMain(m *testing.M) { + // the `Handshake Tests` are for failures in applying the block. + // With the help of the WAL, we can recover from it all! 
+ +-//------------------------------------------------------------------------------------------ ++// ------------------------------------------------------------------------------------------ + // WAL Tests + + // TODO: It would be better to verify explicitly which states we can recover from without the wal +@@ -318,7 +321,7 @@ var ( + sim testSim + ) + +-//--------------------------------------- ++// --------------------------------------- + // Test handshake/replay + + // 0 - all synced up +@@ -753,13 +756,14 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin + } + }) + +- err := handshaker.Handshake(proxyApp) ++ softwareVersion, err := handshaker.Handshake(proxyApp) + if expectError { + require.Error(t, err) + return + } else if err != nil { + t.Fatalf("Error on abci handshake: %v", err) + } ++ require.Equal(t, softwareVersion, version.ABCISemVer) + + // get the latest app hash from the app + res, err := proxyApp.Query().InfoSync(abci.RequestInfo{Version: ""}) +@@ -792,7 +796,7 @@ func applyBlock(stateStore sm.Store, st sm.State, blk *types.Block, proxyApp pro + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) + + blkID := types.BlockID{Hash: blk.Hash(), PartSetHeader: blk.MakePartSet(testPartSize).Header()} +- newState, _, err := blockExec.ApplyBlock(st, blkID, blk) ++ newState, _, err := blockExec.ApplyBlock(st, blkID, blk, nil) + if err != nil { + panic(err) + } +@@ -931,7 +935,8 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { + + assert.Panics(t, func() { + h := NewHandshaker(stateStore, state, store, genDoc) +- if err = h.Handshake(proxyApp); err != nil { ++ _, err = h.Handshake(proxyApp) ++ if err != nil { + t.Log(err) + } + }) +@@ -955,7 +960,8 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { + + assert.Panics(t, func() { + h := NewHandshaker(stateStore, state, store, genDoc) +- if err = h.Handshake(proxyApp); err != nil { ++ _, 
err = h.Handshake(proxyApp) ++ if err != nil { + t.Log(err) + } + }) +@@ -1005,7 +1011,13 @@ func makeBlock(state sm.State, lastBlock *types.Block, lastBlockMeta *types.Bloc + lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()}) + } + +- return state.MakeBlock(height, []types.Tx{}, lastCommit, nil, state.Validators.GetProposer().Address) ++ return state.MakeBlock( ++ height, ++ factory.MakeData([]types.Tx{}), ++ lastCommit, ++ nil, ++ state.Validators.GetProposer().Address, ++ ) + } + + type badApp struct { +@@ -1030,7 +1042,7 @@ func (app *badApp) Commit() abci.ResponseCommit { + panic("either allHashesAreWrong or onlyLastHashIsWrong must be set") + } + +-//-------------------------- ++// -------------------------- + // utils for making blocks + + func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { +@@ -1176,8 +1188,9 @@ func stateAndStore( + return stateDB, state, store + } + +-//---------------------------------- ++// ---------------------------------- + // mock block store ++var _ sm.BlockStore = &mockBlockStore{} + + type mockBlockStore struct { + config *cfg.Config +@@ -1200,7 +1213,7 @@ func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain + func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block { + return bs.chain[int64(len(bs.chain))-1] + } +- ++func (bs *mockBlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { return nil } + func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { + block := bs.chain[height-1] + return &types.BlockMeta{ +@@ -1211,6 +1224,10 @@ func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { + func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil } + func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { + } ++func (bs *mockBlockStore) SaveTxInfo(block *types.Block, txResponseCodes []uint32, logs []string) error { ++ return 
nil ++} ++func (bs *mockBlockStore) LoadTxInfo(hash []byte) *cmtstore.TxInfo { return &cmtstore.TxInfo{} } + + func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit { + return bs.commits[height-1] +@@ -1231,7 +1248,7 @@ func (bs *mockBlockStore) PruneBlocks(height int64) (uint64, error) { + return pruned, nil + } + +-//--------------------------------------- ++// --------------------------------------- + // Test handshake/init chain + + func TestHandshakeUpdatesValidators(t *testing.T) { +@@ -1264,9 +1281,12 @@ func TestHandshakeUpdatesValidators(t *testing.T) { + t.Error(err) + } + }) +- if err := handshaker.Handshake(proxyApp); err != nil { ++ version, err := handshaker.Handshake(proxyApp) ++ if err != nil { + t.Fatalf("Error on abci handshake: %v", err) + } ++ require.Equal(t, customVersion, version) ++ + // reload the state, check the validator set was updated + state, err = stateStore.Load() + require.NoError(t, err) +@@ -1277,6 +1297,8 @@ func TestHandshakeUpdatesValidators(t *testing.T) { + assert.Equal(t, newValAddr, expectValAddr) + } + ++const customVersion = "v1.0.0" ++ + // returns the vals on InitChain + type initChainApp struct { + abci.BaseApplication +@@ -1288,3 +1310,9 @@ func (ica *initChainApp) InitChain(req abci.RequestInitChain) abci.ResponseInitC + Validators: ica.vals, + } + } ++ ++func (ica *initChainApp) Info(req abci.RequestInfo) abci.ResponseInfo { ++ return abci.ResponseInfo{ ++ Version: customVersion, ++ } ++} diff --git a/patches/consensus/state.go.patch b/patches/consensus/state.go.patch new file mode 100644 index 00000000000..5c2450758ba --- /dev/null +++ b/patches/consensus/state.go.patch @@ -0,0 +1,321 @@ +diff --git a/consensus/state.go b/consensus/state.go +index 4efdf4231..18cf1b4be 100644 +--- a/consensus/state.go ++++ b/consensus/state.go +@@ -24,6 +24,8 @@ import ( + "github.com/tendermint/tendermint/libs/service" + cmtsync "github.com/tendermint/tendermint/libs/sync" + "github.com/tendermint/tendermint/p2p" 
++ "github.com/tendermint/tendermint/pkg/trace" ++ "github.com/tendermint/tendermint/pkg/trace/schema" + cmtproto "github.com/tendermint/tendermint/proto/tendermint/types" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" +@@ -141,6 +143,8 @@ type State struct { + + // for reporting metrics + metrics *Metrics ++ ++ traceClient trace.Tracer + } + + // StateOption sets an optional parameter on the State. +@@ -171,6 +175,7 @@ func NewState( + evpool: evpool, + evsw: cmtevents.NewEventSwitch(), + metrics: NopMetrics(), ++ traceClient: trace.NoOpTracer(), + } + + // set function defaults (may be overwritten before calling Start) +@@ -212,6 +217,11 @@ func StateMetrics(metrics *Metrics) StateOption { + return func(cs *State) { cs.metrics = metrics } + } + ++// SetTraceClient sets the remote event collector. ++func SetTraceClient(ec trace.Tracer) StateOption { ++ return func(cs *State) { cs.traceClient = ec } ++} ++ + // String returns a string. + func (cs *State) String() string { + // better not to access shared variables +@@ -306,6 +316,8 @@ func (cs *State) OnStart() error { + } + } + ++ cs.metrics.StartHeight.Set(float64(cs.Height)) ++ + // we need the timeoutRoutine for replay so + // we don't block on the tick chan. + // NOTE: we will get a build up of garbage go routines +@@ -527,6 +539,7 @@ func (cs *State) updateRoundStep(round int32, step cstypes.RoundStepType) { + } + if cs.Step != step { + cs.metrics.MarkStep(cs.Step) ++ schema.WriteRoundState(cs.traceClient, cs.Height, round, uint8(step)) + } + } + cs.Round = round +@@ -661,9 +674,21 @@ func (cs *State) updateToState(state sm.State) { + // to be gathered for the first block. 
+ // And alternative solution that relies on clocks: + // cs.StartTime = state.LastBlockTime.Add(timeoutCommit) +- cs.StartTime = cs.config.Commit(cmttime.Now()) ++ ++ if state.LastBlockHeight == 0 { ++ // Don't use cs.state.TimeoutCommit because that is zero ++ cs.StartTime = cs.config.CommitWithCustomTimeout(cmttime.Now(), state.TimeoutCommit) ++ } else { ++ cs.StartTime = cs.config.CommitWithCustomTimeout(cmttime.Now(), cs.state.TimeoutCommit) ++ } ++ + } else { +- cs.StartTime = cs.config.Commit(cs.CommitTime) ++ if state.LastBlockHeight == 0 { ++ cs.StartTime = cs.config.CommitWithCustomTimeout(cs.CommitTime, state.TimeoutCommit) ++ } else { ++ cs.StartTime = cs.config.CommitWithCustomTimeout(cs.CommitTime, cs.state.TimeoutCommit) ++ } ++ + } + + cs.Validators = validators +@@ -673,9 +698,9 @@ func (cs *State) updateToState(state sm.State) { + cs.LockedRound = -1 + cs.LockedBlock = nil + cs.LockedBlockParts = nil +- cs.ValidRound = -1 +- cs.ValidBlock = nil +- cs.ValidBlockParts = nil ++ cs.TwoThirdPrevoteRound = -1 ++ cs.TwoThirdPrevoteBlock = nil ++ cs.TwoThirdPrevoteBlockParts = nil + cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, validators) + cs.CommitRound = -1 + cs.LastValidators = state.LastValidators +@@ -760,8 +785,10 @@ func (cs *State) receiveRoutine(maxSteps int) { + cs.handleTxsAvailable() + + case mi = <-cs.peerMsgQueue: +- if err := cs.wal.Write(mi); err != nil { +- cs.Logger.Error("failed writing to WAL", "err", err) ++ if !cs.config.OnlyInternalWal { ++ if err := cs.wal.Write(mi); err != nil { ++ cs.Logger.Error("failed writing to WAL", "err", err) ++ } + } + + // handles proposals, block parts, votes +@@ -1098,7 +1125,7 @@ func (cs *State) enterPropose(height int64, round int32) { + }() + + // If we don't get the proposal and all block parts quick enough, enterPrevote +- cs.scheduleTimeout(cs.config.Propose(round), height, round, cstypes.RoundStepPropose) ++ cs.scheduleTimeout(cs.config.ProposeWithCustomTimeout(round, 
cs.state.TimeoutPropose), height, round, cstypes.RoundStepPropose) + + // Nothing more to do if we're not a validator + if cs.privValidator == nil { +@@ -1140,12 +1167,14 @@ func (cs *State) defaultDecideProposal(height int64, round int32) { + var blockParts *types.PartSet + + // Decide on block +- if cs.ValidBlock != nil { ++ if cs.TwoThirdPrevoteBlock != nil { + // If there is valid block, choose that. +- block, blockParts = cs.ValidBlock, cs.ValidBlockParts ++ block, blockParts = cs.TwoThirdPrevoteBlock, cs.TwoThirdPrevoteBlockParts + } else { + // Create a new proposal block from state/txs from the mempool. ++ schema.WriteABCI(cs.traceClient, schema.PrepareProposalStart, height, round) + block, blockParts = cs.createProposalBlock() ++ schema.WriteABCI(cs.traceClient, schema.PrepareProposalEnd, height, round) + if block == nil { + return + } +@@ -1159,7 +1188,7 @@ func (cs *State) defaultDecideProposal(height int64, round int32) { + + // Make proposal + propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} +- proposal := types.NewProposal(height, round, cs.ValidRound, propBlockID) ++ proposal := types.NewProposal(height, round, cs.TwoThirdPrevoteRound, propBlockID) + p := proposal.ToProto() + if err := cs.privValidator.SignProposal(cs.state.ChainID, p); err == nil { + proposal.Signature = p.Signature +@@ -1276,6 +1305,7 @@ func (cs *State) defaultDoPrevote(height int64, round int32) { + // If ProposalBlock is nil, prevote nil. 
+ if cs.ProposalBlock == nil { + logger.Debug("prevote step: ProposalBlock is nil") ++ cs.metrics.TimedOutProposals.Add(1) + cs.signAddVote(cmtproto.PrevoteType, nil, types.PartSetHeader{}) + return + } +@@ -1289,6 +1319,25 @@ func (cs *State) defaultDoPrevote(height int64, round int32) { + return + } + ++ schema.WriteABCI(cs.traceClient, schema.ProcessProposalStart, height, round) ++ ++ stateMachineValidBlock, err := cs.blockExec.ProcessProposal(cs.ProposalBlock) ++ if err != nil { ++ cs.Logger.Error("state machine returned an error when trying to process proposal block", "err", err) ++ return ++ } ++ ++ schema.WriteABCI(cs.traceClient, schema.ProcessProposalEnd, height, round) ++ ++ // Vote nil if application invalidated the block ++ if !stateMachineValidBlock { ++ // The app says we must vote nil ++ logger.Error("prevote step: the application deems this block to be mustVoteNil", "err", err) ++ cs.metrics.ApplicationRejectedProposals.Add(1) ++ cs.signAddVote(cmtproto.PrevoteType, nil, types.PartSetHeader{}) ++ return ++ } ++ + // Prevote cs.ProposalBlock + // NOTE: the proposal signature is validated when it is received, + // and the proposal block parts are validated as they are received (against the merkle hash in the proposal) +@@ -1479,6 +1528,7 @@ func (cs *State) enterPrecommitWait(height int64, round int32) { + defer func() { + // Done enterPrecommitWait: + cs.TriggeredTimeoutPrecommit = true ++ cs.updateRoundStep(round, cstypes.RoundStepPrecommitWait) + cs.newStep() + }() + +@@ -1619,11 +1669,12 @@ func (cs *State) finalizeCommit(height int64) { + fail.Fail() // XXX + + // Save to blockStore. 
++ var seenCommit *types.Commit + if cs.blockStore.Height() < block.Height { + // NOTE: the seenCommit is local justification to commit this block, + // but may differ from the LastCommit included in the next block + precommits := cs.Votes.Precommits(cs.CommitRound) +- seenCommit := precommits.MakeCommit() ++ seenCommit = precommits.MakeCommit() + cs.blockStore.SaveBlock(block, blockParts, seenCommit) + } else { + // Happens during replay if we already saved the block but didn't commit +@@ -1643,7 +1694,7 @@ func (cs *State) finalizeCommit(height int64) { + // exists. + // + // Either way, the State should not be resumed until we +- // successfully call ApplyBlock (ie. later here, or in Handshake after ++ // successfully call ApplyBlock (i.e., later here, or in Handshake after + // restart). + endMsg := EndHeightMessage{height} + if err := cs.wal.WriteSync(endMsg); err != nil { // NOTE: fsync +@@ -1659,12 +1710,14 @@ func (cs *State) finalizeCommit(height int64) { + stateCopy := cs.state.Copy() + + // Execute and commit the block, update and save the state, and update the mempool. +- // NOTE The block.AppHash wont reflect these txs until the next block. ++ // NOTE The block.AppHash won't reflect these txs until the next block. + var ( + err error + retainHeight int64 + ) + ++ schema.WriteABCI(cs.traceClient, schema.CommitStart, height, 0) ++ + stateCopy, retainHeight, err = cs.blockExec.ApplyBlock( + stateCopy, + types.BlockID{ +@@ -1672,11 +1725,14 @@ func (cs *State) finalizeCommit(height int64) { + PartSetHeader: blockParts.Header(), + }, + block, ++ seenCommit, + ) + if err != nil { + panic(fmt.Sprintf("failed to apply block; error %v", err)) + } + ++ schema.WriteABCI(cs.traceClient, schema.CommitEnd, height, 0) ++ + fail.Fail() // XXX + + // Prune old heights, if requested by ABCI app. +@@ -1697,7 +1753,7 @@ func (cs *State) finalizeCommit(height int64) { + + fail.Fail() // XXX + +- // Private validator might have changed it's key pair => refetch pubkey. 
++ // Private validator might have changed its key pair => refetch pubkey. + if err := cs.updatePrivValidatorPubKey(); err != nil { + logger.Error("failed to get private validator pubkey", "err", err) + } +@@ -1804,15 +1860,21 @@ func (cs *State) recordMetrics(height int64, block *types.Block) { + if height > 1 { + lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1) + if lastBlockMeta != nil { +- cs.metrics.BlockIntervalSeconds.Observe( +- block.Time.Sub(lastBlockMeta.Header.Time).Seconds(), +- ) ++ elapsedTime := block.Time.Sub(lastBlockMeta.Header.Time).Seconds() ++ cs.metrics.BlockIntervalSeconds.Observe(elapsedTime) ++ cs.metrics.BlockTimeSeconds.Set(elapsedTime) ++ + } + } + ++ blockSize := block.Size() ++ ++ // trace some metadata about the block ++ schema.WriteBlockSummary(cs.traceClient, block, blockSize) ++ + cs.metrics.NumTxs.Set(float64(len(block.Data.Txs))) + cs.metrics.TotalTxs.Add(float64(len(block.Data.Txs))) +- cs.metrics.BlockSizeBytes.Set(float64(block.Size())) ++ cs.metrics.BlockSizeBytes.Set(float64(blockSize)) + cs.metrics.CommittedHeight.Set(float64(block.Height)) + } + +@@ -1943,7 +2005,7 @@ func (cs *State) handleCompleteProposal(blockHeight int64) { + // Update Valid* if we can. 
+ prevotes := cs.Votes.Prevotes(cs.Round) + blockID, hasTwoThirds := prevotes.TwoThirdsMajority() +- if hasTwoThirds && !blockID.IsZero() && (cs.ValidRound < cs.Round) { ++ if hasTwoThirds && !blockID.IsZero() && (cs.TwoThirdPrevoteRound < cs.Round) { + if cs.ProposalBlock.HashesTo(blockID.Hash) { + cs.Logger.Debug( + "updating valid block to new proposal block", +@@ -1951,9 +2013,9 @@ func (cs *State) handleCompleteProposal(blockHeight int64) { + "valid_block_hash", log.NewLazyBlockHash(cs.ProposalBlock), + ) + +- cs.ValidRound = cs.Round +- cs.ValidBlock = cs.ProposalBlock +- cs.ValidBlockParts = cs.ProposalBlockParts ++ cs.TwoThirdPrevoteRound = cs.Round ++ cs.TwoThirdPrevoteBlock = cs.ProposalBlock ++ cs.TwoThirdPrevoteBlockParts = cs.ProposalBlockParts + } + // TODO: In case there is +2/3 majority in Prevotes set for some + // block and cs.ProposalBlock contains different block, either +@@ -2113,12 +2175,18 @@ func (cs *State) addVote(vote *types.Vote, peerID p2p.ID) (added bool, err error + + // Update Valid* if we can. + // NOTE: our proposal block may be nil or not what received a polka.. 
+- if len(blockID.Hash) != 0 && (cs.ValidRound < vote.Round) && (vote.Round == cs.Round) { ++ if len(blockID.Hash) != 0 && (cs.TwoThirdPrevoteRound < vote.Round) && (vote.Round == cs.Round) { + if cs.ProposalBlock.HashesTo(blockID.Hash) { +- cs.Logger.Debug("updating valid block because of POL", "valid_round", cs.ValidRound, "pol_round", vote.Round) +- cs.ValidRound = vote.Round +- cs.ValidBlock = cs.ProposalBlock +- cs.ValidBlockParts = cs.ProposalBlockParts ++ cs.Logger.Debug( ++ "updating valid block because of POL", ++ "valid_round", ++ cs.TwoThirdPrevoteRound, ++ "pol_round", ++ vote.Round, ++ ) ++ cs.TwoThirdPrevoteRound = vote.Round ++ cs.TwoThirdPrevoteBlock = cs.ProposalBlock ++ cs.TwoThirdPrevoteBlockParts = cs.ProposalBlockParts + } else { + cs.Logger.Debug( + "valid block we do not know about; set ProposalBlock=nil", diff --git a/patches/consensus/state_test.go.patch b/patches/consensus/state_test.go.patch new file mode 100644 index 00000000000..e147bf0c31e --- /dev/null +++ b/patches/consensus/state_test.go.patch @@ -0,0 +1,159 @@ +diff --git a/consensus/state_test.go b/consensus/state_test.go +index 016869ed2..467101852 100644 +--- a/consensus/state_test.go ++++ b/consensus/state_test.go +@@ -144,6 +144,10 @@ func TestStateEnterProposeNoPrivValidator(t *testing.T) { + startTestRound(cs, height, round) + + // if we're not a validator, EnterPropose should timeout ++ // The use of cs.config.TimeoutPropose.Nanoseconds() as the timeout propose is acceptable in this test case. ++ // Even though timeouts are version-dependent, cs is created with an empty previous state in this scenario. ++ // As there's no timeout propose in the previous state, we default to the timeout propose in the config. ++ // This makes the test case valid. 
+ ensureNewTimeout(timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds()) + + if cs.GetRoundState().Proposal != nil { +@@ -179,6 +183,10 @@ func TestStateEnterProposeYesPrivValidator(t *testing.T) { + } + + // if we're a validator, enterPropose should not timeout ++ // The use of cs.config.TimeoutPropose.Nanoseconds() as the timeout propose is acceptable in this test case. ++ // Even though timeouts are version-dependent, cs is created with an empty previous state in this scenario. ++ // As there's no timeout propose in the previous state, we default to the timeout propose in the config. ++ // This makes the test case valid. + ensureNoNewTimeout(timeoutCh, cs.config.TimeoutPropose.Nanoseconds()) + } + +@@ -310,7 +318,7 @@ func TestStateOversizedBlock(t *testing.T) { + lockedRound = -1 + // if the block is oversized cs1 should log an error with the block part message as it exceeds + // the consensus params. The block is not added to cs.ProposalBlock so the node timeouts. +- ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) ++ ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.ProposeWithCustomTimeout(round, cs1.state.TimeoutPropose).Nanoseconds()) + // and then should send nil prevote and precommit regardless of whether other validators prevote and + // precommit on it + } +@@ -497,7 +505,7 @@ func TestStateLockNoPOL(t *testing.T) { + incrementRound(vs2) + + // now we're on a new round and not the proposer, so wait for timeout +- ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) ++ ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.ProposeWithCustomTimeout(round, cs1.state.TimeoutPropose).Nanoseconds()) + + rs := cs1.GetRoundState() + +@@ -1029,7 +1037,7 @@ func TestStateLockPOLSafety1(t *testing.T) { + */ + + // timeout of propose +- ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) ++ 
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.ProposeWithCustomTimeout(round, cs1.state.TimeoutPropose).Nanoseconds()) + + // finish prevote + ensurePrevote(voteCh, height, round) +@@ -1199,7 +1207,7 @@ func TestProposeValidBlock(t *testing.T) { + t.Log("### ONTO ROUND 2") + + // timeout of propose +- ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) ++ ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.ProposeWithCustomTimeout(round, cs1.state.TimeoutPropose).Nanoseconds()) + + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], propBlockHash) +@@ -1234,9 +1242,9 @@ func TestProposeValidBlock(t *testing.T) { + + rs = cs1.GetRoundState() + assert.True(t, bytes.Equal(rs.ProposalBlock.Hash(), propBlockHash)) +- assert.True(t, bytes.Equal(rs.ProposalBlock.Hash(), rs.ValidBlock.Hash())) +- assert.True(t, rs.Proposal.POLRound == rs.ValidRound) +- assert.True(t, bytes.Equal(rs.Proposal.BlockID.Hash, rs.ValidBlock.Hash())) ++ assert.True(t, bytes.Equal(rs.ProposalBlock.Hash(), rs.TwoThirdPrevoteBlock.Hash())) ++ assert.True(t, rs.Proposal.POLRound == rs.TwoThirdPrevoteRound) ++ assert.True(t, bytes.Equal(rs.Proposal.BlockID.Hash, rs.TwoThirdPrevoteBlock.Hash())) + } + + // What we want: +@@ -1284,9 +1292,9 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) { + + rs = cs1.GetRoundState() + +- assert.True(t, rs.ValidBlock == nil) +- assert.True(t, rs.ValidBlockParts == nil) +- assert.True(t, rs.ValidRound == -1) ++ assert.True(t, rs.TwoThirdPrevoteBlock == nil) ++ assert.True(t, rs.TwoThirdPrevoteBlockParts == nil) ++ assert.True(t, rs.TwoThirdPrevoteRound == -1) + + // vs2 send (delayed) prevote for propBlock + signAddVotes(cs1, cmtproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs4) +@@ -1295,9 +1303,9 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) { + + rs = cs1.GetRoundState() + +- assert.True(t, bytes.Equal(rs.ValidBlock.Hash(), propBlockHash)) +- 
assert.True(t, rs.ValidBlockParts.Header().Equals(propBlockParts.Header())) +- assert.True(t, rs.ValidRound == round) ++ assert.True(t, bytes.Equal(rs.TwoThirdPrevoteBlock.Hash(), propBlockHash)) ++ assert.True(t, rs.TwoThirdPrevoteBlockParts.Header().Equals(propBlockParts.Header())) ++ assert.True(t, rs.TwoThirdPrevoteRound == round) + } + + // What we want: +@@ -1326,7 +1334,7 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) { + startTestRound(cs1, cs1.Height, round) + ensureNewRound(newRoundCh, height, round) + +- ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) ++ ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.ProposeWithCustomTimeout(round, cs1.state.TimeoutPropose).Nanoseconds()) + + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], nil) +@@ -1351,9 +1359,9 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) { + ensureNewProposal(proposalCh, height, round) + rs := cs1.GetRoundState() + +- assert.True(t, bytes.Equal(rs.ValidBlock.Hash(), propBlockHash)) +- assert.True(t, rs.ValidBlockParts.Header().Equals(propBlockParts.Header())) +- assert.True(t, rs.ValidRound == round) ++ assert.True(t, bytes.Equal(rs.TwoThirdPrevoteBlock.Hash(), propBlockHash)) ++ assert.True(t, rs.TwoThirdPrevoteBlockParts.Header().Equals(propBlockParts.Header())) ++ assert.True(t, rs.TwoThirdPrevoteRound == round) + } + + // 4 vals, 3 Nil Precommits at P0 +@@ -1407,7 +1415,7 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { + rs := cs1.GetRoundState() + assert.True(t, rs.Step == cstypes.RoundStepPropose) // P0 does not prevote before timeoutPropose expires + +- ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Propose(round).Nanoseconds()) ++ ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.ProposeWithCustomTimeout(round, cs1.state.TimeoutPropose).Nanoseconds()) + + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], nil) +@@ -1471,7 
+1479,7 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { + incrementRound(vss[1:]...) + signAddVotes(cs1, cmtproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + +- ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) ++ ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.ProposeWithCustomTimeout(round, cs1.state.TimeoutPropose).Nanoseconds()) + + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], nil) +@@ -1619,7 +1627,7 @@ func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) { + + cs1.txNotifier.(*fakeTxNotifier).Notify() + +- ensureNewTimeout(timeoutProposeCh, height+1, round, cs1.config.Propose(round).Nanoseconds()) ++ ensureNewTimeout(timeoutProposeCh, height+1, round, cs1.config.ProposeWithCustomTimeout(round, cs1.state.TimeoutPropose).Nanoseconds()) + rs = cs1.GetRoundState() + assert.False( + t, +@@ -1959,7 +1967,7 @@ func findBlockSizeLimit(t *testing.T, height, maxBytes int64, cs *State, partSiz + for i := softMaxDataBytes; i < softMaxDataBytes*2; i++ { + propBlock, propBlockParts := cs.state.MakeBlock( + height, +- []types.Tx{[]byte("a=" + strings.Repeat("o", i-2))}, ++ types.Data{Txs: []types.Tx{[]byte("a=" + strings.Repeat("o", i-2))}}, + &types.Commit{}, + nil, + cs.privValidatorPubKey.Address(), diff --git a/patches/consensus/types/round_state.go.patch b/patches/consensus/types/round_state.go.patch new file mode 100644 index 00000000000..e0f2f211c53 --- /dev/null +++ b/patches/consensus/types/round_state.go.patch @@ -0,0 +1,50 @@ +diff --git a/consensus/types/round_state.go b/consensus/types/round_state.go +index 9e67b76c0..3b11c2e17 100644 +--- a/consensus/types/round_state.go ++++ b/consensus/types/round_state.go +@@ -81,11 +81,11 @@ type RoundState struct { + LockedBlockParts *types.PartSet `json:"locked_block_parts"` + + // Last known round with POL for non-nil valid block. 
+- ValidRound int32 `json:"valid_round"` +- ValidBlock *types.Block `json:"valid_block"` // Last known block of POL mentioned above. ++ TwoThirdPrevoteRound int32 `json:"valid_round"` ++ TwoThirdPrevoteBlock *types.Block `json:"valid_block"` // Last known block of POL mentioned above. + + // Last known block parts of POL mentioned above. +- ValidBlockParts *types.PartSet `json:"valid_block_parts"` ++ TwoThirdPrevoteBlockParts *types.PartSet `json:"valid_block_parts"` + Votes *HeightVoteSet `json:"votes"` + CommitRound int32 `json:"commit_round"` // + LastCommit *types.VoteSet `json:"last_commit"` // Last precommits at Height-1 +@@ -119,7 +119,7 @@ func (rs *RoundState) RoundStateSimple() RoundStateSimple { + StartTime: rs.StartTime, + ProposalBlockHash: rs.ProposalBlock.Hash(), + LockedBlockHash: rs.LockedBlock.Hash(), +- ValidBlockHash: rs.ValidBlock.Hash(), ++ ValidBlockHash: rs.TwoThirdPrevoteBlock.Hash(), + Votes: votesJSON, + Proposer: types.ValidatorInfo{ + Address: addr, +@@ -186,8 +186,8 @@ func (rs *RoundState) StringIndented(indent string) string { + %s ProposalBlock: %v %v + %s LockedRound: %v + %s LockedBlock: %v %v +-%s ValidRound: %v +-%s ValidBlock: %v %v ++%s TwoThirdPrevoteRound: %v ++%s TwoThirdPrevoteBlock: %v %v + %s Votes: %v + %s LastCommit: %v + %s LastValidators:%v +@@ -200,8 +200,8 @@ func (rs *RoundState) StringIndented(indent string) string { + indent, rs.ProposalBlockParts.StringShort(), rs.ProposalBlock.StringShort(), + indent, rs.LockedRound, + indent, rs.LockedBlockParts.StringShort(), rs.LockedBlock.StringShort(), +- indent, rs.ValidRound, +- indent, rs.ValidBlockParts.StringShort(), rs.ValidBlock.StringShort(), ++ indent, rs.TwoThirdPrevoteRound, ++ indent, rs.TwoThirdPrevoteBlockParts.StringShort(), rs.TwoThirdPrevoteBlock.StringShort(), + indent, rs.Votes.StringIndented(indent+" "), + indent, rs.LastCommit.StringShort(), + indent, rs.LastValidators.StringIndented(indent+" "), diff --git a/patches/consensus/wal.go.patch 
b/patches/consensus/wal.go.patch new file mode 100644 index 00000000000..263c5d10d3d --- /dev/null +++ b/patches/consensus/wal.go.patch @@ -0,0 +1,12 @@ +diff --git a/consensus/wal.go b/consensus/wal.go +index 90c7e208c..4e9aa7f74 100644 +--- a/consensus/wal.go ++++ b/consensus/wal.go +@@ -313,6 +313,7 @@ func (enc *WALEncoder) Encode(v *TimedWALMessage) error { + } + + crc := crc32.Checksum(data, crc32c) ++ //nolint:gosec + length := uint32(len(data)) + if length > maxMsgSizeBytes { + return fmt.Errorf("msg is too big: %d bytes, max: %d bytes", length, maxMsgSizeBytes) diff --git a/patches/consensus/wal_generator.go.patch b/patches/consensus/wal_generator.go.patch new file mode 100644 index 00000000000..8321f3f1b3d --- /dev/null +++ b/patches/consensus/wal_generator.go.patch @@ -0,0 +1,13 @@ +diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go +index f49cedf94..cf5f1e731 100644 +--- a/consensus/wal_generator.go ++++ b/consensus/wal_generator.go +@@ -84,7 +84,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { + }) + mempool := emptyMempool{} + evpool := sm.EmptyEvidencePool{} +- blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) ++ blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, sm.WithBlockStore(blockStore)) + consensusState := NewState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool) + consensusState.SetLogger(logger) + consensusState.SetEventBus(eventBus) diff --git a/patches/light/client.go.patch b/patches/light/client.go.patch new file mode 100644 index 00000000000..1b4f5671a02 --- /dev/null +++ b/patches/light/client.go.patch @@ -0,0 +1,78 @@ +diff --git a/light/client.go b/light/client.go +index 10dd2c2ea..1eb51ea51 100644 +--- a/light/client.go ++++ b/light/client.go +@@ -384,7 +384,7 @@ func (c *Client) initializeWithTrustOptions(ctx context.Context, options TrustOp + } + + 
// 3) Cross-verify with witnesses to ensure everybody has the same state. +- if err := c.compareFirstHeaderWithWitnesses(ctx, l.SignedHeader); err != nil { ++ if err := c.compareFirstLightBlockWithWitnesses(ctx, l); err != nil { + return err + } + +@@ -1126,9 +1126,9 @@ func (c *Client) findNewPrimary(ctx context.Context, height int64, remove bool) + return nil, lastError + } + +-// compareFirstHeaderWithWitnesses compares h with all witnesses. If any ++// compareFirstLightBlockWithWitnesses compares light block l with all witnesses. If any + // witness reports a different header than h, the function returns an error. +-func (c *Client) compareFirstHeaderWithWitnesses(ctx context.Context, h *types.SignedHeader) error { ++func (c *Client) compareFirstLightBlockWithWitnesses(ctx context.Context, l *types.LightBlock) error { + compareCtx, cancel := context.WithCancel(ctx) + defer cancel() + +@@ -1141,7 +1141,7 @@ func (c *Client) compareFirstHeaderWithWitnesses(ctx context.Context, h *types.S + + errc := make(chan error, len(c.witnesses)) + for i, witness := range c.witnesses { +- go c.compareNewHeaderWithWitness(compareCtx, errc, h, witness, i) ++ go c.compareNewLightBlockWithWitness(compareCtx, errc, l, witness, i) + } + + witnessesToRemove := make([]int, 0, len(c.witnesses)) +@@ -1153,23 +1153,29 @@ func (c *Client) compareFirstHeaderWithWitnesses(ctx context.Context, h *types.S + switch e := err.(type) { + case nil: + continue +- case errConflictingHeaders: +- c.logger.Error(fmt.Sprintf(`Witness #%d has a different header. Please check primary is correct +-and remove witness. Otherwise, use the different primary`, e.WitnessIndex), "witness", c.witnesses[e.WitnessIndex]) ++ case ErrConflictingHeaders: ++ c.logger.Error("Witness reports a conflicting header. 
"+ ++ "Please check if the primary is correct or use a different witness.", ++ "witness", c.witnesses[e.WitnessIndex], "err", err) + return err + case errBadWitness: + // If witness sent us an invalid header, then remove it +- c.logger.Info("witness sent an invalid light block, removing...", ++ c.logger.Info("Witness sent an invalid light block, removing...", + "witness", c.witnesses[e.WitnessIndex], + "err", err) + witnessesToRemove = append(witnessesToRemove, e.WitnessIndex) ++ case ErrProposerPrioritiesDiverge: ++ c.logger.Error("Witness reports conflicting proposer priorities. "+ ++ "Please check if the primary is correct or use a different witness.", ++ "witness", c.witnesses[e.WitnessIndex], "err", err) ++ return err + default: // benign errors can be ignored with the exception of context errors + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return err + } + + // the witness either didn't respond or didn't have the block. We ignore it. +- c.logger.Info("error comparing first header with witness. You may want to consider removing the witness", ++ c.logger.Info("Error comparing first header with witness. You may want to consider removing the witness", + "err", err) + } + +@@ -1177,7 +1183,7 @@ and remove witness. 
Otherwise, use the different primary`, e.WitnessIndex), "wit + + // remove witnesses that have misbehaved + if err := c.removeWitnesses(witnessesToRemove); err != nil { +- c.logger.Error("failed to remove witnesses", "err", err, "witnessesToRemove", witnessesToRemove) ++ c.logger.Error("Failed to remove witnesses", "err", err, "witnessesToRemove", witnessesToRemove) + } + + return nil diff --git a/patches/light/client_test.go.patch b/patches/light/client_test.go.patch new file mode 100644 index 00000000000..5f73a0026ec --- /dev/null +++ b/patches/light/client_test.go.patch @@ -0,0 +1,126 @@ +diff --git a/light/client_test.go b/light/client_test.go +index 5e00731a2..542f9640d 100644 +--- a/light/client_test.go ++++ b/light/client_test.go +@@ -31,11 +31,13 @@ var ( + bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") + h1 = keys.GenSignedHeader(chainID, 1, bTime, nil, vals, vals, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) +- // 3/3 signed +- h2 = keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, ++ // 3/3 signed. ++ vals2 = vals.CopyIncrementProposerPriority(1) ++ h2 = keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals2, vals2, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys), types.BlockID{Hash: h1.Hash()}) +- // 3/3 signed +- h3 = keys.GenSignedHeaderLastBlockID(chainID, 3, bTime.Add(1*time.Hour), nil, vals, vals, ++ // 3/3 signed. 
++ vals3 = vals2.CopyIncrementProposerPriority(1) ++ h3 = keys.GenSignedHeaderLastBlockID(chainID, 3, bTime.Add(1*time.Hour), nil, vals3, vals3, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys), types.BlockID{Hash: h2.Hash()}) + trustPeriod = 4 * time.Hour + trustOptions = light.TrustOptions{ +@@ -45,9 +47,9 @@ var ( + } + valSet = map[int64]*types.ValidatorSet{ + 1: vals, +- 2: vals, +- 3: vals, +- 4: vals, ++ 2: vals2, ++ 3: vals3, ++ 4: vals.CopyIncrementProposerPriority(1), + } + headerSet = map[int64]*types.SignedHeader{ + 1: h1, +@@ -57,7 +59,7 @@ var ( + 3: h3, + } + l1 = &types.LightBlock{SignedHeader: h1, ValidatorSet: vals} +- l2 = &types.LightBlock{SignedHeader: h2, ValidatorSet: vals} ++ l2 = &types.LightBlock{SignedHeader: h2, ValidatorSet: vals2} + fullNode = mockp.New( + chainID, + headerSet, +@@ -914,13 +916,13 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { + chainID, + map[int64]*types.SignedHeader{ + 1: h1, +- 2: keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, ++ 2: keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals2, vals2, + hash("app_hash2"), hash("cons_hash"), hash("results_hash"), + len(keys), len(keys), types.BlockID{Hash: h1.Hash()}), + }, + map[int64]*types.ValidatorSet{ + 1: vals, +- 2: vals, ++ 2: vals2, + }, + ) + // header is empty +@@ -932,7 +934,7 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { + }, + map[int64]*types.ValidatorSet{ + 1: vals, +- 2: vals, ++ 2: vals2, + }, + ) + +@@ -1158,3 +1160,56 @@ func TestClientHandlesContexts(t *testing.T) { + require.True(t, errors.Is(err, context.Canceled)) + + } ++ ++// TestClientErrorsDifferentProposerPriorities tests the case where the witness ++// sends us a light block with a validator set with different proposer priorities. 
++func TestClientErrorsDifferentProposerPriorities(t *testing.T) { ++ primary := mockp.New( ++ chainID, ++ map[int64]*types.SignedHeader{ ++ 1: h1, ++ 2: h2, ++ }, ++ map[int64]*types.ValidatorSet{ ++ 1: vals, ++ 2: vals2, ++ }, ++ ) ++ witness := mockp.New( ++ chainID, ++ map[int64]*types.SignedHeader{ ++ 1: h1, ++ 2: h2, ++ }, ++ map[int64]*types.ValidatorSet{ ++ 1: vals, ++ 2: vals, ++ }, ++ ) ++ ++ // Proposer priorities in vals and vals2 are different. ++ // This is because vals2 = vals.CopyIncrementProposerPriority(1) ++ require.Equal(t, vals.Hash(), vals2.Hash()) ++ require.NotEqual(t, vals.ProposerPriorityHash(), vals2.ProposerPriorityHash()) ++ ++ c, err := light.NewClient( ++ ctx, ++ chainID, ++ trustOptions, ++ fullNode, ++ []provider.Provider{primary, witness}, ++ dbs.New(dbm.NewMemDB(), chainID), ++ light.Logger(log.TestingLogger()), ++ light.MaxRetryAttempts(1), ++ ) ++ // witness should have behaved properly -> no error ++ require.NoError(t, err) ++ assert.EqualValues(t, 2, len(c.Witnesses())) ++ ++ // witness behaves incorrectly, but we can't prove who's guilty -> error ++ _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour)) ++ require.Error(t, err) ++ ++ // witness left in the list ++ assert.EqualValues(t, 2, len(c.Witnesses())) ++} diff --git a/patches/light/detector.go.patch b/patches/light/detector.go.patch new file mode 100644 index 00000000000..53a4b027b03 --- /dev/null +++ b/patches/light/detector.go.patch @@ -0,0 +1,109 @@ +diff --git a/light/detector.go b/light/detector.go +index 2c55c1729..6ff795e28 100644 +--- a/light/detector.go ++++ b/light/detector.go +@@ -31,7 +31,8 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig + } + var ( + headerMatched bool +- lastVerifiedHeader = primaryTrace[len(primaryTrace)-1].SignedHeader ++ lastVerifiedBlock = primaryTrace[len(primaryTrace)-1] ++ lastVerifiedHeader = lastVerifiedBlock.SignedHeader + witnessesToRemove = make([]int, 0) + ) + 
c.logger.Debug("Running detector against trace", "endBlockHeight", lastVerifiedHeader.Height, +@@ -48,7 +49,7 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig + // and compare it with the header from the primary + errc := make(chan error, len(c.witnesses)) + for i, witness := range c.witnesses { +- go c.compareNewHeaderWithWitness(ctx, errc, lastVerifiedHeader, witness, i) ++ go c.compareNewLightBlockWithWitness(ctx, errc, lastVerifiedBlock, witness, i) + } + + // handle errors from the header comparisons as they come in +@@ -58,7 +59,7 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig + switch e := err.(type) { + case nil: // at least one header matched + headerMatched = true +- case errConflictingHeaders: ++ case ErrConflictingHeaders: + // We have conflicting headers. This could possibly imply an attack on the light client. + // First we need to verify the witness's header using the same skipping verification and then we + // need to find the point that the headers diverge and examine this for any evidence of an attack. 
+@@ -79,6 +80,10 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig + c.logger.Info("witness returned an error during header comparison, removing...", + "witness", c.witnesses[e.WitnessIndex], "err", err) + witnessesToRemove = append(witnessesToRemove, e.WitnessIndex) ++ case ErrProposerPrioritiesDiverge: ++ c.logger.Info("witness reported validator set with different proposer priorities", ++ "witness", c.witnesses[e.WitnessIndex], "err", err) ++ return e + default: + // Benign errors which can be ignored unless there was a context + // canceled +@@ -104,17 +109,19 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig + return ErrFailedHeaderCrossReferencing + } + +-// compareNewHeaderWithWitness takes the verified header from the primary and compares it with a ++// compareNewLightBlockWithWitness takes the verified header from the primary and compares it with a + // header from a specified witness. The function can return one of three errors: + // +-// 1: errConflictingHeaders -> there may have been an attack on this light client ++// 1: ErrConflictingHeaders -> there may have been an attack on this light client + // 2: errBadWitness -> the witness has either not responded, doesn't have the header or has given us an invalid one + // + // Note: In the case of an invalid header we remove the witness + // + // 3: nil -> the hashes of the two headers match +-func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan error, h *types.SignedHeader, +- witness provider.Provider, witnessIndex int) { ++func (c *Client) compareNewLightBlockWithWitness(ctx context.Context, errc chan error, l *types.LightBlock, ++ witness provider.Provider, witnessIndex int, ++) { ++ h := l.SignedHeader + + lightBlock, err := witness.LightBlock(ctx, h.Height) + switch err { +@@ -150,7 +157,7 @@ func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan erro + // witness' last header is below the 
primary's header. We check the times to see if the blocks + // have conflicting times + if !lightBlock.Time.Before(h.Time) { +- errc <- errConflictingHeaders{Block: lightBlock, WitnessIndex: witnessIndex} ++ errc <- ErrConflictingHeaders{Block: lightBlock, WitnessIndex: witnessIndex} + return + } + +@@ -175,7 +182,7 @@ func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan erro + // the witness still doesn't have a block at the height of the primary. + // Check if there is a conflicting time + if !lightBlock.Time.Before(h.Time) { +- errc <- errConflictingHeaders{Block: lightBlock, WitnessIndex: witnessIndex} ++ errc <- ErrConflictingHeaders{Block: lightBlock, WitnessIndex: witnessIndex} + return + } + +@@ -197,7 +204,13 @@ func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan erro + } + + if !bytes.Equal(h.Hash(), lightBlock.Hash()) { +- errc <- errConflictingHeaders{Block: lightBlock, WitnessIndex: witnessIndex} ++ errc <- ErrConflictingHeaders{Block: lightBlock, WitnessIndex: witnessIndex} ++ } ++ ++ // ProposerPriorityHash is not part of the header hash, so we need to check it separately. ++ wanted, got := l.ValidatorSet.ProposerPriorityHash(), lightBlock.ValidatorSet.ProposerPriorityHash() ++ if !bytes.Equal(wanted, got) { ++ errc <- ErrProposerPrioritiesDiverge{WitnessHash: got, WitnessIndex: witnessIndex, PrimaryHash: wanted} + } + + c.logger.Debug("Matching header received by witness", "height", h.Height, "witness", witnessIndex) +@@ -245,7 +258,7 @@ func (c *Client) handleConflictingHeaders( + if primaryBlock.Commit.Round != witnessTrace[len(witnessTrace)-1].Commit.Round { + c.logger.Info("The light client has detected, and prevented, an attempted amnesia attack." + + " We think this attack is pretty unlikely, so if you see it, that's interesting to us." 
+ +- " Can you let us know by opening an issue through https://github.com/comet/comet/issues/new?") ++ " Can you let us know by opening an issue through https://github.com/cometbft/cometbft/issues/new?") + } + + // This may not be valid because the witness itself is at fault. So now we reverse it, examining the diff --git a/patches/light/errors.go.patch b/patches/light/errors.go.patch new file mode 100644 index 00000000000..a16e165f850 --- /dev/null +++ b/patches/light/errors.go.patch @@ -0,0 +1,45 @@ +diff --git a/light/errors.go b/light/errors.go +index 73ed232b3..665e227d0 100644 +--- a/light/errors.go ++++ b/light/errors.go +@@ -75,20 +75,36 @@ var ErrLightClientAttack = errors.New(`attempted attack detected. + // continue running the light client. + var ErrNoWitnesses = errors.New("no witnesses connected. please reset light client") + +-// ----------------------------- INTERNAL ERRORS --------------------------------- +- + // ErrConflictingHeaders is thrown when two conflicting headers are discovered. +-type errConflictingHeaders struct { ++type ErrConflictingHeaders struct { + Block *types.LightBlock + WitnessIndex int + } + +-func (e errConflictingHeaders) Error() string { ++func (e ErrConflictingHeaders) Error() string { + return fmt.Sprintf( + "header hash (%X) from witness (%d) does not match primary", + e.Block.Hash(), e.WitnessIndex) + } + ++// ErrProposerPrioritiesDiverge is thrown when two conflicting headers are ++// discovered, but the error is non-attributable comparing to ErrConflictingHeaders. ++// The difference is in validator set proposer priorities, which may change ++// with every round of consensus. 
++type ErrProposerPrioritiesDiverge struct { ++ WitnessHash []byte ++ WitnessIndex int ++ PrimaryHash []byte ++} ++ ++func (e ErrProposerPrioritiesDiverge) Error() string { ++ return fmt.Sprintf( ++ "validator set's proposer priority hashes do not match: witness[%d]=%X, primary=%X", ++ e.WitnessIndex, e.WitnessHash, e.PrimaryHash) ++} ++ ++// ----------------------------- INTERNAL ERRORS --------------------------------- ++ + // errBadWitness is returned when the witness either does not respond or + // responds with an invalid header. + type errBadWitness struct { diff --git a/patches/light/provider/http/http.go.patch b/patches/light/provider/http/http.go.patch new file mode 100644 index 00000000000..47f568711af --- /dev/null +++ b/patches/light/provider/http/http.go.patch @@ -0,0 +1,42 @@ +diff --git a/light/provider/http/http.go b/light/provider/http/http.go +index 430dddd54..f8f00cc3f 100644 +--- a/light/provider/http/http.go ++++ b/light/provider/http/http.go +@@ -157,9 +157,18 @@ OUTER_LOOP: + + case regexpTimedOut.MatchString(err.Error()): + // we wait and try again with exponential backoff ++ //nolint:gosec + time.Sleep(backoffTimeout(uint16(attempt))) + continue + ++ // NOTE: it seems like the context errors are being wrapped in a way that ++ // makes them hard to detect. For now, we just check the error string. ++ case strings.Contains(err.Error(), context.DeadlineExceeded.Error()): ++ return nil, context.DeadlineExceeded ++ ++ case ctx.Err() != nil: ++ return nil, ctx.Err() ++ + // context canceled or connection refused we return the error + default: + return nil, err +@@ -199,9 +208,18 @@ func (p *http) signedHeader(ctx context.Context, height *int64) (*types.SignedHe + + case regexpTimedOut.MatchString(err.Error()): + // we wait and try again with exponential backoff ++ //nolint:gosec + time.Sleep(backoffTimeout(uint16(attempt))) + continue + ++ // NOTE: it seems like the context errors are being wrapped in a way that ++ // makes them hard to detect. 
For now, we just check the error string. ++ case strings.Contains(err.Error(), context.DeadlineExceeded.Error()): ++ return nil, context.DeadlineExceeded ++ ++ case ctx.Err() != nil: ++ return nil, ctx.Err() ++ + // either context was cancelled or connection refused. + default: + return nil, err diff --git a/patches/light/provider/http/http_test.go.patch b/patches/light/provider/http/http_test.go.patch new file mode 100644 index 00000000000..28058c89ab9 --- /dev/null +++ b/patches/light/provider/http/http_test.go.patch @@ -0,0 +1,66 @@ +diff --git a/light/provider/http/http_test.go b/light/provider/http/http_test.go +index 36be4d281..4067e6e18 100644 +--- a/light/provider/http/http_test.go ++++ b/light/provider/http/http_test.go +@@ -15,6 +15,7 @@ import ( + lighthttp "github.com/tendermint/tendermint/light/provider/http" + rpcclient "github.com/tendermint/tendermint/rpc/client" + rpchttp "github.com/tendermint/tendermint/rpc/client/http" ++ ctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctest "github.com/tendermint/tendermint/rpc/test" + "github.com/tendermint/tendermint/types" + ) +@@ -46,10 +47,9 @@ func TestProvider(t *testing.T) { + chainID := genDoc.ChainID + + c, err := rpchttp.New(rpcAddr, "/websocket") +- require.Nil(t, err) ++ require.NoError(t, err) + + p := lighthttp.NewWithClient(chainID, c) +- require.NoError(t, err) + require.NotNil(t, p) + + // let it produce some blocks +@@ -82,6 +82,21 @@ func TestProvider(t *testing.T) { + require.Nil(t, lb) + assert.Equal(t, provider.ErrLightBlockNotFound, err) + ++ // fetching with the context cancelled ++ ctx, cancel := context.WithCancel(context.Background()) ++ cancel() ++ _, err = p.LightBlock(ctx, lower+3) ++ require.Error(t, err) ++ require.Equal(t, context.Canceled, err) ++ ++ // fetching with the deadline exceeded (a mock RPC client is used to simulate this) ++ c2, err := newMockHTTP(rpcAddr) ++ require.NoError(t, err) ++ p2 := lighthttp.NewWithClient(chainID, c2) ++ _, err = 
p2.LightBlock(context.Background(), 0) ++ require.Error(t, err) ++ require.Equal(t, context.DeadlineExceeded, err) ++ + // stop the full node and check that a no response error is returned + rpctest.StopTendermint(node) + time.Sleep(10 * time.Second) +@@ -91,3 +106,19 @@ func TestProvider(t *testing.T) { + require.Contains(t, err.Error(), "connection refused") + require.Nil(t, lb) + } ++ ++type mockHTTP struct { ++ *rpchttp.HTTP ++} ++ ++func newMockHTTP(remote string) (*mockHTTP, error) { ++ c, err := rpchttp.New(remote, "/websocket") ++ if err != nil { ++ return nil, err ++ } ++ return &mockHTTP{c}, nil ++} ++ ++func (m *mockHTTP) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) { ++ return nil, fmt.Errorf("post failed: %w", context.DeadlineExceeded) ++} diff --git a/patches/light/proxy/routes.go.patch b/patches/light/proxy/routes.go.patch new file mode 100644 index 00000000000..b12e83d0457 --- /dev/null +++ b/patches/light/proxy/routes.go.patch @@ -0,0 +1,59 @@ +diff --git a/light/proxy/routes.go b/light/proxy/routes.go +index 2898853d2..b32b6a39d 100644 +--- a/light/proxy/routes.go ++++ b/light/proxy/routes.go +@@ -28,6 +28,8 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc { + "block_by_hash": rpcserver.NewRPCFunc(makeBlockByHashFunc(c), "hash", rpcserver.Cacheable()), + "block_results": rpcserver.NewRPCFunc(makeBlockResultsFunc(c), "height", rpcserver.Cacheable("height")), + "commit": rpcserver.NewRPCFunc(makeCommitFunc(c), "height", rpcserver.Cacheable("height")), ++ "header": rpcserver.NewRPCFunc(makeHeaderFunc(c), "height", rpcserver.Cacheable("height")), ++ "header_by_hash": rpcserver.NewRPCFunc(makeHeaderByHashFunc(c), "hash"), + "tx": rpcserver.NewRPCFunc(makeTxFunc(c), "hash,prove", rpcserver.Cacheable()), + "tx_search": rpcserver.NewRPCFunc(makeTxSearchFuncMatchEvents(c), "query,prove,page,per_page,order_by,match_events"), + "block_search": 
rpcserver.NewRPCFunc(makeBlockSearchFuncMatchEvents(c), "query,page,per_page,order_by,match_events"), +@@ -37,6 +39,7 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc { + "consensus_params": rpcserver.NewRPCFunc(makeConsensusParamsFunc(c), "height", rpcserver.Cacheable("height")), + "unconfirmed_txs": rpcserver.NewRPCFunc(makeUnconfirmedTxsFunc(c), "limit"), + "num_unconfirmed_txs": rpcserver.NewRPCFunc(makeNumUnconfirmedTxsFunc(c), ""), ++ "tx_status": rpcserver.NewRPCFunc(makeTxStatusFunc(c), "hash"), + + // tx broadcast API + "broadcast_tx_commit": rpcserver.NewRPCFunc(makeBroadcastTxCommitFunc(c), "tx"), +@@ -109,6 +112,22 @@ func makeBlockFunc(c *lrpc.Client) rpcBlockFunc { + } + } + ++type rpcHeaderFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultHeader, error) ++ ++func makeHeaderFunc(c *lrpc.Client) rpcHeaderFunc { ++ return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultHeader, error) { ++ return c.Header(ctx.Context(), height) ++ } ++} ++ ++type rpcHeaderByHashFunc func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultHeader, error) ++ ++func makeHeaderByHashFunc(c *lrpc.Client) rpcHeaderByHashFunc { ++ return func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultHeader, error) { ++ return c.HeaderByHash(ctx.Context(), hash) ++ } ++} ++ + type rpcBlockByHashFunc func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) + + func makeBlockByHashFunc(c *lrpc.Client) rpcBlockByHashFunc { +@@ -125,6 +144,14 @@ func makeBlockResultsFunc(c *lrpc.Client) rpcBlockResultsFunc { + } + } + ++type rpcTxStatusFunc func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultTxStatus, error) ++ ++func makeTxStatusFunc(c *lrpc.Client) rpcTxStatusFunc { ++ return func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultTxStatus, error) { ++ return c.TxStatus(ctx.Context(), hash) ++ } ++} ++ + type rpcCommitFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultCommit, error) + + func makeCommitFunc(c *lrpc.Client) 
rpcCommitFunc { diff --git a/patches/light/rpc/client.go.patch b/patches/light/rpc/client.go.patch new file mode 100644 index 00000000000..783126ad132 --- /dev/null +++ b/patches/light/rpc/client.go.patch @@ -0,0 +1,165 @@ +diff --git a/light/rpc/client.go b/light/rpc/client.go +index 73e4e82dc..7f7fc64b1 100644 +--- a/light/rpc/client.go ++++ b/light/rpc/client.go +@@ -343,6 +343,52 @@ func (c *Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, + return res, nil + } + ++// SignedBlock calls rpcclient#SignedBlock and then verifies the result. ++func (c *Client) SignedBlock(ctx context.Context, height *int64) (*ctypes.ResultSignedBlock, error) { ++ res, err := c.next.SignedBlock(ctx, height) ++ if err != nil { ++ return nil, err ++ } ++ ++ // Validate res. ++ if err := res.Header.ValidateBasic(); err != nil { ++ return nil, err ++ } ++ if height != nil && res.Header.Height != *height { ++ return nil, fmt.Errorf("incorrect height returned. Expected %d, got %d", *height, res.Header.Height) ++ } ++ if err := res.Commit.ValidateBasic(); err != nil { ++ return nil, err ++ } ++ if err := res.ValidatorSet.ValidateBasic(); err != nil { ++ return nil, err ++ } ++ ++ // NOTE: this will re-request the header and commit from the primary. Ideally, you'd just ++ // fetch the data from the primary and use the light client to verify it. 
++ l, err := c.updateLightClientIfNeededTo(ctx, &res.Header.Height) ++ if err != nil { ++ return nil, err ++ } ++ ++ if bmH, bH := l.Header.Hash(), res.Header.Hash(); !bytes.Equal(bmH, bH) { ++ return nil, fmt.Errorf("light client header %X does not match with response header %X", ++ bmH, bH) ++ } ++ ++ if bmH, bH := l.Header.DataHash, res.Data.Hash(); !bytes.Equal(bmH, bH) { ++ return nil, fmt.Errorf("light client data hash %X does not match with response data %X", ++ bmH, bH) ++ } ++ ++ return &ctypes.ResultSignedBlock{ ++ Header: res.Header, ++ Commit: *l.Commit, ++ ValidatorSet: *l.ValidatorSet, ++ Data: res.Data, ++ }, nil ++} ++ + // BlockByHash calls rpcclient#BlockByHash and then verifies the result. + func (c *Client) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) { + res, err := c.next.BlockByHash(ctx, hash) +@@ -441,6 +487,45 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul + return res, nil + } + ++// TxStatus retrieves the status of the transaction given its hash. ++func (c *Client) TxStatus(ctx context.Context, hash []byte) (*ctypes.ResultTxStatus, error) { ++ return c.next.TxStatus(ctx, hash) ++} ++ ++// Header fetches and verifies the header directly via the light client ++func (c *Client) Header(ctx context.Context, height *int64) (*ctypes.ResultHeader, error) { ++ lb, err := c.updateLightClientIfNeededTo(ctx, height) ++ if err != nil { ++ return nil, err ++ } ++ ++ return &ctypes.ResultHeader{Header: lb.Header}, nil ++} ++ ++// HeaderByHash calls rpcclient#HeaderByHash and updates the client if it's falling behind. 
++func (c *Client) HeaderByHash(ctx context.Context, hash cmtbytes.HexBytes) (*ctypes.ResultHeader, error) { ++ res, err := c.next.HeaderByHash(ctx, hash) ++ if err != nil { ++ return nil, err ++ } ++ ++ if err := res.Header.ValidateBasic(); err != nil { ++ return nil, err ++ } ++ ++ lb, err := c.updateLightClientIfNeededTo(ctx, &res.Header.Height) ++ if err != nil { ++ return nil, err ++ } ++ ++ if !bytes.Equal(lb.Header.Hash(), res.Header.Hash()) { ++ return nil, fmt.Errorf("primary header hash does not match trusted header hash. (%X != %X)", ++ lb.Header.Hash(), res.Header.Hash()) ++ } ++ ++ return res, nil ++} ++ + func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { + // Update the light client if we're behind and retrieve the light block at the requested height + // or at the latest height if no height is provided. +@@ -455,6 +540,26 @@ func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommi + }, nil + } + ++func (c *Client) DataCommitment( ++ ctx context.Context, ++ start uint64, ++ end uint64, ++) (*ctypes.ResultDataCommitment, error) { ++ return c.next.DataCommitment(ctx, start, end) ++} ++ ++// DataRootInclusionProof calls rpcclient#DataRootInclusionProof method and returns ++// a merkle proof for the data root of block height `height` to the set of blocks ++// defined by `start` and `end`. ++func (c *Client) DataRootInclusionProof( ++ ctx context.Context, ++ height uint64, ++ start uint64, ++ end uint64, ++) (*ctypes.ResultDataRootInclusionProof, error) { ++ return c.next.DataRootInclusionProof(ctx, height, start, end) ++} ++ + // Tx calls rpcclient#Tx method and then verifies the proof if such was + // requested. 
+ func (c *Client) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { +@@ -478,6 +583,34 @@ func (c *Client) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.Resul + return res, res.Proof.Validate(l.DataHash) + } + ++// ProveShares calls rpcclient#ProveShares method and returns an NMT proof for a set ++// of shares, defined by `startShare` and `endShare`, to the corresponding rows. ++// Then, a binary merkle inclusion proof from the latter rows to the data root. ++// Deprecated: Use ProveSharesV2 instead. ++func (c *Client) ProveShares( ++ ctx context.Context, ++ height uint64, ++ startShare uint64, ++ endShare uint64, ++) (types.ShareProof, error) { ++ res, err := c.next.ProveShares(ctx, height, startShare, endShare) ++ return res, err ++} ++ ++// ProveSharesV2 returns a proof of inclusion for a share range to the data root ++// of the given height. ++// The range is end-exclusive and defined by startShare and endShare. ++// Note: this proof is composed of multiple proofs. 
++func (c *Client) ProveSharesV2( ++ ctx context.Context, ++ height uint64, ++ startShare uint64, ++ endShare uint64, ++) (*ctypes.ResultShareProof, error) { ++ res, err := c.next.ProveSharesV2(ctx, height, startShare, endShare) ++ return res, err ++} ++ + func (c *Client) TxSearch( + ctx context.Context, + query string, diff --git a/patches/light/rpc/mocks/light_client.go.patch b/patches/light/rpc/mocks/light_client.go.patch new file mode 100644 index 00000000000..eefa8a734f2 --- /dev/null +++ b/patches/light/rpc/mocks/light_client.go.patch @@ -0,0 +1,105 @@ +diff --git a/light/rpc/mocks/light_client.go b/light/rpc/mocks/light_client.go +index fabf73b01..3d0467dc3 100644 +--- a/light/rpc/mocks/light_client.go ++++ b/light/rpc/mocks/light_client.go +@@ -21,6 +21,10 @@ type LightClient struct { + func (_m *LightClient) ChainID() string { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for ChainID") ++ } ++ + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() +@@ -35,7 +39,15 @@ func (_m *LightClient) ChainID() string { + func (_m *LightClient) TrustedLightBlock(height int64) (*types.LightBlock, error) { + ret := _m.Called(height) + ++ if len(ret) == 0 { ++ panic("no return value specified for TrustedLightBlock") ++ } ++ + var r0 *types.LightBlock ++ var r1 error ++ if rf, ok := ret.Get(0).(func(int64) (*types.LightBlock, error)); ok { ++ return rf(height) ++ } + if rf, ok := ret.Get(0).(func(int64) *types.LightBlock); ok { + r0 = rf(height) + } else { +@@ -44,7 +56,6 @@ func (_m *LightClient) TrustedLightBlock(height int64) (*types.LightBlock, error + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(height) + } else { +@@ -58,7 +69,15 @@ func (_m *LightClient) TrustedLightBlock(height int64) (*types.LightBlock, error + func (_m *LightClient) Update(ctx context.Context, now time.Time) (*types.LightBlock, error) { + ret := _m.Called(ctx, now) + ++ if len(ret) == 0 { ++ 
panic("no return value specified for Update") ++ } ++ + var r0 *types.LightBlock ++ var r1 error ++ if rf, ok := ret.Get(0).(func(context.Context, time.Time) (*types.LightBlock, error)); ok { ++ return rf(ctx, now) ++ } + if rf, ok := ret.Get(0).(func(context.Context, time.Time) *types.LightBlock); ok { + r0 = rf(ctx, now) + } else { +@@ -67,7 +86,6 @@ func (_m *LightClient) Update(ctx context.Context, now time.Time) (*types.LightB + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func(context.Context, time.Time) error); ok { + r1 = rf(ctx, now) + } else { +@@ -81,7 +99,15 @@ func (_m *LightClient) Update(ctx context.Context, now time.Time) (*types.LightB + func (_m *LightClient) VerifyLightBlockAtHeight(ctx context.Context, height int64, now time.Time) (*types.LightBlock, error) { + ret := _m.Called(ctx, height, now) + ++ if len(ret) == 0 { ++ panic("no return value specified for VerifyLightBlockAtHeight") ++ } ++ + var r0 *types.LightBlock ++ var r1 error ++ if rf, ok := ret.Get(0).(func(context.Context, int64, time.Time) (*types.LightBlock, error)); ok { ++ return rf(ctx, height, now) ++ } + if rf, ok := ret.Get(0).(func(context.Context, int64, time.Time) *types.LightBlock); ok { + r0 = rf(ctx, height, now) + } else { +@@ -90,7 +116,6 @@ func (_m *LightClient) VerifyLightBlockAtHeight(ctx context.Context, height int6 + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int64, time.Time) error); ok { + r1 = rf(ctx, height, now) + } else { +@@ -100,13 +125,12 @@ func (_m *LightClient) VerifyLightBlockAtHeight(ctx context.Context, height int6 + return r0, r1 + } + +-type mockConstructorTestingTNewLightClient interface { ++// NewLightClient creates a new instance of LightClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. ++// The first argument is typically a *testing.T value. 
++func NewLightClient(t interface { + mock.TestingT + Cleanup(func()) +-} +- +-// NewLightClient creates a new instance of LightClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +-func NewLightClient(t mockConstructorTestingTNewLightClient) *LightClient { ++}) *LightClient { + mock := &LightClient{} + mock.Mock.Test(t) + diff --git a/patches/light/store/db/db.go.patch b/patches/light/store/db/db.go.patch new file mode 100644 index 00000000000..ff12283632e --- /dev/null +++ b/patches/light/store/db/db.go.patch @@ -0,0 +1,12 @@ +diff --git a/light/store/db/db.go b/light/store/db/db.go +index 37666fdcf..37aeae893 100644 +--- a/light/store/db/db.go ++++ b/light/store/db/db.go +@@ -271,6 +271,7 @@ func (s *dbs) Prune(size uint16) error { + s.mtx.Lock() + defer s.mtx.Unlock() + ++ //nolint:gosec + s.size -= uint16(pruned) + + if wErr := s.db.SetSync(sizeKey, marshalSize(s.size)); wErr != nil { diff --git a/patches/mempool/cache.go.patch b/patches/mempool/cache.go.patch new file mode 100644 index 00000000000..0cf4d339f71 --- /dev/null +++ b/patches/mempool/cache.go.patch @@ -0,0 +1,56 @@ +diff --git a/mempool/cache.go b/mempool/cache.go +index 3c8748ff4..0f1a9f43e 100644 +--- a/mempool/cache.go ++++ b/mempool/cache.go +@@ -26,6 +26,9 @@ type TxCache interface { + // Has reports whether tx is present in the cache. Checking for presence is + // not treated as an access of the value. + Has(tx types.Tx) bool ++ ++ // HasKey reports whether the given key is present in the cache. 
++ HasKey(key types.TxKey) bool + } + + var _ TxCache = (*LRUTxCache)(nil) +@@ -89,10 +92,14 @@ func (c *LRUTxCache) Push(tx types.Tx) bool { + } + + func (c *LRUTxCache) Remove(tx types.Tx) { ++ key := tx.Key() ++ c.RemoveTxByKey(key) ++} ++ ++func (c *LRUTxCache) RemoveTxByKey(key types.TxKey) { + c.mtx.Lock() + defer c.mtx.Unlock() + +- key := tx.Key() + e := c.cacheMap[key] + delete(c.cacheMap, key) + +@@ -109,12 +116,21 @@ func (c *LRUTxCache) Has(tx types.Tx) bool { + return ok + } + ++func (c *LRUTxCache) HasKey(key types.TxKey) bool { ++ c.mtx.Lock() ++ defer c.mtx.Unlock() ++ ++ _, ok := c.cacheMap[key] ++ return ok ++} ++ + // NopTxCache defines a no-op raw transaction cache. + type NopTxCache struct{} + + var _ TxCache = (*NopTxCache)(nil) + +-func (NopTxCache) Reset() {} +-func (NopTxCache) Push(types.Tx) bool { return true } +-func (NopTxCache) Remove(types.Tx) {} +-func (NopTxCache) Has(types.Tx) bool { return false } ++func (NopTxCache) Reset() {} ++func (NopTxCache) Push(types.Tx) bool { return true } ++func (NopTxCache) Remove(types.Tx) {} ++func (NopTxCache) Has(types.Tx) bool { return false } ++func (NopTxCache) HasKey(types.TxKey) bool { return false } diff --git a/patches/mempool/cache_test.go.patch b/patches/mempool/cache_test.go.patch new file mode 100644 index 00000000000..646f025a228 --- /dev/null +++ b/patches/mempool/cache_test.go.patch @@ -0,0 +1,71 @@ +diff --git a/mempool/cache_test.go b/mempool/cache_test.go +index 44b2beb01..86b6ebc66 100644 +--- a/mempool/cache_test.go ++++ b/mempool/cache_test.go +@@ -5,27 +5,37 @@ import ( + "testing" + + "github.com/stretchr/testify/require" ++ "github.com/tendermint/tendermint/types" + ) + +-func TestCacheRemove(t *testing.T) { +- cache := NewLRUTxCache(100) +- numTxs := 10 ++func populate(cache TxCache, numTxs int) ([][]byte, error) { + + txs := make([][]byte, numTxs) + for i := 0; i < numTxs; i++ { + // probability of collision is 2**-256 + txBytes := make([]byte, 32) + _, err := 
rand.Read(txBytes) +- require.NoError(t, err) ++ ++ if err != nil { ++ return nil, err ++ } + + txs[i] = txBytes + cache.Push(txBytes) +- +- // make sure its added to both the linked list and the map +- require.Equal(t, i+1, len(cache.cacheMap)) +- require.Equal(t, i+1, cache.list.Len()) + } + ++ return txs, nil ++} ++ ++func TestCacheRemove(t *testing.T) { ++ cache := NewLRUTxCache(100) ++ numTxs := 10 ++ ++ txs, err := populate(cache, numTxs) ++ require.NoError(t, err) ++ require.Equal(t, numTxs, len(cache.cacheMap)) ++ require.Equal(t, numTxs, cache.list.Len()) ++ + for i := 0; i < numTxs; i++ { + cache.Remove(txs[i]) + // make sure its removed from both the map and the linked list +@@ -33,3 +43,20 @@ func TestCacheRemove(t *testing.T) { + require.Equal(t, numTxs-(i+1), cache.list.Len()) + } + } ++ ++func TestCacheRemoveByKey(t *testing.T) { ++ cache := NewLRUTxCache(100) ++ numTxs := 10 ++ ++ txs, err := populate(cache, numTxs) ++ require.NoError(t, err) ++ require.Equal(t, numTxs, len(cache.cacheMap)) ++ require.Equal(t, numTxs, cache.list.Len()) ++ ++ for i := 0; i < numTxs; i++ { ++ cache.RemoveTxByKey(types.Tx(txs[i]).Key()) ++ // make sure its removed from both the map and the linked list ++ require.Equal(t, numTxs-(i+1), len(cache.cacheMap)) ++ require.Equal(t, numTxs-(i+1), cache.list.Len()) ++ } ++} diff --git a/patches/mempool/cat/cache.go.patch b/patches/mempool/cat/cache.go.patch new file mode 100644 index 00000000000..4d0d21a18ac --- /dev/null +++ b/patches/mempool/cat/cache.go.patch @@ -0,0 +1,216 @@ +diff --git a/mempool/cat/cache.go b/mempool/cat/cache.go +new file mode 100644 +index 000000000..7540f7912 +--- /dev/null ++++ b/mempool/cat/cache.go +@@ -0,0 +1,210 @@ ++package cat ++ ++import ( ++ "container/list" ++ "time" ++ ++ tmsync "github.com/tendermint/tendermint/libs/sync" ++ "github.com/tendermint/tendermint/types" ++) ++ ++// LRUTxCache maintains a thread-safe LRU cache of raw transactions. 
The cache ++// only stores the hash of the raw transaction. ++// NOTE: This has been copied from mempool/cache with the main diffence of using ++// tx keys instead of raw transactions. ++type LRUTxCache struct { ++ staticSize int ++ ++ mtx tmsync.Mutex ++ // cacheMap is used as a quick look up table ++ cacheMap map[types.TxKey]*list.Element ++ // list is a doubly linked list used to capture the FIFO nature of the cache ++ list *list.List ++} ++ ++func NewLRUTxCache(cacheSize int) *LRUTxCache { ++ return &LRUTxCache{ ++ staticSize: cacheSize, ++ cacheMap: make(map[types.TxKey]*list.Element, cacheSize), ++ list: list.New(), ++ } ++} ++ ++func (c *LRUTxCache) Reset() { ++ c.mtx.Lock() ++ defer c.mtx.Unlock() ++ ++ c.cacheMap = make(map[types.TxKey]*list.Element, c.staticSize) ++ c.list.Init() ++} ++ ++func (c *LRUTxCache) Push(txKey types.TxKey) bool { ++ if c.staticSize == 0 { ++ return true ++ } ++ ++ c.mtx.Lock() ++ defer c.mtx.Unlock() ++ ++ moved, ok := c.cacheMap[txKey] ++ if ok { ++ c.list.MoveToBack(moved) ++ return false ++ } ++ ++ if c.list.Len() >= c.staticSize { ++ front := c.list.Front() ++ if front != nil { ++ frontKey := front.Value.(types.TxKey) ++ delete(c.cacheMap, frontKey) ++ c.list.Remove(front) ++ } ++ } ++ ++ e := c.list.PushBack(txKey) ++ c.cacheMap[txKey] = e ++ ++ return true ++} ++ ++func (c *LRUTxCache) Remove(txKey types.TxKey) { ++ if c.staticSize == 0 { ++ return ++ } ++ ++ c.mtx.Lock() ++ defer c.mtx.Unlock() ++ ++ e := c.cacheMap[txKey] ++ delete(c.cacheMap, txKey) ++ ++ if e != nil { ++ c.list.Remove(e) ++ } ++} ++ ++func (c *LRUTxCache) Has(txKey types.TxKey) bool { ++ if c.staticSize == 0 { ++ return false ++ } ++ ++ c.mtx.Lock() ++ defer c.mtx.Unlock() ++ ++ _, ok := c.cacheMap[txKey] ++ return ok ++} ++ ++// SeenTxSet records transactions that have been ++// seen by other peers but not yet by us ++type SeenTxSet struct { ++ mtx tmsync.Mutex ++ set map[types.TxKey]timestampedPeerSet ++} ++ ++type timestampedPeerSet struct { ++ 
peers map[uint16]struct{} ++ time time.Time ++} ++ ++func NewSeenTxSet() *SeenTxSet { ++ return &SeenTxSet{ ++ set: make(map[types.TxKey]timestampedPeerSet), ++ } ++} ++ ++func (s *SeenTxSet) Add(txKey types.TxKey, peer uint16) { ++ if peer == 0 { ++ return ++ } ++ s.mtx.Lock() ++ defer s.mtx.Unlock() ++ seenSet, exists := s.set[txKey] ++ if !exists { ++ s.set[txKey] = timestampedPeerSet{ ++ peers: map[uint16]struct{}{peer: {}}, ++ time: time.Now().UTC(), ++ } ++ } else { ++ seenSet.peers[peer] = struct{}{} ++ } ++} ++ ++func (s *SeenTxSet) RemoveKey(txKey types.TxKey) { ++ s.mtx.Lock() ++ defer s.mtx.Unlock() ++ delete(s.set, txKey) ++} ++ ++func (s *SeenTxSet) Remove(txKey types.TxKey, peer uint16) { ++ s.mtx.Lock() ++ defer s.mtx.Unlock() ++ set, exists := s.set[txKey] ++ if exists { ++ if len(set.peers) == 1 { ++ delete(s.set, txKey) ++ } else { ++ delete(set.peers, peer) ++ } ++ } ++} ++ ++func (s *SeenTxSet) RemovePeer(peer uint16) { ++ s.mtx.Lock() ++ defer s.mtx.Unlock() ++ for key, seenSet := range s.set { ++ delete(seenSet.peers, peer) ++ if len(seenSet.peers) == 0 { ++ delete(s.set, key) ++ } ++ } ++} ++ ++func (s *SeenTxSet) Prune(limit time.Time) { ++ s.mtx.Lock() ++ defer s.mtx.Unlock() ++ for key, seenSet := range s.set { ++ if seenSet.time.Before(limit) { ++ delete(s.set, key) ++ } ++ } ++} ++ ++func (s *SeenTxSet) Has(txKey types.TxKey, peer uint16) bool { ++ s.mtx.Lock() ++ defer s.mtx.Unlock() ++ seenSet, exists := s.set[txKey] ++ if !exists { ++ return false ++ } ++ _, has := seenSet.peers[peer] ++ return has ++} ++ ++func (s *SeenTxSet) Get(txKey types.TxKey) map[uint16]struct{} { ++ s.mtx.Lock() ++ defer s.mtx.Unlock() ++ seenSet, exists := s.set[txKey] ++ if !exists { ++ return nil ++ } ++ // make a copy of the struct to avoid concurrency issues ++ peers := make(map[uint16]struct{}, len(seenSet.peers)) ++ for peer := range seenSet.peers { ++ peers[peer] = struct{}{} ++ } ++ return peers ++} ++ ++// Len returns the amount of cached items. 
Mostly used for testing. ++func (s *SeenTxSet) Len() int { ++ s.mtx.Lock() ++ defer s.mtx.Unlock() ++ return len(s.set) ++} ++ ++func (s *SeenTxSet) Reset() { ++ s.mtx.Lock() ++ defer s.mtx.Unlock() ++ s.set = make(map[types.TxKey]timestampedPeerSet) ++} diff --git a/patches/mempool/cat/cache_test.go.patch b/patches/mempool/cat/cache_test.go.patch new file mode 100644 index 00000000000..7e56a8d39f1 --- /dev/null +++ b/patches/mempool/cat/cache_test.go.patch @@ -0,0 +1,157 @@ +diff --git a/mempool/cat/cache_test.go b/mempool/cat/cache_test.go +new file mode 100644 +index 000000000..6e47ef1d6 +--- /dev/null ++++ b/mempool/cat/cache_test.go +@@ -0,0 +1,151 @@ ++package cat ++ ++import ( ++ "fmt" ++ "math/rand" ++ "sync" ++ "testing" ++ "time" ++ ++ "github.com/stretchr/testify/require" ++ ++ "github.com/tendermint/tendermint/types" ++) ++ ++func TestSeenTxSet(t *testing.T) { ++ var ( ++ tx1Key = types.Tx("tx1").Key() ++ tx2Key = types.Tx("tx2").Key() ++ tx3Key = types.Tx("tx3").Key() ++ peer1 uint16 = 1 ++ peer2 uint16 = 2 ++ ) ++ ++ seenSet := NewSeenTxSet() ++ require.Nil(t, seenSet.Get(tx1Key)) ++ ++ seenSet.Add(tx1Key, peer1) ++ seenSet.Add(tx1Key, peer1) ++ require.Equal(t, 1, seenSet.Len()) ++ seenSet.Add(tx1Key, peer2) ++ peers := seenSet.Get(tx1Key) ++ require.NotNil(t, peers) ++ require.Equal(t, map[uint16]struct{}{peer1: {}, peer2: {}}, peers) ++ seenSet.Add(tx2Key, peer1) ++ seenSet.Add(tx3Key, peer1) ++ require.Equal(t, 3, seenSet.Len()) ++ seenSet.RemoveKey(tx2Key) ++ require.Equal(t, 2, seenSet.Len()) ++ require.Nil(t, seenSet.Get(tx2Key)) ++ require.True(t, seenSet.Has(tx3Key, peer1)) ++} ++ ++func TestLRUTxCacheRemove(t *testing.T) { ++ cache := NewLRUTxCache(100) ++ numTxs := 10 ++ ++ txs := make([][32]byte, numTxs) ++ for i := 0; i < numTxs; i++ { ++ // probability of collision is 2**-256 ++ txBytes := make([]byte, 32) ++ _, err := rand.Read(txBytes) ++ require.NoError(t, err) ++ ++ copy(txs[i][:], txBytes) ++ cache.Push(txs[i]) ++ ++ // make sure 
its added to both the linked list and the map ++ require.Equal(t, i+1, cache.list.Len()) ++ } ++ ++ for i := 0; i < numTxs; i++ { ++ cache.Remove(txs[i]) ++ // make sure its removed from both the map and the linked list ++ require.Equal(t, numTxs-(i+1), cache.list.Len()) ++ } ++} ++ ++func TestLRUTxCacheSize(t *testing.T) { ++ const size = 10 ++ cache := NewLRUTxCache(size) ++ ++ for i := 0; i < size*2; i++ { ++ tx := types.Tx([]byte(fmt.Sprintf("tx%d", i))) ++ cache.Push(tx.Key()) ++ require.Less(t, cache.list.Len(), size+1) ++ } ++} ++ ++func TestSeenTxSetConcurrency(t *testing.T) { ++ seenSet := NewSeenTxSet() ++ ++ const ( ++ concurrency = 10 ++ numTx = 100 ++ ) ++ ++ wg := sync.WaitGroup{} ++ for i := 0; i < concurrency; i++ { ++ wg.Add(1) ++ go func(peer uint16) { ++ defer wg.Done() ++ for i := 0; i < numTx; i++ { ++ tx := types.Tx([]byte(fmt.Sprintf("tx%d", i))) ++ seenSet.Add(tx.Key(), peer) ++ } ++ }(uint16(i % 2)) ++ } ++ time.Sleep(time.Millisecond) ++ for i := 0; i < concurrency; i++ { ++ wg.Add(1) ++ go func(peer uint16) { ++ defer wg.Done() ++ for i := 0; i < numTx; i++ { ++ tx := types.Tx([]byte(fmt.Sprintf("tx%d", i))) ++ seenSet.Has(tx.Key(), peer) ++ } ++ }(uint16(i % 2)) ++ } ++ time.Sleep(time.Millisecond) ++ for i := 0; i < concurrency; i++ { ++ wg.Add(1) ++ go func(peer uint16) { ++ defer wg.Done() ++ for i := numTx - 1; i >= 0; i-- { ++ tx := types.Tx([]byte(fmt.Sprintf("tx%d", i))) ++ seenSet.RemoveKey(tx.Key()) ++ } ++ }(uint16(i % 2)) ++ } ++ wg.Wait() ++} ++ ++func TestLRUTxCacheConcurrency(t *testing.T) { ++ cache := NewLRUTxCache(100) ++ ++ const ( ++ concurrency = 10 ++ numTx = 100 ++ ) ++ ++ wg := sync.WaitGroup{} ++ for i := 0; i < concurrency; i++ { ++ wg.Add(1) ++ go func() { ++ defer wg.Done() ++ for i := 0; i < numTx; i++ { ++ tx := types.Tx([]byte(fmt.Sprintf("tx%d", i))) ++ cache.Push(tx.Key()) ++ } ++ for i := 0; i < numTx; i++ { ++ tx := types.Tx([]byte(fmt.Sprintf("tx%d", i))) ++ cache.Has(tx.Key()) ++ } ++ for i := numTx - 
1; i >= 0; i-- { ++ tx := types.Tx([]byte(fmt.Sprintf("tx%d", i))) ++ cache.Remove(tx.Key()) ++ } ++ }() ++ } ++ wg.Wait() ++} diff --git a/patches/mempool/cat/peers.go.patch b/patches/mempool/cat/peers.go.patch new file mode 100644 index 00000000000..ed98621b7e5 --- /dev/null +++ b/patches/mempool/cat/peers.go.patch @@ -0,0 +1,121 @@ +diff --git a/mempool/cat/peers.go b/mempool/cat/peers.go +new file mode 100644 +index 000000000..86a6f4c01 +--- /dev/null ++++ b/mempool/cat/peers.go +@@ -0,0 +1,115 @@ ++package cat ++ ++import ( ++ "fmt" ++ ++ tmsync "github.com/tendermint/tendermint/libs/sync" ++ "github.com/tendermint/tendermint/mempool" ++ "github.com/tendermint/tendermint/p2p" ++) ++ ++const firstPeerID = mempool.UnknownPeerID + 1 ++ ++// mempoolIDs is a thread-safe map of peer IDs to shorter uint16 IDs used by the Reactor for tracking peer ++// messages and peer state such as what transactions peers have seen ++type mempoolIDs struct { ++ mtx tmsync.RWMutex ++ peerMap map[p2p.ID]uint16 // quick lookup table for peer ID to short ID ++ nextID uint16 // assumes that a node will never have over 65536 active peers ++ activeIDs map[uint16]p2p.Peer // used to check if a given peerID key is used, the value doesn't matter ++} ++ ++func newMempoolIDs() *mempoolIDs { ++ return &mempoolIDs{ ++ peerMap: make(map[p2p.ID]uint16), ++ activeIDs: make(map[uint16]p2p.Peer), ++ nextID: firstPeerID, // reserve unknownPeerID(0) for mempoolReactor.BroadcastTx ++ } ++} ++ ++// ReserveForPeer searches for the next unused ID and assigns it to the ++// peer. ++func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) { ++ ids.mtx.Lock() ++ defer ids.mtx.Unlock() ++ ++ if _, ok := ids.peerMap[peer.ID()]; ok { ++ panic("duplicate peer added to mempool") ++ } ++ ++ curID := ids.nextPeerID() ++ ids.peerMap[peer.ID()] = curID ++ ids.activeIDs[curID] = peer ++} ++ ++// nextPeerID returns the next unused peer ID to use. ++// This assumes that ids's mutex is already locked. 
++func (ids *mempoolIDs) nextPeerID() uint16 { ++ if len(ids.activeIDs) == mempool.MaxActiveIDs { ++ panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", mempool.MaxActiveIDs)) ++ } ++ ++ _, idExists := ids.activeIDs[ids.nextID] ++ for idExists { ++ ids.nextID++ ++ _, idExists = ids.activeIDs[ids.nextID] ++ } ++ curID := ids.nextID ++ ids.nextID++ ++ return curID ++} ++ ++// Reclaim returns the ID reserved for the peer back to unused pool. ++func (ids *mempoolIDs) Reclaim(peerID p2p.ID) uint16 { ++ ids.mtx.Lock() ++ defer ids.mtx.Unlock() ++ ++ removedID, ok := ids.peerMap[peerID] ++ if ok { ++ delete(ids.activeIDs, removedID) ++ delete(ids.peerMap, peerID) ++ return removedID ++ } ++ return 0 ++} ++ ++// GetIDForPeer returns the shorthand ID reserved for the peer. ++func (ids *mempoolIDs) GetIDForPeer(peerID p2p.ID) uint16 { ++ ids.mtx.RLock() ++ defer ids.mtx.RUnlock() ++ ++ id, exists := ids.peerMap[peerID] ++ if !exists { ++ return 0 ++ } ++ return id ++} ++ ++// GetPeer returns the peer for the given shorthand ID. ++func (ids *mempoolIDs) GetPeer(id uint16) p2p.Peer { ++ ids.mtx.RLock() ++ defer ids.mtx.RUnlock() ++ ++ return ids.activeIDs[id] ++} ++ ++// GetAll returns all active peers. ++func (ids *mempoolIDs) GetAll() map[uint16]p2p.Peer { ++ ids.mtx.RLock() ++ defer ids.mtx.RUnlock() ++ ++ // make a copy of the map. ++ peers := make(map[uint16]p2p.Peer, len(ids.activeIDs)) ++ for id, peer := range ids.activeIDs { ++ peers[id] = peer ++ } ++ return peers ++} ++ ++// Len returns the number of active peers. 
++func (ids *mempoolIDs) Len() int { ++ ids.mtx.RLock() ++ defer ids.mtx.RUnlock() ++ ++ return len(ids.activeIDs) ++} diff --git a/patches/mempool/cat/peers_test.go.patch b/patches/mempool/cat/peers_test.go.patch new file mode 100644 index 00000000000..5099c482518 --- /dev/null +++ b/patches/mempool/cat/peers_test.go.patch @@ -0,0 +1,43 @@ +diff --git a/mempool/cat/peers_test.go b/mempool/cat/peers_test.go +new file mode 100644 +index 000000000..5edbb0666 +--- /dev/null ++++ b/mempool/cat/peers_test.go +@@ -0,0 +1,37 @@ ++package cat ++ ++import ( ++ "testing" ++ ++ "github.com/stretchr/testify/require" ++ ++ "github.com/tendermint/tendermint/p2p" ++ "github.com/tendermint/tendermint/p2p/mocks" ++) ++ ++func TestPeerLifecycle(t *testing.T) { ++ ids := newMempoolIDs() ++ peer1 := &mocks.Peer{} ++ peerID := p2p.ID("peer1") ++ peer1.On("ID").Return(peerID) ++ ++ require.Nil(t, ids.GetPeer(1)) ++ require.Zero(t, ids.GetIDForPeer(peerID)) ++ require.Len(t, ids.GetAll(), 0) ++ ids.ReserveForPeer(peer1) ++ ++ id := ids.GetIDForPeer(peerID) ++ require.Equal(t, uint16(1), id) ++ require.Equal(t, peer1, ids.GetPeer(id)) ++ require.Len(t, ids.GetAll(), 1) ++ ++ // duplicate peer should panic ++ require.Panics(t, func() { ++ ids.ReserveForPeer(peer1) ++ }) ++ ++ require.Equal(t, ids.Reclaim(peerID), id) ++ require.Nil(t, ids.GetPeer(id)) ++ require.Zero(t, ids.GetIDForPeer(peerID)) ++ require.Len(t, ids.GetAll(), 0) ++} diff --git a/patches/mempool/cat/pool.go.patch b/patches/mempool/cat/pool.go.patch new file mode 100644 index 00000000000..82e598783d6 --- /dev/null +++ b/patches/mempool/cat/pool.go.patch @@ -0,0 +1,775 @@ +diff --git a/mempool/cat/pool.go b/mempool/cat/pool.go +new file mode 100644 +index 000000000..cc9d47933 +--- /dev/null ++++ b/mempool/cat/pool.go +@@ -0,0 +1,769 @@ ++package cat ++ ++import ( ++ "errors" ++ "fmt" ++ "sort" ++ "sync" ++ "time" ++ ++ abci "github.com/tendermint/tendermint/abci/types" ++ "github.com/tendermint/tendermint/config" ++ 
"github.com/tendermint/tendermint/libs/log" ++ "github.com/tendermint/tendermint/mempool" ++ "github.com/tendermint/tendermint/proxy" ++ "github.com/tendermint/tendermint/types" ++) ++ ++// enforce compile-time satisfaction of the Mempool interface ++var _ mempool.Mempool = (*TxPool)(nil) ++ ++var ( ++ ErrTxInMempool = errors.New("tx already exists in mempool") ++ ErrTxAlreadyRejected = errors.New("tx was previously rejected") ++) ++ ++// TxPoolOption sets an optional parameter on the TxPool. ++type TxPoolOption func(*TxPool) ++ ++// TxPool implemements the Mempool interface and allows the application to ++// set priority values on transactions in the CheckTx response. When selecting ++// transactions to include in a block, higher-priority transactions are chosen ++// first. When evicting transactions from the mempool for size constraints, ++// lower-priority transactions are evicted first. Transactions themselves are ++// unordered (A map is used). They can be broadcast in an order different from ++// the order to which transactions are entered. There is no guarantee when CheckTx ++// passes that a transaction has been successfully broadcast to any of its peers. ++// ++// A TTL can be set to remove transactions after a period of time or a number ++// of heights. ++// ++// A cache of rejectedTxs can be set in the mempool config. Transactions that ++// are rejected because of `CheckTx` or other validity checks will be instantly ++// rejected if they are seen again. Committed transactions are also added to ++// this cache. 
This serves somewhat as replay protection but applications should ++// implement something more comprehensive ++type TxPool struct { ++ // Immutable fields ++ logger log.Logger ++ config *config.MempoolConfig ++ proxyAppConn proxy.AppConnMempool ++ metrics *mempool.Metrics ++ ++ // these values are modified once per height ++ mtx sync.Mutex ++ notifiedTxsAvailable bool ++ txsAvailable chan struct{} // one value sent per height when mempool is not empty ++ preCheckFn mempool.PreCheckFunc ++ postCheckFn mempool.PostCheckFunc ++ height int64 // the latest height passed to Update ++ lastPurgeTime time.Time // the last time we attempted to purge transactions via the TTL ++ ++ // Thread-safe cache of rejected transactions for quick look-up ++ rejectedTxCache *LRUTxCache ++ // Thread-safe cache of evicted transactions for quick look-up ++ evictedTxCache *LRUTxCache ++ // Thread-safe list of transactions peers have seen that we have not yet seen ++ seenByPeersSet *SeenTxSet ++ ++ // Store of wrapped transactions ++ store *store ++ ++ // broadcastCh is an unbuffered channel of new transactions that need to ++ // be broadcasted to peers. Only populated if `broadcast` in the config is enabled ++ broadcastCh chan *wrappedTx ++ broadcastMtx sync.Mutex ++ txsToBeBroadcast []types.TxKey ++} ++ ++// NewTxPool constructs a new, empty content addressable txpool at the specified ++// initial height and using the given config and options. 
++func NewTxPool( ++ logger log.Logger, ++ cfg *config.MempoolConfig, ++ proxyAppConn proxy.AppConnMempool, ++ height int64, ++ options ...TxPoolOption, ++) *TxPool { ++ txmp := &TxPool{ ++ logger: logger, ++ config: cfg, ++ proxyAppConn: proxyAppConn, ++ metrics: mempool.NopMetrics(), ++ rejectedTxCache: NewLRUTxCache(cfg.CacheSize), ++ evictedTxCache: NewLRUTxCache(cfg.CacheSize / 5), ++ seenByPeersSet: NewSeenTxSet(), ++ height: height, ++ preCheckFn: func(_ types.Tx) error { return nil }, ++ postCheckFn: func(_ types.Tx, _ *abci.ResponseCheckTx) error { return nil }, ++ store: newStore(), ++ broadcastCh: make(chan *wrappedTx), ++ txsToBeBroadcast: make([]types.TxKey, 0), ++ } ++ ++ for _, opt := range options { ++ opt(txmp) ++ } ++ ++ return txmp ++} ++ ++// WithPreCheck sets a filter for the mempool to reject a transaction if f(tx) ++// returns an error. This is executed before CheckTx. It only applies to the ++// first created block. After that, Update() overwrites the existing value. ++func WithPreCheck(f mempool.PreCheckFunc) TxPoolOption { ++ return func(txmp *TxPool) { txmp.preCheckFn = f } ++} ++ ++// WithPostCheck sets a filter for the mempool to reject a transaction if ++// f(tx, resp) returns an error. This is executed after CheckTx. It only applies ++// to the first created block. After that, Update overwrites the existing value. ++func WithPostCheck(f mempool.PostCheckFunc) TxPoolOption { ++ return func(txmp *TxPool) { txmp.postCheckFn = f } ++} ++ ++// WithMetrics sets the mempool's metrics collector. ++func WithMetrics(metrics *mempool.Metrics) TxPoolOption { ++ return func(txmp *TxPool) { txmp.metrics = metrics } ++} ++ ++// Lock locks the mempool, no new transactions can be processed ++func (txmp *TxPool) Lock() { ++ txmp.mtx.Lock() ++} ++ ++// Unlock unlocks the mempool ++func (txmp *TxPool) Unlock() { ++ txmp.mtx.Unlock() ++} ++ ++// Size returns the number of valid transactions in the mempool. It is ++// thread-safe. 
++func (txmp *TxPool) Size() int { return txmp.store.size() } ++ ++// SizeBytes returns the total sum in bytes of all the valid transactions in the ++// mempool. It is thread-safe. ++func (txmp *TxPool) SizeBytes() int64 { return txmp.store.totalBytes() } ++ ++// FlushAppConn executes FlushSync on the mempool's proxyAppConn. ++// ++// The caller must hold an exclusive mempool lock (by calling txmp.Lock) before ++// calling FlushAppConn. ++func (txmp *TxPool) FlushAppConn() error { ++ return txmp.proxyAppConn.FlushSync() ++} ++ ++// EnableTxsAvailable enables the mempool to trigger events when transactions ++// are available on a block by block basis. ++func (txmp *TxPool) EnableTxsAvailable() { ++ txmp.txsAvailable = make(chan struct{}, 1) ++} ++ ++// TxsAvailable returns a channel which fires once for every height, and only ++// when transactions are available in the mempool. It is thread-safe. ++func (txmp *TxPool) TxsAvailable() <-chan struct{} { return txmp.txsAvailable } ++ ++// Height returns the latest height that the mempool is at ++func (txmp *TxPool) Height() int64 { ++ txmp.mtx.Lock() ++ defer txmp.mtx.Unlock() ++ return txmp.height ++} ++ ++// Has returns true if the transaction is currently in the mempool ++func (txmp *TxPool) Has(txKey types.TxKey) bool { ++ return txmp.store.has(txKey) ++} ++ ++// Get retrieves a transaction based on the key. ++// Deprecated: use GetTxByKey instead. ++func (txmp *TxPool) Get(txKey types.TxKey) (types.Tx, bool) { ++ return txmp.GetTxByKey(txKey) ++} ++ ++// GetTxByKey retrieves a transaction based on the key. It returns a bool ++// indicating whether transaction was found in the cache. 
++func (txmp *TxPool) GetTxByKey(txKey types.TxKey) (types.Tx, bool) { ++ wtx := txmp.store.get(txKey) ++ if wtx != nil { ++ return wtx.tx, true ++ } ++ return types.Tx{}, false ++} ++ ++// WasRecentlyEvicted returns a bool indicating whether the transaction with ++// the specified key was recently evicted and is currently within the cache. ++func (txmp *TxPool) WasRecentlyEvicted(txKey types.TxKey) bool { ++ return txmp.evictedTxCache.Has(txKey) ++} ++ ++// IsRejectedTx returns true if the transaction was recently rejected and is ++// currently within the cache ++func (txmp *TxPool) IsRejectedTx(txKey types.TxKey) bool { ++ return txmp.rejectedTxCache.Has(txKey) ++} ++ ++// CheckToPurgeExpiredTxs checks if there has been adequate time since the last time ++// the txpool looped through all transactions and if so, performs a purge of any transaction ++// that has expired according to the TTLDuration. This is thread safe. ++func (txmp *TxPool) CheckToPurgeExpiredTxs() { ++ txmp.mtx.Lock() ++ defer txmp.mtx.Unlock() ++ if txmp.config.TTLDuration > 0 && time.Since(txmp.lastPurgeTime) > txmp.config.TTLDuration { ++ expirationAge := time.Now().Add(-txmp.config.TTLDuration) ++ // A height of 0 means no transactions will be removed because of height ++ // (in other words, no transaction has a height less than 0) ++ purgedTxs, numExpired := txmp.store.purgeExpiredTxs(0, expirationAge) ++ // Add the purged transactions to the evicted cache ++ for _, tx := range purgedTxs { ++ txmp.evictedTxCache.Push(tx.key) ++ } ++ txmp.metrics.EvictedTxs.Add(float64(numExpired)) ++ txmp.lastPurgeTime = time.Now() ++ } ++} ++ ++// CheckTx adds the given transaction to the mempool if it fits and passes the ++// application's ABCI CheckTx method. This should be viewed as the entry method for new transactions ++// into the network. 
In practice this happens via an RPC endpoint ++func (txmp *TxPool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo mempool.TxInfo) error { ++ // Reject transactions in excess of the configured maximum transaction size. ++ if len(tx) > txmp.config.MaxTxBytes { ++ return mempool.ErrTxTooLarge{Max: txmp.config.MaxTxBytes, Actual: len(tx)} ++ } ++ ++ // This is a new transaction that we haven't seen before. Verify it against the app and attempt ++ // to add it to the transaction pool. ++ key := tx.Key() ++ rsp, err := txmp.TryAddNewTx(tx, key, txInfo) ++ if err != nil { ++ return err ++ } ++ defer func() { ++ // call the callback if it is set ++ if cb != nil { ++ cb(&abci.Response{Value: &abci.Response_CheckTx{CheckTx: rsp}}) ++ } ++ }() ++ ++ // push to the broadcast queue that a new transaction is ready ++ txmp.markToBeBroadcast(key) ++ return nil ++} ++ ++// next is used by the reactor to get the next transaction to broadcast ++// to all other peers. ++func (txmp *TxPool) next() <-chan *wrappedTx { ++ txmp.broadcastMtx.Lock() ++ defer txmp.broadcastMtx.Unlock() ++ for len(txmp.txsToBeBroadcast) != 0 { ++ ch := make(chan *wrappedTx, 1) ++ key := txmp.txsToBeBroadcast[0] ++ txmp.txsToBeBroadcast = txmp.txsToBeBroadcast[1:] ++ wtx := txmp.store.get(key) ++ if wtx == nil { ++ continue ++ } ++ ch <- wtx ++ return ch ++ } ++ ++ return txmp.broadcastCh ++} ++ ++// markToBeBroadcast marks a transaction to be broadcasted to peers. ++// This should never block so we use a map to create an unbounded queue ++// of transactions that need to be gossiped. 
++func (txmp *TxPool) markToBeBroadcast(key types.TxKey) { ++ if !txmp.config.Broadcast { ++ return ++ } ++ ++ wtx := txmp.store.get(key) ++ if wtx == nil { ++ return ++ } ++ ++ select { ++ case txmp.broadcastCh <- wtx: ++ default: ++ txmp.broadcastMtx.Lock() ++ defer txmp.broadcastMtx.Unlock() ++ txmp.txsToBeBroadcast = append(txmp.txsToBeBroadcast, key) ++ } ++} ++ ++// TryAddNewTx attempts to add a tx that has not already been seen before. It first marks it as seen ++// to avoid races with the same tx. It then calls `CheckTx` so that the application can validate it. ++// If it passes `CheckTx`, the new transaction is added to the mempool as long as it has ++// sufficient priority and space else if evicted it will return an error ++func (txmp *TxPool) TryAddNewTx(tx types.Tx, key types.TxKey, txInfo mempool.TxInfo) (*abci.ResponseCheckTx, error) { ++ // First check any of the caches to see if we can conclude early. We may have already seen and processed ++ // the transaction if: ++ // - We are connected to nodes running v0 or v1 which simply flood the network ++ // - If a client submits a transaction to multiple nodes (via RPC) ++ // - We send multiple requests and the first peer eventually responds after the second peer has already provided the tx ++ if txmp.IsRejectedTx(key) { ++ // The peer has sent us a transaction that we have previously marked as invalid. 
Since `CheckTx` can ++ // be non-deterministic, we don't punish the peer but instead just ignore the tx ++ return nil, ErrTxAlreadyRejected ++ } ++ ++ if txmp.Has(key) { ++ txmp.metrics.AlreadySeenTxs.Add(1) ++ // The peer has sent us a transaction that we have already seen ++ return nil, ErrTxInMempool ++ } ++ ++ // reserve the key ++ if !txmp.store.reserve(key) { ++ txmp.logger.Debug("mempool already attempting to verify and add transaction", "txKey", fmt.Sprintf("%X", key)) ++ txmp.PeerHasTx(txInfo.SenderID, key) ++ return nil, ErrTxInMempool ++ } ++ defer txmp.store.release(key) ++ ++ // If a precheck hook is defined, call it before invoking the application. ++ if err := txmp.preCheck(tx); err != nil { ++ txmp.metrics.FailedTxs.Add(1) ++ return nil, mempool.ErrPreCheck{Reason: err} ++ } ++ ++ // Early exit if the proxy connection has an error. ++ if err := txmp.proxyAppConn.Error(); err != nil { ++ return nil, err ++ } ++ ++ txmp.mtx.Lock() ++ defer txmp.mtx.Unlock() ++ ++ // Invoke an ABCI CheckTx for this transaction. ++ rsp, err := txmp.proxyAppConn.CheckTxSync(abci.RequestCheckTx{Tx: tx}) ++ if err != nil { ++ return rsp, err ++ } ++ if rsp.Code != abci.CodeTypeOK { ++ if txmp.config.KeepInvalidTxsInCache { ++ txmp.rejectedTxCache.Push(key) ++ } ++ txmp.metrics.FailedTxs.Add(1) ++ return rsp, fmt.Errorf("application rejected transaction with code %d (Log: %s)", rsp.Code, rsp.Log) ++ } ++ ++ // Create wrapped tx ++ wtx := newWrappedTx( ++ tx, key, txmp.height, rsp.GasWanted, rsp.Priority, rsp.Sender, ++ ) ++ ++ // Perform the post check ++ err = txmp.postCheck(wtx.tx, rsp) ++ if err != nil { ++ if txmp.config.KeepInvalidTxsInCache { ++ txmp.rejectedTxCache.Push(key) ++ } ++ txmp.metrics.FailedTxs.Add(1) ++ return rsp, fmt.Errorf("rejected bad transaction after post check: %w", err) ++ } ++ ++ // Now we consider the transaction to be valid. 
Once a transaction is valid, it ++ // can only become invalid if recheckTx is enabled and RecheckTx returns a non zero code ++ if err := txmp.addNewTransaction(wtx, rsp); err != nil { ++ return nil, err ++ } ++ return rsp, nil ++} ++ ++// RemoveTxByKey removes the transaction with the specified key from the ++// mempool. It adds it to the rejectedTxCache so it will not be added again ++func (txmp *TxPool) RemoveTxByKey(txKey types.TxKey) error { ++ txmp.removeTxByKey(txKey) ++ txmp.metrics.EvictedTxs.Add(1) ++ return nil ++} ++ ++func (txmp *TxPool) removeTxByKey(txKey types.TxKey) { ++ txmp.rejectedTxCache.Push(txKey) ++ _ = txmp.store.remove(txKey) ++ txmp.seenByPeersSet.RemoveKey(txKey) ++} ++ ++// Flush purges the contents of the mempool and the cache, leaving both empty. ++// The current height is not modified by this operation. ++func (txmp *TxPool) Flush() { ++ // Remove all the transactions in the list explicitly, so that the sizes ++ // and indexes get updated properly. ++ size := txmp.Size() ++ txmp.store.reset() ++ txmp.seenByPeersSet.Reset() ++ txmp.rejectedTxCache.Reset() ++ txmp.evictedTxCache.Reset() ++ txmp.metrics.EvictedTxs.Add(float64(size)) ++ txmp.broadcastMtx.Lock() ++ defer txmp.broadcastMtx.Unlock() ++ txmp.txsToBeBroadcast = make([]types.TxKey, 0) ++} ++ ++// PeerHasTx marks that the transaction has been seen by a peer. ++func (txmp *TxPool) PeerHasTx(peer uint16, txKey types.TxKey) { ++ txmp.logger.Debug("peer has tx", "peer", peer, "txKey", fmt.Sprintf("%X", txKey)) ++ txmp.seenByPeersSet.Add(txKey, peer) ++} ++ ++// ReapMaxBytesMaxGas returns a slice of valid transactions that fit within the ++// size and gas constraints. The results are ordered by nonincreasing priority, ++// with ties broken by increasing order of arrival. Reaping transactions does ++// not remove them from the mempool ++// ++// If maxBytes < 0, no limit is set on the total size in bytes. ++// If maxGas < 0, no limit is set on the total gas cost. 
++// ++// If the mempool is empty or has no transactions fitting within the given ++// constraints, the result will also be empty. ++func (txmp *TxPool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { ++ var totalGas, totalBytes int64 ++ ++ var keep []types.Tx ++ txmp.store.iterateOrderedTxs(func(w *wrappedTx) bool { ++ // N.B. When computing byte size, we need to include the overhead for ++ // encoding as protobuf to send to the application. This actually overestimates it ++ // as we add the proto overhead to each transaction ++ txBytes := types.ComputeProtoSizeForTxs([]types.Tx{w.tx}) ++ if (maxGas >= 0 && totalGas+w.gasWanted > maxGas) || (maxBytes >= 0 && totalBytes+txBytes > maxBytes) { ++ return true ++ } ++ totalBytes += txBytes ++ totalGas += w.gasWanted ++ keep = append(keep, w.tx) ++ return true ++ }) ++ return keep ++} ++ ++// ReapMaxTxs returns up to max transactions from the mempool. The results are ++// ordered by nonincreasing priority with ties broken by increasing order of ++// arrival. Reaping transactions does not remove them from the mempool. ++// ++// If max < 0, all transactions in the mempool are reaped. ++// ++// The result may have fewer than max elements (possibly zero) if the mempool ++// does not have that many transactions available. ++func (txmp *TxPool) ReapMaxTxs(max int) types.Txs { ++ var keep []types.Tx ++ ++ txmp.store.iterateOrderedTxs(func(w *wrappedTx) bool { ++ if max >= 0 && len(keep) >= max { ++ return false ++ } ++ keep = append(keep, w.tx) ++ return true ++ }) ++ return keep ++} ++ ++// Update removes all the given transactions from the mempool and the cache, ++// and updates the current block height. The blockTxs and deliverTxResponses ++// must have the same length with each response corresponding to the tx at the ++// same offset. ++// ++// If the configuration enables recheck, Update sends each remaining ++// transaction after removing blockTxs to the ABCI CheckTx method. 
Any ++// transactions marked as invalid during recheck are also removed. ++// ++// The caller must hold an exclusive mempool lock (by calling txmp.Lock) before ++// calling Update. ++func (txmp *TxPool) Update( ++ blockHeight int64, ++ blockTxs types.Txs, ++ deliverTxResponses []*abci.ResponseDeliverTx, ++ newPreFn mempool.PreCheckFunc, ++ newPostFn mempool.PostCheckFunc, ++) error { ++ // Safety check: Transactions and responses must match in number. ++ if len(blockTxs) != len(deliverTxResponses) { ++ panic(fmt.Sprintf("mempool: got %d transactions but %d DeliverTx responses", ++ len(blockTxs), len(deliverTxResponses))) ++ } ++ txmp.logger.Debug("updating mempool", "height", blockHeight, "txs", len(blockTxs)) ++ ++ txmp.height = blockHeight ++ txmp.notifiedTxsAvailable = false ++ ++ if newPreFn != nil { ++ txmp.preCheckFn = newPreFn ++ } ++ if newPostFn != nil { ++ txmp.postCheckFn = newPostFn ++ } ++ txmp.lastPurgeTime = time.Now() ++ ++ txmp.metrics.SuccessfulTxs.Add(float64(len(blockTxs))) ++ for _, tx := range blockTxs { ++ // Regardless of success, remove the transaction from the mempool. ++ txmp.removeTxByKey(tx.Key()) ++ } ++ ++ txmp.purgeExpiredTxs(blockHeight) ++ ++ // If there are any uncommitted transactions left in the mempool, we either ++ // initiate re-CheckTx per remaining transaction or notify that remaining ++ // transactions are left. ++ size := txmp.Size() ++ txmp.metrics.Size.Set(float64(size)) ++ txmp.metrics.SizeBytes.Set(float64(txmp.SizeBytes())) ++ if size > 0 { ++ if txmp.config.Recheck { ++ txmp.recheckTransactions() ++ } else { ++ txmp.notifyTxsAvailable() ++ } ++ } ++ return nil ++} ++ ++// addNewTransaction handles the ABCI CheckTx response for the first time a ++// transaction is added to the mempool. A recheck after a block is committed ++// goes to handleRecheckResult. ++// ++// If either the application rejected the transaction or a post-check hook is ++// defined and rejects the transaction, it is discarded. 
++// ++// Otherwise, if the mempool is full, check for lower-priority transactions ++// that can be evicted to make room for the new one. If no such transactions ++// exist, this transaction is logged and dropped; otherwise the selected ++// transactions are evicted. ++// ++// Finally, the new transaction is added and size stats updated. ++func (txmp *TxPool) addNewTransaction(wtx *wrappedTx, checkTxRes *abci.ResponseCheckTx) error { ++ // At this point the application has ruled the transaction valid, but the ++ // mempool might be full. If so, find the lowest-priority items with lower ++ // priority than the application assigned to this new one, and evict as many ++ // of them as necessary to make room for tx. If no such items exist, we ++ // discard tx. ++ if !txmp.canAddTx(wtx.size()) { ++ victims, victimBytes := txmp.store.getTxsBelowPriority(wtx.priority) ++ ++ // If there are no suitable eviction candidates, or the total size of ++ // those candidates is not enough to make room for the new transaction, ++ // drop the new one. ++ if len(victims) == 0 || victimBytes < wtx.size() { ++ txmp.metrics.EvictedTxs.Add(1) ++ txmp.evictedTxCache.Push(wtx.key) ++ checkTxRes.MempoolError = fmt.Sprintf("rejected valid incoming transaction; mempool is full (%X)", ++ wtx.key) ++ return fmt.Errorf("rejected valid incoming transaction; mempool is full (%X). Size: (%d:%d)", ++ wtx.key.String(), txmp.Size(), txmp.SizeBytes()) ++ } ++ ++ txmp.logger.Debug("evicting lower-priority transactions", ++ "new_tx", wtx.key.String(), ++ "new_priority", wtx.priority, ++ ) ++ ++ // Sort lowest priority items first so they will be evicted first. Break ++ // ties in favor of newer items (to maintain FIFO semantics in a group). 
++ sort.Slice(victims, func(i, j int) bool { ++ iw := victims[i] ++ jw := victims[j] ++ if iw.priority == jw.priority { ++ return iw.timestamp.After(jw.timestamp) ++ } ++ return iw.priority < jw.priority ++ }) ++ ++ // Evict as many of the victims as necessary to make room. ++ availableBytes := txmp.availableBytes() ++ for _, tx := range victims { ++ txmp.evictTx(tx) ++ ++ // We may not need to evict all the eligible transactions. Bail out ++ // early if we have made enough room. ++ availableBytes += tx.size() ++ if availableBytes >= wtx.size() { ++ break ++ } ++ } ++ } ++ ++ txmp.store.set(wtx) ++ ++ txmp.metrics.TxSizeBytes.Observe(float64(wtx.size())) ++ txmp.metrics.Size.Set(float64(txmp.Size())) ++ txmp.metrics.SizeBytes.Set(float64(txmp.SizeBytes())) ++ txmp.logger.Debug( ++ "inserted new valid transaction", ++ "priority", wtx.priority, ++ "tx", fmt.Sprintf("%X", wtx.key), ++ "height", wtx.height, ++ "num_txs", txmp.Size(), ++ ) ++ txmp.notifyTxsAvailable() ++ return nil ++} ++ ++func (txmp *TxPool) evictTx(wtx *wrappedTx) { ++ txmp.store.remove(wtx.key) ++ txmp.evictedTxCache.Push(wtx.key) ++ txmp.metrics.EvictedTxs.Add(1) ++ txmp.logger.Debug( ++ "evicted valid existing transaction; mempool full", ++ "old_tx", fmt.Sprintf("%X", wtx.key), ++ "old_priority", wtx.priority, ++ ) ++} ++ ++// handleRecheckResult handles the responses from ABCI CheckTx calls issued ++// during the recheck phase of a block Update. It removes any transactions ++// invalidated by the application. ++// ++// This method is NOT executed for the initial CheckTx on a new transaction; ++// that case is handled by addNewTransaction instead. ++func (txmp *TxPool) handleRecheckResult(wtx *wrappedTx, checkTxRes *abci.ResponseCheckTx) { ++ txmp.metrics.RecheckTimes.Add(1) ++ ++ // If a postcheck hook is defined, call it before checking the result. 
++ err := txmp.postCheck(wtx.tx, checkTxRes) ++ ++ if checkTxRes.Code == abci.CodeTypeOK && err == nil { ++ // Note that we do not update the transaction with any of the values returned in ++ // recheck tx ++ return // N.B. Size of mempool did not change ++ } ++ ++ txmp.logger.Debug( ++ "existing transaction no longer valid; failed re-CheckTx callback", ++ "priority", wtx.priority, ++ "tx", fmt.Sprintf("%X", wtx.key), ++ "err", err, ++ "code", checkTxRes.Code, ++ ) ++ txmp.store.remove(wtx.key) ++ if txmp.config.KeepInvalidTxsInCache { ++ txmp.rejectedTxCache.Push(wtx.key) ++ } ++ txmp.metrics.FailedTxs.Add(1) ++ txmp.metrics.Size.Set(float64(txmp.Size())) ++ txmp.metrics.SizeBytes.Set(float64(txmp.SizeBytes())) ++} ++ ++// recheckTransactions initiates re-CheckTx ABCI calls for all the transactions ++// currently in the mempool. It reports the number of recheck calls that were ++// successfully initiated. ++// ++// Precondition: The mempool is not empty. ++// The caller must hold txmp.mtx exclusively. ++func (txmp *TxPool) recheckTransactions() { ++ if txmp.Size() == 0 { ++ panic("mempool: cannot run recheck on an empty mempool") ++ } ++ txmp.logger.Debug( ++ "executing re-CheckTx for all remaining transactions", ++ "num_txs", txmp.Size(), ++ "height", txmp.height, ++ ) ++ ++ // Issue CheckTx calls for each remaining transaction, and when all the ++ // rechecks are complete signal watchers that transactions may be available. ++ txmp.store.iterateOrderedTxs(func(wtx *wrappedTx) bool { ++ // The response for this CheckTx is handled by the default recheckTxCallback. 
++ rsp, err := txmp.proxyAppConn.CheckTxSync(abci.RequestCheckTx{ ++ Tx: wtx.tx, ++ Type: abci.CheckTxType_Recheck, ++ }) ++ if err != nil { ++ txmp.logger.Error("failed to execute CheckTx during recheck", ++ "err", err, "key", fmt.Sprintf("%x", wtx.key)) ++ } else { ++ txmp.handleRecheckResult(wtx, rsp) ++ } ++ return true ++ }) ++ _ = txmp.proxyAppConn.FlushAsync() ++ ++ // When recheck is complete, trigger a notification for more transactions. ++ txmp.notifyTxsAvailable() ++} ++ ++// availableBytes returns the number of bytes available in the mempool. ++func (txmp *TxPool) availableBytes() int64 { ++ return txmp.config.MaxTxsBytes - txmp.SizeBytes() ++} ++ ++// canAddTx returns false if we cannot insert a transaction of the given size ++// into the mempool due to the configured constraints. Otherwise, true is ++// returned and the transaction can be inserted into the mempool. ++func (txmp *TxPool) canAddTx(size int64) bool { ++ numTxs := txmp.Size() ++ txBytes := txmp.SizeBytes() ++ ++ if numTxs > txmp.config.Size || size+txBytes > txmp.config.MaxTxsBytes { ++ return false ++ } ++ ++ return true ++} ++ ++// purgeExpiredTxs removes all transactions from the mempool that have exceeded ++// their respective height or time-based limits as of the given blockHeight. ++// Transactions removed by this operation are not removed from the rejectedTxCache. 
++func (txmp *TxPool) purgeExpiredTxs(blockHeight int64) { ++ if txmp.config.TTLNumBlocks == 0 && txmp.config.TTLDuration == 0 { ++ return // nothing to do ++ } ++ ++ expirationHeight := blockHeight - txmp.config.TTLNumBlocks ++ if txmp.config.TTLNumBlocks == 0 { ++ expirationHeight = 0 ++ } ++ ++ now := time.Now() ++ expirationAge := now.Add(-txmp.config.TTLDuration) ++ if txmp.config.TTLDuration == 0 { ++ expirationAge = time.Time{} ++ } ++ ++ purgedTxs, numExpired := txmp.store.purgeExpiredTxs(expirationHeight, expirationAge) ++ // Add the purged transactions to the evicted cache ++ for _, tx := range purgedTxs { ++ txmp.evictedTxCache.Push(tx.key) ++ } ++ txmp.metrics.ExpiredTxs.Add(float64(numExpired)) ++ ++ // purge old evicted and seen transactions ++ if txmp.config.TTLDuration == 0 { ++ // ensure that seenByPeersSet are eventually pruned ++ expirationAge = now.Add(-time.Hour) ++ } ++ txmp.seenByPeersSet.Prune(expirationAge) ++} ++ ++func (txmp *TxPool) notifyTxsAvailable() { ++ if txmp.Size() == 0 { ++ return // nothing to do ++ } ++ ++ if txmp.txsAvailable != nil && !txmp.notifiedTxsAvailable { ++ // channel cap is 1, so this will send once ++ txmp.notifiedTxsAvailable = true ++ ++ select { ++ case txmp.txsAvailable <- struct{}{}: ++ default: ++ } ++ } ++} ++ ++func (txmp *TxPool) preCheck(tx types.Tx) error { ++ txmp.mtx.Lock() ++ defer txmp.mtx.Unlock() ++ if txmp.preCheckFn != nil { ++ return txmp.preCheckFn(tx) ++ } ++ return nil ++} ++ ++func (txmp *TxPool) postCheck(tx types.Tx, res *abci.ResponseCheckTx) error { ++ if txmp.postCheckFn != nil { ++ return txmp.postCheckFn(tx, res) ++ } ++ return nil ++} diff --git a/patches/mempool/cat/pool_bench_test.go.patch b/patches/mempool/cat/pool_bench_test.go.patch new file mode 100644 index 00000000000..7712c4522c2 --- /dev/null +++ b/patches/mempool/cat/pool_bench_test.go.patch @@ -0,0 +1,38 @@ +diff --git a/mempool/cat/pool_bench_test.go b/mempool/cat/pool_bench_test.go +new file mode 100644 +index 
000000000..9fd687d34 +--- /dev/null ++++ b/mempool/cat/pool_bench_test.go +@@ -0,0 +1,32 @@ ++package cat ++ ++import ( ++ "fmt" ++ "math/rand" ++ "testing" ++ "time" ++ ++ "github.com/stretchr/testify/require" ++ "github.com/tendermint/tendermint/mempool" ++) ++ ++func BenchmarkTxPool_CheckTx(b *testing.B) { ++ txmp := setup(b, 10000) ++ txmp.config.Size = b.N ++ rng := rand.New(rand.NewSource(time.Now().UnixNano())) ++ ++ b.ResetTimer() ++ ++ for n := 0; n < b.N; n++ { ++ b.StopTimer() ++ prefix := make([]byte, 20) ++ _, err := rng.Read(prefix) ++ require.NoError(b, err) ++ ++ priority := int64(rng.Intn(9999-1000) + 1000) ++ tx := []byte(fmt.Sprintf("sender%d=%X=%d", n, prefix, priority)) ++ b.StartTimer() ++ ++ require.NoError(b, txmp.CheckTx(tx, nil, mempool.TxInfo{})) ++ } ++} diff --git a/patches/mempool/cat/pool_test.go.patch b/patches/mempool/cat/pool_test.go.patch new file mode 100644 index 00000000000..415cca85b4c --- /dev/null +++ b/patches/mempool/cat/pool_test.go.patch @@ -0,0 +1,790 @@ +diff --git a/mempool/cat/pool_test.go b/mempool/cat/pool_test.go +new file mode 100644 +index 000000000..d37aad90d +--- /dev/null ++++ b/mempool/cat/pool_test.go +@@ -0,0 +1,784 @@ ++package cat ++ ++import ( ++ "bytes" ++ "context" ++ "errors" ++ "fmt" ++ "math/rand" ++ "os" ++ "sort" ++ "strconv" ++ "sync" ++ "testing" ++ "time" ++ ++ "github.com/stretchr/testify/assert" ++ "github.com/stretchr/testify/require" ++ ++ "github.com/tendermint/tendermint/abci/example/code" ++ "github.com/tendermint/tendermint/abci/example/kvstore" ++ abci "github.com/tendermint/tendermint/abci/types" ++ "github.com/tendermint/tendermint/config" ++ "github.com/tendermint/tendermint/libs/log" ++ "github.com/tendermint/tendermint/mempool" ++ "github.com/tendermint/tendermint/pkg/consts" ++ tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ++ "github.com/tendermint/tendermint/proxy" ++ "github.com/tendermint/tendermint/types" ++) ++ ++// application extends the KV store 
application by overriding CheckTx to provide ++// transaction priority based on the value in the key/value pair. ++type application struct { ++ *kvstore.Application ++} ++ ++type testTx struct { ++ tx types.Tx ++ priority int64 ++} ++ ++func newTx(i int, peerID uint16, msg []byte, priority int64) []byte { ++ return []byte(fmt.Sprintf("sender-%03d-%d=%X=%d", i, peerID, msg, priority)) ++} ++ ++func newDefaultTx(msg string) types.Tx { ++ return types.Tx(newTx(0, 0, []byte(msg), 1)) ++} ++ ++func (app *application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx { ++ var ( ++ priority int64 ++ sender string ++ ) ++ ++ // infer the priority from the raw transaction value (sender=key=value) ++ parts := bytes.Split(req.Tx, []byte("=")) ++ if len(parts) == 3 { ++ v, err := strconv.ParseInt(string(parts[2]), 10, 64) ++ if err != nil { ++ return abci.ResponseCheckTx{ ++ Priority: priority, ++ Code: 100, ++ GasWanted: 1, ++ } ++ } ++ ++ priority = v ++ sender = string(parts[0]) ++ } else { ++ return abci.ResponseCheckTx{ ++ Priority: priority, ++ Code: 101, ++ GasWanted: 1, ++ } ++ } ++ ++ return abci.ResponseCheckTx{ ++ Priority: priority, ++ Sender: sender, ++ Code: code.CodeTypeOK, ++ GasWanted: 1, ++ } ++} ++ ++func setup(t testing.TB, cacheSize int, options ...TxPoolOption) *TxPool { ++ t.Helper() ++ ++ app := &application{kvstore.NewApplication()} ++ cc := proxy.NewLocalClientCreator(app) ++ ++ cfg := config.TestMempoolConfig() ++ cfg.CacheSize = cacheSize ++ ++ appConnMem, err := cc.NewABCIClient() ++ require.NoError(t, err) ++ require.NoError(t, appConnMem.Start()) ++ ++ t.Cleanup(func() { ++ os.RemoveAll(cfg.RootDir) ++ require.NoError(t, appConnMem.Stop()) ++ }) ++ ++ return NewTxPool(log.TestingLogger().With("test", t.Name()), cfg, appConnMem, 1, options...) ++} ++ ++// mustCheckTx invokes txmp.CheckTx for the given transaction and waits until ++// its callback has finished executing. It fails t if CheckTx fails. 
++func mustCheckTx(t *testing.T, txmp *TxPool, spec string) { ++ require.NoError(t, txmp.CheckTx([]byte(spec), nil, mempool.TxInfo{})) ++} ++ ++func checkTxs(t *testing.T, txmp *TxPool, numTxs int, peerID uint16) []testTx { ++ txs := make([]testTx, numTxs) ++ txInfo := mempool.TxInfo{SenderID: peerID} ++ ++ rng := rand.New(rand.NewSource(time.Now().UnixNano())) ++ ++ current := txmp.Size() ++ for i := 0; i < numTxs; i++ { ++ prefix := make([]byte, 20) ++ _, err := rng.Read(prefix) ++ require.NoError(t, err) ++ ++ priority := int64(rng.Intn(9999-1000) + 1000) ++ ++ txs[i] = testTx{ ++ tx: newTx(i, peerID, prefix, priority), ++ priority: priority, ++ } ++ require.NoError(t, txmp.CheckTx(txs[i].tx, nil, txInfo)) ++ // assert that none of them get silently evicted ++ require.Equal(t, current+i+1, txmp.Size()) ++ } ++ ++ return txs ++} ++ ++func TestTxPool_TxsAvailable(t *testing.T) { ++ txmp := setup(t, 0) ++ txmp.EnableTxsAvailable() ++ ++ ensureNoTxFire := func() { ++ timer := time.NewTimer(500 * time.Millisecond) ++ select { ++ case <-txmp.TxsAvailable(): ++ require.Fail(t, "unexpected transactions event") ++ case <-timer.C: ++ } ++ } ++ ++ ensureTxFire := func() { ++ timer := time.NewTimer(500 * time.Millisecond) ++ select { ++ case <-txmp.TxsAvailable(): ++ case <-timer.C: ++ require.Fail(t, "expected transactions event") ++ } ++ } ++ ++ // ensure no event as we have not executed any transactions yet ++ ensureNoTxFire() ++ ++ // Execute CheckTx for some transactions and ensure TxsAvailable only fires ++ // once. 
++ txs := checkTxs(t, txmp, 100, 0) ++ ensureTxFire() ++ ensureNoTxFire() ++ ++ rawTxs := make([]types.Tx, len(txs)) ++ for i, tx := range txs { ++ rawTxs[i] = tx.tx ++ } ++ ++ responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50])) ++ for i := 0; i < len(responses); i++ { ++ responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK} ++ } ++ ++ require.Equal(t, 100, txmp.Size()) ++ ++ // commit half the transactions and ensure we fire an event ++ txmp.Lock() ++ require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil)) ++ txmp.Unlock() ++ ensureTxFire() ++ ensureNoTxFire() ++ ++ // Execute CheckTx for more transactions and ensure we do not fire another ++ // event as we're still on the same height (1). ++ _ = checkTxs(t, txmp, 100, 0) ++ ensureNoTxFire() ++} ++ ++func TestTxPool_Size(t *testing.T) { ++ txmp := setup(t, 0) ++ txs := checkTxs(t, txmp, 100, 0) ++ require.Equal(t, len(txs), txmp.Size()) ++ require.Equal(t, int64(5800), txmp.SizeBytes()) ++ ++ rawTxs := make([]types.Tx, len(txs)) ++ for i, tx := range txs { ++ rawTxs[i] = tx.tx ++ } ++ ++ responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50])) ++ for i := 0; i < len(responses); i++ { ++ responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK} ++ } ++ ++ txmp.Lock() ++ require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil)) ++ txmp.Unlock() ++ ++ require.Equal(t, len(rawTxs)/2, txmp.Size()) ++ require.Equal(t, int64(2900), txmp.SizeBytes()) ++} ++ ++func TestTxPool_Eviction(t *testing.T) { ++ txmp := setup(t, 1000) ++ txmp.config.Size = 5 ++ txmp.config.MaxTxsBytes = 60 ++ txExists := func(spec string) bool { ++ return txmp.Has(types.Tx(spec).Key()) ++ } ++ ++ // A transaction bigger than the mempool should be rejected even when there ++ // are slots available. 
++ err := txmp.CheckTx(types.Tx("big=0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef=1"), nil, mempool.TxInfo{}) ++ require.Error(t, err) ++ require.Contains(t, err.Error(), "mempool is full") ++ require.Equal(t, 0, txmp.Size()) ++ ++ // Nearly-fill the mempool with a low-priority transaction, to show that it ++ // is evicted even when slots are available for a higher-priority tx. ++ const bigTx = "big=0123456789abcdef0123456789abcdef0123456789abcdef01234=2" ++ mustCheckTx(t, txmp, bigTx) ++ require.Equal(t, 1, txmp.Size()) // bigTx is the only element ++ require.True(t, txExists(bigTx)) ++ require.Equal(t, int64(len(bigTx)), txmp.SizeBytes()) ++ ++ // The next transaction should evict bigTx, because it is higher priority ++ // but does not fit on size. ++ mustCheckTx(t, txmp, "key1=0000=25") ++ require.True(t, txExists("key1=0000=25")) ++ require.False(t, txExists(bigTx)) ++ require.True(t, txmp.WasRecentlyEvicted(types.Tx(bigTx).Key())) ++ require.Equal(t, int64(len("key1=0000=25")), txmp.SizeBytes()) ++ ++ // Now fill up the rest of the slots with other transactions. ++ mustCheckTx(t, txmp, "key2=0001=5") ++ mustCheckTx(t, txmp, "key3=0002=10") ++ mustCheckTx(t, txmp, "key4=0003=3") ++ mustCheckTx(t, txmp, "key5=0004=3") ++ ++ // A new transaction with low priority should be discarded. ++ err = txmp.CheckTx(types.Tx("key6=0005=1"), nil, mempool.TxInfo{}) ++ require.Error(t, err) ++ require.Contains(t, err.Error(), "mempool is full") ++ require.False(t, txExists("key6=0005=1")) ++ require.True(t, txmp.WasRecentlyEvicted(types.Tx("key6=0005=1").Key())) ++ ++ // A new transaction with higher priority should evict key5, which is the ++ // newest of the two transactions with lowest priority. 
++ mustCheckTx(t, txmp, "key7=0006=7") ++ require.True(t, txExists("key7=0006=7")) // new transaction added ++ require.False(t, txExists("key5=0004=3")) // newest low-priority tx evicted ++ require.True(t, txmp.WasRecentlyEvicted(types.Tx("key5=0004=3").Key())) ++ require.True(t, txExists("key4=0003=3")) // older low-priority tx retained ++ ++ // Another new transaction evicts the other low-priority element. ++ mustCheckTx(t, txmp, "key8=0007=20") ++ require.True(t, txExists("key8=0007=20")) ++ require.False(t, txExists("key4=0003=3")) ++ require.True(t, txmp.WasRecentlyEvicted(types.Tx("key4=0003=3").Key())) ++ ++ // Now the lowest-priority tx is 5, so that should be the next to go. ++ mustCheckTx(t, txmp, "key9=0008=9") ++ require.True(t, txExists("key9=0008=9")) ++ require.False(t, txExists("key2=0001=5")) ++ require.True(t, txmp.WasRecentlyEvicted(types.Tx("key2=0001=5").Key())) ++ ++ // Add a transaction that requires eviction of multiple lower-priority ++ // entries, in order to fit the size of the element. 
++ mustCheckTx(t, txmp, "key10=0123456789abcdef=11") // evict 10, 9, 7; keep 25, 20, 11 ++ require.True(t, txExists("key1=0000=25")) ++ require.True(t, txExists("key8=0007=20")) ++ require.True(t, txExists("key10=0123456789abcdef=11")) ++ require.False(t, txExists("key3=0002=10")) ++ require.True(t, txmp.WasRecentlyEvicted(types.Tx("key3=0002=10").Key())) ++ require.False(t, txExists("key9=0008=9")) ++ require.True(t, txmp.WasRecentlyEvicted(types.Tx("key9=0008=9").Key())) ++ require.False(t, txExists("key7=0006=7")) ++ require.True(t, txmp.WasRecentlyEvicted(types.Tx("key7=0006=7").Key())) ++ ++ // Free up some space so we can add back previously evicted txs ++ err = txmp.Update(1, types.Txs{types.Tx("key10=0123456789abcdef=11")}, []*abci.ResponseDeliverTx{{Code: abci.CodeTypeOK}}, nil, nil) ++ require.NoError(t, err) ++ require.False(t, txExists("key10=0123456789abcdef=11")) ++ mustCheckTx(t, txmp, "key3=0002=10") ++ require.True(t, txExists("key3=0002=10")) ++ ++ // remove a high priority tx and check if there is ++ // space for the previously evicted tx ++ require.NoError(t, txmp.RemoveTxByKey(types.Tx("key8=0007=20").Key())) ++ require.False(t, txExists("key8=0007=20")) ++ require.False(t, txmp.WasRecentlyEvicted(types.Tx("key8=0007=20").Key())) ++} ++ ++func TestTxPool_Flush(t *testing.T) { ++ txmp := setup(t, 0) ++ txs := checkTxs(t, txmp, 100, 0) ++ require.Equal(t, len(txs), txmp.Size()) ++ require.Equal(t, int64(5800), txmp.SizeBytes()) ++ ++ rawTxs := make([]types.Tx, len(txs)) ++ for i, tx := range txs { ++ rawTxs[i] = tx.tx ++ } ++ ++ responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50])) ++ for i := 0; i < len(responses); i++ { ++ responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK} ++ } ++ ++ txmp.Lock() ++ require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil)) ++ txmp.Unlock() ++ ++ txmp.Flush() ++ require.Zero(t, txmp.Size()) ++ require.Equal(t, int64(0), txmp.SizeBytes()) ++} ++ ++func TestTxPool_ReapMaxBytesMaxGas(t 
*testing.T) { ++ txmp := setup(t, 0) ++ tTxs := checkTxs(t, txmp, 100, 0) // all txs request 1 gas unit ++ require.Equal(t, len(tTxs), txmp.Size()) ++ require.Equal(t, int64(5800), txmp.SizeBytes()) ++ ++ txMap := make(map[types.TxKey]testTx) ++ priorities := make([]int64, len(tTxs)) ++ for i, tTx := range tTxs { ++ txMap[tTx.tx.Key()] = tTx ++ priorities[i] = tTx.priority ++ } ++ ++ sort.Slice(priorities, func(i, j int) bool { ++ // sort by priority, i.e. decreasing order ++ return priorities[i] > priorities[j] ++ }) ++ ++ ensurePrioritized := func(reapedTxs types.Txs) { ++ reapedPriorities := make([]int64, len(reapedTxs)) ++ for i, rTx := range reapedTxs { ++ reapedPriorities[i] = txMap[rTx.Key()].priority ++ } ++ ++ require.Equal(t, priorities[:len(reapedPriorities)], reapedPriorities) ++ } ++ ++ // reap by gas capacity only ++ reapedTxs := txmp.ReapMaxBytesMaxGas(-1, 50) ++ ensurePrioritized(reapedTxs) ++ require.Equal(t, len(tTxs), txmp.Size()) ++ require.Equal(t, int64(5800), txmp.SizeBytes()) ++ require.Len(t, reapedTxs, 50) ++ ++ // reap by transaction bytes only ++ reapedTxs = txmp.ReapMaxBytesMaxGas(1200, -1) ++ ensurePrioritized(reapedTxs) ++ require.Equal(t, len(tTxs), txmp.Size()) ++ require.Equal(t, int64(5800), txmp.SizeBytes()) ++ // each tx is 57 bytes, 20 * 57 = 1140 + overhead for proto encoding ++ require.Equal(t, len(reapedTxs), 20) ++ ++ // Reap by both transaction bytes and gas, where the size yields 31 reaped ++ // transactions and the gas limit reaps 25 transactions. 
++ reapedTxs = txmp.ReapMaxBytesMaxGas(2000, 25) ++ ensurePrioritized(reapedTxs) ++ require.Equal(t, len(tTxs), txmp.Size()) ++ require.Equal(t, int64(5800), txmp.SizeBytes()) ++ require.Len(t, reapedTxs, 25) ++} ++ ++func TestTxMempoolTxLargerThanMaxBytes(t *testing.T) { ++ rng := rand.New(rand.NewSource(time.Now().UnixNano())) ++ txmp := setup(t, 0) ++ bigPrefix := make([]byte, 100) ++ _, err := rng.Read(bigPrefix) ++ require.NoError(t, err) ++ // large high priority tx ++ bigTx := []byte(fmt.Sprintf("sender-1-1=%X=2", bigPrefix)) ++ smallPrefix := make([]byte, 20) ++ _, err = rng.Read(smallPrefix) ++ require.NoError(t, err) ++ // smaller low priority tx with different sender ++ smallTx := []byte(fmt.Sprintf("sender-2-1=%X=1", smallPrefix)) ++ require.NoError(t, txmp.CheckTx(bigTx, nil, mempool.TxInfo{SenderID: 1})) ++ require.NoError(t, txmp.CheckTx(smallTx, nil, mempool.TxInfo{SenderID: 1})) ++ ++ // reap by max bytes less than the large tx ++ reapedTxs := txmp.ReapMaxBytesMaxGas(100, -1) ++ require.Len(t, reapedTxs, 1) ++ require.Equal(t, types.Tx(smallTx), reapedTxs[0]) ++} ++ ++func TestTxPool_ReapMaxTxs(t *testing.T) { ++ txmp := setup(t, 0) ++ txs := checkTxs(t, txmp, 100, 0) ++ require.Equal(t, len(txs), txmp.Size()) ++ require.Equal(t, int64(5800), txmp.SizeBytes()) ++ ++ txMap := make(map[types.TxKey]int64) ++ for _, tx := range txs { ++ txMap[tx.tx.Key()] = tx.priority ++ } ++ ++ ensurePrioritized := func(reapedTxs types.Txs) { ++ for i := 0; i < len(reapedTxs)-1; i++ { ++ currPriority := txMap[reapedTxs[i].Key()] ++ nextPriority := txMap[reapedTxs[i+1].Key()] ++ require.GreaterOrEqual(t, currPriority, nextPriority) ++ } ++ } ++ ++ // reap all transactions ++ reapedTxs := txmp.ReapMaxTxs(-1) ++ ensurePrioritized(reapedTxs) ++ require.Equal(t, len(txs), txmp.Size()) ++ require.Equal(t, int64(5800), txmp.SizeBytes()) ++ require.Len(t, reapedTxs, len(txs)) ++ ++ // reap a single transaction ++ reapedTxs = txmp.ReapMaxTxs(1) ++ ensurePrioritized(reapedTxs) 
++ require.Equal(t, len(txs), txmp.Size()) ++ require.Equal(t, int64(5800), txmp.SizeBytes()) ++ require.Len(t, reapedTxs, 1) ++ ++ // reap half of the transactions ++ reapedTxs = txmp.ReapMaxTxs(len(txs) / 2) ++ ensurePrioritized(reapedTxs) ++ require.Equal(t, len(txs), txmp.Size()) ++ require.Equal(t, int64(5800), txmp.SizeBytes()) ++ require.Len(t, reapedTxs, len(txs)/2) ++} ++ ++func TestTxPool_CheckTxExceedsMaxSize(t *testing.T) { ++ txmp := setup(t, 0) ++ ++ rng := rand.New(rand.NewSource(time.Now().UnixNano())) ++ tx := make([]byte, txmp.config.MaxTxBytes+1) ++ _, err := rng.Read(tx) ++ require.NoError(t, err) ++ ++ err = txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: 0}) ++ require.Equal(t, mempool.ErrTxTooLarge{Max: txmp.config.MaxTxBytes, Actual: len(tx)}, err) ++ ++ tx = make([]byte, txmp.config.MaxTxBytes-1) ++ _, err = rng.Read(tx) ++ require.NoError(t, err) ++ ++ err = txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: 0}) ++ require.NotEqual(t, mempool.ErrTxTooLarge{Max: txmp.config.MaxTxBytes, Actual: len(tx)}, err) ++} ++ ++func TestTxPool_CheckTxSamePeer(t *testing.T) { ++ txmp := setup(t, 100) ++ peerID := uint16(1) ++ rng := rand.New(rand.NewSource(time.Now().UnixNano())) ++ ++ prefix := make([]byte, 20) ++ _, err := rng.Read(prefix) ++ require.NoError(t, err) ++ ++ tx := []byte(fmt.Sprintf("sender-0=%X=%d", prefix, 50)) ++ ++ require.NoError(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: peerID})) ++ require.Error(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: peerID})) ++} ++ ++// TestTxPool_ConcurrentTxs adds a bunch of txs to the txPool (via checkTx) and ++// then reaps transactions from the mempool. At the end it asserts that the ++// mempool is empty. 
++func TestTxPool_ConcurrentTxs(t *testing.T) { ++ cacheSize := 10 ++ txPool := setup(t, cacheSize) ++ checkTxDone := make(chan struct{}) ++ ++ var wg sync.WaitGroup ++ wg.Add(1) ++ go func() { ++ for i := 0; i < 10; i++ { ++ numTxs := 10 ++ peerID := uint16(0) ++ _ = checkTxs(t, txPool, numTxs, peerID) ++ } ++ ++ wg.Done() ++ close(checkTxDone) ++ }() ++ ++ wg.Add(1) ++ go func() { ++ ticker := time.NewTicker(time.Second) ++ defer ticker.Stop() ++ defer wg.Done() ++ ++ height := int64(1) ++ for range ticker.C { ++ reapedTxs := txPool.ReapMaxTxs(50) ++ if len(reapedTxs) > 0 { ++ responses := generateResponses(len(reapedTxs)) ++ err := txPool.Update(height, reapedTxs, responses, nil, nil) ++ require.NoError(t, err) ++ height++ ++ } else { ++ select { ++ case <-checkTxDone: ++ // only return once we know we finished the CheckTx loop ++ return ++ default: ++ } ++ } ++ } ++ }() ++ ++ wg.Wait() ++ assert.Zero(t, txPool.Size()) ++ assert.Zero(t, txPool.SizeBytes()) ++} ++ ++func generateResponses(numResponses int) (responses []*abci.ResponseDeliverTx) { ++ for i := 0; i < numResponses; i++ { ++ var response *abci.ResponseDeliverTx ++ if i%2 == 0 { ++ response = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK} ++ } else { ++ response = &abci.ResponseDeliverTx{Code: 100} ++ } ++ responses = append(responses, response) ++ } ++ return responses ++} ++ ++func TestTxPool_ExpiredTxs_Timestamp(t *testing.T) { ++ txmp := setup(t, 5000) ++ txmp.config.TTLDuration = 5 * time.Millisecond ++ ++ added1 := checkTxs(t, txmp, 10, 0) ++ require.Equal(t, len(added1), txmp.Size()) ++ ++ // Wait a while, then add some more transactions that should not be expired ++ // when the first batch TTLs out. ++ // Because the TTL is 5ms which is very short, we need to have a more precise ++ // pruning interval to ensure that the transactions are expired ++ // so that the expired event is caught quickly enough ++ // that the second batch of transactions are not expired. 
++ ++ time.Sleep(2500 * time.Microsecond) ++ added2 := checkTxs(t, txmp, 10, 1) ++ ++ // use require.Eventually to wait for the TTL to expire ++ require.Eventually(t, func() bool { ++ // Trigger an update so that pruning will occur. ++ txmp.Lock() ++ defer txmp.Unlock() ++ require.NoError(t, txmp.Update(txmp.height+1, nil, nil, nil, nil)) ++ ++ // All the transactions in the original set should have been purged. ++ for _, tx := range added1 { ++ // Check that it was added to the evictedTxCache ++ evicted := txmp.WasRecentlyEvicted(tx.tx.Key()) ++ if !evicted { ++ return false ++ } ++ ++ if txmp.store.has(tx.tx.Key()) { ++ t.Errorf("Transaction %X should have been purged for TTL", tx.tx.Key()) ++ return false ++ } ++ if txmp.rejectedTxCache.Has(tx.tx.Key()) { ++ t.Errorf("Transaction %X should have been removed from the cache", tx.tx.Key()) ++ return false ++ } ++ } ++ return true ++ }, 10*time.Millisecond, 50*time.Microsecond) ++ ++ // All the transactions added later should still be around. 
++ for _, tx := range added2 { ++ if !txmp.store.has(tx.tx.Key()) { ++ t.Errorf("Transaction %X should still be in the mempool, but is not", tx.tx.Key()) ++ } ++ } ++} ++ ++func TestTxPool_ExpiredTxs_NumBlocks(t *testing.T) { ++ txmp := setup(t, 500) ++ txmp.height = 100 ++ txmp.config.TTLNumBlocks = 10 ++ ++ tTxs := checkTxs(t, txmp, 100, 0) ++ require.Equal(t, len(tTxs), txmp.Size()) ++ ++ // reap 5 txs at the next height -- no txs should expire ++ reapedTxs := txmp.ReapMaxTxs(5) ++ responses := make([]*abci.ResponseDeliverTx, len(reapedTxs)) ++ for i := 0; i < len(responses); i++ { ++ responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK} ++ } ++ ++ txmp.Lock() ++ require.NoError(t, txmp.Update(txmp.height+1, reapedTxs, responses, nil, nil)) ++ txmp.Unlock() ++ ++ require.Equal(t, 95, txmp.Size()) ++ ++ // check more txs at height 101 ++ _ = checkTxs(t, txmp, 50, 1) ++ require.Equal(t, 145, txmp.Size()) ++ ++ // Reap 5 txs at a height that would expire all the transactions from before ++ // the previous Update (height 100). ++ // ++ // NOTE: When we reap txs below, we do not know if we're picking txs from the ++ // initial CheckTx calls or from the second round of CheckTx calls. Thus, we ++ // cannot guarantee that all 95 txs are remaining that should be expired and ++ // removed. However, we do know that at most 95 txs can be expired and ++ // removed. 
++ reapedTxs = txmp.ReapMaxTxs(5) ++ responses = make([]*abci.ResponseDeliverTx, len(reapedTxs)) ++ for i := 0; i < len(responses); i++ { ++ responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK} ++ } ++ ++ txmp.Lock() ++ require.NoError(t, txmp.Update(txmp.height+10, reapedTxs, responses, nil, nil)) ++ txmp.Unlock() ++ ++ require.GreaterOrEqual(t, txmp.Size(), 45) ++} ++ ++func TestTxPool_CheckTxPostCheckError(t *testing.T) { ++ cases := []struct { ++ name string ++ err error ++ }{ ++ { ++ name: "error", ++ err: errors.New("test error"), ++ }, ++ { ++ name: "no error", ++ err: nil, ++ }, ++ } ++ for _, tc := range cases { ++ testCase := tc ++ t.Run(testCase.name, func(t *testing.T) { ++ postCheckFn := func(_ types.Tx, _ *abci.ResponseCheckTx) error { ++ return testCase.err ++ } ++ txmp := setup(t, 0, WithPostCheck(postCheckFn)) ++ tx := []byte("sender=0000=1") ++ err := txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: 0}) ++ require.True(t, errors.Is(err, testCase.err)) ++ }) ++ } ++} ++ ++func TestTxPool_RemoveBlobTx(t *testing.T) { ++ app := kvstore.NewApplication() ++ cc := proxy.NewLocalClientCreator(app) ++ ++ cfg := config.TestMempoolConfig() ++ cfg.CacheSize = 100 ++ ++ appConnMem, err := cc.NewABCIClient() ++ require.NoError(t, err) ++ require.NoError(t, appConnMem.Start()) ++ ++ t.Cleanup(func() { ++ os.RemoveAll(cfg.RootDir) ++ require.NoError(t, appConnMem.Stop()) ++ }) ++ ++ txmp := NewTxPool(log.TestingLogger(), cfg, appConnMem, 1) ++ ++ originalTx := []byte{1, 2, 3, 4} ++ indexWrapper, err := types.MarshalIndexWrapper(originalTx, 100) ++ require.NoError(t, err) ++ namespaceOne := bytes.Repeat([]byte{1}, consts.NamespaceIDSize) ++ ++ // create the blobTx ++ b := tmproto.Blob{ ++ NamespaceId: namespaceOne, ++ Data: []byte{1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 9}, ++ ShareVersion: 0, ++ NamespaceVersion: 0, ++ } ++ bTx, err := types.MarshalBlobTx(originalTx, &b) ++ require.NoError(t, err) ++ ++ err = txmp.CheckTx(bTx, nil, 
mempool.TxInfo{}) ++ require.NoError(t, err) ++ ++ err = txmp.Update(1, []types.Tx{indexWrapper}, abciResponses(1, abci.CodeTypeOK), nil, nil) ++ require.NoError(t, err) ++ require.EqualValues(t, 0, txmp.Size()) ++ require.EqualValues(t, 0, txmp.SizeBytes()) ++} ++ ++func abciResponses(n int, code uint32) []*abci.ResponseDeliverTx { ++ responses := make([]*abci.ResponseDeliverTx, 0, n) ++ for i := 0; i < n; i++ { ++ responses = append(responses, &abci.ResponseDeliverTx{Code: code}) ++ } ++ return responses ++} ++ ++func TestTxPool_ConcurrentlyAddingTx(t *testing.T) { ++ cacheSize := 500 ++ txPool := setup(t, cacheSize) ++ tx := types.Tx("sender=0000=1") ++ ++ numTxs := 10 ++ errCh := make(chan error, numTxs) ++ wg := &sync.WaitGroup{} ++ for i := 0; i < numTxs; i++ { ++ wg.Add(1) ++ go func(sender uint16) { ++ defer wg.Done() ++ _, err := txPool.TryAddNewTx(tx, tx.Key(), mempool.TxInfo{SenderID: sender}) ++ errCh <- err ++ }(uint16(i + 1)) ++ } ++ go func() { ++ wg.Wait() ++ close(errCh) ++ }() ++ ++ errCount := 0 ++ for err := range errCh { ++ if err != nil { ++ require.Equal(t, ErrTxInMempool, err) ++ errCount++ ++ } ++ } ++ // At least one tx should succeed and get added to the mempool without an error. ++ require.Less(t, errCount, numTxs) ++ // The rest of the txs may fail with ErrTxInMempool but the number of errors isn't exact. 
++ require.LessOrEqual(t, errCount, numTxs-1) ++} ++ ++func TestTxPool_BroadcastQueue(t *testing.T) { ++ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) ++ defer cancel() ++ txmp := setup(t, 1) ++ txs := 10 ++ ++ wg := sync.WaitGroup{} ++ wg.Add(1) ++ ++ for i := 0; i < txs; i++ { ++ tx := newDefaultTx(fmt.Sprintf("%d", i)) ++ require.NoError(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: 0})) ++ } ++ ++ go func() { ++ defer wg.Done() ++ for i := 0; i < txs; i++ { ++ select { ++ case <-ctx.Done(): ++ assert.FailNowf(t, "failed to receive all txs (got %d/%d)", "", i+1, txs) ++ case wtx := <-txmp.next(): ++ require.Equal(t, wtx.tx, newDefaultTx(fmt.Sprintf("%d", i))) ++ } ++ } ++ }() ++ ++ wg.Wait() ++} diff --git a/patches/mempool/cat/reactor.go.patch b/patches/mempool/cat/reactor.go.patch new file mode 100644 index 00000000000..c1c27367b74 --- /dev/null +++ b/patches/mempool/cat/reactor.go.patch @@ -0,0 +1,495 @@ +diff --git a/mempool/cat/reactor.go b/mempool/cat/reactor.go +new file mode 100644 +index 000000000..88fd9abcf +--- /dev/null ++++ b/mempool/cat/reactor.go +@@ -0,0 +1,489 @@ ++package cat ++ ++import ( ++ "fmt" ++ "math/rand" ++ "time" ++ ++ "github.com/gogo/protobuf/proto" ++ ++ cfg "github.com/tendermint/tendermint/config" ++ "github.com/tendermint/tendermint/crypto/tmhash" ++ "github.com/tendermint/tendermint/libs/log" ++ "github.com/tendermint/tendermint/mempool" ++ "github.com/tendermint/tendermint/p2p" ++ "github.com/tendermint/tendermint/pkg/trace" ++ "github.com/tendermint/tendermint/pkg/trace/schema" ++ protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" ++ "github.com/tendermint/tendermint/types" ++) ++ ++const ( ++ // default duration to wait before considering a peer non-responsive ++ // and searching for the tx from a new peer ++ DefaultGossipDelay = 200 * time.Millisecond ++ ++ // Content Addressable Tx Pool gossips state based messages (SeenTx and WantTx) on a separate channel ++ // for cross 
compatibility ++ MempoolStateChannel = byte(0x31) ++ ++ // peerHeightDiff signifies the tolerance in difference in height between the peer and the height ++ // the node received the tx ++ peerHeightDiff = 10 ++) ++ ++// Reactor handles mempool tx broadcasting logic amongst peers. For the main ++// logic behind the protocol, refer to `ReceiveEnvelope` or to the english ++// spec under /.spec.md ++type Reactor struct { ++ p2p.BaseReactor ++ opts *ReactorOptions ++ mempool *TxPool ++ ids *mempoolIDs ++ requests *requestScheduler ++ traceClient trace.Tracer ++} ++ ++type ReactorOptions struct { ++ // ListenOnly means that the node will never broadcast any of the transactions that ++ // it receives. This is useful for keeping transactions private ++ ListenOnly bool ++ ++ // MaxTxSize is the maximum size of a transaction that can be received ++ MaxTxSize int ++ ++ // MaxGossipDelay is the maximum allotted time that the reactor expects a transaction to ++ // arrive before issuing a new request to a different peer ++ MaxGossipDelay time.Duration ++ ++ // TraceClient is the trace client for collecting trace level events ++ TraceClient trace.Tracer ++} ++ ++func (opts *ReactorOptions) VerifyAndComplete() error { ++ if opts.MaxTxSize == 0 { ++ opts.MaxTxSize = cfg.DefaultMempoolConfig().MaxTxBytes ++ } ++ ++ if opts.MaxGossipDelay == 0 { ++ opts.MaxGossipDelay = DefaultGossipDelay ++ } ++ ++ if opts.MaxTxSize < 0 { ++ return fmt.Errorf("max tx size (%d) cannot be negative", opts.MaxTxSize) ++ } ++ ++ if opts.MaxGossipDelay < 0 { ++ return fmt.Errorf("max gossip delay (%d) cannot be negative", opts.MaxGossipDelay) ++ } ++ ++ return nil ++} ++ ++// NewReactor returns a new Reactor with the given config and mempool. 
++func NewReactor(mempool *TxPool, opts *ReactorOptions) (*Reactor, error) { ++ err := opts.VerifyAndComplete() ++ if err != nil { ++ return nil, err ++ } ++ memR := &Reactor{ ++ opts: opts, ++ mempool: mempool, ++ ids: newMempoolIDs(), ++ requests: newRequestScheduler(opts.MaxGossipDelay, defaultGlobalRequestTimeout), ++ traceClient: trace.NoOpTracer(), ++ } ++ memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR) ++ return memR, nil ++} ++ ++// SetLogger sets the Logger on the reactor and the underlying mempool. ++func (memR *Reactor) SetLogger(l log.Logger) { ++ memR.Logger = l ++} ++ ++// OnStart implements Service. ++func (memR *Reactor) OnStart() error { ++ if !memR.opts.ListenOnly { ++ go func() { ++ for { ++ select { ++ case <-memR.Quit(): ++ return ++ ++ // listen in for any newly verified tx via RPC, then immediately ++ // broadcast it to all connected peers. ++ case nextTx := <-memR.mempool.next(): ++ memR.broadcastNewTx(nextTx) ++ } ++ } ++ }() ++ } else { ++ memR.Logger.Info("Tx broadcasting is disabled") ++ } ++ // run a separate go routine to check for time based TTLs ++ if memR.mempool.config.TTLDuration > 0 { ++ go func() { ++ ticker := time.NewTicker(memR.mempool.config.TTLDuration) ++ for { ++ select { ++ case <-ticker.C: ++ memR.mempool.CheckToPurgeExpiredTxs() ++ case <-memR.Quit(): ++ return ++ } ++ } ++ }() ++ } ++ ++ return nil ++} ++ ++// OnStop implements Service ++func (memR *Reactor) OnStop() { ++ // stop all the timers tracking outbound requests ++ memR.requests.Close() ++} ++ ++// GetChannels implements Reactor by returning the list of channels for this ++// reactor. 
++func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor { ++ largestTx := make([]byte, memR.opts.MaxTxSize) ++ txMsg := protomem.Message{ ++ Sum: &protomem.Message_Txs{ ++ Txs: &protomem.Txs{Txs: [][]byte{largestTx}}, ++ }, ++ } ++ ++ stateMsg := protomem.Message{ ++ Sum: &protomem.Message_SeenTx{ ++ SeenTx: &protomem.SeenTx{ ++ TxKey: make([]byte, tmhash.Size), ++ }, ++ }, ++ } ++ ++ return []*p2p.ChannelDescriptor{ ++ { ++ ID: mempool.MempoolChannel, ++ Priority: 6, ++ RecvMessageCapacity: txMsg.Size(), ++ MessageType: &protomem.Message{}, ++ }, ++ { ++ ID: MempoolStateChannel, ++ Priority: 5, ++ RecvMessageCapacity: stateMsg.Size(), ++ MessageType: &protomem.Message{}, ++ }, ++ } ++} ++ ++// InitPeer implements Reactor by creating a state for the peer. ++func (memR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer { ++ memR.ids.ReserveForPeer(peer) ++ return peer ++} ++ ++// RemovePeer implements Reactor. For all current outbound requests to this ++// peer it will find a new peer to rerequest the same transactions. ++func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { ++ peerID := memR.ids.Reclaim(peer.ID()) ++ // clear all memory of seen txs by that peer ++ memR.mempool.seenByPeersSet.RemovePeer(peerID) ++ ++ // remove and rerequest all pending outbound requests to that peer since we know ++ // we won't receive any responses from them. ++ outboundRequests := memR.requests.ClearAllRequestsFrom(peerID) ++ for key := range outboundRequests { ++ memR.mempool.metrics.RequestedTxs.Add(1) ++ memR.findNewPeerToRequestTx(key) ++ } ++} ++ ++func (memR *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) { ++ msg := &protomem.Message{} ++ err := proto.Unmarshal(msgBytes, msg) ++ if err != nil { ++ panic(err) ++ } ++ uw, err := msg.Unwrap() ++ if err != nil { ++ panic(err) ++ } ++ memR.ReceiveEnvelope(p2p.Envelope{ ++ ChannelID: chID, ++ Src: peer, ++ Message: uw, ++ }) ++} ++ ++// ReceiveEnvelope implements Reactor. 
++// It processes one of three messages: Txs, SeenTx, WantTx. ++func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) { ++ switch msg := e.Message.(type) { ++ ++ // A peer has sent us one or more transactions. This could be either because we requested them ++ // or because the peer received a new transaction and is broadcasting it to us. ++ // NOTE: This setup also means that we can support older mempool implementations that simply ++ // flooded the network with transactions. ++ case *protomem.Txs: ++ protoTxs := msg.GetTxs() ++ if len(protoTxs) == 0 { ++ memR.Logger.Error("received empty txs from peer", "src", e.Src) ++ return ++ } ++ peerID := memR.ids.GetIDForPeer(e.Src.ID()) ++ txInfo := mempool.TxInfo{SenderID: peerID} ++ txInfo.SenderP2PID = e.Src.ID() ++ ++ var err error ++ for _, tx := range protoTxs { ++ ntx := types.Tx(tx) ++ key := ntx.Key() ++ schema.WriteMempoolTx(memR.traceClient, string(e.Src.ID()), key[:], len(tx), schema.Download) ++ // If we requested the transaction we mark it as received. ++ if memR.requests.Has(peerID, key) { ++ memR.requests.MarkReceived(peerID, key) ++ memR.Logger.Debug("received a response for a requested transaction", "peerID", peerID, "txKey", key) ++ } else { ++ // If we didn't request the transaction we simply mark the peer as having the ++ // tx (we'd have already done it if we were requesting the tx). ++ memR.mempool.PeerHasTx(peerID, key) ++ memR.Logger.Debug("received new trasaction", "peerID", peerID, "txKey", key) ++ } ++ _, err = memR.mempool.TryAddNewTx(ntx, key, txInfo) ++ if err != nil && err != ErrTxInMempool { ++ memR.Logger.Info("Could not add tx", "txKey", key, "err", err) ++ return ++ } ++ if !memR.opts.ListenOnly { ++ // We broadcast only transactions that we deem valid and actually have in our mempool. ++ memR.broadcastSeenTx(key) ++ } ++ } ++ ++ // A peer has indicated to us that it has a transaction. We first verify the txkey and ++ // mark that peer as having the transaction. 
Then we proceed with the following logic: ++ // ++ // 1. If we have the transaction, we do nothing. ++ // 2. If we don't yet have the tx but have an outgoing request for it, we do nothing. ++ // 3. If we recently evicted the tx and still don't have space for it, we do nothing. ++ // 4. Else, we request the transaction from that peer. ++ case *protomem.SeenTx: ++ txKey, err := types.TxKeyFromBytes(msg.TxKey) ++ if err != nil { ++ memR.Logger.Error("peer sent SeenTx with incorrect tx key", "err", err) ++ memR.Switch.StopPeerForError(e.Src, err) ++ return ++ } ++ schema.WriteMempoolPeerState( ++ memR.traceClient, ++ string(e.Src.ID()), ++ schema.SeenTx, ++ txKey[:], ++ schema.Download, ++ ) ++ peerID := memR.ids.GetIDForPeer(e.Src.ID()) ++ memR.mempool.PeerHasTx(peerID, txKey) ++ // Check if we don't already have the transaction and that it was recently rejected ++ if memR.mempool.Has(txKey) || memR.mempool.IsRejectedTx(txKey) { ++ memR.Logger.Debug("received a seen tx for a tx we already have", "txKey", txKey) ++ return ++ } ++ ++ // If we are already requesting that tx, then we don't need to go any further. ++ if memR.requests.ForTx(txKey) != 0 { ++ memR.Logger.Debug("received a SeenTx message for a transaction we are already requesting", "txKey", txKey) ++ return ++ } ++ ++ // We don't have the transaction, nor are we requesting it so we send the node ++ // a want msg ++ memR.requestTx(txKey, e.Src) ++ ++ // A peer is requesting a transaction that we have claimed to have. Find the specified ++ // transaction and broadcast it to the peer. 
We may no longer have the transaction ++ case *protomem.WantTx: ++ txKey, err := types.TxKeyFromBytes(msg.TxKey) ++ if err != nil { ++ memR.Logger.Error("peer sent WantTx with incorrect tx key", "err", err) ++ memR.Switch.StopPeerForError(e.Src, err) ++ return ++ } ++ schema.WriteMempoolPeerState( ++ memR.traceClient, ++ string(e.Src.ID()), ++ schema.WantTx, ++ txKey[:], ++ schema.Download, ++ ) ++ tx, has := memR.mempool.GetTxByKey(txKey) ++ if has && !memR.opts.ListenOnly { ++ peerID := memR.ids.GetIDForPeer(e.Src.ID()) ++ memR.Logger.Debug("sending a tx in response to a want msg", "peer", peerID) ++ if p2p.SendEnvelopeShim(e.Src, p2p.Envelope{ //nolint:staticcheck ++ ChannelID: mempool.MempoolChannel, ++ Message: &protomem.Txs{Txs: [][]byte{tx}}, ++ }, memR.Logger) { ++ memR.mempool.PeerHasTx(peerID, txKey) ++ schema.WriteMempoolTx( ++ memR.traceClient, ++ string(e.Src.ID()), ++ txKey[:], ++ len(tx), ++ schema.Upload, ++ ) ++ } ++ } ++ ++ default: ++ memR.Logger.Error("unknown message type", "src", e.Src, "chId", e.ChannelID, "msg", fmt.Sprintf("%T", msg)) ++ memR.Switch.StopPeerForError(e.Src, fmt.Errorf("mempool cannot handle message of type: %T", msg)) ++ return ++ } ++} ++ ++// PeerState describes the state of a peer. ++type PeerState interface { ++ GetHeight() int64 ++} ++ ++// broadcastSeenTx broadcasts a SeenTx message to all peers unless we ++// know they have already seen the transaction ++func (memR *Reactor) broadcastSeenTx(txKey types.TxKey) { ++ memR.Logger.Debug("broadcasting seen tx to all peers", "tx_key", txKey.String()) ++ msg := &protomem.Message{ ++ Sum: &protomem.Message_SeenTx{ ++ SeenTx: &protomem.SeenTx{ ++ TxKey: txKey[:], ++ }, ++ }, ++ } ++ bz, err := msg.Marshal() ++ if err != nil { ++ panic(err) ++ } ++ ++ // Add jitter to when the node broadcasts it's seen txs to stagger when nodes ++ // in the network broadcast their seenTx messages. 
++ time.Sleep(time.Duration(rand.Intn(10)*10) * time.Millisecond) //nolint:gosec ++ ++ for id, peer := range memR.ids.GetAll() { ++ if p, ok := peer.Get(types.PeerStateKey).(PeerState); ok { ++ // make sure peer isn't too far behind. This can happen ++ // if the peer is blocksyncing still and catching up ++ // in which case we just skip sending the transaction ++ if p.GetHeight() < memR.mempool.Height()-peerHeightDiff { ++ memR.Logger.Debug("peer is too far behind us. Skipping broadcast of seen tx") ++ continue ++ } ++ } ++ // no need to send a seen tx message to a peer that already ++ // has that tx. ++ if memR.mempool.seenByPeersSet.Has(txKey, id) { ++ continue ++ } ++ ++ peer.Send(MempoolStateChannel, bz) //nolint:staticcheck ++ } ++} ++ ++// broadcastNewTx broadcast new transaction to all peers unless we are already sure they have seen the tx. ++func (memR *Reactor) broadcastNewTx(wtx *wrappedTx) { ++ msg := &protomem.Message{ ++ Sum: &protomem.Message_Txs{ ++ Txs: &protomem.Txs{ ++ Txs: [][]byte{wtx.tx}, ++ }, ++ }, ++ } ++ bz, err := msg.Marshal() ++ if err != nil { ++ panic(err) ++ } ++ ++ for id, peer := range memR.ids.GetAll() { ++ if p, ok := peer.Get(types.PeerStateKey).(PeerState); ok { ++ // make sure peer isn't too far behind. This can happen ++ // if the peer is blocksyncing still and catching up ++ // in which case we just skip sending the transaction ++ if p.GetHeight() < wtx.height-peerHeightDiff { ++ memR.Logger.Debug("peer is too far behind us. Skipping broadcast of seen tx") ++ continue ++ } ++ } ++ ++ if memR.mempool.seenByPeersSet.Has(wtx.key, id) { ++ continue ++ } ++ ++ if peer.Send(mempool.MempoolChannel, bz) { //nolint:staticcheck ++ memR.mempool.PeerHasTx(id, wtx.key) ++ } ++ } ++} ++ ++// requestTx requests a transaction from a peer and tracks it, ++// requesting it from another peer if the first peer does not respond. 
++func (memR *Reactor) requestTx(txKey types.TxKey, peer p2p.Peer) { ++ if peer == nil { ++ // we have disconnected from the peer ++ return ++ } ++ memR.Logger.Debug("requesting tx", "txKey", txKey, "peerID", peer.ID()) ++ msg := &protomem.Message{ ++ Sum: &protomem.Message_WantTx{ ++ WantTx: &protomem.WantTx{TxKey: txKey[:]}, ++ }, ++ } ++ bz, err := msg.Marshal() ++ if err != nil { ++ panic(err) ++ } ++ ++ success := peer.Send(MempoolStateChannel, bz) //nolint:staticcheck ++ if success { ++ memR.mempool.metrics.RequestedTxs.Add(1) ++ requested := memR.requests.Add(txKey, memR.ids.GetIDForPeer(peer.ID()), memR.findNewPeerToRequestTx) ++ if !requested { ++ memR.Logger.Error("have already marked a tx as requested", "txKey", txKey, "peerID", peer.ID()) ++ } ++ } ++} ++ ++// findNewPeerToRequestTx finds a new peer that has already seen the transaction to ++// request a transaction from. ++func (memR *Reactor) findNewPeerToRequestTx(txKey types.TxKey) { ++ // ensure that we are connected to peers ++ if memR.ids.Len() == 0 { ++ return ++ } ++ ++ // pop the next peer in the list of remaining peers that have seen the tx ++ // and does not already have an outbound request for that tx ++ seenMap := memR.mempool.seenByPeersSet.Get(txKey) ++ var peerID uint16 ++ for possiblePeer := range seenMap { ++ if !memR.requests.Has(possiblePeer, txKey) { ++ peerID = possiblePeer ++ break ++ } ++ } ++ ++ if peerID == 0 { ++ // No other free peer has the transaction we are looking for. 
++ // We give up 🤷‍♂️ and hope either a peer responds late or the tx ++ // is gossiped again ++ memR.Logger.Info("no other peer has the tx we are looking for", "txKey", txKey) ++ return ++ } ++ peer := memR.ids.GetPeer(peerID) ++ if peer == nil { ++ // we disconnected from that peer, retry again until we exhaust the list ++ memR.findNewPeerToRequestTx(txKey) ++ } else { ++ memR.mempool.metrics.RerequestedTxs.Add(1) ++ memR.requestTx(txKey, peer) ++ } ++} diff --git a/patches/mempool/cat/reactor_test.go.patch b/patches/mempool/cat/reactor_test.go.patch new file mode 100644 index 00000000000..99c04839ca7 --- /dev/null +++ b/patches/mempool/cat/reactor_test.go.patch @@ -0,0 +1,416 @@ +diff --git a/mempool/cat/reactor_test.go b/mempool/cat/reactor_test.go +new file mode 100644 +index 000000000..97f1843ee +--- /dev/null ++++ b/mempool/cat/reactor_test.go +@@ -0,0 +1,410 @@ ++package cat ++ ++import ( ++ "encoding/hex" ++ "os" ++ "sort" ++ "sync" ++ "testing" ++ "time" ++ ++ "github.com/go-kit/log/term" ++ "github.com/gogo/protobuf/proto" ++ "github.com/stretchr/testify/assert" ++ "github.com/stretchr/testify/require" ++ ++ "github.com/tendermint/tendermint/abci/example/kvstore" ++ "github.com/tendermint/tendermint/crypto/ed25519" ++ p2pmock "github.com/tendermint/tendermint/p2p/mock" ++ ++ cfg "github.com/tendermint/tendermint/config" ++ ++ "github.com/tendermint/tendermint/libs/log" ++ "github.com/tendermint/tendermint/mempool" ++ "github.com/tendermint/tendermint/p2p" ++ "github.com/tendermint/tendermint/p2p/mocks" ++ protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" ++ "github.com/tendermint/tendermint/proxy" ++ "github.com/tendermint/tendermint/types" ++) ++ ++const ( ++ numTxs = 10 ++ timeout = 120 * time.Second // ridiculously high because CircleCI is slow ++) ++ ++type peerState struct { ++ height int64 ++} ++ ++func (ps peerState) GetHeight() int64 { ++ return ps.height ++} ++ ++// Send a bunch of txs to the first reactor's mempool and wait 
for them all to ++// be received in the others. ++func TestReactorBroadcastTxsMessage(t *testing.T) { ++ config := cfg.TestConfig() ++ const N = 5 ++ reactors := makeAndConnectReactors(t, config, N) ++ ++ txs := checkTxs(t, reactors[0].mempool, numTxs, mempool.UnknownPeerID) ++ sort.Slice(txs, func(i, j int) bool { ++ return txs[i].priority > txs[j].priority // N.B. higher priorities first ++ }) ++ transactions := make(types.Txs, len(txs)) ++ for idx, tx := range txs { ++ transactions[idx] = tx.tx ++ } ++ ++ waitForTxsOnReactors(t, transactions, reactors) ++} ++ ++func TestReactorSendWantTxAfterReceiveingSeenTx(t *testing.T) { ++ reactor, _ := setupReactor(t) ++ ++ tx := newDefaultTx("hello") ++ key := tx.Key() ++ msgSeen := &protomem.Message{ ++ Sum: &protomem.Message_SeenTx{SeenTx: &protomem.SeenTx{TxKey: key[:]}}, ++ } ++ msgSeenB, err := msgSeen.Marshal() ++ require.NoError(t, err) ++ ++ msgWant := &protomem.Message{ ++ Sum: &protomem.Message_WantTx{WantTx: &protomem.WantTx{TxKey: key[:]}}, ++ } ++ msgWantB, err := msgWant.Marshal() ++ require.NoError(t, err) ++ ++ peer := genPeer() ++ peer.On("Send", MempoolStateChannel, msgWantB).Return(true) ++ ++ reactor.InitPeer(peer) ++ reactor.Receive(MempoolStateChannel, peer, msgSeenB) ++ ++ peer.AssertExpectations(t) ++} ++ ++func TestReactorSendsTxAfterReceivingWantTx(t *testing.T) { ++ reactor, pool := setupReactor(t) ++ ++ tx := newDefaultTx("hello") ++ key := tx.Key() ++ txEnvelope := p2p.Envelope{ ++ Message: &protomem.Txs{Txs: [][]byte{tx}}, ++ ChannelID: mempool.MempoolChannel, ++ } ++ ++ msgWant := &protomem.Message{ ++ Sum: &protomem.Message_WantTx{WantTx: &protomem.WantTx{TxKey: key[:]}}, ++ } ++ msgWantB, err := msgWant.Marshal() ++ require.NoError(t, err) ++ ++ peer := genPeer() ++ peer.On("SendEnvelope", txEnvelope).Return(true) ++ ++ // add the transaction to the nodes pool. 
It's not connected to ++ // any peers so it shouldn't broadcast anything yet ++ require.NoError(t, pool.CheckTx(tx, nil, mempool.TxInfo{})) ++ ++ // Add the peer ++ reactor.InitPeer(peer) ++ // The peer sends a want msg for this tx ++ reactor.Receive(MempoolStateChannel, peer, msgWantB) ++ ++ // Should send the tx to the peer in response ++ peer.AssertExpectations(t) ++ ++ // pool should have marked the peer as having seen the tx ++ peerID := reactor.ids.GetIDForPeer(peer.ID()) ++ require.True(t, pool.seenByPeersSet.Has(key, peerID)) ++} ++ ++func TestReactorBroadcastsSeenTxAfterReceivingTx(t *testing.T) { ++ reactor, _ := setupReactor(t) ++ ++ tx := newDefaultTx("hello") ++ key := tx.Key() ++ txMsg := &protomem.Message{ ++ Sum: &protomem.Message_Txs{Txs: &protomem.Txs{Txs: [][]byte{tx}}}, ++ } ++ txMsgBytes, err := txMsg.Marshal() ++ require.NoError(t, err) ++ ++ seenMsg := &protomem.Message{ ++ Sum: &protomem.Message_SeenTx{SeenTx: &protomem.SeenTx{TxKey: key[:]}}, ++ } ++ seenMsgBytes, err := seenMsg.Marshal() ++ require.NoError(t, err) ++ ++ peers := genPeers(2) ++ // only peer 1 should receive the seen tx message as peer 0 broadcasted ++ // the transaction in the first place ++ peers[1].On("Send", MempoolStateChannel, seenMsgBytes).Return(true) ++ ++ reactor.InitPeer(peers[0]) ++ reactor.InitPeer(peers[1]) ++ reactor.Receive(mempool.MempoolChannel, peers[0], txMsgBytes) ++ ++ peers[0].AssertExpectations(t) ++ peers[1].AssertExpectations(t) ++} ++ ++func TestRemovePeerRequestFromOtherPeer(t *testing.T) { ++ reactor, _ := setupReactor(t) ++ ++ tx := newDefaultTx("hello") ++ key := tx.Key() ++ peers := genPeers(2) ++ reactor.InitPeer(peers[0]) ++ reactor.InitPeer(peers[1]) ++ ++ seenMsg := &protomem.SeenTx{TxKey: key[:]} ++ ++ wantMsg := &protomem.Message{ ++ Sum: &protomem.Message_WantTx{WantTx: &protomem.WantTx{TxKey: key[:]}}, ++ } ++ wantMsgBytes, err := wantMsg.Marshal() ++ require.NoError(t, err) ++ peers[0].On("Send", MempoolStateChannel, 
wantMsgBytes).Return(true) ++ peers[1].On("Send", MempoolStateChannel, wantMsgBytes).Return(true) ++ ++ reactor.ReceiveEnvelope(p2p.Envelope{ ++ Src: peers[0], ++ Message: seenMsg, ++ ChannelID: MempoolStateChannel, ++ }) ++ time.Sleep(100 * time.Millisecond) ++ reactor.ReceiveEnvelope(p2p.Envelope{ ++ Src: peers[1], ++ Message: seenMsg, ++ ChannelID: MempoolStateChannel, ++ }) ++ ++ reactor.RemovePeer(peers[0], "test") ++ ++ peers[0].AssertExpectations(t) ++ peers[1].AssertExpectations(t) ++ ++ require.True(t, reactor.mempool.seenByPeersSet.Has(key, 2)) ++ // we should have automatically sent another request out for peer 2 ++ require.EqualValues(t, 2, reactor.requests.ForTx(key)) ++ require.True(t, reactor.requests.Has(2, key)) ++ require.False(t, reactor.mempool.seenByPeersSet.Has(key, 1)) ++} ++ ++func TestMempoolVectors(t *testing.T) { ++ testCases := []struct { ++ testName string ++ tx []byte ++ expBytes string ++ }{ ++ {"tx 1", []byte{123}, "0a030a017b"}, ++ {"tx 2", []byte("proto encoding in mempool"), "0a1b0a1970726f746f20656e636f64696e6720696e206d656d706f6f6c"}, ++ } ++ ++ for _, tc := range testCases { ++ tc := tc ++ ++ msg := protomem.Message{ ++ Sum: &protomem.Message_Txs{ ++ Txs: &protomem.Txs{Txs: [][]byte{tc.tx}}, ++ }, ++ } ++ bz, err := msg.Marshal() ++ require.NoError(t, err, tc.testName) ++ ++ require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName) ++ } ++} ++ ++func TestReactorEventuallyRemovesExpiredTransaction(t *testing.T) { ++ reactor, _ := setupReactor(t) ++ reactor.mempool.config.TTLDuration = 100 * time.Millisecond ++ ++ tx := newDefaultTx("hello") ++ key := tx.Key() ++ txMsg := &protomem.Message{ ++ Sum: &protomem.Message_Txs{Txs: &protomem.Txs{Txs: [][]byte{tx}}}, ++ } ++ txMsgBytes, err := txMsg.Marshal() ++ require.NoError(t, err) ++ ++ peer := genPeer() ++ require.NoError(t, reactor.Start()) ++ reactor.InitPeer(peer) ++ reactor.Receive(mempool.MempoolChannel, peer, txMsgBytes) ++ require.True(t, reactor.mempool.Has(key)) 
++ ++ // wait for the transaction to expire ++ require.Eventually(t, ++ func() bool { return !reactor.mempool.Has(key) }, ++ 4*reactor.mempool.config.TTLDuration, ++ 50*time.Millisecond, ++ "transaction was not removed after TTL expired") ++} ++ ++func TestLegacyReactorReceiveBasic(t *testing.T) { ++ config := cfg.TestConfig() ++ // if there were more than two reactors, the order of transactions could not be ++ // asserted in waitForTxsOnReactors (due to transactions gossiping). If we ++ // replace Connect2Switches (full mesh) with a func, which connects first ++ // reactor to others and nothing else, this test should also pass with >2 reactors. ++ const N = 1 ++ reactors := makeAndConnectReactors(t, config, N) ++ var ( ++ reactor = reactors[0] ++ peer = p2pmock.NewPeer(nil) ++ ) ++ defer func() { ++ err := reactor.Stop() ++ assert.NoError(t, err) ++ }() ++ ++ reactor.InitPeer(peer) ++ reactor.AddPeer(peer) ++ ++ msg := &protomem.Message{ ++ Sum: &protomem.Message_Txs{ ++ Txs: &protomem.Txs{Txs: [][]byte{}}, ++ }, ++ } ++ m, err := proto.Marshal(msg) ++ assert.NoError(t, err) ++ ++ assert.NotPanics(t, func() { ++ reactor.Receive(mempool.MempoolChannel, peer, m) ++ }) ++} ++ ++func setupReactor(t *testing.T) (*Reactor, *TxPool) { ++ app := &application{kvstore.NewApplication()} ++ cc := proxy.NewLocalClientCreator(app) ++ pool, cleanup := newMempoolWithApp(cc) ++ t.Cleanup(cleanup) ++ reactor, err := NewReactor(pool, &ReactorOptions{}) ++ require.NoError(t, err) ++ return reactor, pool ++} ++ ++func makeAndConnectReactors(t *testing.T, config *cfg.Config, n int) []*Reactor { ++ reactors := make([]*Reactor, n) ++ logger := mempoolLogger() ++ for i := 0; i < n; i++ { ++ var pool *TxPool ++ reactors[i], pool = setupReactor(t) ++ pool.logger = logger.With("validator", i) ++ reactors[i].SetLogger(logger.With("validator", i)) ++ } ++ ++ switches := p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch { ++ s.AddReactor("MEMPOOL", reactors[i]) ++ 
return s ++ }, p2p.Connect2Switches) ++ ++ t.Cleanup(func() { ++ for _, s := range switches { ++ if err := s.Stop(); err != nil { ++ assert.NoError(t, err) ++ } ++ } ++ }) ++ ++ for _, r := range reactors { ++ for _, peer := range r.Switch.Peers().List() { ++ peer.Set(types.PeerStateKey, peerState{1}) ++ } ++ } ++ return reactors ++} ++ ++// mempoolLogger is a TestingLogger which uses a different ++// color for each validator ("validator" key must exist). ++func mempoolLogger() log.Logger { ++ return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor { ++ for i := 0; i < len(keyvals)-1; i += 2 { ++ if keyvals[i] == "validator" { ++ return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))} ++ } ++ } ++ return term.FgBgColor{} ++ }) ++} ++ ++func newMempoolWithApp(cc proxy.ClientCreator) (*TxPool, func()) { ++ conf := cfg.ResetTestRoot("mempool_test") ++ ++ mp, cu := newMempoolWithAppAndConfig(cc, conf) ++ return mp, cu ++} ++ ++func newMempoolWithAppAndConfig(cc proxy.ClientCreator, conf *cfg.Config) (*TxPool, func()) { ++ appConnMem, _ := cc.NewABCIClient() ++ appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool")) ++ err := appConnMem.Start() ++ if err != nil { ++ panic(err) ++ } ++ ++ mp := NewTxPool(log.TestingLogger(), conf.Mempool, appConnMem, 1) ++ ++ return mp, func() { os.RemoveAll(conf.RootDir) } ++} ++ ++func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) { ++ // wait for the txs in all mempools ++ wg := new(sync.WaitGroup) ++ for i, reactor := range reactors { ++ wg.Add(1) ++ go func(r *Reactor, reactorIndex int) { ++ defer wg.Done() ++ waitForTxsOnReactor(t, txs, r, reactorIndex) ++ }(reactor, i) ++ } ++ ++ done := make(chan struct{}) ++ go func() { ++ wg.Wait() ++ close(done) ++ }() ++ ++ timer := time.After(timeout) ++ select { ++ case <-timer: ++ t.Fatal("Timed out waiting for txs") ++ case <-done: ++ } ++} ++ ++func waitForTxsOnReactor(t *testing.T, txs 
types.Txs, reactor *Reactor, reactorIndex int) { ++ mempool := reactor.mempool ++ for mempool.Size() < len(txs) { ++ time.Sleep(time.Millisecond * 100) ++ } ++ ++ reapedTxs := mempool.ReapMaxTxs(len(txs)) ++ for i, tx := range txs { ++ require.Contains(t, reapedTxs, tx) ++ require.Equal(t, tx, reapedTxs[i], ++ "txs at index %d on reactor %d don't match: %x vs %x", i, reactorIndex, tx, reapedTxs[i]) ++ } ++} ++ ++func genPeers(n int) []*mocks.Peer { ++ peers := make([]*mocks.Peer, n) ++ for i := 0; i < n; i++ { ++ peers[i] = genPeer() ++ } ++ return peers ++ ++} ++ ++func genPeer() *mocks.Peer { ++ peer := &mocks.Peer{} ++ nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()} ++ peer.On("ID").Return(nodeKey.ID()) ++ peer.On("Get", types.PeerStateKey).Return(nil).Maybe() ++ return peer ++} diff --git a/patches/mempool/cat/requests.go.patch b/patches/mempool/cat/requests.go.patch new file mode 100644 index 00000000000..b886cec624e --- /dev/null +++ b/patches/mempool/cat/requests.go.patch @@ -0,0 +1,160 @@ +diff --git a/mempool/cat/requests.go b/mempool/cat/requests.go +new file mode 100644 +index 000000000..ea78ff502 +--- /dev/null ++++ b/mempool/cat/requests.go +@@ -0,0 +1,154 @@ ++package cat ++ ++import ( ++ "sync" ++ "time" ++ ++ "github.com/tendermint/tendermint/types" ++) ++ ++const defaultGlobalRequestTimeout = 1 * time.Hour ++ ++// requestScheduler tracks the lifecycle of outbound transaction requests. ++type requestScheduler struct { ++ mtx sync.Mutex ++ ++ // responseTime is the time the scheduler ++ // waits for a response from a peer before ++ // invoking the callback ++ responseTime time.Duration ++ ++ // globalTimeout represents the longest duration ++ // to wait for any late response (after the reponseTime). ++ // After this period the request is garbage collected. ++ globalTimeout time.Duration ++ ++ // requestsByPeer is a lookup table of requests by peer. 
++ // Multiple transactions can be requested by a single peer at once ++ requestsByPeer map[uint16]requestSet ++ ++ // requestsByTx is a lookup table for requested txs. ++ // There can only be one request per tx. ++ requestsByTx map[types.TxKey]uint16 ++} ++ ++type requestSet map[types.TxKey]*time.Timer ++ ++func newRequestScheduler(responseTime, globalTimeout time.Duration) *requestScheduler { ++ return &requestScheduler{ ++ responseTime: responseTime, ++ globalTimeout: globalTimeout, ++ requestsByPeer: make(map[uint16]requestSet), ++ requestsByTx: make(map[types.TxKey]uint16), ++ } ++} ++ ++func (r *requestScheduler) Add(key types.TxKey, peer uint16, onTimeout func(key types.TxKey)) bool { ++ if peer == 0 { ++ return false ++ } ++ r.mtx.Lock() ++ defer r.mtx.Unlock() ++ ++ // not allowed to have more than one outgoing transaction at once ++ if _, ok := r.requestsByTx[key]; ok { ++ return false ++ } ++ ++ timer := time.AfterFunc(r.responseTime, func() { ++ r.mtx.Lock() ++ delete(r.requestsByTx, key) ++ r.mtx.Unlock() ++ ++ // trigger callback. Callback can `Add` the tx back to the scheduler ++ if onTimeout != nil { ++ onTimeout(key) ++ } ++ ++ // We set another timeout because the peer could still send ++ // a late response after the first timeout and it's important ++ // to recognise that it is a transaction in response to a ++ // request and not a new transaction being broadcasted to the entire ++ // network. This timer cannot be stopped and is used to ensure ++ // garbage collection. 
++ time.AfterFunc(r.globalTimeout, func() { ++ r.mtx.Lock() ++ defer r.mtx.Unlock() ++ delete(r.requestsByPeer[peer], key) ++ }) ++ }) ++ if _, ok := r.requestsByPeer[peer]; !ok { ++ r.requestsByPeer[peer] = requestSet{key: timer} ++ } else { ++ r.requestsByPeer[peer][key] = timer ++ } ++ r.requestsByTx[key] = peer ++ return true ++} ++ ++func (r *requestScheduler) ForTx(key types.TxKey) uint16 { ++ r.mtx.Lock() ++ defer r.mtx.Unlock() ++ ++ return r.requestsByTx[key] ++} ++ ++func (r *requestScheduler) Has(peer uint16, key types.TxKey) bool { ++ r.mtx.Lock() ++ defer r.mtx.Unlock() ++ ++ requestSet, ok := r.requestsByPeer[peer] ++ if !ok { ++ return false ++ } ++ _, ok = requestSet[key] ++ return ok ++} ++ ++func (r *requestScheduler) ClearAllRequestsFrom(peer uint16) requestSet { ++ r.mtx.Lock() ++ defer r.mtx.Unlock() ++ ++ requests, ok := r.requestsByPeer[peer] ++ if !ok { ++ return requestSet{} ++ } ++ for tx, timer := range requests { ++ timer.Stop() ++ delete(r.requestsByTx, tx) ++ } ++ delete(r.requestsByPeer, peer) ++ return requests ++} ++ ++func (r *requestScheduler) MarkReceived(peer uint16, key types.TxKey) bool { ++ r.mtx.Lock() ++ defer r.mtx.Unlock() ++ ++ if _, ok := r.requestsByPeer[peer]; !ok { ++ return false ++ } ++ ++ if timer, ok := r.requestsByPeer[peer][key]; ok { ++ timer.Stop() ++ } else { ++ return false ++ } ++ ++ delete(r.requestsByPeer[peer], key) ++ delete(r.requestsByTx, key) ++ return true ++} ++ ++// Close stops all timers and clears all requests. ++// Add should never be called after `Close`. 
++func (r *requestScheduler) Close() { ++ r.mtx.Lock() ++ defer r.mtx.Unlock() ++ ++ for _, requestSet := range r.requestsByPeer { ++ for _, timer := range requestSet { ++ timer.Stop() ++ } ++ } ++} diff --git a/patches/mempool/cat/requests_test.go.patch b/patches/mempool/cat/requests_test.go.patch new file mode 100644 index 00000000000..50143572f73 --- /dev/null +++ b/patches/mempool/cat/requests_test.go.patch @@ -0,0 +1,142 @@ +diff --git a/mempool/cat/requests_test.go b/mempool/cat/requests_test.go +new file mode 100644 +index 000000000..16065e0a3 +--- /dev/null ++++ b/mempool/cat/requests_test.go +@@ -0,0 +1,136 @@ ++package cat ++ ++import ( ++ "fmt" ++ "sync" ++ "testing" ++ "time" ++ ++ "github.com/fortytw2/leaktest" ++ "github.com/stretchr/testify/require" ++ "github.com/tendermint/tendermint/types" ++) ++ ++func TestRequestSchedulerRerequest(t *testing.T) { ++ var ( ++ requests = newRequestScheduler(10*time.Millisecond, 1*time.Minute) ++ tx = types.Tx("tx") ++ key = tx.Key() ++ peerA uint16 = 1 // should be non-zero ++ peerB uint16 = 2 ++ ) ++ t.Cleanup(requests.Close) ++ ++ // check zero state ++ require.Zero(t, requests.ForTx(key)) ++ require.False(t, requests.Has(peerA, key)) ++ // marking a tx that was never requested should return false ++ require.False(t, requests.MarkReceived(peerA, key)) ++ ++ // create a request ++ closeCh := make(chan struct{}) ++ require.True(t, requests.Add(key, peerA, func(key types.TxKey) { ++ require.Equal(t, key, key) ++ // the first peer times out to respond so we ask the second peer ++ require.True(t, requests.Add(key, peerB, func(key types.TxKey) { ++ t.Fatal("did not expect to timeout") ++ })) ++ close(closeCh) ++ })) ++ ++ // check that the request was added ++ require.Equal(t, peerA, requests.ForTx(key)) ++ require.True(t, requests.Has(peerA, key)) ++ ++ // should not be able to add the same request again ++ require.False(t, requests.Add(key, peerA, nil)) ++ ++ // wait for the scheduler to invoke the timeout ++ 
<-closeCh ++ ++ // check that the request still exists ++ require.True(t, requests.Has(peerA, key)) ++ // check that peerB was requested ++ require.True(t, requests.Has(peerB, key)) ++ ++ // There should still be a request for the Tx ++ require.Equal(t, peerB, requests.ForTx(key)) ++ ++ // record a response from peerB ++ require.True(t, requests.MarkReceived(peerB, key)) ++ ++ // peerA comes in later with a response but it's still ++ // considered a response from an earlier request ++ require.True(t, requests.MarkReceived(peerA, key)) ++} ++ ++func TestRequestSchedulerNonResponsivePeer(t *testing.T) { ++ var ( ++ requests = newRequestScheduler(10*time.Millisecond, time.Millisecond) ++ tx = types.Tx("tx") ++ key = tx.Key() ++ peerA uint16 = 1 // should be non-zero ++ ) ++ ++ require.True(t, requests.Add(key, peerA, nil)) ++ require.Eventually(t, func() bool { ++ return requests.ForTx(key) == 0 ++ }, 100*time.Millisecond, 5*time.Millisecond) ++} ++ ++func TestRequestSchedulerConcurrencyAddsAndReads(t *testing.T) { ++ leaktest.CheckTimeout(t, time.Second)() ++ requests := newRequestScheduler(10*time.Millisecond, time.Millisecond) ++ defer requests.Close() ++ ++ N := 5 ++ keys := make([]types.TxKey, N) ++ for i := 0; i < N; i++ { ++ tx := types.Tx(fmt.Sprintf("tx%d", i)) ++ keys[i] = tx.Key() ++ } ++ ++ addWg := sync.WaitGroup{} ++ receiveWg := sync.WaitGroup{} ++ doneCh := make(chan struct{}) ++ for i := 1; i < N*N; i++ { ++ addWg.Add(1) ++ go func(peer uint16) { ++ defer addWg.Done() ++ requests.Add(keys[int(peer)%N], peer, nil) ++ }(uint16(i)) ++ } ++ for i := 1; i < N*N; i++ { ++ receiveWg.Add(1) ++ go func(peer uint16) { ++ defer receiveWg.Done() ++ markReceived := func() { ++ for _, key := range keys { ++ if requests.Has(peer, key) { ++ requests.MarkReceived(peer, key) ++ } ++ } ++ } ++ for { ++ select { ++ case <-doneCh: ++ // need to ensure this is run ++ // at least once after all adds ++ // are done ++ markReceived() ++ return ++ default: ++ markReceived() ++ 
} ++ } ++ }(uint16(i)) ++ } ++ addWg.Wait() ++ close(doneCh) ++ ++ receiveWg.Wait() ++ ++ for _, key := range keys { ++ require.Zero(t, requests.ForTx(key)) ++ } ++} diff --git a/patches/mempool/cat/store.go.patch b/patches/mempool/cat/store.go.patch new file mode 100644 index 00000000000..fa640f781b7 --- /dev/null +++ b/patches/mempool/cat/store.go.patch @@ -0,0 +1,222 @@ +diff --git a/mempool/cat/store.go b/mempool/cat/store.go +new file mode 100644 +index 000000000..9840d5241 +--- /dev/null ++++ b/mempool/cat/store.go +@@ -0,0 +1,216 @@ ++package cat ++ ++import ( ++ "fmt" ++ "sort" ++ "sync" ++ "time" ++ ++ "github.com/tendermint/tendermint/types" ++) ++ ++// simple, thread-safe in memory store for transactions ++type store struct { ++ mtx sync.RWMutex ++ bytes int64 ++ orderedTxs []*wrappedTx ++ txs map[types.TxKey]*wrappedTx ++ reservedTxs map[types.TxKey]struct{} ++} ++ ++func newStore() *store { ++ return &store{ ++ bytes: 0, ++ orderedTxs: make([]*wrappedTx, 0), ++ txs: make(map[types.TxKey]*wrappedTx), ++ reservedTxs: make(map[types.TxKey]struct{}), ++ } ++} ++ ++func (s *store) set(wtx *wrappedTx) bool { ++ if wtx == nil { ++ return false ++ } ++ s.mtx.Lock() ++ defer s.mtx.Unlock() ++ if _, exists := s.txs[wtx.key]; !exists { ++ s.txs[wtx.key] = wtx ++ s.orderTx(wtx) ++ s.bytes += wtx.size() ++ return true ++ } ++ return false ++} ++ ++func (s *store) get(txKey types.TxKey) *wrappedTx { ++ s.mtx.RLock() ++ defer s.mtx.RUnlock() ++ return s.txs[txKey] ++} ++ ++func (s *store) has(txKey types.TxKey) bool { ++ s.mtx.RLock() ++ defer s.mtx.RUnlock() ++ _, has := s.txs[txKey] ++ return has ++} ++ ++func (s *store) remove(txKey types.TxKey) bool { ++ s.mtx.Lock() ++ defer s.mtx.Unlock() ++ tx, exists := s.txs[txKey] ++ if !exists { ++ return false ++ } ++ s.bytes -= tx.size() ++ if err := s.deleteOrderedTx(tx); err != nil { ++ panic(err) ++ } ++ delete(s.txs, txKey) ++ return true ++} ++ ++// reserve adds an empty placeholder for the specified key to prevent 
++// a transaction with the same key from being added ++func (s *store) reserve(txKey types.TxKey) bool { ++ s.mtx.Lock() ++ defer s.mtx.Unlock() ++ _, isReserved := s.reservedTxs[txKey] ++ if !isReserved { ++ s.reservedTxs[txKey] = struct{}{} ++ return true ++ } ++ return false ++} ++ ++func (s *store) isReserved(txKey types.TxKey) bool { ++ s.mtx.RLock() ++ defer s.mtx.RUnlock() ++ _, isReserved := s.reservedTxs[txKey] ++ return isReserved ++} ++ ++// release is called at the end of the process of adding a transaction. ++// Regardless if it is added or not, the reserveTxs lookup map element is deleted. ++func (s *store) release(txKey types.TxKey) { ++ s.mtx.Lock() ++ defer s.mtx.Unlock() ++ delete(s.reservedTxs, txKey) ++} ++ ++func (s *store) size() int { ++ s.mtx.RLock() ++ defer s.mtx.RUnlock() ++ return len(s.txs) ++} ++ ++func (s *store) totalBytes() int64 { ++ s.mtx.RLock() ++ defer s.mtx.RUnlock() ++ return s.bytes ++} ++ ++func (s *store) getAllKeys() []types.TxKey { ++ s.mtx.RLock() ++ defer s.mtx.RUnlock() ++ keys := make([]types.TxKey, len(s.txs)) ++ idx := 0 ++ for key := range s.txs { ++ keys[idx] = key ++ idx++ ++ } ++ return keys ++} ++ ++func (s *store) getAllTxs() []*wrappedTx { ++ s.mtx.RLock() ++ defer s.mtx.RUnlock() ++ txs := make([]*wrappedTx, len(s.txs)) ++ idx := 0 ++ for _, tx := range s.txs { ++ txs[idx] = tx ++ idx++ ++ } ++ return txs ++} ++ ++func (s *store) getTxsBelowPriority(priority int64) ([]*wrappedTx, int64) { ++ s.mtx.RLock() ++ defer s.mtx.RUnlock() ++ txs := make([]*wrappedTx, 0, len(s.txs)) ++ bytes := int64(0) ++ for i := len(s.orderedTxs) - 1; i >= 0; i-- { ++ tx := s.orderedTxs[i] ++ if tx.priority < priority { ++ txs = append(txs, tx) ++ bytes += tx.size() ++ } else { ++ break ++ } ++ } ++ return txs, bytes ++} ++ ++// purgeExpiredTxs removes all transactions that are older than the given height ++// and time. Returns the purged txs and amount of transactions that were purged. 
++func (s *store) purgeExpiredTxs(expirationHeight int64, expirationAge time.Time) ([]*wrappedTx, int) { ++ s.mtx.Lock() ++ defer s.mtx.Unlock() ++ ++ var purgedTxs []*wrappedTx ++ counter := 0 ++ ++ for key, tx := range s.txs { ++ if tx.height < expirationHeight || tx.timestamp.Before(expirationAge) { ++ s.bytes -= tx.size() ++ delete(s.txs, key) ++ purgedTxs = append(purgedTxs, tx) ++ counter++ ++ } ++ } ++ return purgedTxs, counter ++} ++ ++func (s *store) reset() { ++ s.mtx.Lock() ++ defer s.mtx.Unlock() ++ s.bytes = 0 ++ s.txs = make(map[types.TxKey]*wrappedTx) ++ s.orderedTxs = make([]*wrappedTx, 0) ++} ++ ++func (s *store) orderTx(tx *wrappedTx) { ++ idx := s.getTxOrder(tx) ++ s.orderedTxs = append(s.orderedTxs[:idx], append([]*wrappedTx{tx}, s.orderedTxs[idx:]...)...) ++} ++ ++func (s *store) deleteOrderedTx(tx *wrappedTx) error { ++ if len(s.orderedTxs) == 0 { ++ return fmt.Errorf("ordered transactions list is empty") ++ } ++ idx := s.getTxOrder(tx) - 1 ++ if idx >= len(s.orderedTxs) || s.orderedTxs[idx] != tx { ++ return fmt.Errorf("transaction %X not found in ordered list", tx.key) ++ } ++ s.orderedTxs = append(s.orderedTxs[:idx], s.orderedTxs[idx+1:]...) 
++ return nil ++} ++ ++func (s *store) getTxOrder(tx *wrappedTx) int { ++ return sort.Search(len(s.orderedTxs), func(i int) bool { ++ if s.orderedTxs[i].priority == tx.priority { ++ return tx.timestamp.Before(s.orderedTxs[i].timestamp) ++ } ++ return s.orderedTxs[i].priority < tx.priority ++ }) ++} ++ ++func (s *store) iterateOrderedTxs(fn func(tx *wrappedTx) bool) { ++ s.mtx.RLock() ++ defer s.mtx.RUnlock() ++ for _, tx := range s.orderedTxs { ++ if !fn(tx) { ++ break ++ } ++ } ++} diff --git a/patches/mempool/cat/store_test.go.patch b/patches/mempool/cat/store_test.go.patch new file mode 100644 index 00000000000..f943e3aa590 --- /dev/null +++ b/patches/mempool/cat/store_test.go.patch @@ -0,0 +1,271 @@ +diff --git a/mempool/cat/store_test.go b/mempool/cat/store_test.go +new file mode 100644 +index 000000000..56bae0d75 +--- /dev/null ++++ b/mempool/cat/store_test.go +@@ -0,0 +1,265 @@ ++package cat ++ ++import ( ++ "bytes" ++ "fmt" ++ "sync" ++ "testing" ++ "time" ++ ++ "github.com/stretchr/testify/require" ++ "github.com/tendermint/tendermint/types" ++) ++ ++func TestStoreSimple(t *testing.T) { ++ store := newStore() ++ ++ tx := types.Tx("tx1") ++ key := tx.Key() ++ wtx := newWrappedTx(tx, key, 1, 1, 1, "") ++ ++ // asset zero state ++ require.Nil(t, store.get(key)) ++ require.False(t, store.has(key)) ++ require.False(t, store.remove(key)) ++ require.Zero(t, store.size()) ++ require.Zero(t, store.totalBytes()) ++ require.Empty(t, store.getAllKeys()) ++ require.Empty(t, store.getAllTxs()) ++ ++ // add a tx ++ store.set(wtx) ++ require.True(t, store.has(key)) ++ require.Equal(t, wtx, store.get(key)) ++ require.Equal(t, int(1), store.size()) ++ require.Equal(t, wtx.size(), store.totalBytes()) ++ ++ // remove a tx ++ store.remove(key) ++ require.False(t, store.has(key)) ++ require.Nil(t, store.get(key)) ++ require.Zero(t, store.size()) ++ require.Zero(t, store.totalBytes()) ++ require.Empty(t, store.orderedTxs) ++ require.Empty(t, store.txs) ++} ++ ++func 
TestStoreOrdering(t *testing.T) { ++ store := newStore() ++ ++ tx1 := types.Tx("tx1") ++ tx2 := types.Tx("tx2") ++ tx3 := types.Tx("tx3") ++ ++ // Create wrapped txs with different priorities ++ wtx1 := newWrappedTx(tx1, tx1.Key(), 1, 1, 1, "") ++ wtx2 := newWrappedTx(tx2, tx2.Key(), 2, 2, 2, "") ++ wtx3 := newWrappedTx(tx3, tx3.Key(), 3, 3, 3, "") ++ ++ // Add txs in reverse priority order ++ store.set(wtx1) ++ store.set(wtx2) ++ store.set(wtx3) ++ ++ // Check that iteration returns txs in correct priority order ++ var orderedTxs []*wrappedTx ++ store.iterateOrderedTxs(func(tx *wrappedTx) bool { ++ orderedTxs = append(orderedTxs, tx) ++ return true ++ }) ++ ++ require.Equal(t, 3, len(orderedTxs)) ++ require.Equal(t, wtx3, orderedTxs[0]) ++ require.Equal(t, wtx2, orderedTxs[1]) ++ require.Equal(t, wtx1, orderedTxs[2]) ++} ++ ++func TestStore(t *testing.T) { ++ t.Run("deleteOrderedTx", func(*testing.T) { ++ store := newStore() ++ ++ tx1 := types.Tx("tx1") ++ tx2 := types.Tx("tx2") ++ tx3 := types.Tx("tx3") ++ ++ // Create wrapped txs with different priorities ++ wtx1 := newWrappedTx(tx1, tx1.Key(), 1, 1, 1, "") ++ wtx2 := newWrappedTx(tx2, tx2.Key(), 2, 2, 2, "") ++ wtx3 := newWrappedTx(tx3, tx3.Key(), 3, 3, 3, "") ++ ++ // Add txs in reverse priority order ++ store.set(wtx1) ++ store.set(wtx2) ++ store.set(wtx3) ++ ++ orderedTxs := getOrderedTxs(store) ++ require.Equal(t, []*wrappedTx{wtx3, wtx2, wtx1}, orderedTxs) ++ ++ err := store.deleteOrderedTx(wtx2) ++ require.NoError(t, err) ++ require.Equal(t, []*wrappedTx{wtx3, wtx1}, getOrderedTxs(store)) ++ ++ err = store.deleteOrderedTx(wtx3) ++ require.NoError(t, err) ++ require.Equal(t, []*wrappedTx{wtx1}, getOrderedTxs(store)) ++ ++ err = store.deleteOrderedTx(wtx1) ++ require.NoError(t, err) ++ require.Equal(t, []*wrappedTx{}, getOrderedTxs(store)) ++ ++ err = store.deleteOrderedTx(wtx1) ++ require.ErrorContains(t, err, "ordered transactions list is empty") ++ }) ++} ++ ++func getOrderedTxs(store *store) 
[]*wrappedTx { ++ orderedTxs := []*wrappedTx{} ++ store.iterateOrderedTxs(func(tx *wrappedTx) bool { ++ orderedTxs = append(orderedTxs, tx) ++ return true ++ }) ++ return orderedTxs ++} ++ ++func TestStoreReservingTxs(t *testing.T) { ++ store := newStore() ++ ++ tx := types.Tx("tx1") ++ key := tx.Key() ++ wtx := newWrappedTx(tx, key, 1, 1, 1, "") ++ ++ // asset zero state ++ store.release(key) ++ ++ // reserve a tx ++ store.reserve(key) ++ require.True(t, store.isReserved(key)) ++ // should not update the total bytes ++ require.Zero(t, store.totalBytes()) ++ ++ // should be able to add a tx ++ store.set(wtx) ++ require.Equal(t, tx, store.get(key).tx) ++ require.Equal(t, wtx.size(), store.totalBytes()) ++ ++ // releasing should do nothing on a set tx ++ store.release(key) ++ require.True(t, store.has(key)) ++ require.Equal(t, tx, store.get(key).tx) ++ ++ store.remove(key) ++ require.False(t, store.has(key)) ++ ++ // reserve the tx again ++ store.reserve(key) ++ require.True(t, store.isReserved(key)) ++ ++ // release should remove the tx ++ store.release(key) ++ require.False(t, store.has(key)) ++} ++ ++func TestReadReserved(t *testing.T) { ++ store := newStore() ++ tx := types.Tx("tx1") ++ store.reserve(tx.Key()) ++ ++ require.Nil(t, store.get(tx.Key())) ++ require.False(t, store.has(tx.Key())) ++ require.Len(t, store.getAllKeys(), 0) ++ require.Len(t, store.getAllTxs(), 0) ++} ++ ++func TestStoreConcurrentAccess(t *testing.T) { ++ store := newStore() ++ ++ numTxs := 100 ++ ++ wg := &sync.WaitGroup{} ++ for i := 0; i < numTxs; i++ { ++ wg.Add(1) ++ go func(i int) { ++ defer wg.Done() ++ ticker := time.NewTicker(10 * time.Millisecond) ++ for range ticker.C { ++ tx := types.Tx(fmt.Sprintf("tx%d", i%(numTxs/10))) ++ key := tx.Key() ++ wtx := newWrappedTx(tx, key, 1, 1, 1, "") ++ existingTx := store.get(key) ++ if existingTx != nil && bytes.Equal(existingTx.tx, tx) { ++ // tx has already been added ++ return ++ } ++ if store.reserve(key) { ++ // some fail ++ if i%3 == 0 
{ ++ store.release(key) ++ return ++ } ++ store.set(wtx) ++ // this should be a noop ++ store.release(key) ++ return ++ } ++ // already reserved so we retry in 10 milliseconds ++ } ++ }(i) ++ } ++ wg.Wait() ++ ++ require.Equal(t, numTxs/10, store.size()) ++} ++ ++func TestStoreGetTxs(t *testing.T) { ++ store := newStore() ++ ++ numTxs := 100 ++ for i := 0; i < numTxs; i++ { ++ tx := types.Tx(fmt.Sprintf("tx%d", i)) ++ key := tx.Key() ++ wtx := newWrappedTx(tx, key, 1, 1, int64(i), "") ++ store.set(wtx) ++ } ++ ++ require.Equal(t, numTxs, store.size()) ++ ++ // get all txs ++ txs := store.getAllTxs() ++ require.Equal(t, numTxs, len(txs)) ++ ++ // get txs by keys ++ keys := store.getAllKeys() ++ require.Equal(t, numTxs, len(keys)) ++ ++ // get txs below a certain priority ++ txs, bz := store.getTxsBelowPriority(int64(numTxs / 2)) ++ require.Equal(t, numTxs/2, len(txs)) ++ var actualBz int64 ++ for _, tx := range txs { ++ actualBz += tx.size() ++ } ++ require.Equal(t, actualBz, bz) ++} ++ ++func TestStoreExpiredTxs(t *testing.T) { ++ store := newStore() ++ numTxs := 100 ++ for i := 0; i < numTxs; i++ { ++ tx := types.Tx(fmt.Sprintf("tx%d", i)) ++ key := tx.Key() ++ wtx := newWrappedTx(tx, key, int64(i), 1, 1, "") ++ store.set(wtx) ++ } ++ ++ // half of them should get purged ++ store.purgeExpiredTxs(int64(numTxs/2), time.Time{}) ++ ++ remainingTxs := store.getAllTxs() ++ require.Equal(t, numTxs/2, len(remainingTxs)) ++ for _, tx := range remainingTxs { ++ require.GreaterOrEqual(t, tx.height, int64(numTxs/2)) ++ } ++ ++ store.purgeExpiredTxs(int64(0), time.Now().Add(time.Second)) ++ require.Empty(t, store.getAllTxs()) ++} diff --git a/patches/mempool/cat/tx.go.patch b/patches/mempool/cat/tx.go.patch new file mode 100644 index 00000000000..2e47b40c8a5 --- /dev/null +++ b/patches/mempool/cat/tx.go.patch @@ -0,0 +1,42 @@ +diff --git a/mempool/cat/tx.go b/mempool/cat/tx.go +new file mode 100644 +index 000000000..8de425cee +--- /dev/null ++++ b/mempool/cat/tx.go +@@ -0,0 
+1,36 @@ ++package cat ++ ++import ( ++ "time" ++ ++ "github.com/tendermint/tendermint/types" ++) ++ ++// wrappedTx defines a wrapper around a raw transaction with additional metadata ++// that is used for indexing. With the exception of the map of peers who have ++// seen this transaction, this struct should never be modified ++type wrappedTx struct { ++ // these fields are immutable ++ tx types.Tx // the original transaction data ++ key types.TxKey // the transaction hash ++ height int64 // height when this transaction was initially checked (for expiry) ++ timestamp time.Time // time when transaction was entered (for TTL) ++ gasWanted int64 // app: gas required to execute this transaction ++ priority int64 // app: priority value for this transaction ++ sender string // app: assigned sender label ++} ++ ++func newWrappedTx(tx types.Tx, key types.TxKey, height, gasWanted, priority int64, sender string) *wrappedTx { ++ return &wrappedTx{ ++ tx: tx, ++ key: key, ++ height: height, ++ timestamp: time.Now().UTC(), ++ gasWanted: gasWanted, ++ priority: priority, ++ sender: sender, ++ } ++} ++ ++// Size reports the size of the raw transaction in bytes. 
++func (w *wrappedTx) size() int64 { return int64(len(w.tx)) } diff --git a/patches/mempool/ids.go.patch b/patches/mempool/ids.go.patch new file mode 100644 index 00000000000..e698d366fc2 --- /dev/null +++ b/patches/mempool/ids.go.patch @@ -0,0 +1,9 @@ +diff --git a/mempool/ids.go b/mempool/ids.go +deleted file mode 100644 +index d64a07bda..000000000 +--- a/mempool/ids.go ++++ /dev/null +@@ -1,3 +0,0 @@ +-package mempool +- +-// These functions were moved into v0/reactor.go and v1/reactor.go diff --git a/patches/mempool/ids_test.go.patch b/patches/mempool/ids_test.go.patch new file mode 100644 index 00000000000..e300e7b3358 --- /dev/null +++ b/patches/mempool/ids_test.go.patch @@ -0,0 +1,29 @@ +diff --git a/mempool/ids_test.go b/mempool/ids_test.go +deleted file mode 100644 +index 2d72076e7..000000000 +--- a/mempool/ids_test.go ++++ /dev/null +@@ -1,23 +0,0 @@ +-package mempool +- +-// import ( +-// "testing" +- +-// "github.com/stretchr/testify/require" +-// "github.com/tendermint/tendermint/types" +-// ) +- +-// func TestMempoolIDsBasic(t *testing.T) { +-// ids := NewMempoolIDs() +- +-// peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") +-// require.NoError(t, err) +- +-// ids.ReserveForPeer(peerID) +-// require.EqualValues(t, 1, ids.GetForPeer(peerID)) +-// ids.Reclaim(peerID) +- +-// ids.ReserveForPeer(peerID) +-// require.EqualValues(t, 2, ids.GetForPeer(peerID)) +-// ids.Reclaim(peerID) +-// } diff --git a/patches/mempool/mempool.go.patch b/patches/mempool/mempool.go.patch new file mode 100644 index 00000000000..d87e5b967ee --- /dev/null +++ b/patches/mempool/mempool.go.patch @@ -0,0 +1,20 @@ +diff --git a/mempool/mempool.go b/mempool/mempool.go +index 83613245d..cbbf1e2d4 100644 +--- a/mempool/mempool.go ++++ b/mempool/mempool.go +@@ -91,6 +91,15 @@ type Mempool interface { + // trigger once every height when transactions are available. + EnableTxsAvailable() + ++ // GetTxByKey gets a tx by its key from the mempool. 
Returns the tx and a bool indicating its presence in the tx cache. ++ // Used in the RPC endpoint: TxStatus. ++ GetTxByKey(key types.TxKey) (types.Tx, bool) ++ ++ // WasRecentlyEvicted returns true if the tx was evicted from the mempool and exists in the ++ // evicted cache. ++ // Used in the RPC endpoint: TxStatus. ++ WasRecentlyEvicted(key types.TxKey) bool ++ + // Size returns the number of transactions in the mempool. + Size() int + diff --git a/patches/mempool/metrics.go.patch b/patches/mempool/metrics.go.patch new file mode 100644 index 00000000000..702df9aa345 --- /dev/null +++ b/patches/mempool/metrics.go.patch @@ -0,0 +1,127 @@ +diff --git a/mempool/metrics.go b/mempool/metrics.go +index 84798f853..1d50ae67b 100644 +--- a/mempool/metrics.go ++++ b/mempool/metrics.go +@@ -25,24 +25,39 @@ type Metrics struct { + // Histogram of transaction sizes, in bytes. + TxSizeBytes metrics.Histogram + +- // Number of failed transactions. ++ // FailedTxs defines the number of failed transactions. These were marked ++ // invalid by the application in either CheckTx or RecheckTx. + FailedTxs metrics.Counter + +- // RejectedTxs defines the number of rejected transactions. These are +- // transactions that passed CheckTx but failed to make it into the mempool +- // due to resource limits, e.g. mempool is full and no lower priority +- // transactions exist in the mempool. +- RejectedTxs metrics.Counter +- + // EvictedTxs defines the number of evicted transactions. These are valid + // transactions that passed CheckTx and existed in the mempool but were later +- // evicted to make room for higher priority valid transactions that passed +- // CheckTx. ++ // evicted to make room for higher priority valid transactions + EvictedTxs metrics.Counter + ++ // ExpiredTxs defines transactions that were removed from the mempool due ++ // to a TTL ++ ExpiredTxs metrics.Counter ++ ++ // SuccessfulTxs defines the number of transactions that successfully made ++ // it into a block. 
++ SuccessfulTxs metrics.Counter ++ + // Number of times transactions are rechecked in the mempool. + RecheckTimes metrics.Counter + ++ // AlreadySeenTxs defines the number of transactions that entered the ++ // mempool which were already present in the mempool. This is a good ++ // indicator of the degree of duplication in message gossiping. ++ AlreadySeenTxs metrics.Counter ++ ++ // RequestedTxs defines the number of times that the node requested a ++ // tx to a peer ++ RequestedTxs metrics.Counter ++ ++ // RerequestedTxs defines the number of times that a requested tx ++ // never received a response in time and a new request was made. ++ RerequestedTxs metrics.Counter ++ + // Number of connections being actively used for gossiping transactions + // (experimental feature). + ActiveOutboundConnections metrics.Gauge +@@ -86,18 +101,25 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + Help: "Number of failed transactions.", + }, labels).With(labelsAndValues...), + +- RejectedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ ++ EvictedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, +- Name: "rejected_txs", +- Help: "Number of rejected transactions.", ++ Name: "evicted_txs", ++ Help: "Number of evicted transactions.", + }, labels).With(labelsAndValues...), + +- EvictedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ ++ ExpiredTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, +- Name: "evicted_txs", +- Help: "Number of evicted transactions.", ++ Name: "expired_txs", ++ Help: "Number of expired transactions.", ++ }, labels).With(labelsAndValues...), ++ ++ SuccessfulTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ ++ Namespace: namespace, ++ Subsystem: MetricsSubsystem, ++ Name: "successful_txs", ++ Help: "Number of transactions that successfully made it into a block.", + }, 
labels).With(labelsAndValues...), + + RecheckTimes: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ +@@ -107,6 +129,26 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + Help: "Number of times transactions are rechecked in the mempool.", + }, labels).With(labelsAndValues...), + ++ AlreadySeenTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ ++ Namespace: namespace, ++ Subsystem: MetricsSubsystem, ++ Name: "already_seen_txs", ++ Help: "Number of transactions that entered the mempool but were already present in the mempool.", ++ }, labels).With(labelsAndValues...), ++ ++ RequestedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ ++ Namespace: namespace, ++ Subsystem: MetricsSubsystem, ++ Name: "requested_txs", ++ Help: "Number of initial requests for a transaction", ++ }, labels).With(labelsAndValues...), ++ ++ RerequestedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ ++ Namespace: namespace, ++ Subsystem: MetricsSubsystem, ++ Name: "rerequested_txs", ++ Help: "Number of times a transaction was requested again after a previous request timed out", ++ }, labels).With(labelsAndValues...), + ActiveOutboundConnections: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, +@@ -123,9 +165,13 @@ func NopMetrics() *Metrics { + SizeBytes: discard.NewGauge(), + TxSizeBytes: discard.NewHistogram(), + FailedTxs: discard.NewCounter(), +- RejectedTxs: discard.NewCounter(), + EvictedTxs: discard.NewCounter(), ++ ExpiredTxs: discard.NewCounter(), ++ SuccessfulTxs: discard.NewCounter(), + RecheckTimes: discard.NewCounter(), ++ AlreadySeenTxs: discard.NewCounter(), ++ RequestedTxs: discard.NewCounter(), ++ RerequestedTxs: discard.NewCounter(), + ActiveOutboundConnections: discard.NewGauge(), + } + } diff --git a/patches/mempool/mock/mempool.go.patch b/patches/mempool/mock/mempool.go.patch new file mode 100644 index 00000000000..65fa5c44564 --- /dev/null +++ 
b/patches/mempool/mock/mempool.go.patch @@ -0,0 +1,28 @@ +diff --git a/mempool/mock/mempool.go b/mempool/mock/mempool.go +index 3f293381f..708471291 100644 +--- a/mempool/mock/mempool.go ++++ b/mempool/mock/mempool.go +@@ -30,14 +30,15 @@ func (Mempool) Update( + ) error { + return nil + } +-func (Mempool) Flush() {} +-func (Mempool) FlushAppConn() error { return nil } +-func (Mempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } +-func (Mempool) EnableTxsAvailable() {} +-func (Mempool) SizeBytes() int64 { return 0 } +- +-func (Mempool) TxsFront() *clist.CElement { return nil } +-func (Mempool) TxsWaitChan() <-chan struct{} { return nil } ++func (Mempool) Flush() {} ++func (Mempool) FlushAppConn() error { return nil } ++func (Mempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } ++func (Mempool) EnableTxsAvailable() {} ++func (Mempool) SizeBytes() int64 { return 0 } ++func (m Mempool) GetTxByKey(types.TxKey) (types.Tx, bool) { return nil, false } ++func (m Mempool) WasRecentlyEvicted(types.TxKey) bool { return false } ++func (Mempool) TxsFront() *clist.CElement { return nil } ++func (Mempool) TxsWaitChan() <-chan struct{} { return nil } + + func (Mempool) InitWAL() error { return nil } + func (Mempool) CloseWAL() {} diff --git a/patches/mempool/v0/clist_mempool.go.patch b/patches/mempool/v0/clist_mempool.go.patch new file mode 100644 index 00000000000..e182804f642 --- /dev/null +++ b/patches/mempool/v0/clist_mempool.go.patch @@ -0,0 +1,60 @@ +diff --git a/mempool/v0/clist_mempool.go b/mempool/v0/clist_mempool.go +index df905a5e9..8d8b63730 100644 +--- a/mempool/v0/clist_mempool.go ++++ b/mempool/v0/clist_mempool.go +@@ -183,6 +183,21 @@ func (mem *CListMempool) TxsFront() *clist.CElement { + return mem.txs.Front() + } + ++// GetTxByKey retrieves a transaction from the mempool using its key. 
++func (mem *CListMempool) GetTxByKey(key types.TxKey) (types.Tx, bool) { ++ e, ok := mem.txsMap.Load(key) ++ if !ok { ++ return nil, false ++ } ++ memTx, ok := e.(*clist.CElement).Value.(*mempoolTx) ++ return memTx.tx, ok ++} ++ ++// WasRecentlyEvicted returns false consistently as this implementation does not support transaction eviction. ++func (mem *CListMempool) WasRecentlyEvicted(key types.TxKey) bool { ++ return false ++} ++ + // TxsWaitChan returns a channel to wait on transactions. It will be closed + // once the mempool is not empty (ie. the internal `mem.txs` has at least one + // element) +@@ -242,6 +257,7 @@ func (mem *CListMempool) CheckTx( + // (eg. after committing a block, txs are removed from mempool but not cache), + // so we only record the sender for txs still in the mempool. + if e, ok := mem.txsMap.Load(tx.Key()); ok { ++ mem.metrics.AlreadySeenTxs.Add(1) + memTx := e.(*clist.CElement).Value.(*mempoolTx) + memTx.senders.LoadOrStore(txInfo.SenderID, true) + // TODO: consider punishing peer for dups, +@@ -276,6 +292,7 @@ func (mem *CListMempool) globalCb(req *abci.Request, res *abci.Response) { + + // update metrics + mem.metrics.Size.Set(float64(mem.Size())) ++ mem.metrics.SizeBytes.Set(float64(mem.SizeBytes())) + } + + // Request specific callback that should be set on individual reqRes objects +@@ -328,6 +345,9 @@ func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromC + mem.txs.Remove(elem) + elem.DetachPrev() + mem.txsMap.Delete(tx.Key()) ++ if memtx, ok := elem.Value.(*mempoolTx); ok { ++ tx = memtx.tx ++ } + atomic.AddInt64(&mem.txsBytes, int64(-len(tx))) + + if removeFromCache { +@@ -609,6 +629,7 @@ func (mem *CListMempool) Update( + mem.postCheck = postCheck + } + ++ mem.metrics.SuccessfulTxs.Add(float64(len(txs))) + for i, tx := range txs { + if deliverTxResponses[i].Code == abci.CodeTypeOK { + // Add valid committed tx to the cache (if missing). 
diff --git a/patches/mempool/v0/clist_mempool_test.go.patch b/patches/mempool/v0/clist_mempool_test.go.patch new file mode 100644 index 00000000000..c588fdbe334 --- /dev/null +++ b/patches/mempool/v0/clist_mempool_test.go.patch @@ -0,0 +1,112 @@ +diff --git a/mempool/v0/clist_mempool_test.go b/mempool/v0/clist_mempool_test.go +index 8d9678238..ca2cdcc45 100644 +--- a/mempool/v0/clist_mempool_test.go ++++ b/mempool/v0/clist_mempool_test.go +@@ -1,6 +1,7 @@ + package v0 + + import ( ++ "bytes" + "encoding/binary" + "fmt" + mrand "math/rand" +@@ -25,6 +26,8 @@ import ( + cmtrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/mempool" ++ "github.com/tendermint/tendermint/pkg/consts" ++ tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/proxy" + "github.com/tendermint/tendermint/types" + ) +@@ -198,14 +201,14 @@ func TestMempoolFilters(t *testing.T) { + }{ + {10, nopPreFilter, nopPostFilter, 10}, + {10, mempool.PreCheckMaxBytes(10), nopPostFilter, 0}, +- {10, mempool.PreCheckMaxBytes(22), nopPostFilter, 10}, ++ {10, mempool.PreCheckMaxBytes(26), nopPostFilter, 10}, + {10, nopPreFilter, mempool.PostCheckMaxGas(-1), 10}, + {10, nopPreFilter, mempool.PostCheckMaxGas(0), 0}, + {10, nopPreFilter, mempool.PostCheckMaxGas(1), 10}, + {10, nopPreFilter, mempool.PostCheckMaxGas(3000), 10}, + {10, mempool.PreCheckMaxBytes(10), mempool.PostCheckMaxGas(20), 0}, + {10, mempool.PreCheckMaxBytes(30), mempool.PostCheckMaxGas(20), 10}, +- {10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(1), 10}, ++ {10, mempool.PreCheckMaxBytes(28), mempool.PostCheckMaxGas(1), 10}, + {10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(0), 0}, + } + for tcIndex, tt := range tests { +@@ -557,6 +560,32 @@ func TestMempool_CheckTxChecksTxSize(t *testing.T) { + } + } + ++func TestGetTxByKey(t *testing.T) { ++ app := kvstore.NewApplication() ++ cc := 
proxy.NewLocalClientCreator(app) ++ ++ mp, cleanup := newMempoolWithApp(cc) ++ defer cleanup() ++ ++ // Create a tx ++ tx := types.Tx([]byte{0x01}) ++ // Add it to the mempool ++ err := mp.CheckTx(tx, nil, mempool.TxInfo{}) ++ require.NoError(t, err) ++ ++ // Query the tx from the mempool ++ got, ok := mp.GetTxByKey(tx.Key()) ++ require.True(t, ok) ++ // Ensure the returned tx is the same as the one we added ++ require.Equal(t, tx, got) ++ ++ // Query a random tx from the mempool ++ randomTx, ok := mp.GetTxByKey(types.Tx([]byte{0x02}).Key()) ++ // Ensure the returned tx is nil ++ require.False(t, ok) ++ require.Nil(t, randomTx) ++} ++ + func TestMempoolTxsBytes(t *testing.T) { + app := kvstore.NewApplication() + cc := proxy.NewLocalClientCreator(app) +@@ -736,6 +765,40 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) { + require.NoError(t, mp.FlushAppConn()) + } + ++func TestRemoveBlobTx(t *testing.T) { ++ app := kvstore.NewApplication() ++ cc := proxy.NewLocalClientCreator(app) ++ namespaceOne := bytes.Repeat([]byte{1}, consts.NamespaceIDSize) ++ ++ cfg := config.ResetTestRoot("mempool_test") ++ ++ cfg.Mempool.MaxTxsBytes = 1000 ++ mp, cleanup := newMempoolWithAppAndConfig(cc, cfg) ++ defer cleanup() ++ ++ originalTx := []byte{1, 2, 3, 4} ++ indexWrapper, err := types.MarshalIndexWrapper(originalTx, 100) ++ require.NoError(t, err) ++ ++ // create the blobTx ++ b := tmproto.Blob{ ++ NamespaceId: namespaceOne, ++ Data: []byte{1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 9}, ++ ShareVersion: 0, ++ NamespaceVersion: 0, ++ } ++ bTx, err := types.MarshalBlobTx(originalTx, &b) ++ require.NoError(t, err) ++ ++ err = mp.CheckTx(bTx, nil, mempool.TxInfo{}) ++ require.NoError(t, err) ++ ++ err = mp.Update(1, []types.Tx{indexWrapper}, abciResponses(1, abci.CodeTypeOK), nil, nil) ++ require.NoError(t, err) ++ assert.EqualValues(t, 0, mp.Size()) ++ assert.EqualValues(t, 0, mp.SizeBytes()) ++} ++ + // caller must close server + func newRemoteApp(t *testing.T, addr 
string, app abci.Application) (abciclient.Client, service.Service) { + clientCreator, err := abciclient.NewClient(addr, "socket", true) diff --git a/patches/mempool/v0/doc.go.patch b/patches/mempool/v0/doc.go.patch new file mode 100644 index 00000000000..c481ee2ae5a --- /dev/null +++ b/patches/mempool/v0/doc.go.patch @@ -0,0 +1,13 @@ +diff --git a/mempool/v0/doc.go b/mempool/v0/doc.go +index 3b5d0d20d..537bee9aa 100644 +--- a/mempool/v0/doc.go ++++ b/mempool/v0/doc.go +@@ -15,7 +15,7 @@ + // 2. Mutations to the linked-list elements are atomic + // 3. CheckTx() and/or ReapMaxBytesMaxGas() calls can be paused upon Update(), protected by .updateMtx + +-// Garbage collection of old elements from mempool.txs is handlde via the ++// Garbage collection of old elements from mempool.txs is handled via the + // DetachPrev() call, which makes old elements not reachable by peer + // broadcastTxRoutine(). + diff --git a/patches/mempool/v1/mempool.go.patch b/patches/mempool/v1/mempool.go.patch new file mode 100644 index 00000000000..6cecf0e12e9 --- /dev/null +++ b/patches/mempool/v1/mempool.go.patch @@ -0,0 +1,432 @@ +diff --git a/mempool/v1/mempool.go b/mempool/v1/mempool.go +index 5fea34964..cc4136e3c 100644 +--- a/mempool/v1/mempool.go ++++ b/mempool/v1/mempool.go +@@ -2,19 +2,17 @@ package v1 + + import ( + "fmt" +- "runtime" + "sort" + "sync" + "sync/atomic" + "time" + +- "github.com/creachadair/taskgroup" +- + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/clist" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/mempool" ++ "github.com/tendermint/tendermint/pkg/trace" + "github.com/tendermint/tendermint/proxy" + "github.com/tendermint/tendermint/types" + ) +@@ -48,13 +46,17 @@ type TxMempool struct { + mtx *sync.RWMutex + notifiedTxsAvailable bool + txsAvailable chan struct{} // one value sent per height when mempool is not empty +- preCheck 
mempool.PreCheckFunc +- postCheck mempool.PostCheckFunc +- height int64 // the latest height passed to Update ++ preCheckFn mempool.PreCheckFunc ++ postCheckFn mempool.PostCheckFunc ++ height int64 // the latest height passed to Update ++ lastPurgeTime time.Time // the last time we attempted to purge transactions via the TTL + + txs *clist.CList // valid transactions (passed CheckTx) + txByKey map[types.TxKey]*clist.CElement + txBySender map[string]*clist.CElement // for sender != "" ++ evictedTxs mempool.TxCache // for tracking evicted transactions ++ ++ traceClient trace.Tracer + } + + // NewTxMempool constructs a new, empty priority mempool at the specified +@@ -78,9 +80,11 @@ func NewTxMempool( + height: height, + txByKey: make(map[types.TxKey]*clist.CElement), + txBySender: make(map[string]*clist.CElement), ++ traceClient: trace.NoOpTracer(), + } + if cfg.CacheSize > 0 { + txmp.cache = mempool.NewLRUTxCache(cfg.CacheSize) ++ txmp.evictedTxs = mempool.NewLRUTxCache(cfg.CacheSize / 5) + } + + for _, opt := range options { +@@ -94,14 +98,14 @@ func NewTxMempool( + // returns an error. This is executed before CheckTx. It only applies to the + // first created block. After that, Update() overwrites the existing value. + func WithPreCheck(f mempool.PreCheckFunc) TxMempoolOption { +- return func(txmp *TxMempool) { txmp.preCheck = f } ++ return func(txmp *TxMempool) { txmp.preCheckFn = f } + } + + // WithPostCheck sets a filter for the mempool to reject a transaction if + // f(tx, resp) returns an error. This is executed after CheckTx. It only applies + // to the first created block. After that, Update overwrites the existing value. + func WithPostCheck(f mempool.PostCheckFunc) TxMempoolOption { +- return func(txmp *TxMempool) { txmp.postCheck = f } ++ return func(txmp *TxMempool) { txmp.postCheckFn = f } + } + + // WithMetrics sets the mempool's metrics collector. 
+@@ -109,6 +113,12 @@ func WithMetrics(metrics *mempool.Metrics) TxMempoolOption { + return func(txmp *TxMempool) { txmp.metrics = metrics } + } + ++func WithTraceClient(tc trace.Tracer) TxMempoolOption { ++ return func(txmp *TxMempool) { ++ txmp.traceClient = tc ++ } ++} ++ + // Lock obtains a write-lock on the mempool. A caller must be sure to explicitly + // release the lock when finished. + func (txmp *TxMempool) Lock() { txmp.mtx.Lock() } +@@ -175,48 +185,42 @@ func (txmp *TxMempool) TxsAvailable() <-chan struct{} { return txmp.txsAvailable + // the size of tx, and adds tx instead. If no such transactions exist, tx is + // discarded. + func (txmp *TxMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo mempool.TxInfo) error { +- + // During the initial phase of CheckTx, we do not need to modify any state. +- // A transaction will not actually be added to the mempool until it survives +- // a call to the ABCI CheckTx method and size constraint checks. +- height, err := func() (int64, error) { +- txmp.mtx.RLock() +- defer txmp.mtx.RUnlock() +- +- // Reject transactions in excess of the configured maximum transaction size. +- if len(tx) > txmp.config.MaxTxBytes { +- return 0, mempool.ErrTxTooLarge{Max: txmp.config.MaxTxBytes, Actual: len(tx)} +- } + +- // If a precheck hook is defined, call it before invoking the application. +- if txmp.preCheck != nil { +- if err := txmp.preCheck(tx); err != nil { +- return 0, mempool.ErrPreCheck{Reason: err} +- } +- } ++ // Reject transactions in excess of the configured maximum transaction size. ++ if len(tx) > txmp.config.MaxTxBytes { ++ return mempool.ErrTxTooLarge{Max: txmp.config.MaxTxBytes, Actual: len(tx)} ++ } + +- // Early exit if the proxy connection has an error. +- if err := txmp.proxyAppConn.Error(); err != nil { +- return 0, err +- } ++ // If a precheck hook is defined, call it before invoking the application. 
++ if err := txmp.preCheck(tx); err != nil { ++ txmp.metrics.FailedTxs.Add(1) ++ return mempool.ErrPreCheck{Reason: err} ++ } + +- txKey := tx.Key() ++ // Early exit if the proxy connection has an error. ++ if err := txmp.proxyAppConn.Error(); err != nil { ++ return err ++ } + +- // Check for the transaction in the cache. +- if !txmp.cache.Push(tx) { +- // If the cached transaction is also in the pool, record its sender. +- if elt, ok := txmp.txByKey[txKey]; ok { +- w := elt.Value.(*WrappedTx) +- w.SetPeer(txInfo.SenderID) +- } +- return 0, mempool.ErrTxInCache ++ txKey := tx.Key() ++ ++ // Check for the transaction in the cache. ++ if !txmp.cache.Push(tx) { ++ // If the cached transaction is also in the pool, record its sender. ++ if elt, ok := txmp.txByKey[txKey]; ok { ++ txmp.metrics.AlreadySeenTxs.Add(1) ++ w := elt.Value.(*WrappedTx) ++ w.SetPeer(txInfo.SenderID) + } +- return txmp.height, nil +- }() +- if err != nil { +- return err ++ return mempool.ErrTxInCache + } + ++ // At this point, we need to ensure that passing CheckTx and adding to ++ // the mempool is atomic. ++ txmp.Lock() ++ defer txmp.Unlock() ++ + // Invoke an ABCI CheckTx for this transaction. + rsp, err := txmp.proxyAppConn.CheckTxSync(abci.RequestCheckTx{Tx: tx}) + if err != nil { +@@ -227,9 +231,10 @@ func (txmp *TxMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo memp + tx: tx, + hash: tx.Key(), + timestamp: time.Now().UTC(), +- height: height, ++ height: txmp.height, + } + wtx.SetPeer(txInfo.SenderID) ++ // This won't add the transaction if the response code is non zero (i.e. there was an error) + txmp.addNewTransaction(wtx, rsp) + if cb != nil { + cb(&abci.Response{Value: &abci.Response_CheckTx{CheckTx: rsp}}) +@@ -246,6 +251,24 @@ func (txmp *TxMempool) RemoveTxByKey(txKey types.TxKey) error { + return txmp.removeTxByKey(txKey) + } + ++// GetTxByKey retrieves a transaction based on the key. It returns a bool ++// indicating whether transaction was found in the cache. 
++func (txmp *TxMempool) GetTxByKey(txKey types.TxKey) (types.Tx, bool) { ++ txmp.mtx.RLock() ++ defer txmp.mtx.RUnlock() ++ ++ if elt, ok := txmp.txByKey[txKey]; ok { ++ return elt.Value.(*WrappedTx).tx, true ++ } ++ return nil, false ++} ++ ++// WasRecentlyEvicted returns a bool indicating whether the transaction with ++// the specified key was recently evicted and is currently within the evicted cache. ++func (txmp *TxMempool) WasRecentlyEvicted(txKey types.TxKey) bool { ++ return txmp.evictedTxs.HasKey(txKey) ++} ++ + // removeTxByKey removes the specified transaction key from the mempool. + // The caller must hold txmp.mtx excluxively. + func (txmp *TxMempool) removeTxByKey(key types.TxKey) error { +@@ -327,12 +350,14 @@ func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { + var keep []types.Tx //nolint:prealloc + for _, w := range txmp.allEntriesSorted() { + // N.B. When computing byte size, we need to include the overhead for +- // encoding as protobuf to send to the application. +- totalGas += w.gasWanted +- totalBytes += types.ComputeProtoSizeForTxs([]types.Tx{w.tx}) +- if (maxGas >= 0 && totalGas > maxGas) || (maxBytes >= 0 && totalBytes > maxBytes) { +- break ++ // encoding as protobuf to send to the application. 
This actually overestimates it ++ // as we add the proto overhead to each transaction ++ txBytes := types.ComputeProtoSizeForTxs([]types.Tx{w.tx}) ++ if (maxGas >= 0 && totalGas+w.gasWanted > maxGas) || (maxBytes >= 0 && totalBytes+txBytes > maxBytes) { ++ continue + } ++ totalBytes += txBytes ++ totalGas += w.gasWanted + keep = append(keep, w.tx) + } + return keep +@@ -394,12 +419,13 @@ func (txmp *TxMempool) Update( + txmp.notifiedTxsAvailable = false + + if newPreFn != nil { +- txmp.preCheck = newPreFn ++ txmp.preCheckFn = newPreFn + } + if newPostFn != nil { +- txmp.postCheck = newPostFn ++ txmp.postCheckFn = newPostFn + } + ++ txmp.metrics.SuccessfulTxs.Add(float64(len(blockTxs))) + for i, tx := range blockTxs { + // Add successful committed transactions to the cache (if they are not + // already present). Transactions that failed to commit are removed from +@@ -421,6 +447,7 @@ func (txmp *TxMempool) Update( + // transactions are left. + size := txmp.Size() + txmp.metrics.Size.Set(float64(size)) ++ txmp.metrics.SizeBytes.Set(float64(txmp.SizeBytes())) + if size > 0 { + if txmp.config.Recheck { + txmp.recheckTransactions() +@@ -445,12 +472,9 @@ func (txmp *TxMempool) Update( + // + // Finally, the new transaction is added and size stats updated. 
+ func (txmp *TxMempool) addNewTransaction(wtx *WrappedTx, checkTxRes *abci.ResponseCheckTx) { +- txmp.mtx.Lock() +- defer txmp.mtx.Unlock() +- + var err error +- if txmp.postCheck != nil { +- err = txmp.postCheck(wtx.tx, checkTxRes) ++ if txmp.postCheckFn != nil { ++ err = txmp.postCheckFn(wtx.tx, checkTxRes) + } + + if err != nil || checkTxRes.Code != abci.CodeTypeOK { +@@ -497,7 +521,6 @@ func (txmp *TxMempool) addNewTransaction(wtx *WrappedTx, checkTxRes *abci.Respon + checkTxRes.MempoolError = + fmt.Sprintf("rejected valid incoming transaction; tx already exists for sender %q (%X)", + sender, w.tx.Hash()) +- txmp.metrics.RejectedTxs.Add(1) + return + } + } +@@ -532,7 +555,9 @@ func (txmp *TxMempool) addNewTransaction(wtx *WrappedTx, checkTxRes *abci.Respon + checkTxRes.MempoolError = + fmt.Sprintf("rejected valid incoming transaction; mempool is full (%X)", + wtx.tx.Hash()) +- txmp.metrics.RejectedTxs.Add(1) ++ txmp.metrics.EvictedTxs.Add(1) ++ // Add it to evicted transactions cache ++ txmp.evictedTxs.Push(wtx.tx) + return + } + +@@ -565,7 +590,8 @@ func (txmp *TxMempool) addNewTransaction(wtx *WrappedTx, checkTxRes *abci.Respon + txmp.removeTxByElement(vic) + txmp.cache.Remove(w.tx) + txmp.metrics.EvictedTxs.Add(1) +- ++ // Add it to evicted transactions cache ++ txmp.evictedTxs.Push(w.tx) + // We may not need to evict all the eligible transactions. Bail out + // early if we have made enough room. + evictedBytes += w.Size() +@@ -582,6 +608,7 @@ func (txmp *TxMempool) addNewTransaction(wtx *WrappedTx, checkTxRes *abci.Respon + + txmp.metrics.TxSizeBytes.Observe(float64(wtx.Size())) + txmp.metrics.Size.Set(float64(txmp.Size())) ++ txmp.metrics.SizeBytes.Set(float64(txmp.SizeBytes())) + txmp.logger.Debug( + "inserted new valid transaction", + "priority", wtx.Priority(), +@@ -610,8 +637,6 @@ func (txmp *TxMempool) insertTx(wtx *WrappedTx) { + // that case is handled by addNewTransaction instead. 
+ func (txmp *TxMempool) handleRecheckResult(tx types.Tx, checkTxRes *abci.ResponseCheckTx) { + txmp.metrics.RecheckTimes.Add(1) +- txmp.mtx.Lock() +- defer txmp.mtx.Unlock() + + // Find the transaction reported by the ABCI callback. It is possible the + // transaction was evicted during the recheck, in which case the transaction +@@ -624,8 +649,8 @@ func (txmp *TxMempool) handleRecheckResult(tx types.Tx, checkTxRes *abci.Respons + + // If a postcheck hook is defined, call it before checking the result. + var err error +- if txmp.postCheck != nil { +- err = txmp.postCheck(tx, checkTxRes) ++ if txmp.postCheckFn != nil { ++ err = txmp.postCheckFn(tx, checkTxRes) + } + + if checkTxRes.Code == abci.CodeTypeOK && err == nil { +@@ -646,6 +671,7 @@ func (txmp *TxMempool) handleRecheckResult(tx types.Tx, checkTxRes *abci.Respons + txmp.cache.Remove(wtx.tx) + } + txmp.metrics.Size.Set(float64(txmp.Size())) ++ txmp.metrics.SizeBytes.Set(float64(txmp.SizeBytes())) + } + + // recheckTransactions initiates re-CheckTx ABCI calls for all the transactions +@@ -672,34 +698,23 @@ func (txmp *TxMempool) recheckTransactions() { + + // Issue CheckTx calls for each remaining transaction, and when all the + // rechecks are complete signal watchers that transactions may be available. +- go func() { +- g, start := taskgroup.New(nil).Limit(2 * runtime.NumCPU()) +- +- for _, wtx := range wtxs { +- wtx := wtx +- start(func() error { +- // The response for this CheckTx is handled by the default recheckTxCallback. +- rsp, err := txmp.proxyAppConn.CheckTxSync(abci.RequestCheckTx{ +- Tx: wtx.tx, +- Type: abci.CheckTxType_Recheck, +- }) +- if err != nil { +- txmp.logger.Error("failed to execute CheckTx during recheck", +- "err", err, "hash", fmt.Sprintf("%x", wtx.tx.Hash())) +- } else { +- txmp.handleRecheckResult(wtx.tx, rsp) +- } +- return nil +- }) ++ for _, wtx := range wtxs { ++ wtx := wtx ++ // The response for this CheckTx is handled by the default recheckTxCallback. 
++ rsp, err := txmp.proxyAppConn.CheckTxSync(abci.RequestCheckTx{ ++ Tx: wtx.tx, ++ Type: abci.CheckTxType_Recheck, ++ }) ++ if err != nil { ++ txmp.logger.Error("failed to execute CheckTx during recheck", ++ "err", err, "hash", fmt.Sprintf("%x", wtx.tx.Hash())) ++ } else { ++ txmp.handleRecheckResult(wtx.tx, rsp) + } +- _ = txmp.proxyAppConn.FlushAsync() +- +- // When recheck is complete, trigger a notification for more transactions. +- _ = g.Wait() +- txmp.mtx.Lock() +- defer txmp.mtx.Unlock() +- txmp.notifyTxsAvailable() +- }() ++ } ++ _ = txmp.proxyAppConn.FlushAsync() ++ ++ txmp.notifyTxsAvailable() + } + + // canAddTx returns an error if we cannot insert the provided *WrappedTx into +@@ -721,6 +736,17 @@ func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error { + return nil + } + ++// CheckToPurgeExpiredTxs checks if there has been adequate time since the last time ++// the txpool looped through all transactions and if so, performs a purge of any transaction ++// that has expired according to the TTLDuration. This is thread safe. ++func (txmp *TxMempool) CheckToPurgeExpiredTxs() { ++ txmp.mtx.Lock() ++ defer txmp.mtx.Unlock() ++ if txmp.config.TTLDuration > 0 && time.Since(txmp.lastPurgeTime) > txmp.config.TTLDuration { ++ txmp.purgeExpiredTxs(txmp.height) ++ } ++} ++ + // purgeExpiredTxs removes all transactions from the mempool that have exceeded + // their respective height or time-based limits as of the given blockHeight. + // Transactions removed by this operation are not removed from the cache. 
+@@ -739,17 +765,17 @@ func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) { + next := cur.Next() + + w := cur.Value.(*WrappedTx) +- if txmp.config.TTLNumBlocks > 0 && (blockHeight-w.height) > txmp.config.TTLNumBlocks { ++ if txmp.config.TTLNumBlocks > 0 && (blockHeight-w.height) > txmp.config.TTLNumBlocks || ++ txmp.config.TTLDuration > 0 && now.Sub(w.timestamp) > txmp.config.TTLDuration { + txmp.removeTxByElement(cur) + txmp.cache.Remove(w.tx) +- txmp.metrics.EvictedTxs.Add(1) +- } else if txmp.config.TTLDuration > 0 && now.Sub(w.timestamp) > txmp.config.TTLDuration { +- txmp.removeTxByElement(cur) +- txmp.cache.Remove(w.tx) +- txmp.metrics.EvictedTxs.Add(1) ++ txmp.evictedTxs.Push(w.tx) ++ txmp.metrics.ExpiredTxs.Add(1) + } + cur = next + } ++ ++ txmp.lastPurgeTime = now + } + + func (txmp *TxMempool) notifyTxsAvailable() { +@@ -767,3 +793,12 @@ func (txmp *TxMempool) notifyTxsAvailable() { + } + } + } ++ ++func (txmp *TxMempool) preCheck(tx types.Tx) error { ++ txmp.mtx.Lock() ++ defer txmp.mtx.Unlock() ++ if txmp.preCheckFn != nil { ++ return txmp.preCheckFn(tx) ++ } ++ return nil ++} diff --git a/patches/mempool/v1/mempool_test.go.patch b/patches/mempool/v1/mempool_test.go.patch new file mode 100644 index 00000000000..45248b3aff1 --- /dev/null +++ b/patches/mempool/v1/mempool_test.go.patch @@ -0,0 +1,314 @@ +diff --git a/mempool/v1/mempool_test.go b/mempool/v1/mempool_test.go +index 7e62b9100..eb0f8171f 100644 +--- a/mempool/v1/mempool_test.go ++++ b/mempool/v1/mempool_test.go +@@ -13,6 +13,7 @@ import ( + "testing" + "time" + ++ "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/abci/example/code" +@@ -21,6 +22,8 @@ import ( + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/mempool" ++ "github.com/tendermint/tendermint/pkg/consts" ++ tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + 
"github.com/tendermint/tendermint/proxy" + "github.com/tendermint/tendermint/types" + ) +@@ -105,6 +108,8 @@ func mustCheckTx(t *testing.T, txmp *TxMempool, spec string) { + <-done + } + ++// checkTxs generates a specified number of txs, checks them into the mempool, ++// and returns them. + func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx { + txs := make([]testTx, numTxs) + txInfo := mempool.TxInfo{SenderID: peerID} +@@ -119,7 +124,7 @@ func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx + priority := int64(rng.Intn(9999-1000) + 1000) + + txs[i] = testTx{ +- tx: []byte(fmt.Sprintf("sender-%d-%d=%X=%d", i, peerID, prefix, priority)), ++ tx: []byte(fmt.Sprintf("sender-%03d-%d=%X=%d", i, peerID, prefix, priority)), + priority: priority, + } + require.NoError(t, txmp.CheckTx(txs[i].tx, nil, txInfo)) +@@ -186,7 +191,7 @@ func TestTxMempool_Size(t *testing.T) { + txmp := setup(t, 0) + txs := checkTxs(t, txmp, 100, 0) + require.Equal(t, len(txs), txmp.Size()) +- require.Equal(t, int64(5690), txmp.SizeBytes()) ++ require.Equal(t, int64(5800), txmp.SizeBytes()) + + rawTxs := make([]types.Tx, len(txs)) + for i, tx := range txs { +@@ -203,7 +208,7 @@ func TestTxMempool_Size(t *testing.T) { + txmp.Unlock() + + require.Equal(t, len(rawTxs)/2, txmp.Size()) +- require.Equal(t, int64(2850), txmp.SizeBytes()) ++ require.Equal(t, int64(2900), txmp.SizeBytes()) + } + + func TestTxMempool_Eviction(t *testing.T) { +@@ -236,7 +241,9 @@ func TestTxMempool_Eviction(t *testing.T) { + mustCheckTx(t, txmp, "key1=0000=25") + require.True(t, txExists("key1=0000=25")) + require.False(t, txExists(bigTx)) +- require.False(t, txmp.cache.Has([]byte(bigTx))) ++ bigTxKey := types.Tx((bigTx)).Key() ++ require.False(t, txmp.cache.HasKey(bigTxKey)) ++ require.True(t, txmp.WasRecentlyEvicted(bigTxKey)) // bigTx evicted + require.Equal(t, int64(len("key1=0000=25")), txmp.SizeBytes()) + + // Now fill up the rest of the slots with other 
transactions. +@@ -248,13 +255,15 @@ func TestTxMempool_Eviction(t *testing.T) { + // A new transaction with low priority should be discarded. + mustCheckTx(t, txmp, "key6=0005=1") + require.False(t, txExists("key6=0005=1")) ++ require.True(t, txmp.WasRecentlyEvicted(types.Tx(("key6=0005=1")).Key())) // key6 evicted + + // A new transaction with higher priority should evict key5, which is the + // newest of the two transactions with lowest priority. + mustCheckTx(t, txmp, "key7=0006=7") +- require.True(t, txExists("key7=0006=7")) // new transaction added +- require.False(t, txExists("key5=0004=3")) // newest low-priority tx evicted +- require.True(t, txExists("key4=0003=3")) // older low-priority tx retained ++ require.True(t, txExists("key7=0006=7")) // new transaction added ++ require.False(t, txExists("key5=0004=3")) // newest low-priority tx evicted ++ require.True(t, txmp.WasRecentlyEvicted(types.Tx(("key5=0004=3")).Key())) // key5 evicted ++ require.True(t, txExists("key4=0003=3")) // older low-priority tx retained + + // Another new transaction evicts the other low-priority element. + mustCheckTx(t, txmp, "key8=0007=20") +@@ -264,7 +273,8 @@ func TestTxMempool_Eviction(t *testing.T) { + // Now the lowest-priority tx is 5, so that should be the next to go. + mustCheckTx(t, txmp, "key9=0008=9") + require.True(t, txExists("key9=0008=9")) +- require.False(t, txExists("k3y2=0001=5")) ++ require.False(t, txExists("key2=0001=5")) ++ require.True(t, txmp.WasRecentlyEvicted(types.Tx(("key2=0001=5")).Key())) // key2 evicted + + // Add a transaction that requires eviction of multiple lower-priority + // entries, in order to fit the size of the element. 
+@@ -273,15 +283,18 @@ func TestTxMempool_Eviction(t *testing.T) { + require.True(t, txExists("key8=0007=20")) + require.True(t, txExists("key10=0123456789abcdef=11")) + require.False(t, txExists("key3=0002=10")) ++ require.True(t, txmp.WasRecentlyEvicted(types.Tx(("key3=0002=10")).Key())) // key3 evicted + require.False(t, txExists("key9=0008=9")) ++ require.True(t, txmp.WasRecentlyEvicted(types.Tx(("key9=0008=9")).Key())) // key9 evicted + require.False(t, txExists("key7=0006=7")) ++ require.True(t, txmp.WasRecentlyEvicted(types.Tx(("key7=0006=7")).Key())) // key7 evicted + } + + func TestTxMempool_Flush(t *testing.T) { + txmp := setup(t, 0) + txs := checkTxs(t, txmp, 100, 0) + require.Equal(t, len(txs), txmp.Size()) +- require.Equal(t, int64(5690), txmp.SizeBytes()) ++ require.Equal(t, int64(5800), txmp.SizeBytes()) + + rawTxs := make([]types.Tx, len(txs)) + for i, tx := range txs { +@@ -303,11 +316,15 @@ func TestTxMempool_Flush(t *testing.T) { + } + + func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) { ++ // totalSizeBytes is the expected size of the mempool after adding 100 txs ++ // this value is highly dependant upon the size of the txs and the overhead ++ // introduced in the mempool. This number will need to be adjusted if ++ // changes are made to any of those things. 
++ totalSizeBytes := int64(5800) + txmp := setup(t, 0) + tTxs := checkTxs(t, txmp, 100, 0) // all txs request 1 gas unit + require.Equal(t, len(tTxs), txmp.Size()) +- require.Equal(t, int64(5690), txmp.SizeBytes()) +- ++ require.Equal(t, totalSizeBytes, txmp.SizeBytes()) + txMap := make(map[types.TxKey]testTx) + priorities := make([]int64, len(tTxs)) + for i, tTx := range tTxs { +@@ -333,14 +350,14 @@ func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) { + reapedTxs := txmp.ReapMaxBytesMaxGas(-1, 50) + ensurePrioritized(reapedTxs) + require.Equal(t, len(tTxs), txmp.Size()) +- require.Equal(t, int64(5690), txmp.SizeBytes()) ++ require.Equal(t, totalSizeBytes, txmp.SizeBytes()) + require.Len(t, reapedTxs, 50) + + // reap by transaction bytes only + reapedTxs = txmp.ReapMaxBytesMaxGas(1000, -1) + ensurePrioritized(reapedTxs) + require.Equal(t, len(tTxs), txmp.Size()) +- require.Equal(t, int64(5690), txmp.SizeBytes()) ++ require.Equal(t, totalSizeBytes, txmp.SizeBytes()) + require.GreaterOrEqual(t, len(reapedTxs), 16) + + // Reap by both transaction bytes and gas, where the size yields 31 reaped +@@ -348,15 +365,37 @@ func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) { + reapedTxs = txmp.ReapMaxBytesMaxGas(1500, 30) + ensurePrioritized(reapedTxs) + require.Equal(t, len(tTxs), txmp.Size()) +- require.Equal(t, int64(5690), txmp.SizeBytes()) ++ require.Equal(t, totalSizeBytes, txmp.SizeBytes()) + require.Len(t, reapedTxs, 25) + } + ++func TestTxMempoolTxLargerThanMaxBytes(t *testing.T) { ++ rng := rand.New(rand.NewSource(time.Now().UnixNano())) ++ txmp := setup(t, 0) ++ bigPrefix := make([]byte, 100) ++ _, err := rng.Read(bigPrefix) ++ require.NoError(t, err) ++ // large high priority tx ++ bigTx := []byte(fmt.Sprintf("sender-1-1=%X=2", bigPrefix)) ++ smallPrefix := make([]byte, 20) ++ _, err = rng.Read(smallPrefix) ++ require.NoError(t, err) ++ // smaller low priority tx with different sender ++ smallTx := []byte(fmt.Sprintf("sender-2-1=%X=1", smallPrefix)) ++ 
require.NoError(t, txmp.CheckTx(bigTx, nil, mempool.TxInfo{SenderID: 1})) ++ require.NoError(t, txmp.CheckTx(smallTx, nil, mempool.TxInfo{SenderID: 1})) ++ ++ // reap by max bytes less than the large tx ++ reapedTxs := txmp.ReapMaxBytesMaxGas(100, -1) ++ require.Len(t, reapedTxs, 1) ++ require.Equal(t, types.Tx(smallTx), reapedTxs[0]) ++} ++ + func TestTxMempool_ReapMaxTxs(t *testing.T) { + txmp := setup(t, 0) + tTxs := checkTxs(t, txmp, 100, 0) + require.Equal(t, len(tTxs), txmp.Size()) +- require.Equal(t, int64(5690), txmp.SizeBytes()) ++ require.Equal(t, int64(5800), txmp.SizeBytes()) + + txMap := make(map[types.TxKey]testTx) + priorities := make([]int64, len(tTxs)) +@@ -383,26 +422,26 @@ func TestTxMempool_ReapMaxTxs(t *testing.T) { + reapedTxs := txmp.ReapMaxTxs(-1) + ensurePrioritized(reapedTxs) + require.Equal(t, len(tTxs), txmp.Size()) +- require.Equal(t, int64(5690), txmp.SizeBytes()) ++ require.Equal(t, int64(5800), txmp.SizeBytes()) + require.Len(t, reapedTxs, len(tTxs)) + + // reap a single transaction + reapedTxs = txmp.ReapMaxTxs(1) + ensurePrioritized(reapedTxs) + require.Equal(t, len(tTxs), txmp.Size()) +- require.Equal(t, int64(5690), txmp.SizeBytes()) ++ require.Equal(t, int64(5800), txmp.SizeBytes()) + require.Len(t, reapedTxs, 1) + + // reap half of the transactions + reapedTxs = txmp.ReapMaxTxs(len(tTxs) / 2) + ensurePrioritized(reapedTxs) + require.Equal(t, len(tTxs), txmp.Size()) +- require.Equal(t, int64(5690), txmp.SizeBytes()) ++ require.Equal(t, int64(5800), txmp.SizeBytes()) + require.Len(t, reapedTxs, len(tTxs)/2) + } + + func TestTxMempool_CheckTxExceedsMaxSize(t *testing.T) { +- txmp := setup(t, 0) ++ txmp := setup(t, 1) + + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + tx := make([]byte, txmp.config.MaxTxBytes+1) +@@ -520,6 +559,7 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) { + } + + func TestTxMempool_ExpiredTxs_Timestamp(t *testing.T) { ++ t.Skip("This test is flaky and needs to be fixed") + txmp := setup(t, 5000) 
+ txmp.config.TTLDuration = 5 * time.Millisecond + +@@ -551,6 +591,10 @@ func TestTxMempool_ExpiredTxs_Timestamp(t *testing.T) { + + // All the transactions in the original set should have been purged. + for _, tx := range added1 { ++ // Check that they were added to the evicted cache. ++ evicted := txmp.WasRecentlyEvicted(tx.tx.Key()) ++ require.True(t, evicted) ++ + if _, ok := txmp.txByKey[tx.tx.Key()]; ok { + t.Errorf("Transaction %X should have been purged for TTL", tx.tx.Key()) + } +@@ -567,6 +611,23 @@ func TestTxMempool_ExpiredTxs_Timestamp(t *testing.T) { + } + } + ++func TestGetTxByKey_GetsTx(t *testing.T) { ++ txmp := setup(t, 500) ++ txs := checkTxs(t, txmp, 100, 0) ++ ++ // Should get all valid txs ++ for _, tx := range txs { ++ txKey := tx.tx.Key() ++ txFromMempool, exists := txmp.GetTxByKey(txKey) ++ require.Equal(t, tx.tx, txFromMempool) ++ require.True(t, exists) ++ } ++ ++ // Non-existent tx should return false ++ _, exists := txmp.GetTxByKey(types.Tx("non-existent-tx").Key()) ++ require.False(t, exists) ++} ++ + func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { + txmp := setup(t, 500) + txmp.height = 100 +@@ -633,7 +694,7 @@ func TestTxMempool_CheckTxPostCheckError(t *testing.T) { + postCheckFn := func(_ types.Tx, _ *abci.ResponseCheckTx) error { + return testCase.err + } +- txmp := setup(t, 0, WithPostCheck(postCheckFn)) ++ txmp := setup(t, 1, WithPostCheck(postCheckFn)) + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + tx := make([]byte, txmp.config.MaxTxBytes-1) + _, err := rng.Read(tx) +@@ -652,3 +713,38 @@ func TestTxMempool_CheckTxPostCheckError(t *testing.T) { + }) + } + } ++ ++func TestRemoveBlobTx(t *testing.T) { ++ txmp := setup(t, 500) ++ namespaceOne := bytes.Repeat([]byte{1}, consts.NamespaceIDSize) ++ ++ originalTx := []byte{1, 2, 3, 4} ++ indexWrapper, err := types.MarshalIndexWrapper(originalTx, 100) ++ require.NoError(t, err) ++ ++ // create the blobTx ++ b := tmproto.Blob{ ++ NamespaceId: namespaceOne, ++ Data: 
[]byte{1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 9}, ++ ShareVersion: 0, ++ NamespaceVersion: 0, ++ } ++ bTx, err := types.MarshalBlobTx(originalTx, &b) ++ require.NoError(t, err) ++ ++ err = txmp.CheckTx(bTx, nil, mempool.TxInfo{}) ++ require.NoError(t, err) ++ ++ err = txmp.Update(1, []types.Tx{indexWrapper}, abciResponses(1, abci.CodeTypeOK), nil, nil) ++ require.NoError(t, err) ++ assert.EqualValues(t, 0, txmp.Size()) ++ assert.EqualValues(t, 0, txmp.SizeBytes()) ++} ++ ++func abciResponses(n int, code uint32) []*abci.ResponseDeliverTx { ++ responses := make([]*abci.ResponseDeliverTx, 0, n) ++ for i := 0; i < n; i++ { ++ responses = append(responses, &abci.ResponseDeliverTx{Code: code}) ++ } ++ return responses ++} diff --git a/patches/mempool/v1/reactor.go.patch b/patches/mempool/v1/reactor.go.patch new file mode 100644 index 00000000000..e3846a76666 --- /dev/null +++ b/patches/mempool/v1/reactor.go.patch @@ -0,0 +1,99 @@ +diff --git a/mempool/v1/reactor.go b/mempool/v1/reactor.go +index 8a7ddca69..7e0b22bcf 100644 +--- a/mempool/v1/reactor.go ++++ b/mempool/v1/reactor.go +@@ -13,6 +13,8 @@ import ( + cmtsync "github.com/tendermint/tendermint/libs/sync" + "github.com/tendermint/tendermint/mempool" + "github.com/tendermint/tendermint/p2p" ++ "github.com/tendermint/tendermint/pkg/trace" ++ "github.com/tendermint/tendermint/pkg/trace/schema" + protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" + "github.com/tendermint/tendermint/types" + ) +@@ -22,9 +24,10 @@ import ( + // peers you received it from. + type Reactor struct { + p2p.BaseReactor +- config *cfg.MempoolConfig +- mempool *TxMempool +- ids *mempoolIDs ++ config *cfg.MempoolConfig ++ mempool *TxMempool ++ ids *mempoolIDs ++ traceClient trace.Tracer + } + + type mempoolIDs struct { +@@ -91,11 +94,12 @@ func newMempoolIDs() *mempoolIDs { + } + + // NewReactor returns a new Reactor with the given config and mempool. 
+-func NewReactor(config *cfg.MempoolConfig, mempool *TxMempool) *Reactor { ++func NewReactor(config *cfg.MempoolConfig, mempool *TxMempool, traceClient trace.Tracer) *Reactor { + memR := &Reactor{ +- config: config, +- mempool: mempool, +- ids: newMempoolIDs(), ++ config: config, ++ mempool: mempool, ++ ids: newMempoolIDs(), ++ traceClient: traceClient, + } + memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR) + return memR +@@ -117,6 +121,22 @@ func (memR *Reactor) OnStart() error { + if !memR.config.Broadcast { + memR.Logger.Info("Tx broadcasting is disabled") + } ++ ++ // run a separate go routine to check for time based TTLs ++ if memR.mempool.config.TTLDuration > 0 { ++ go func() { ++ ticker := time.NewTicker(memR.mempool.config.TTLDuration) ++ for { ++ select { ++ case <-ticker.C: ++ memR.mempool.CheckToPurgeExpiredTxs() ++ case <-memR.Quit(): ++ return ++ } ++ } ++ }() ++ } ++ + return nil + } + +@@ -173,6 +193,13 @@ func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) { + var err error + for _, tx := range protoTxs { + ntx := types.Tx(tx) ++ schema.WriteMempoolTx( ++ memR.traceClient, ++ string(e.Src.ID()), ++ ntx.Hash(), ++ len(tx), ++ schema.Download, ++ ) + err = memR.mempool.CheckTx(ntx, nil, txInfo) + if errors.Is(err, mempool.ErrTxInCache) { + memR.Logger.Debug("Tx already exists in cache", "tx", ntx.String()) +@@ -269,6 +296,17 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { + if !success { + time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond) + continue ++ } else { ++ // record that we have sent the peer the transaction ++ // to avoid doing it a second time ++ memTx.SetPeer(peerID) ++ schema.WriteMempoolTx( ++ memR.traceClient, ++ string(peer.ID()), ++ memTx.tx.Hash(), ++ len(memTx.tx), ++ schema.Upload, ++ ) + } + } + diff --git a/patches/mempool/v1/reactor_test.go.patch b/patches/mempool/v1/reactor_test.go.patch new file mode 100644 index 00000000000..43b410006f2 --- /dev/null +++ 
b/patches/mempool/v1/reactor_test.go.patch @@ -0,0 +1,76 @@ +diff --git a/mempool/v1/reactor_test.go b/mempool/v1/reactor_test.go +index 74f9f469f..31cb9672e 100644 +--- a/mempool/v1/reactor_test.go ++++ b/mempool/v1/reactor_test.go +@@ -14,6 +14,7 @@ import ( + + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/p2p/mock" ++ "github.com/tendermint/tendermint/pkg/trace" + + cfg "github.com/tendermint/tendermint/config" + +@@ -95,6 +96,36 @@ func TestMempoolVectors(t *testing.T) { + } + } + ++func TestReactorEventuallyRemovesExpiredTransaction(t *testing.T) { ++ config := cfg.TestConfig() ++ config.Mempool.TTLDuration = 100 * time.Millisecond ++ const N = 1 ++ reactor := makeAndConnectReactors(config, N)[0] ++ ++ tx := types.Tx([]byte("test")) ++ key := tx.Key() ++ txMsg := &memproto.Message{ ++ Sum: &memproto.Message_Txs{Txs: &memproto.Txs{Txs: [][]byte{tx}}}, ++ } ++ txMsgBytes, err := txMsg.Marshal() ++ require.NoError(t, err) ++ ++ peer := mock.NewPeer(nil) ++ reactor.InitPeer(peer) ++ reactor.Receive(mempool.MempoolChannel, peer, txMsgBytes) ++ reactor.mempool.Lock() ++ _, has := reactor.mempool.txByKey[key] ++ reactor.mempool.Unlock() ++ require.True(t, has) ++ ++ // wait for the transaction to expire ++ time.Sleep(reactor.mempool.config.TTLDuration * 2) ++ reactor.mempool.Lock() ++ _, has = reactor.mempool.txByKey[key] ++ reactor.mempool.Unlock() ++ require.False(t, has) ++} ++ + func TestLegacyReactorReceiveBasic(t *testing.T) { + config := cfg.TestConfig() + // if there were more than two reactors, the order of transactions could not be +@@ -130,10 +161,10 @@ func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor { + for i := 0; i < n; i++ { + app := kvstore.NewApplication() + cc := proxy.NewLocalClientCreator(app) +- mempool, cleanup := newMempoolWithApp(cc) ++ mempool, cleanup := newMempoolWithAppAndConfig(cc, config) + defer cleanup() + +- reactors[i] = NewReactor(config.Mempool, mempool) // so we dont 
start the consensus states ++ reactors[i] = NewReactor(config.Mempool, mempool, trace.NoOpTracer()) // so we dont start the consensus states + reactors[i].SetLogger(logger.With("validator", i)) + } + +@@ -158,13 +189,6 @@ func mempoolLogger() log.Logger { + }) + } + +-func newMempoolWithApp(cc proxy.ClientCreator) (*TxMempool, func()) { +- conf := cfg.ResetTestRoot("mempool_test") +- +- mp, cu := newMempoolWithAppAndConfig(cc, conf) +- return mp, cu +-} +- + func newMempoolWithAppAndConfig(cc proxy.ClientCreator, conf *cfg.Config) (*TxMempool, func()) { + appConnMem, _ := cc.NewABCIClient() + appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool")) diff --git a/patches/node/node.go.patch b/patches/node/node.go.patch new file mode 100644 index 00000000000..6cf41b5967d --- /dev/null +++ b/patches/node/node.go.patch @@ -0,0 +1,460 @@ +diff --git a/node/node.go b/node/node.go +index ae16caca8..810d3264d 100644 +--- a/node/node.go ++++ b/node/node.go +@@ -11,9 +11,11 @@ import ( + "time" + + dbm "github.com/cometbft/cometbft-db" ++ "github.com/grafana/pyroscope-go" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/rs/cors" ++ sdktrace "go.opentelemetry.io/otel/sdk/trace" + + abci "github.com/tendermint/tendermint/abci/types" + bcv0 "github.com/tendermint/tendermint/blockchain/v0" +@@ -23,6 +25,7 @@ import ( + cs "github.com/tendermint/tendermint/consensus" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/evidence" ++ "github.com/tendermint/tendermint/pkg/trace" + + cmtjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/libs/log" +@@ -30,6 +33,7 @@ import ( + "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/light" + mempl "github.com/tendermint/tendermint/mempool" ++ mempoolv2 "github.com/tendermint/tendermint/mempool/cat" + mempoolv0 
"github.com/tendermint/tendermint/mempool/v0" + mempoolv1 "github.com/tendermint/tendermint/mempool/v1" + "github.com/tendermint/tendermint/p2p" +@@ -115,17 +119,17 @@ func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) { + } + + // MetricsProvider returns a consensus, p2p and mempool Metrics. +-type MetricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) ++type MetricsProvider func(chainID, softwareVersion string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) + + // DefaultMetricsProvider returns Metrics build using Prometheus client library + // if Prometheus is enabled. Otherwise, it returns no-op Metrics. + func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider { +- return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) { ++ return func(chainID, softwareVersion string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) { + if config.Prometheus { +- return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID), +- p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID), +- mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID), +- sm.PrometheusMetrics(config.Namespace, "chain_id", chainID) ++ return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID, "version", softwareVersion), ++ p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID, "version", softwareVersion), ++ mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID, "version", softwareVersion), ++ sm.PrometheusMetrics(config.Namespace, "chain_id", chainID, "version", softwareVersion) + } + return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics() + } +@@ -230,6 +234,9 @@ type Node struct { + blockIndexer indexer.BlockIndexer + indexerService *txindex.IndexerService + prometheusSrv *http.Server ++ tracer trace.Tracer ++ pyroscopeProfiler *pyroscope.Profiler ++ pyroscopeTracer *sdktrace.TracerProvider + } + + func 
initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) { +@@ -323,14 +330,11 @@ func doHandshake( + eventBus types.BlockEventPublisher, + proxyApp proxy.AppConns, + consensusLogger log.Logger, +-) error { ++) (string, error) { + handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc) + handshaker.SetLogger(consensusLogger) + handshaker.SetEventBus(eventBus) +- if err := handshaker.HandshakeWithContext(ctx, proxyApp); err != nil { +- return fmt.Errorf("error during handshake: %v", err) +- } +- return nil ++ return handshaker.Handshake(proxyApp) + } + + func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) { +@@ -374,8 +378,39 @@ func createMempoolAndMempoolReactor( + state sm.State, + memplMetrics *mempl.Metrics, + logger log.Logger, ++ traceClient trace.Tracer, + ) (mempl.Mempool, p2p.Reactor) { + switch config.Mempool.Version { ++ case cfg.MempoolV2: ++ mp := mempoolv2.NewTxPool( ++ logger, ++ config.Mempool, ++ proxyApp.Mempool(), ++ state.LastBlockHeight, ++ mempoolv2.WithMetrics(memplMetrics), ++ mempoolv2.WithPreCheck(sm.TxPreCheck(state)), ++ mempoolv2.WithPostCheck(sm.TxPostCheck(state)), ++ ) ++ ++ reactor, err := mempoolv2.NewReactor( ++ mp, ++ &mempoolv2.ReactorOptions{ ++ ListenOnly: !config.Mempool.Broadcast, ++ MaxTxSize: config.Mempool.MaxTxBytes, ++ TraceClient: traceClient, ++ MaxGossipDelay: config.Mempool.MaxGossipDelay, ++ }, ++ ) ++ if err != nil { ++ // TODO: find a more polite way of handling this error ++ panic(err) ++ } ++ if config.Consensus.WaitForTxs() { ++ mp.EnableTxsAvailable() ++ } ++ reactor.SetLogger(logger) ++ ++ return mp, reactor + case cfg.MempoolV1: + mp := mempoolv1.NewTxMempool( + logger, +@@ -385,15 +420,18 @@ func createMempoolAndMempoolReactor( + mempoolv1.WithMetrics(memplMetrics), + mempoolv1.WithPreCheck(sm.TxPreCheck(state)), + mempoolv1.WithPostCheck(sm.TxPostCheck(state)), ++ 
mempoolv1.WithTraceClient(traceClient), + ) + + reactor := mempoolv1.NewReactor( + config.Mempool, + mp, ++ traceClient, + ) + if config.Consensus.WaitForTxs() { + mp.EnableTxsAvailable() + } ++ reactor.SetLogger(logger) + + return mp, reactor + +@@ -416,6 +454,7 @@ func createMempoolAndMempoolReactor( + if config.Consensus.WaitForTxs() { + mp.EnableTxsAvailable() + } ++ reactor.SetLogger(logger) + + return mp, reactor + +@@ -476,6 +515,7 @@ func createConsensusReactor(config *cfg.Config, + waitSync bool, + eventBus *types.EventBus, + consensusLogger log.Logger, ++ traceClient trace.Tracer, + ) (*cs.Reactor, *cs.State) { + consensusState := cs.NewState( + config.Consensus, +@@ -485,12 +525,18 @@ func createConsensusReactor(config *cfg.Config, + mempool, + evidencePool, + cs.StateMetrics(csMetrics), ++ cs.SetTraceClient(traceClient), + ) + consensusState.SetLogger(consensusLogger) + if privValidator != nil { + consensusState.SetPrivValidator(privValidator) + } +- consensusReactor := cs.NewReactor(consensusState, waitSync, cs.ReactorMetrics(csMetrics)) ++ consensusReactor := cs.NewReactor( ++ consensusState, ++ waitSync, ++ cs.ReactorMetrics(csMetrics), ++ cs.ReactorTracing(traceClient), ++ ) + consensusReactor.SetLogger(consensusLogger) + // services which will be publishing and/or subscribing for messages (events) + // consensusReactor will set it on consensusState and blockExecutor +@@ -503,13 +549,14 @@ func createTransport( + nodeInfo p2p.NodeInfo, + nodeKey *p2p.NodeKey, + proxyApp proxy.AppConns, ++ tracer trace.Tracer, + ) ( + *p2p.MultiplexTransport, + []p2p.PeerFilterFunc, + ) { + var ( + mConnConfig = p2p.MConnConfig(config.P2P) +- transport = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig) ++ transport = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig, tracer) + connFilters = []p2p.ConnFilterFunc{} + peerFilters = []p2p.PeerFilterFunc{} + ) +@@ -579,12 +626,14 @@ func createSwitch(config *cfg.Config, + nodeInfo p2p.NodeInfo, + nodeKey 
*p2p.NodeKey, + p2pLogger log.Logger, ++ tracer trace.Tracer, + ) *p2p.Switch { + sw := p2p.NewSwitch( + config.P2P, + transport, + p2p.WithMetrics(p2pMetrics), + p2p.SwitchPeerFilters(peerFilters...), ++ p2p.WithTracer(tracer), + ) + sw.SetLogger(p2pLogger) + sw.AddReactor("MEMPOOL", mempoolReactor) +@@ -716,6 +765,10 @@ func NewNode(config *cfg.Config, + logger log.Logger, + options ...Option, + ) (*Node, error) { ++ if err := config.ValidateBasic(); err != nil { ++ return nil, fmt.Errorf("invalid config: %w", err) ++ } ++ + return NewNodeWithContext(context.TODO(), config, privValidator, + nodeKey, clientCreator, genesisDocProvider, dbProvider, + metricsProvider, logger, options...) +@@ -793,30 +846,54 @@ func NewNodeWithContext(ctx context.Context, + // Create the handshaker, which calls RequestInfo, sets the AppVersion on the state, + // and replays any blocks as necessary to sync CometBFT with the app. + consensusLogger := logger.With("module", "consensus") ++ var softwareVersion string + if !stateSync { +- if err := doHandshake(ctx, stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil { ++ softwareVersion, err = doHandshake(context.TODO(), stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger) ++ if err != nil { + return nil, err + } + + // Reload the state. It will have the Version.Consensus.App set by the +- // Handshake, and may have other modifications as well (ie. depending on ++ // Handshake, and may have other modifications as well (i.e., depending on + // what happened during block replay). 
+ state, err = stateStore.Load() ++ logger.Info("Loaded state after doHandshake", "height", ++ state.LastBlockHeight, "app_version", ++ state.Version.Consensus.App, "timeout_commit", state.TimeoutCommit, "timeout_propose", state.TimeoutPropose) + if err != nil { ++ logger.Info("Error loading state after doHandshake", "err", err) + return nil, fmt.Errorf("cannot load state: %w", err) + } ++ } else { ++ logger.Info("Skipping handshake, Starting state sync") ++ resp, err := proxyApp.Query().InfoSync(proxy.RequestInfo) ++ if err != nil { ++ return nil, fmt.Errorf("error during info call: %w", err) ++ } ++ softwareVersion = resp.Version + } + + // Determine whether we should do fast sync. This must happen after the handshake, since the +- // app may modify the validator set, specifying ourself as the only validator. ++ // app may modify the validator set, specifying ourselves as the only validator. + fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey) + + logNodeStartupInfo(state, pubKey, logger, consensusLogger) + +- csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID) ++ csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID, softwareVersion) ++ ++ // create an optional tracer client to collect trace data. 
++ tracer, err := trace.NewTracer( ++ config, ++ logger, ++ genDoc.ChainID, ++ string(nodeKey.ID()), ++ ) ++ if err != nil { ++ return nil, err ++ } + + // Make MempoolReactor +- mempool, mempoolReactor := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger) ++ mempool, mempoolReactor := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger, tracer) + + // Make Evidence Reactor + evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, blockStore, logger) +@@ -832,6 +909,7 @@ func NewNodeWithContext(ctx context.Context, + mempool, + evidencePool, + sm.BlockExecutorWithMetrics(smMetrics), ++ sm.WithBlockStore(blockStore), + ) + + // Make BlockchainReactor. Don't start fast sync if we're doing a state sync first. +@@ -849,9 +927,10 @@ func NewNodeWithContext(ctx context.Context, + } + consensusReactor, consensusState := createConsensusReactor( + config, state, blockExec, blockStore, mempool, evidencePool, +- privValidator, csMetrics, stateSync || fastSync, eventBus, consensusLogger, ++ privValidator, csMetrics, stateSync || fastSync, eventBus, consensusLogger, tracer, + ) + ++ logger.Info("Consensus reactor created", "timeout_propose", consensusState.GetState().TimeoutPropose, "timeout_commit", consensusState.GetState().TimeoutCommit) + // Set up state sync reactor, and schedule a sync if requested. + // FIXME The way we do phased startups (e.g. replay -> fast sync -> consensus) is very messy, + // we should clean this whole thing up. See: +@@ -864,19 +943,19 @@ func NewNodeWithContext(ctx context.Context, + ) + stateSyncReactor.SetLogger(logger.With("module", "statesync")) + +- nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state) ++ nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state, softwareVersion) + if err != nil { + return nil, err + } + + // Setup Transport. 
+- transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp) ++ transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp, tracer) + + // Setup Switch. + p2pLogger := logger.With("module", "p2p") + sw := createSwitch( + config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor, +- stateSyncReactor, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger, ++ stateSyncReactor, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger, tracer, + ) + + err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ")) +@@ -947,6 +1026,7 @@ func NewNodeWithContext(ctx context.Context, + indexerService: indexerService, + blockIndexer: blockIndexer, + eventBus: eventBus, ++ tracer: tracer, + } + node.BaseService = *service.NewBaseService(logger, "Node", node) + +@@ -970,7 +1050,7 @@ func (n *Node) OnStart() error { + n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " ")) + + // Start the RPC server before the P2P server +- // so we can eg. receive txs for the first block ++ // so we can e.g., receive txs for the first block + if n.config.RPC.ListenAddress != "" { + listeners, err := n.startRPC() + if err != nil { +@@ -984,6 +1064,18 @@ func (n *Node) OnStart() error { + n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr) + } + ++ if n.config.Instrumentation.PyroscopeURL != "" { ++ profiler, tracer, err := setupPyroscope( ++ n.config.Instrumentation, ++ string(n.nodeKey.ID()), ++ ) ++ if err != nil { ++ return err ++ } ++ n.pyroscopeProfiler = profiler ++ n.pyroscopeTracer = tracer ++ } ++ + // Start the transport. 
+ addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress)) + if err != nil { +@@ -1068,18 +1160,37 @@ func (n *Node) OnStop() { + n.Logger.Error("Prometheus HTTP server Shutdown", "err", err) + } + } ++ + if n.blockStore != nil { + n.Logger.Info("Closing blockstore") + if err := n.blockStore.Close(); err != nil { + n.Logger.Error("problem closing blockstore", "err", err) + } + } ++ + if n.stateStore != nil { + n.Logger.Info("Closing statestore") + if err := n.stateStore.Close(); err != nil { + n.Logger.Error("problem closing statestore", "err", err) + } + } ++ ++ if n.tracer != nil { ++ n.tracer.Stop() ++ } ++ ++ if n.pyroscopeProfiler != nil { ++ if err := n.pyroscopeProfiler.Stop(); err != nil { ++ n.Logger.Error("Pyroscope profiler Stop", "err", err) ++ } ++ } ++ ++ if n.pyroscopeTracer != nil { ++ if err := n.pyroscopeTracer.Shutdown(context.Background()); err != nil { ++ n.Logger.Error("Pyroscope tracer Shutdown", "err", err) ++ } ++ } ++ + if n.evidencePool != nil { + n.Logger.Info("Closing evidencestore") + if err := n.EvidencePool().Close(); err != nil { +@@ -1117,11 +1228,8 @@ func (n *Node) ConfigureRPC() error { + + Config: *n.config.RPC, + }) +- if err := rpccore.InitGenesisChunks(); err != nil { +- return err +- } + +- return nil ++ return rpccore.InitGenesisChunks() + } + + func (n *Node) startRPC() ([]net.Listener, error) { +@@ -1353,6 +1461,7 @@ func makeNodeInfo( + txIndexer txindex.TxIndexer, + genDoc *types.GenesisDoc, + state sm.State, ++ softwareVersion string, + ) (p2p.DefaultNodeInfo, error) { + txIndexerStatus := "on" + if _, ok := txIndexer.(*null.TxIndex); ok { +@@ -1379,7 +1488,7 @@ func makeNodeInfo( + ), + DefaultNodeID: nodeKey.ID(), + Network: genDoc.ChainID, +- Version: version.TMCoreSemVer, ++ Version: softwareVersion, + Channels: []byte{ + bcChannel, + cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel, +@@ -1398,6 +1507,10 @@ func makeNodeInfo( + nodeInfo.Channels = 
append(nodeInfo.Channels, pex.PexChannel) + } + ++ if config.Mempool.Version == cfg.MempoolV2 { ++ nodeInfo.Channels = append(nodeInfo.Channels, mempoolv2.MempoolStateChannel) ++ } ++ + lAddr := config.P2P.ExternalAddress + + if lAddr == "" { +@@ -1429,7 +1542,7 @@ func LoadStateFromDBOrGenesisDocProvider( + return sm.State{}, nil, err + } + // save genesis doc to prevent a certain class of user errors (e.g. when it +- // was changed, accidentally or not). Also good for audit trail. ++ // was changed, accidentally or not). Also, good for audit trail. + if err := saveGenesisDoc(stateDB, genDoc); err != nil { + return sm.State{}, nil, err + } +@@ -1467,11 +1580,8 @@ func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) error { + if err != nil { + return fmt.Errorf("failed to save genesis doc due to marshaling error: %w", err) + } +- if err := db.SetSync(genesisDocKey, b); err != nil { +- return err +- } + +- return nil ++ return db.SetSync(genesisDocKey, b) + } + + func createAndStartPrivValidatorSocketClient( diff --git a/patches/node/node_test.go.patch b/patches/node/node_test.go.patch new file mode 100644 index 00000000000..19ba320ab94 --- /dev/null +++ b/patches/node/node_test.go.patch @@ -0,0 +1,55 @@ +diff --git a/node/node_test.go b/node/node_test.go +index 30c883a68..db486b80c 100644 +--- a/node/node_test.go ++++ b/node/node_test.go +@@ -21,6 +21,7 @@ import ( + "github.com/tendermint/tendermint/libs/log" + cmtrand "github.com/tendermint/tendermint/libs/rand" + mempl "github.com/tendermint/tendermint/mempool" ++ mempoolv2 "github.com/tendermint/tendermint/mempool/cat" + mempoolv0 "github.com/tendermint/tendermint/mempool/v0" + mempoolv1 "github.com/tendermint/tendermint/mempool/v1" + "github.com/tendermint/tendermint/p2p" +@@ -266,6 +267,16 @@ func TestCreateProposalBlock(t *testing.T) { + mempoolv1.WithPreCheck(sm.TxPreCheck(state)), + mempoolv1.WithPostCheck(sm.TxPostCheck(state)), + ) ++ case cfg.MempoolV2: ++ mempool = mempoolv2.NewTxPool( ++ logger, 
++ config.Mempool, ++ proxyApp.Mempool(), ++ state.LastBlockHeight, ++ mempoolv2.WithMetrics(memplMetrics), ++ mempoolv2.WithPreCheck(sm.TxPreCheck(state)), ++ mempoolv2.WithPostCheck(sm.TxPostCheck(state)), ++ ) + } + + // Make EvidencePool +@@ -370,6 +381,16 @@ func TestMaxProposalBlockSize(t *testing.T) { + mempoolv1.WithPreCheck(sm.TxPreCheck(state)), + mempoolv1.WithPostCheck(sm.TxPostCheck(state)), + ) ++ case cfg.MempoolV2: ++ mempool = mempoolv2.NewTxPool( ++ logger, ++ config.Mempool, ++ proxyApp.Mempool(), ++ state.LastBlockHeight, ++ mempoolv2.WithMetrics(memplMetrics), ++ mempoolv2.WithPreCheck(sm.TxPreCheck(state)), ++ mempoolv2.WithPostCheck(sm.TxPostCheck(state)), ++ ) + } + + // fill the mempool with one txs just below the maximum size +@@ -409,7 +430,7 @@ func TestNodeNewNodeCustomReactors(t *testing.T) { + cr := p2pmock.NewReactor() + cr.Channels = []*conn.ChannelDescriptor{ + { +- ID: byte(0x31), ++ ID: byte(0x32), + Priority: 5, + SendQueueCapacity: 100, + RecvMessageCapacity: 100, diff --git a/patches/node/tracing.go.patch b/patches/node/tracing.go.patch new file mode 100644 index 00000000000..fd86a42203f --- /dev/null +++ b/patches/node/tracing.go.patch @@ -0,0 +1,91 @@ +diff --git a/node/tracing.go b/node/tracing.go +new file mode 100644 +index 000000000..f49fd89f8 +--- /dev/null ++++ b/node/tracing.go +@@ -0,0 +1,85 @@ ++package node ++ ++import ( ++ "github.com/grafana/pyroscope-go" ++ "github.com/tendermint/tendermint/config" ++ ++ otelpyroscope "github.com/grafana/otel-profiling-go" ++ "go.opentelemetry.io/otel" ++ "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" ++ "go.opentelemetry.io/otel/propagation" ++ ++ sdktrace "go.opentelemetry.io/otel/sdk/trace" ++) ++ ++// setupPyroscope sets up pyroscope profiler and optionally tracing. 
++func setupPyroscope(instCfg *config.InstrumentationConfig, nodeID string) (*pyroscope.Profiler, *sdktrace.TracerProvider, error) { ++ tp, err := tracerProviderDebug() ++ if err != nil { ++ return nil, nil, err ++ } ++ ++ labels := map[string]string{"node_id": nodeID} ++ ++ if instCfg.PyroscopeTrace { ++ if _, err = setupTracing(instCfg.PyroscopeURL, labels); err != nil { ++ return nil, nil, err ++ } ++ } else { ++ tp = nil ++ } ++ ++ pflr, err := pyroscope.Start(pyroscope.Config{ ++ ApplicationName: "celestia", ++ ServerAddress: instCfg.PyroscopeURL, ++ Logger: nil, // use the noop logger by passing nil ++ Tags: labels, ++ ProfileTypes: toPyroscopeProfiles(instCfg.PyroscopeProfileTypes), ++ }) ++ ++ return pflr, tp, err ++} ++ ++func setupTracing(addr string, labels map[string]string) (tp *sdktrace.TracerProvider, err error) { ++ tp, err = tracerProviderDebug() ++ if err != nil { ++ return nil, err ++ } ++ ++ // Set the Tracer Provider and the W3C Trace Context propagator as globals. ++ // We wrap the tracer provider to also annotate goroutines with Span ID so ++ // that pprof would add corresponding labels to profiling samples. ++ otel.SetTracerProvider(otelpyroscope.NewTracerProvider(tp, ++ otelpyroscope.WithAppName("celestia"), ++ otelpyroscope.WithRootSpanOnly(true), ++ otelpyroscope.WithAddSpanName(true), ++ otelpyroscope.WithPyroscopeURL(addr), ++ otelpyroscope.WithProfileBaselineLabels(labels), ++ otelpyroscope.WithProfileBaselineURL(true), ++ otelpyroscope.WithProfileURL(true), ++ )) ++ ++ // Register the trace context and baggage propagators so data is propagated across services/processes. 
++ otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator( ++ propagation.TraceContext{}, ++ propagation.Baggage{}, ++ )) ++ ++ return tp, err ++} ++ ++func tracerProviderDebug() (*sdktrace.TracerProvider, error) { ++ exp, err := stdouttrace.New(stdouttrace.WithPrettyPrint()) ++ if err != nil { ++ return nil, err ++ } ++ return sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sdktrace.NewBatchSpanProcessor(exp))), nil ++} ++ ++func toPyroscopeProfiles(profiles []string) []pyroscope.ProfileType { ++ pts := make([]pyroscope.ProfileType, 0, len(profiles)) ++ for _, p := range profiles { ++ pts = append(pts, pyroscope.ProfileType(p)) ++ } ++ return pts ++} diff --git a/patches/p2p/conn/secret_connection.go.patch b/patches/p2p/conn/secret_connection.go.patch new file mode 100644 index 00000000000..5126e90ebf7 --- /dev/null +++ b/patches/p2p/conn/secret_connection.go.patch @@ -0,0 +1,12 @@ +diff --git a/p2p/conn/secret_connection.go b/p2p/conn/secret_connection.go +index 89adf06f7..4201b153e 100644 +--- a/p2p/conn/secret_connection.go ++++ b/p2p/conn/secret_connection.go +@@ -207,6 +207,7 @@ func (sc *SecretConnection) Write(data []byte) (n int, err error) { + data = nil + } + chunkLength := len(chunk) ++ //nolint:gosec + binary.LittleEndian.PutUint32(frame, uint32(chunkLength)) + copy(frame[dataLenSize:], chunk) + diff --git a/patches/p2p/key.go.patch b/patches/p2p/key.go.patch new file mode 100644 index 00000000000..443b9ae3abb --- /dev/null +++ b/patches/p2p/key.go.patch @@ -0,0 +1,20 @@ +diff --git a/p2p/key.go b/p2p/key.go +index 51b604f98..c5f73212e 100644 +--- a/p2p/key.go ++++ b/p2p/key.go +@@ -108,6 +108,7 @@ func MakePoWTarget(difficulty, targetBits uint) []byte { + panic(fmt.Sprintf("difficulty (%d) >= targetBits (%d)", difficulty, targetBits)) + } + targetBytes := targetBits / 8 ++ //nolint:gosec + zeroPrefixLen := (int(difficulty) / 8) + prefix := bytes.Repeat([]byte{0}, zeroPrefixLen) + mod := (difficulty % 8) +@@ -115,6 +116,7 @@ func 
MakePoWTarget(difficulty, targetBits uint) []byte { + nonZeroPrefix := byte(1<<(8-mod) - 1) + prefix = append(prefix, nonZeroPrefix) + } ++ //nolint:gosec + tailLen := int(targetBytes) - len(prefix) + return append(prefix, bytes.Repeat([]byte{0xFF}, tailLen)...) + } diff --git a/patches/p2p/metrics.go.patch b/patches/p2p/metrics.go.patch new file mode 100644 index 00000000000..1b539c0ad49 --- /dev/null +++ b/patches/p2p/metrics.go.patch @@ -0,0 +1,20 @@ +diff --git a/p2p/metrics.go b/p2p/metrics.go +index 7c80658e5..9da34d47c 100644 +--- a/p2p/metrics.go ++++ b/p2p/metrics.go +@@ -87,13 +87,13 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + Subsystem: MetricsSubsystem, + Name: "message_receive_bytes_total", + Help: "Number of bytes of each message type received.", +- }, append(labels, "message_type")).With(labelsAndValues...), ++ }, append(labels, "message_type", "chID", "peer_id")).With(labelsAndValues...), + MessageSendBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "message_send_bytes_total", + Help: "Number of bytes of each message type sent.", +- }, append(labels, "message_type")).With(labelsAndValues...), ++ }, append(labels, "message_type", "chID", "peer_id")).With(labelsAndValues...), + } + } + diff --git a/patches/p2p/mock/peer.go.patch b/patches/p2p/mock/peer.go.patch new file mode 100644 index 00000000000..3e0abed4cd6 --- /dev/null +++ b/patches/p2p/mock/peer.go.patch @@ -0,0 +1,12 @@ +diff --git a/p2p/mock/peer.go b/p2p/mock/peer.go +index 31ce85623..0ceea3c53 100644 +--- a/p2p/mock/peer.go ++++ b/p2p/mock/peer.go +@@ -57,6 +57,7 @@ func (mp *Peer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} + func (mp *Peer) ID() p2p.ID { return mp.id } + func (mp *Peer) IsOutbound() bool { return mp.Outbound } + func (mp *Peer) IsPersistent() bool { return mp.Persistent } ++func (mp *Peer) HasIPChanged() bool { return false } + func 
(mp *Peer) Get(key string) interface{} { + if value, ok := mp.kv[key]; ok { + return value diff --git a/patches/p2p/mocks/peer.go.patch b/patches/p2p/mocks/peer.go.patch new file mode 100644 index 00000000000..8b842e9176e --- /dev/null +++ b/patches/p2p/mocks/peer.go.patch @@ -0,0 +1,303 @@ +diff --git a/p2p/mocks/peer.go b/p2p/mocks/peer.go +index a9151c7d8..805b4538a 100644 +--- a/p2p/mocks/peer.go ++++ b/p2p/mocks/peer.go +@@ -22,6 +22,10 @@ type Peer struct { + func (_m *Peer) CloseConn() error { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for CloseConn") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() +@@ -41,6 +45,10 @@ func (_m *Peer) FlushStop() { + func (_m *Peer) Get(_a0 string) interface{} { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for Get") ++ } ++ + var r0 interface{} + if rf, ok := ret.Get(0).(func(string) interface{}); ok { + r0 = rf(_a0) +@@ -57,6 +65,10 @@ func (_m *Peer) Get(_a0 string) interface{} { + func (_m *Peer) GetRemovalFailed() bool { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for GetRemovalFailed") ++ } ++ + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() +@@ -71,6 +83,10 @@ func (_m *Peer) GetRemovalFailed() bool { + func (_m *Peer) ID() p2p.ID { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for ID") ++ } ++ + var r0 p2p.ID + if rf, ok := ret.Get(0).(func() p2p.ID); ok { + r0 = rf() +@@ -81,10 +97,28 @@ func (_m *Peer) ID() p2p.ID { + return r0 + } + ++// HasIPChanged provides a mock function with given fields: ++func (_m *Peer) HasIPChanged() bool { ++ ret := _m.Called() ++ ++ var r0 bool ++ if rf, ok := ret.Get(0).(func() bool); ok { ++ r0 = rf() ++ } else { ++ r0 = ret.Get(0).(bool) ++ } ++ ++ return r0 ++} ++ + // IsOutbound provides a mock function with given fields: + func (_m *Peer) IsOutbound() bool { + ret := _m.Called() + 
++ if len(ret) == 0 { ++ panic("no return value specified for IsOutbound") ++ } ++ + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() +@@ -99,6 +133,10 @@ func (_m *Peer) IsOutbound() bool { + func (_m *Peer) IsPersistent() bool { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for IsPersistent") ++ } ++ + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() +@@ -113,6 +151,10 @@ func (_m *Peer) IsPersistent() bool { + func (_m *Peer) IsRunning() bool { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for IsRunning") ++ } ++ + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() +@@ -123,10 +165,42 @@ func (_m *Peer) IsRunning() bool { + return r0 + } + ++// SendEnvelope provides a mock function with given fields: _a0 ++func (_m *Peer) SendEnvelope(_a0 p2p.Envelope) bool { ++ ret := _m.Called(_a0) ++ ++ var r0 bool ++ if rf, ok := ret.Get(0).(func(p2p.Envelope) bool); ok { ++ r0 = rf(_a0) ++ } else { ++ r0 = ret.Get(0).(bool) ++ } ++ ++ return r0 ++} ++ ++// TrySendEnvelope provides a mock function with given fields: _a0 ++func (_m *Peer) TrySendEnvelope(_a0 p2p.Envelope) bool { ++ ret := _m.Called(_a0) ++ ++ var r0 bool ++ if rf, ok := ret.Get(0).(func(p2p.Envelope) bool); ok { ++ r0 = rf(_a0) ++ } else { ++ r0 = ret.Get(0).(bool) ++ } ++ ++ return r0 ++} ++ + // NodeInfo provides a mock function with given fields: + func (_m *Peer) NodeInfo() p2p.NodeInfo { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for NodeInfo") ++ } ++ + var r0 p2p.NodeInfo + if rf, ok := ret.Get(0).(func() p2p.NodeInfo); ok { + r0 = rf() +@@ -143,6 +217,10 @@ func (_m *Peer) NodeInfo() p2p.NodeInfo { + func (_m *Peer) OnReset() error { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for OnReset") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() +@@ -157,6 +235,10 @@ func 
(_m *Peer) OnReset() error { + func (_m *Peer) OnStart() error { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for OnStart") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() +@@ -176,6 +258,10 @@ func (_m *Peer) OnStop() { + func (_m *Peer) Quit() <-chan struct{} { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for Quit") ++ } ++ + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() +@@ -192,6 +278,10 @@ func (_m *Peer) Quit() <-chan struct{} { + func (_m *Peer) RemoteAddr() net.Addr { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for RemoteAddr") ++ } ++ + var r0 net.Addr + if rf, ok := ret.Get(0).(func() net.Addr); ok { + r0 = rf() +@@ -208,6 +298,10 @@ func (_m *Peer) RemoteAddr() net.Addr { + func (_m *Peer) RemoteIP() net.IP { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for RemoteIP") ++ } ++ + var r0 net.IP + if rf, ok := ret.Get(0).(func() net.IP); ok { + r0 = rf() +@@ -224,6 +318,10 @@ func (_m *Peer) RemoteIP() net.IP { + func (_m *Peer) Reset() error { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for Reset") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() +@@ -238,6 +336,10 @@ func (_m *Peer) Reset() error { + func (_m *Peer) Send(_a0 byte, _a1 []byte) bool { + ret := _m.Called(_a0, _a1) + ++ if len(ret) == 0 { ++ panic("no return value specified for Send") ++ } ++ + var r0 bool + if rf, ok := ret.Get(0).(func(byte, []byte) bool); ok { + r0 = rf(_a0, _a1) +@@ -267,6 +369,10 @@ func (_m *Peer) SetRemovalFailed() { + func (_m *Peer) SocketAddr() *p2p.NetAddress { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for SocketAddr") ++ } ++ + var r0 *p2p.NetAddress + if rf, ok := ret.Get(0).(func() *p2p.NetAddress); ok { + r0 = rf() +@@ -283,6 +389,10 
@@ func (_m *Peer) SocketAddr() *p2p.NetAddress { + func (_m *Peer) Start() error { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for Start") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() +@@ -297,6 +407,10 @@ func (_m *Peer) Start() error { + func (_m *Peer) Status() conn.ConnectionStatus { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for Status") ++ } ++ + var r0 conn.ConnectionStatus + if rf, ok := ret.Get(0).(func() conn.ConnectionStatus); ok { + r0 = rf() +@@ -311,6 +425,10 @@ func (_m *Peer) Status() conn.ConnectionStatus { + func (_m *Peer) Stop() error { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for Stop") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() +@@ -325,6 +443,10 @@ func (_m *Peer) Stop() error { + func (_m *Peer) String() string { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for String") ++ } ++ + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() +@@ -339,6 +461,10 @@ func (_m *Peer) String() string { + func (_m *Peer) TrySend(_a0 byte, _a1 []byte) bool { + ret := _m.Called(_a0, _a1) + ++ if len(ret) == 0 { ++ panic("no return value specified for TrySend") ++ } ++ + var r0 bool + if rf, ok := ret.Get(0).(func(byte, []byte) bool); ok { + r0 = rf(_a0, _a1) +@@ -349,13 +475,12 @@ func (_m *Peer) TrySend(_a0 byte, _a1 []byte) bool { + return r0 + } + +-type mockConstructorTestingTNewPeer interface { ++// NewPeer creates a new instance of Peer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. ++// The first argument is typically a *testing.T value. ++func NewPeer(t interface { + mock.TestingT + Cleanup(func()) +-} +- +-// NewPeer creates a new instance of Peer. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +-func NewPeer(t mockConstructorTestingTNewPeer) *Peer { ++}) *Peer { + mock := &Peer{} + mock.Mock.Test(t) + diff --git a/patches/p2p/mocks/peer_envelope_sender.go.patch b/patches/p2p/mocks/peer_envelope_sender.go.patch new file mode 100644 index 00000000000..76fd6be0aa2 --- /dev/null +++ b/patches/p2p/mocks/peer_envelope_sender.go.patch @@ -0,0 +1,289 @@ +diff --git a/p2p/mocks/peer_envelope_sender.go b/p2p/mocks/peer_envelope_sender.go +index 89f231104..ecdba0e3f 100644 +--- a/p2p/mocks/peer_envelope_sender.go ++++ b/p2p/mocks/peer_envelope_sender.go +@@ -22,6 +22,10 @@ type PeerEnvelopeSender struct { + func (_m *PeerEnvelopeSender) CloseConn() error { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for CloseConn") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() +@@ -41,6 +45,10 @@ func (_m *PeerEnvelopeSender) FlushStop() { + func (_m *PeerEnvelopeSender) Get(_a0 string) interface{} { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for Get") ++ } ++ + var r0 interface{} + if rf, ok := ret.Get(0).(func(string) interface{}); ok { + r0 = rf(_a0) +@@ -57,6 +65,10 @@ func (_m *PeerEnvelopeSender) Get(_a0 string) interface{} { + func (_m *PeerEnvelopeSender) GetRemovalFailed() bool { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for GetRemovalFailed") ++ } ++ + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() +@@ -71,6 +83,10 @@ func (_m *PeerEnvelopeSender) GetRemovalFailed() bool { + func (_m *PeerEnvelopeSender) ID() p2p.ID { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for ID") ++ } ++ + var r0 p2p.ID + if rf, ok := ret.Get(0).(func() p2p.ID); ok { + r0 = rf() +@@ -85,6 +101,10 @@ func (_m *PeerEnvelopeSender) ID() p2p.ID { + func (_m *PeerEnvelopeSender) 
IsOutbound() bool { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for IsOutbound") ++ } ++ + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() +@@ -99,6 +119,24 @@ func (_m *PeerEnvelopeSender) IsOutbound() bool { + func (_m *PeerEnvelopeSender) IsPersistent() bool { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for IsPersistent") ++ } ++ ++ var r0 bool ++ if rf, ok := ret.Get(0).(func() bool); ok { ++ r0 = rf() ++ } else { ++ r0 = ret.Get(0).(bool) ++ } ++ ++ return r0 ++} ++ ++// HasIPChanged provides a mock function with given fields: ++func (_m *PeerEnvelopeSender) HasIPChanged() bool { ++ ret := _m.Called() ++ + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() +@@ -113,6 +151,10 @@ func (_m *PeerEnvelopeSender) IsPersistent() bool { + func (_m *PeerEnvelopeSender) IsRunning() bool { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for IsRunning") ++ } ++ + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() +@@ -127,6 +169,10 @@ func (_m *PeerEnvelopeSender) IsRunning() bool { + func (_m *PeerEnvelopeSender) NodeInfo() p2p.NodeInfo { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for NodeInfo") ++ } ++ + var r0 p2p.NodeInfo + if rf, ok := ret.Get(0).(func() p2p.NodeInfo); ok { + r0 = rf() +@@ -143,6 +189,10 @@ func (_m *PeerEnvelopeSender) NodeInfo() p2p.NodeInfo { + func (_m *PeerEnvelopeSender) OnReset() error { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for OnReset") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() +@@ -157,6 +207,10 @@ func (_m *PeerEnvelopeSender) OnReset() error { + func (_m *PeerEnvelopeSender) OnStart() error { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for OnStart") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + 
r0 = rf() +@@ -176,6 +230,10 @@ func (_m *PeerEnvelopeSender) OnStop() { + func (_m *PeerEnvelopeSender) Quit() <-chan struct{} { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for Quit") ++ } ++ + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() +@@ -192,6 +250,10 @@ func (_m *PeerEnvelopeSender) Quit() <-chan struct{} { + func (_m *PeerEnvelopeSender) RemoteAddr() net.Addr { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for RemoteAddr") ++ } ++ + var r0 net.Addr + if rf, ok := ret.Get(0).(func() net.Addr); ok { + r0 = rf() +@@ -208,6 +270,10 @@ func (_m *PeerEnvelopeSender) RemoteAddr() net.Addr { + func (_m *PeerEnvelopeSender) RemoteIP() net.IP { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for RemoteIP") ++ } ++ + var r0 net.IP + if rf, ok := ret.Get(0).(func() net.IP); ok { + r0 = rf() +@@ -224,6 +290,10 @@ func (_m *PeerEnvelopeSender) RemoteIP() net.IP { + func (_m *PeerEnvelopeSender) Reset() error { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for Reset") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() +@@ -238,6 +308,10 @@ func (_m *PeerEnvelopeSender) Reset() error { + func (_m *PeerEnvelopeSender) Send(_a0 byte, _a1 []byte) bool { + ret := _m.Called(_a0, _a1) + ++ if len(ret) == 0 { ++ panic("no return value specified for Send") ++ } ++ + var r0 bool + if rf, ok := ret.Get(0).(func(byte, []byte) bool); ok { + r0 = rf(_a0, _a1) +@@ -252,6 +326,10 @@ func (_m *PeerEnvelopeSender) Send(_a0 byte, _a1 []byte) bool { + func (_m *PeerEnvelopeSender) SendEnvelope(_a0 p2p.Envelope) bool { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for SendEnvelope") ++ } ++ + var r0 bool + if rf, ok := ret.Get(0).(func(p2p.Envelope) bool); ok { + r0 = rf(_a0) +@@ -281,6 +359,10 @@ func (_m *PeerEnvelopeSender) 
SetRemovalFailed() { + func (_m *PeerEnvelopeSender) SocketAddr() *p2p.NetAddress { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for SocketAddr") ++ } ++ + var r0 *p2p.NetAddress + if rf, ok := ret.Get(0).(func() *p2p.NetAddress); ok { + r0 = rf() +@@ -297,6 +379,10 @@ func (_m *PeerEnvelopeSender) SocketAddr() *p2p.NetAddress { + func (_m *PeerEnvelopeSender) Start() error { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for Start") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() +@@ -311,6 +397,10 @@ func (_m *PeerEnvelopeSender) Start() error { + func (_m *PeerEnvelopeSender) Status() conn.ConnectionStatus { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for Status") ++ } ++ + var r0 conn.ConnectionStatus + if rf, ok := ret.Get(0).(func() conn.ConnectionStatus); ok { + r0 = rf() +@@ -325,6 +415,10 @@ func (_m *PeerEnvelopeSender) Status() conn.ConnectionStatus { + func (_m *PeerEnvelopeSender) Stop() error { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for Stop") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() +@@ -339,6 +433,10 @@ func (_m *PeerEnvelopeSender) Stop() error { + func (_m *PeerEnvelopeSender) String() string { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for String") ++ } ++ + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() +@@ -353,6 +451,10 @@ func (_m *PeerEnvelopeSender) String() string { + func (_m *PeerEnvelopeSender) TrySend(_a0 byte, _a1 []byte) bool { + ret := _m.Called(_a0, _a1) + ++ if len(ret) == 0 { ++ panic("no return value specified for TrySend") ++ } ++ + var r0 bool + if rf, ok := ret.Get(0).(func(byte, []byte) bool); ok { + r0 = rf(_a0, _a1) +@@ -367,6 +469,10 @@ func (_m *PeerEnvelopeSender) TrySend(_a0 byte, _a1 []byte) bool { + func (_m *PeerEnvelopeSender) 
TrySendEnvelope(_a0 p2p.Envelope) bool { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for TrySendEnvelope") ++ } ++ + var r0 bool + if rf, ok := ret.Get(0).(func(p2p.Envelope) bool); ok { + r0 = rf(_a0) +@@ -377,13 +483,12 @@ func (_m *PeerEnvelopeSender) TrySendEnvelope(_a0 p2p.Envelope) bool { + return r0 + } + +-type mockConstructorTestingTNewPeerEnvelopeSender interface { ++// NewPeerEnvelopeSender creates a new instance of PeerEnvelopeSender. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. ++// The first argument is typically a *testing.T value. ++func NewPeerEnvelopeSender(t interface { + mock.TestingT + Cleanup(func()) +-} +- +-// NewPeerEnvelopeSender creates a new instance of PeerEnvelopeSender. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +-func NewPeerEnvelopeSender(t mockConstructorTestingTNewPeerEnvelopeSender) *PeerEnvelopeSender { ++}) *PeerEnvelopeSender { + mock := &PeerEnvelopeSender{} + mock.Mock.Test(t) + diff --git a/patches/p2p/netaddress.go.patch b/patches/p2p/netaddress.go.patch new file mode 100644 index 00000000000..a9ff8d9c471 --- /dev/null +++ b/patches/p2p/netaddress.go.patch @@ -0,0 +1,24 @@ +diff --git a/p2p/netaddress.go b/p2p/netaddress.go +index b8e0c2419..dec331758 100644 +--- a/p2p/netaddress.go ++++ b/p2p/netaddress.go +@@ -57,6 +57,7 @@ func NewNetAddress(id ID, addr net.Addr) *NetAddress { + } + + ip := tcpAddr.IP ++ //nolint:gosec + port := uint16(tcpAddr.Port) + na := NewNetAddressIPPort(ip, port) + na.ID = id +@@ -146,8 +147,9 @@ func NetAddressFromProto(pb tmp2p.NetAddress) (*NetAddress, error) { + return nil, fmt.Errorf("invalid port number %v", pb.Port) + } + return &NetAddress{ +- ID: ID(pb.ID), +- IP: ip, ++ ID: ID(pb.ID), ++ IP: ip, ++ //nolint:gosec + Port: uint16(pb.Port), + }, nil + } diff --git a/patches/p2p/peer.go.patch b/patches/p2p/peer.go.patch new 
file mode 100644 index 00000000000..a1f1428f087 --- /dev/null +++ b/patches/p2p/peer.go.patch @@ -0,0 +1,141 @@ +diff --git a/p2p/peer.go b/p2p/peer.go +index 49c0dd4e4..bb04cd36c 100644 +--- a/p2p/peer.go ++++ b/p2p/peer.go +@@ -11,6 +11,8 @@ import ( + "github.com/tendermint/tendermint/libs/cmap" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/service" ++ "github.com/tendermint/tendermint/pkg/trace" ++ "github.com/tendermint/tendermint/pkg/trace/schema" + + cmtconn "github.com/tendermint/tendermint/p2p/conn" + ) +@@ -30,6 +32,8 @@ type Peer interface { + IsOutbound() bool // did we dial the peer + IsPersistent() bool // do we redial this peer when we disconnect + ++ HasIPChanged() bool // has the peer's IP changed ++ + CloseConn() error // close original connection + + NodeInfo() NodeInfo // peer's info +@@ -175,6 +179,7 @@ type peer struct { + Data *cmap.CMap + + metrics *Metrics ++ traceClient trace.Tracer + metricsTicker *time.Ticker + mlc *metricsLabelCache + +@@ -184,6 +189,12 @@ type peer struct { + + type PeerOption func(*peer) + ++func WithPeerTracer(t trace.Tracer) PeerOption { ++ return func(p *peer) { ++ p.traceClient = t ++ } ++} ++ + func newPeer( + pc peerConn, + mConfig cmtconn.MConnConfig, +@@ -203,6 +214,7 @@ func newPeer( + metricsTicker: time.NewTicker(metricsTickerDuration), + metrics: NopMetrics(), + mlc: mlc, ++ traceClient: trace.NoOpTracer(), + } + + p.mconn = createMConnection( +@@ -290,6 +302,18 @@ func (p *peer) IsPersistent() bool { + return p.peerConn.persistent + } + ++// HasIPChanged returns true and the new IP if the peer's IP has changed. ++func (p *peer) HasIPChanged() bool { ++ oldIP := p.ip ++ if oldIP == nil { ++ return false ++ } ++ // Reset the IP so we can get the new one ++ p.ip = nil ++ newIP := p.RemoteIP() ++ return !oldIP.Equal(newIP) ++} ++ + // NodeInfo returns a copy of the peer's NodeInfo. 
+ func (p *peer) NodeInfo() NodeInfo { + return p.nodeInfo +@@ -331,7 +355,12 @@ func (p *peer) SendEnvelope(e Envelope) bool { + } + res := p.Send(e.ChannelID, msgBytes) + if res { +- p.metrics.MessageSendBytesTotal.With("message_type", metricLabelValue).Add(float64(len(msgBytes))) ++ labels := []string{ ++ "message_type", metricLabelValue, ++ "chID", fmt.Sprintf("%#x", e.ChannelID), ++ "peer_id", string(p.ID()), ++ } ++ p.metrics.MessageSendBytesTotal.With(labels...).Add(float64(len(msgBytes))) + } + return res + } +@@ -380,7 +409,12 @@ func (p *peer) TrySendEnvelope(e Envelope) bool { + } + res := p.TrySend(e.ChannelID, msgBytes) + if res { +- p.metrics.MessageSendBytesTotal.With("message_type", metricLabelValue).Add(float64(len(msgBytes))) ++ labels := []string{ ++ "message_type", metricLabelValue, ++ "chID", fmt.Sprintf("%#x", e.ChannelID), ++ "peer_id", string(p.ID()), ++ } ++ p.metrics.MessageSendBytesTotal.With(labels...).Add(float64(len(msgBytes))) + } + return res + } +@@ -484,11 +518,14 @@ func (p *peer) metricsReporter() { + case <-p.metricsTicker.C: + status := p.mconn.Status() + var sendQueueSize float64 ++ queues := make(map[byte]int, len(status.Channels)) + for _, chStatus := range status.Channels { + sendQueueSize += float64(chStatus.SendQueueSize) ++ queues[chStatus.ID] = chStatus.SendQueueSize + } + + p.metrics.PeerPendingSendBytes.With("peer_id", string(p.ID())).Set(sendQueueSize) ++ schema.WritePendingBytes(p.traceClient, string(p.ID()), queues) + case <-p.Quit(): + return + } +@@ -521,18 +558,22 @@ func createMConnection( + if err != nil { + panic(fmt.Errorf("unmarshaling message: %s into type: %s", err, reflect.TypeOf(mt))) + } +- labels := []string{ +- "peer_id", string(p.ID()), +- "chID", fmt.Sprintf("%#x", chID), +- } ++ + if w, ok := msg.(Unwrapper); ok { + msg, err = w.Unwrap() + if err != nil { + panic(fmt.Errorf("unwrapping message: %s", err)) + } + } ++ ++ labels := []string{ ++ "peer_id", string(p.ID()), ++ "chID", fmt.Sprintf("%#x", 
chID), ++ } ++ + p.metrics.PeerReceiveBytesTotal.With(labels...).Add(float64(len(msgBytes))) +- p.metrics.MessageReceiveBytesTotal.With("message_type", p.mlc.ValueToMetricLabel(msg)).Add(float64(len(msgBytes))) ++ p.metrics.MessageReceiveBytesTotal.With(append(labels, "message_type", p.mlc.ValueToMetricLabel(msg))...).Add(float64(len(msgBytes))) ++ schema.WriteReceivedBytes(p.traceClient, string(p.ID()), chID, len(msgBytes)) + if nr, ok := reactor.(EnvelopeReceiver); ok { + nr.ReceiveEnvelope(Envelope{ + ChannelID: chID, diff --git a/patches/p2p/peer_set_test.go.patch b/patches/p2p/peer_set_test.go.patch new file mode 100644 index 00000000000..a350e12f278 --- /dev/null +++ b/patches/p2p/peer_set_test.go.patch @@ -0,0 +1,12 @@ +diff --git a/p2p/peer_set_test.go b/p2p/peer_set_test.go +index 6501dd77a..ca92c65cb 100644 +--- a/p2p/peer_set_test.go ++++ b/p2p/peer_set_test.go +@@ -27,6 +27,7 @@ func (mp *mockPeer) NodeInfo() NodeInfo { return DefaultNodeInfo{} + func (mp *mockPeer) Status() ConnectionStatus { return ConnectionStatus{} } + func (mp *mockPeer) ID() ID { return mp.id } + func (mp *mockPeer) IsOutbound() bool { return false } ++func (mp *mockPeer) HasIPChanged() bool { return false } + func (mp *mockPeer) IsPersistent() bool { return true } + func (mp *mockPeer) Get(s string) interface{} { return s } + func (mp *mockPeer) Set(string, interface{}) {} diff --git a/patches/p2p/pex/addrbook.go.patch b/patches/p2p/pex/addrbook.go.patch new file mode 100644 index 00000000000..c17224d1449 --- /dev/null +++ b/patches/p2p/pex/addrbook.go.patch @@ -0,0 +1,28 @@ +diff --git a/p2p/pex/addrbook.go b/p2p/pex/addrbook.go +index a6a6c6dbc..63218692d 100644 +--- a/p2p/pex/addrbook.go ++++ b/p2p/pex/addrbook.go +@@ -681,6 +681,7 @@ func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error { + return nil + } + // The more entries we have, the less likely we are to add more. 
++ //nolint:gosec + factor := int32(2 * len(ka.Buckets)) + if a.rand.Int31n(factor) != 0 { + return nil +@@ -849,6 +850,7 @@ func (a *addrBook) calcNewBucket(addr, src *p2p.NetAddress) (int, error) { + if err != nil { + return 0, err + } ++ //nolint:gosec + result := int(binary.BigEndian.Uint64(hash2) % newBucketCount) + return result, nil + } +@@ -875,6 +877,7 @@ func (a *addrBook) calcOldBucket(addr *p2p.NetAddress) (int, error) { + if err != nil { + return 0, err + } ++ //nolint:gosec + result := int(binary.BigEndian.Uint64(hash2) % oldBucketCount) + return result, nil + } diff --git a/patches/p2p/switch.go.patch b/patches/p2p/switch.go.patch new file mode 100644 index 00000000000..f220c318cc1 --- /dev/null +++ b/patches/p2p/switch.go.patch @@ -0,0 +1,109 @@ +diff --git a/p2p/switch.go b/p2p/switch.go +index 60d26729b..8bdaa9617 100644 +--- a/p2p/switch.go ++++ b/p2p/switch.go +@@ -12,6 +12,8 @@ import ( + "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/p2p/conn" ++ "github.com/tendermint/tendermint/pkg/trace" ++ "github.com/tendermint/tendermint/pkg/trace/schema" + ) + + const ( +@@ -91,8 +93,9 @@ type Switch struct { + + rng *rand.Rand // seed for randomizing dial times and orders + +- metrics *Metrics +- mlc *metricsLabelCache ++ metrics *Metrics ++ mlc *metricsLabelCache ++ traceClient trace.Tracer + } + + // NetAddress returns the address the switch is listening on. +@@ -126,6 +129,7 @@ func NewSwitch( + persistentPeersAddrs: make([]*NetAddress, 0), + unconditionalPeerIDs: make(map[ID]struct{}), + mlc: newMetricsLabelCache(), ++ traceClient: trace.NoOpTracer(), + } + + // Ensure we have a completely undeterministic PRNG. 
+@@ -155,6 +159,10 @@ func WithMetrics(metrics *Metrics) SwitchOption { + return func(sw *Switch) { sw.metrics = metrics } + } + ++func WithTracer(tracer trace.Tracer) SwitchOption { ++ return func(sw *Switch) { sw.traceClient = tracer } ++} ++ + //--------------------------------------------------------------------- + // Switch setup + +@@ -373,20 +381,37 @@ func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) { + sw.stopAndRemovePeer(peer, reason) + + if peer.IsPersistent() { +- var addr *NetAddress +- if peer.IsOutbound() { // socket address for outbound peers +- addr = peer.SocketAddr() +- } else { // self-reported address for inbound peers +- var err error +- addr, err = peer.NodeInfo().NetAddress() +- if err != nil { +- sw.Logger.Error("Wanted to reconnect to inbound peer, but self-reported address is wrong", +- "peer", peer, "err", err) +- return +- } ++ addr, err := sw.getPeerAddress(peer) ++ if err != nil { ++ sw.Logger.Error("Failed to get address for persistent peer", "peer", peer, "err", err) ++ return + } + go sw.reconnectToPeer(addr) + } ++ ++ if peer.HasIPChanged() { ++ addr, err := sw.getPeerAddress(peer) ++ if err != nil { ++ sw.Logger.Error("Failed to get address for peer with changed IP", "peer", peer, "err", err) ++ } ++ go sw.reconnectToPeer(addr) ++ } ++} ++ ++// getPeerAddress returns the appropriate NetAddress for a given peer, ++// handling both outbound and inbound peers. ++func (sw *Switch) getPeerAddress(peer Peer) (*NetAddress, error) { ++ if peer.IsOutbound() { ++ return peer.SocketAddr(), nil ++ } ++ // For inbound peers, get the self-reported address ++ addr, err := peer.NodeInfo().NetAddress() ++ if err != nil { ++ sw.Logger.Error("Failed to get address for inbound peer", ++ "peer", peer, "err", err) ++ return nil, err ++ } ++ return addr, nil + } + + // StopPeerGracefully disconnects from a peer gracefully. 
+@@ -398,6 +423,7 @@ func (sw *Switch) StopPeerGracefully(peer Peer) { + + func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) { + sw.transport.Cleanup(peer) ++ schema.WritePeerUpdate(sw.traceClient, string(peer.ID()), schema.PeerDisconnect, fmt.Sprintf("%v", reason)) + if err := peer.Stop(); err != nil { + sw.Logger.Error("error while stopping peer", "error", err) // TODO: should return error to be handled accordingly + } +@@ -883,6 +909,7 @@ func (sw *Switch) addPeer(p Peer) error { + return err + } + sw.metrics.Peers.Add(float64(1)) ++ schema.WritePeerUpdate(sw.traceClient, string(p.ID()), schema.PeerJoin, "") + + // Start all the reactor protocols on the peer. + for _, reactor := range sw.reactors { diff --git a/patches/p2p/test_util.go.patch b/patches/p2p/test_util.go.patch new file mode 100644 index 00000000000..77baf2473ea --- /dev/null +++ b/patches/p2p/test_util.go.patch @@ -0,0 +1,21 @@ +diff --git a/p2p/test_util.go b/p2p/test_util.go +index 32166043a..c52786305 100644 +--- a/p2p/test_util.go ++++ b/p2p/test_util.go +@@ -10,6 +10,7 @@ import ( + "github.com/tendermint/tendermint/libs/log" + cmtnet "github.com/tendermint/tendermint/libs/net" + cmtrand "github.com/tendermint/tendermint/libs/rand" ++ "github.com/tendermint/tendermint/pkg/trace" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/p2p/conn" +@@ -199,7 +200,7 @@ func MakeSwitch( + panic(err) + } + +- t := NewMultiplexTransport(nodeInfo, nodeKey, MConnConfig(cfg)) ++ t := NewMultiplexTransport(nodeInfo, nodeKey, MConnConfig(cfg), trace.NoOpTracer()) + + if err := t.Listen(*addr); err != nil { + panic(err) diff --git a/patches/p2p/transport.go.patch b/patches/p2p/transport.go.patch new file mode 100644 index 00000000000..7fb0e54c2fe --- /dev/null +++ b/patches/p2p/transport.go.patch @@ -0,0 +1,46 @@ +diff --git a/p2p/transport.go b/p2p/transport.go +index 416c94694..5c82f1f75 100644 +--- a/p2p/transport.go ++++ b/p2p/transport.go +@@ -12,6 +12,7 @@ 
import ( + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/libs/protoio" + "github.com/tendermint/tendermint/p2p/conn" ++ "github.com/tendermint/tendermint/pkg/trace" + tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" + ) + +@@ -158,6 +159,9 @@ type MultiplexTransport struct { + // peer currently. All relevant configuration should be refactored into options + // with sane defaults. + mConfig conn.MConnConfig ++ ++ // the tracer is passed to peers for collecting trace data ++ tracer trace.Tracer + } + + // Test multiplexTransport for interface completeness. +@@ -169,6 +173,7 @@ func NewMultiplexTransport( + nodeInfo NodeInfo, + nodeKey NodeKey, + mConfig conn.MConnConfig, ++ tracer trace.Tracer, + ) *MultiplexTransport { + return &MultiplexTransport{ + acceptc: make(chan accept), +@@ -181,6 +186,7 @@ func NewMultiplexTransport( + nodeKey: nodeKey, + conns: NewConnSet(), + resolver: net.DefaultResolver, ++ tracer: tracer, + } + } + +@@ -527,6 +533,7 @@ func (mt *MultiplexTransport) wrapPeer( + cfg.onPeerError, + cfg.mlc, + PeerMetrics(cfg.metrics), ++ WithPeerTracer(mt.tracer), + ) + + return p diff --git a/patches/p2p/transport_test.go.patch b/patches/p2p/transport_test.go.patch new file mode 100644 index 00000000000..0b94cce4486 --- /dev/null +++ b/patches/p2p/transport_test.go.patch @@ -0,0 +1,21 @@ +diff --git a/p2p/transport_test.go b/p2p/transport_test.go +index adaab3995..151ac7edf 100644 +--- a/p2p/transport_test.go ++++ b/p2p/transport_test.go +@@ -13,6 +13,7 @@ import ( + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/libs/protoio" + "github.com/tendermint/tendermint/p2p/conn" ++ "github.com/tendermint/tendermint/pkg/trace" + tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" + ) + +@@ -30,7 +31,7 @@ func newMultiplexTransport( + nodeKey NodeKey, + ) *MultiplexTransport { + return NewMultiplexTransport( +- nodeInfo, nodeKey, conn.DefaultMConnConfig(), ++ nodeInfo, 
nodeKey, conn.DefaultMConnConfig(), trace.NoOpTracer(), + ) + } + diff --git a/patches/proto/gogoproto/gogo.pb.go.patch b/patches/proto/gogoproto/gogo.pb.go.patch new file mode 100644 index 00000000000..bbe86d7b252 --- /dev/null +++ b/patches/proto/gogoproto/gogo.pb.go.patch @@ -0,0 +1,894 @@ +diff --git a/proto/gogoproto/gogo.pb.go b/proto/gogoproto/gogo.pb.go +new file mode 100644 +index 000000000..9a76ba844 +--- /dev/null ++++ b/proto/gogoproto/gogo.pb.go +@@ -0,0 +1,888 @@ ++// Code generated by protoc-gen-gogo. DO NOT EDIT. ++// source: gogoproto/gogo.proto ++ ++package gogoproto ++ ++import ( ++ fmt "fmt" ++ proto "github.com/gogo/protobuf/proto" ++ descriptorpb "google.golang.org/protobuf/types/descriptorpb" ++ math "math" ++) ++ ++// Reference imports to suppress errors if they are not otherwise used. ++var _ = proto.Marshal ++var _ = fmt.Errorf ++var _ = math.Inf ++ ++// This is a compile-time assertion to ensure that this generated file ++// is compatible with the proto package it is being compiled against. ++// A compilation error at this line likely means your copy of the ++// proto package needs to be updated. 
++const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package ++ ++var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.EnumOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 62001, ++ Name: "gogoproto.goproto_enum_prefix", ++ Tag: "varint,62001,opt,name=goproto_enum_prefix", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_GoprotoEnumStringer = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.EnumOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 62021, ++ Name: "gogoproto.goproto_enum_stringer", ++ Tag: "varint,62021,opt,name=goproto_enum_stringer", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_EnumStringer = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.EnumOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 62022, ++ Name: "gogoproto.enum_stringer", ++ Tag: "varint,62022,opt,name=enum_stringer", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_EnumCustomname = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.EnumOptions)(nil), ++ ExtensionType: (*string)(nil), ++ Field: 62023, ++ Name: "gogoproto.enum_customname", ++ Tag: "bytes,62023,opt,name=enum_customname", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Enumdecl = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.EnumOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 62024, ++ Name: "gogoproto.enumdecl", ++ Tag: "varint,62024,opt,name=enumdecl", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_EnumvalueCustomname = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.EnumValueOptions)(nil), ++ ExtensionType: (*string)(nil), ++ Field: 66001, ++ Name: "gogoproto.enumvalue_customname", ++ Tag: "bytes,66001,opt,name=enumvalue_customname", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_GoprotoGettersAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63001, ++ Name: "gogoproto.goproto_getters_all", ++ Tag: 
"varint,63001,opt,name=goproto_getters_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63002, ++ Name: "gogoproto.goproto_enum_prefix_all", ++ Tag: "varint,63002,opt,name=goproto_enum_prefix_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_GoprotoStringerAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63003, ++ Name: "gogoproto.goproto_stringer_all", ++ Tag: "varint,63003,opt,name=goproto_stringer_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_VerboseEqualAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63004, ++ Name: "gogoproto.verbose_equal_all", ++ Tag: "varint,63004,opt,name=verbose_equal_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_FaceAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63005, ++ Name: "gogoproto.face_all", ++ Tag: "varint,63005,opt,name=face_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_GostringAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63006, ++ Name: "gogoproto.gostring_all", ++ Tag: "varint,63006,opt,name=gostring_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_PopulateAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63007, ++ Name: "gogoproto.populate_all", ++ Tag: "varint,63007,opt,name=populate_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_StringerAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63008, ++ Name: "gogoproto.stringer_all", ++ Tag: "varint,63008,opt,name=stringer_all", ++ Filename: 
"gogoproto/gogo.proto", ++} ++ ++var E_OnlyoneAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63009, ++ Name: "gogoproto.onlyone_all", ++ Tag: "varint,63009,opt,name=onlyone_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_EqualAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63013, ++ Name: "gogoproto.equal_all", ++ Tag: "varint,63013,opt,name=equal_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_DescriptionAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63014, ++ Name: "gogoproto.description_all", ++ Tag: "varint,63014,opt,name=description_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_TestgenAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63015, ++ Name: "gogoproto.testgen_all", ++ Tag: "varint,63015,opt,name=testgen_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_BenchgenAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63016, ++ Name: "gogoproto.benchgen_all", ++ Tag: "varint,63016,opt,name=benchgen_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_MarshalerAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63017, ++ Name: "gogoproto.marshaler_all", ++ Tag: "varint,63017,opt,name=marshaler_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_UnmarshalerAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63018, ++ Name: "gogoproto.unmarshaler_all", ++ Tag: "varint,63018,opt,name=unmarshaler_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_StableMarshalerAll = &proto.ExtensionDesc{ ++ ExtendedType: 
(*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63019, ++ Name: "gogoproto.stable_marshaler_all", ++ Tag: "varint,63019,opt,name=stable_marshaler_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_SizerAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63020, ++ Name: "gogoproto.sizer_all", ++ Tag: "varint,63020,opt,name=sizer_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63021, ++ Name: "gogoproto.goproto_enum_stringer_all", ++ Tag: "varint,63021,opt,name=goproto_enum_stringer_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_EnumStringerAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63022, ++ Name: "gogoproto.enum_stringer_all", ++ Tag: "varint,63022,opt,name=enum_stringer_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63023, ++ Name: "gogoproto.unsafe_marshaler_all", ++ Tag: "varint,63023,opt,name=unsafe_marshaler_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63024, ++ Name: "gogoproto.unsafe_unmarshaler_all", ++ Tag: "varint,63024,opt,name=unsafe_unmarshaler_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63025, ++ Name: "gogoproto.goproto_extensions_map_all", ++ Tag: "varint,63025,opt,name=goproto_extensions_map_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var 
E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63026, ++ Name: "gogoproto.goproto_unrecognized_all", ++ Tag: "varint,63026,opt,name=goproto_unrecognized_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_GogoprotoImport = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63027, ++ Name: "gogoproto.gogoproto_import", ++ Tag: "varint,63027,opt,name=gogoproto_import", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_ProtosizerAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63028, ++ Name: "gogoproto.protosizer_all", ++ Tag: "varint,63028,opt,name=protosizer_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_CompareAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63029, ++ Name: "gogoproto.compare_all", ++ Tag: "varint,63029,opt,name=compare_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_TypedeclAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63030, ++ Name: "gogoproto.typedecl_all", ++ Tag: "varint,63030,opt,name=typedecl_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_EnumdeclAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63031, ++ Name: "gogoproto.enumdecl_all", ++ Tag: "varint,63031,opt,name=enumdecl_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_GoprotoRegistration = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63032, ++ Name: "gogoproto.goproto_registration", ++ Tag: "varint,63032,opt,name=goproto_registration", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_MessagenameAll = &proto.ExtensionDesc{ 
++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63033, ++ Name: "gogoproto.messagename_all", ++ Tag: "varint,63033,opt,name=messagename_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_GoprotoSizecacheAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63034, ++ Name: "gogoproto.goproto_sizecache_all", ++ Tag: "varint,63034,opt,name=goproto_sizecache_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FileOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 63035, ++ Name: "gogoproto.goproto_unkeyed_all", ++ Tag: "varint,63035,opt,name=goproto_unkeyed_all", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_GoprotoGetters = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64001, ++ Name: "gogoproto.goproto_getters", ++ Tag: "varint,64001,opt,name=goproto_getters", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_GoprotoStringer = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64003, ++ Name: "gogoproto.goproto_stringer", ++ Tag: "varint,64003,opt,name=goproto_stringer", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_VerboseEqual = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64004, ++ Name: "gogoproto.verbose_equal", ++ Tag: "varint,64004,opt,name=verbose_equal", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Face = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64005, ++ Name: "gogoproto.face", ++ Tag: "varint,64005,opt,name=face", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Gostring = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), 
++ ExtensionType: (*bool)(nil), ++ Field: 64006, ++ Name: "gogoproto.gostring", ++ Tag: "varint,64006,opt,name=gostring", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Populate = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64007, ++ Name: "gogoproto.populate", ++ Tag: "varint,64007,opt,name=populate", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Stringer = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 67008, ++ Name: "gogoproto.stringer", ++ Tag: "varint,67008,opt,name=stringer", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Onlyone = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64009, ++ Name: "gogoproto.onlyone", ++ Tag: "varint,64009,opt,name=onlyone", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Equal = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64013, ++ Name: "gogoproto.equal", ++ Tag: "varint,64013,opt,name=equal", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Description = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64014, ++ Name: "gogoproto.description", ++ Tag: "varint,64014,opt,name=description", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Testgen = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64015, ++ Name: "gogoproto.testgen", ++ Tag: "varint,64015,opt,name=testgen", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Benchgen = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64016, ++ Name: "gogoproto.benchgen", ++ Tag: "varint,64016,opt,name=benchgen", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var 
E_Marshaler = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64017, ++ Name: "gogoproto.marshaler", ++ Tag: "varint,64017,opt,name=marshaler", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Unmarshaler = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64018, ++ Name: "gogoproto.unmarshaler", ++ Tag: "varint,64018,opt,name=unmarshaler", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_StableMarshaler = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64019, ++ Name: "gogoproto.stable_marshaler", ++ Tag: "varint,64019,opt,name=stable_marshaler", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Sizer = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64020, ++ Name: "gogoproto.sizer", ++ Tag: "varint,64020,opt,name=sizer", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_UnsafeMarshaler = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64023, ++ Name: "gogoproto.unsafe_marshaler", ++ Tag: "varint,64023,opt,name=unsafe_marshaler", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64024, ++ Name: "gogoproto.unsafe_unmarshaler", ++ Tag: "varint,64024,opt,name=unsafe_unmarshaler", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64025, ++ Name: "gogoproto.goproto_extensions_map", ++ Tag: "varint,64025,opt,name=goproto_extensions_map", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_GoprotoUnrecognized = &proto.ExtensionDesc{ ++ 
ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64026, ++ Name: "gogoproto.goproto_unrecognized", ++ Tag: "varint,64026,opt,name=goproto_unrecognized", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Protosizer = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64028, ++ Name: "gogoproto.protosizer", ++ Tag: "varint,64028,opt,name=protosizer", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Compare = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64029, ++ Name: "gogoproto.compare", ++ Tag: "varint,64029,opt,name=compare", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Typedecl = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64030, ++ Name: "gogoproto.typedecl", ++ Tag: "varint,64030,opt,name=typedecl", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Messagename = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64033, ++ Name: "gogoproto.messagename", ++ Tag: "varint,64033,opt,name=messagename", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_GoprotoSizecache = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64034, ++ Name: "gogoproto.goproto_sizecache", ++ Tag: "varint,64034,opt,name=goproto_sizecache", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_GoprotoUnkeyed = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.MessageOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 64035, ++ Name: "gogoproto.goproto_unkeyed", ++ Tag: "varint,64035,opt,name=goproto_unkeyed", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Nullable = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FieldOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 
65001, ++ Name: "gogoproto.nullable", ++ Tag: "varint,65001,opt,name=nullable", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Embed = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FieldOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 65002, ++ Name: "gogoproto.embed", ++ Tag: "varint,65002,opt,name=embed", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Customtype = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FieldOptions)(nil), ++ ExtensionType: (*string)(nil), ++ Field: 65003, ++ Name: "gogoproto.customtype", ++ Tag: "bytes,65003,opt,name=customtype", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Customname = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FieldOptions)(nil), ++ ExtensionType: (*string)(nil), ++ Field: 65004, ++ Name: "gogoproto.customname", ++ Tag: "bytes,65004,opt,name=customname", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Jsontag = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FieldOptions)(nil), ++ ExtensionType: (*string)(nil), ++ Field: 65005, ++ Name: "gogoproto.jsontag", ++ Tag: "bytes,65005,opt,name=jsontag", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Moretags = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FieldOptions)(nil), ++ ExtensionType: (*string)(nil), ++ Field: 65006, ++ Name: "gogoproto.moretags", ++ Tag: "bytes,65006,opt,name=moretags", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Casttype = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FieldOptions)(nil), ++ ExtensionType: (*string)(nil), ++ Field: 65007, ++ Name: "gogoproto.casttype", ++ Tag: "bytes,65007,opt,name=casttype", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Castkey = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FieldOptions)(nil), ++ ExtensionType: (*string)(nil), ++ Field: 65008, ++ Name: "gogoproto.castkey", ++ Tag: "bytes,65008,opt,name=castkey", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Castvalue = &proto.ExtensionDesc{ ++ ExtendedType: 
(*descriptorpb.FieldOptions)(nil), ++ ExtensionType: (*string)(nil), ++ Field: 65009, ++ Name: "gogoproto.castvalue", ++ Tag: "bytes,65009,opt,name=castvalue", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Stdtime = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FieldOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 65010, ++ Name: "gogoproto.stdtime", ++ Tag: "varint,65010,opt,name=stdtime", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Stdduration = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FieldOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 65011, ++ Name: "gogoproto.stdduration", ++ Tag: "varint,65011,opt,name=stdduration", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Wktpointer = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FieldOptions)(nil), ++ ExtensionType: (*bool)(nil), ++ Field: 65012, ++ Name: "gogoproto.wktpointer", ++ Tag: "varint,65012,opt,name=wktpointer", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++var E_Castrepeated = &proto.ExtensionDesc{ ++ ExtendedType: (*descriptorpb.FieldOptions)(nil), ++ ExtensionType: (*string)(nil), ++ Field: 65013, ++ Name: "gogoproto.castrepeated", ++ Tag: "bytes,65013,opt,name=castrepeated", ++ Filename: "gogoproto/gogo.proto", ++} ++ ++func init() { ++ proto.RegisterExtension(E_GoprotoEnumPrefix) ++ proto.RegisterExtension(E_GoprotoEnumStringer) ++ proto.RegisterExtension(E_EnumStringer) ++ proto.RegisterExtension(E_EnumCustomname) ++ proto.RegisterExtension(E_Enumdecl) ++ proto.RegisterExtension(E_EnumvalueCustomname) ++ proto.RegisterExtension(E_GoprotoGettersAll) ++ proto.RegisterExtension(E_GoprotoEnumPrefixAll) ++ proto.RegisterExtension(E_GoprotoStringerAll) ++ proto.RegisterExtension(E_VerboseEqualAll) ++ proto.RegisterExtension(E_FaceAll) ++ proto.RegisterExtension(E_GostringAll) ++ proto.RegisterExtension(E_PopulateAll) ++ proto.RegisterExtension(E_StringerAll) ++ proto.RegisterExtension(E_OnlyoneAll) ++ proto.RegisterExtension(E_EqualAll) ++ 
proto.RegisterExtension(E_DescriptionAll) ++ proto.RegisterExtension(E_TestgenAll) ++ proto.RegisterExtension(E_BenchgenAll) ++ proto.RegisterExtension(E_MarshalerAll) ++ proto.RegisterExtension(E_UnmarshalerAll) ++ proto.RegisterExtension(E_StableMarshalerAll) ++ proto.RegisterExtension(E_SizerAll) ++ proto.RegisterExtension(E_GoprotoEnumStringerAll) ++ proto.RegisterExtension(E_EnumStringerAll) ++ proto.RegisterExtension(E_UnsafeMarshalerAll) ++ proto.RegisterExtension(E_UnsafeUnmarshalerAll) ++ proto.RegisterExtension(E_GoprotoExtensionsMapAll) ++ proto.RegisterExtension(E_GoprotoUnrecognizedAll) ++ proto.RegisterExtension(E_GogoprotoImport) ++ proto.RegisterExtension(E_ProtosizerAll) ++ proto.RegisterExtension(E_CompareAll) ++ proto.RegisterExtension(E_TypedeclAll) ++ proto.RegisterExtension(E_EnumdeclAll) ++ proto.RegisterExtension(E_GoprotoRegistration) ++ proto.RegisterExtension(E_MessagenameAll) ++ proto.RegisterExtension(E_GoprotoSizecacheAll) ++ proto.RegisterExtension(E_GoprotoUnkeyedAll) ++ proto.RegisterExtension(E_GoprotoGetters) ++ proto.RegisterExtension(E_GoprotoStringer) ++ proto.RegisterExtension(E_VerboseEqual) ++ proto.RegisterExtension(E_Face) ++ proto.RegisterExtension(E_Gostring) ++ proto.RegisterExtension(E_Populate) ++ proto.RegisterExtension(E_Stringer) ++ proto.RegisterExtension(E_Onlyone) ++ proto.RegisterExtension(E_Equal) ++ proto.RegisterExtension(E_Description) ++ proto.RegisterExtension(E_Testgen) ++ proto.RegisterExtension(E_Benchgen) ++ proto.RegisterExtension(E_Marshaler) ++ proto.RegisterExtension(E_Unmarshaler) ++ proto.RegisterExtension(E_StableMarshaler) ++ proto.RegisterExtension(E_Sizer) ++ proto.RegisterExtension(E_UnsafeMarshaler) ++ proto.RegisterExtension(E_UnsafeUnmarshaler) ++ proto.RegisterExtension(E_GoprotoExtensionsMap) ++ proto.RegisterExtension(E_GoprotoUnrecognized) ++ proto.RegisterExtension(E_Protosizer) ++ proto.RegisterExtension(E_Compare) ++ proto.RegisterExtension(E_Typedecl) ++ 
proto.RegisterExtension(E_Messagename) ++ proto.RegisterExtension(E_GoprotoSizecache) ++ proto.RegisterExtension(E_GoprotoUnkeyed) ++ proto.RegisterExtension(E_Nullable) ++ proto.RegisterExtension(E_Embed) ++ proto.RegisterExtension(E_Customtype) ++ proto.RegisterExtension(E_Customname) ++ proto.RegisterExtension(E_Jsontag) ++ proto.RegisterExtension(E_Moretags) ++ proto.RegisterExtension(E_Casttype) ++ proto.RegisterExtension(E_Castkey) ++ proto.RegisterExtension(E_Castvalue) ++ proto.RegisterExtension(E_Stdtime) ++ proto.RegisterExtension(E_Stdduration) ++ proto.RegisterExtension(E_Wktpointer) ++ proto.RegisterExtension(E_Castrepeated) ++} ++ ++func init() { proto.RegisterFile("gogoproto/gogo.proto", fileDescriptor_c586470e9b64aee7) } ++ ++var fileDescriptor_c586470e9b64aee7 = []byte{ ++ // 1382 bytes of a gzipped FileDescriptorProto ++ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6c, 0x1c, 0x45, ++ 0x17, 0x80, 0x63, 0xfd, 0x89, 0x62, 0x97, 0xed, 0x38, 0x5e, 0xfe, 0x10, 0x22, 0x30, 0x81, 0x13, ++ 0x27, 0xe7, 0x14, 0xa1, 0x94, 0x15, 0x45, 0x8e, 0xe5, 0x58, 0x41, 0x24, 0x18, 0x27, 0x0e, 0x9b, ++ 0xd0, 0xa8, 0x67, 0xa6, 0xdc, 0x6e, 0xd2, 0xdd, 0xd5, 0x74, 0x57, 0x87, 0x38, 0x37, 0x14, 0x16, ++ 0x21, 0x04, 0x84, 0x45, 0x82, 0x84, 0x24, 0x10, 0x10, 0xfb, 0x1a, 0xf6, 0xe5, 0xc2, 0x05, 0xc8, ++ 0x31, 0xdc, 0x38, 0xa2, 0x98, 0x0b, 0x60, 0x76, 0x73, 0xf2, 0x05, 0xbd, 0xee, 0xf7, 0x7a, 0xaa, ++ 0xdb, 0x23, 0x55, 0xcd, 0x6d, 0x3c, 0xae, 0xef, 0x73, 0xf5, 0x7b, 0x55, 0xef, 0x3d, 0x37, 0x1b, ++ 0x71, 0xa5, 0x2b, 0xa3, 0x58, 0x2a, 0xb9, 0x03, 0x3e, 0x8d, 0x65, 0x1f, 0x87, 0x7a, 0x8a, 0x6f, ++ 0xb7, 0x6d, 0x77, 0xa5, 0x74, 0x7d, 0xb1, 0x23, 0xfb, 0xa9, 0x9e, 0xce, 0xef, 0x68, 0x8a, 0xa4, ++ 0x11, 0x7b, 0x91, 0x92, 0x71, 0xbe, 0x98, 0x1f, 0x64, 0xc3, 0xb8, 0xb8, 0x26, 0xc2, 0x34, 0xa8, ++ 0x45, 0xb1, 0x98, 0xf7, 0x8e, 0x0f, 0x5d, 0x33, 0x96, 0x93, 0x63, 0x44, 0x8e, 0x4d, 0x85, 0x69, ++ 0x70, 0x6b, 0xa4, 0x3c, 0x19, 0x26, 0x5b, 0x2f, 0xfe, 0xf4, 
0xbf, 0xed, 0x5d, 0x37, 0x76, 0xcf, ++ 0x0e, 0x22, 0x0a, 0xbf, 0x9b, 0xc9, 0x40, 0x3e, 0xcb, 0xfe, 0x5f, 0xf2, 0x25, 0x2a, 0xf6, 0x42, ++ 0x57, 0xc4, 0x06, 0xe3, 0x37, 0x68, 0x1c, 0xd6, 0x8c, 0x87, 0x10, 0xe5, 0x93, 0xac, 0xbf, 0x13, ++ 0xd7, 0xb7, 0xe8, 0xea, 0x13, 0xba, 0x64, 0x9a, 0x0d, 0x64, 0x92, 0x46, 0x9a, 0x28, 0x19, 0x84, ++ 0x4e, 0x20, 0x0c, 0x9a, 0xef, 0x32, 0x4d, 0xcf, 0xec, 0x26, 0xc0, 0x26, 0x0b, 0x8a, 0x73, 0xd6, ++ 0x0d, 0xdf, 0x34, 0x45, 0xc3, 0x37, 0x18, 0x2e, 0xe1, 0x46, 0x8a, 0xf5, 0xfc, 0x08, 0x1b, 0x81, ++ 0xcf, 0xc7, 0x1c, 0x3f, 0x15, 0xfa, 0x4e, 0xae, 0x6f, 0xeb, 0x39, 0x02, 0xcb, 0x48, 0xf6, 0xfd, ++ 0xc9, 0xf5, 0xd9, 0x76, 0x86, 0x0b, 0x81, 0xb6, 0x27, 0x2d, 0x8b, 0xae, 0x50, 0x4a, 0xc4, 0x49, ++ 0xcd, 0xf1, 0xdb, 0x6d, 0x6f, 0x9f, 0xe7, 0x17, 0xc6, 0xd3, 0xcb, 0xe5, 0x2c, 0x4e, 0xe7, 0xe4, ++ 0x84, 0xef, 0xf3, 0x39, 0x76, 0x55, 0x9b, 0x53, 0x61, 0xe1, 0x3c, 0x83, 0xce, 0x91, 0x35, 0x27, ++ 0x03, 0xb4, 0x33, 0x8c, 0xbe, 0x2f, 0x72, 0x69, 0xe1, 0x7c, 0x01, 0x9d, 0x43, 0xc8, 0x52, 0x4a, ++ 0xc1, 0x78, 0x33, 0x1b, 0x3c, 0x26, 0xe2, 0xba, 0x4c, 0x44, 0x4d, 0xdc, 0x97, 0x3a, 0xbe, 0x85, ++ 0xee, 0x2c, 0xea, 0x06, 0x10, 0x9c, 0x02, 0x0e, 0x5c, 0xbb, 0x58, 0xf7, 0xbc, 0xd3, 0x10, 0x16, ++ 0x8a, 0x73, 0xa8, 0xd8, 0x08, 0xeb, 0x01, 0x9d, 0x60, 0x7d, 0xae, 0xcc, 0x1f, 0xc9, 0x02, 0x3f, ++ 0x8f, 0x78, 0x2f, 0x31, 0xa8, 0x88, 0x64, 0x94, 0xfa, 0x8e, 0xb2, 0xd9, 0xc1, 0x8b, 0xa4, 0x20, ++ 0x06, 0x15, 0x1d, 0x84, 0xf5, 0x25, 0x52, 0x24, 0x5a, 0x3c, 0xf7, 0xb0, 0x5e, 0x19, 0xfa, 0x8b, ++ 0x32, 0xb4, 0xd9, 0xc4, 0x05, 0x34, 0x30, 0x44, 0x40, 0x30, 0xce, 0x7a, 0x6c, 0x13, 0xf1, 0xea, ++ 0x32, 0x5d, 0x0f, 0xca, 0xc0, 0x34, 0x1b, 0xa0, 0x02, 0xe5, 0xc9, 0xd0, 0x42, 0xf1, 0x1a, 0x2a, ++ 0x36, 0x69, 0x18, 0x3e, 0x86, 0x12, 0x89, 0x72, 0x85, 0x8d, 0xe4, 0x75, 0x7a, 0x0c, 0x44, 0x30, ++ 0x94, 0x75, 0x11, 0x36, 0x16, 0xec, 0x0c, 0x6f, 0x50, 0x28, 0x89, 0x01, 0xc5, 0x24, 0xeb, 0x0f, ++ 0x9c, 0x38, 0x59, 0x70, 0x7c, 0xab, 0x74, 0xbc, 0x89, 0x8e, 0xbe, 0x02, 0xc2, 
0x88, 0xa4, 0x61, ++ 0x27, 0x9a, 0xb7, 0x28, 0x22, 0x1a, 0x86, 0x57, 0x2f, 0x51, 0x4e, 0xdd, 0x17, 0xb5, 0x4e, 0x6c, ++ 0x6f, 0xd3, 0xd5, 0xcb, 0xd9, 0x03, 0xba, 0x71, 0x9c, 0xf5, 0x24, 0xde, 0x09, 0x2b, 0xcd, 0x3b, ++ 0x94, 0xe9, 0x0c, 0x00, 0xf8, 0x4e, 0x76, 0x75, 0xdb, 0x36, 0x61, 0x21, 0x7b, 0x17, 0x65, 0x5b, ++ 0xda, 0xb4, 0x0a, 0x2c, 0x09, 0x9d, 0x2a, 0xdf, 0xa3, 0x92, 0x20, 0x2a, 0xae, 0x19, 0x36, 0x92, ++ 0x86, 0x89, 0x33, 0xdf, 0x59, 0xd4, 0xde, 0xa7, 0xa8, 0xe5, 0x6c, 0x29, 0x6a, 0x87, 0xd9, 0x16, ++ 0x34, 0x76, 0x96, 0xd7, 0x0f, 0xa8, 0xb0, 0xe6, 0xf4, 0x5c, 0x39, 0xbb, 0x77, 0xb3, 0x6d, 0x45, ++ 0x38, 0x8f, 0x2b, 0x11, 0x26, 0xc0, 0xd4, 0x02, 0x27, 0xb2, 0x30, 0x5f, 0x44, 0x33, 0x55, 0xfc, ++ 0xa9, 0x42, 0x70, 0xc0, 0x89, 0x40, 0x7e, 0x07, 0xdb, 0x4a, 0xf2, 0x34, 0x8c, 0x45, 0x43, 0xba, ++ 0xa1, 0x77, 0x42, 0x34, 0x2d, 0xd4, 0x1f, 0x56, 0x52, 0x35, 0xa7, 0xe1, 0x60, 0xde, 0xcf, 0x36, ++ 0x17, 0xb3, 0x4a, 0xcd, 0x0b, 0x22, 0x19, 0x2b, 0x83, 0xf1, 0x23, 0xca, 0x54, 0xc1, 0xed, 0xcf, ++ 0x30, 0x3e, 0xc5, 0x36, 0x65, 0x3f, 0xda, 0x1e, 0xc9, 0x8f, 0x51, 0xd4, 0xdf, 0xa2, 0xb0, 0x70, ++ 0x34, 0x64, 0x10, 0x39, 0xb1, 0x4d, 0xfd, 0xfb, 0x84, 0x0a, 0x07, 0x22, 0x58, 0x38, 0xd4, 0x62, ++ 0x24, 0xa0, 0xdb, 0x5b, 0x18, 0x3e, 0xa5, 0xc2, 0x41, 0x0c, 0x2a, 0x68, 0x60, 0xb0, 0x50, 0x7c, ++ 0x46, 0x0a, 0x62, 0x40, 0x71, 0x5b, 0xab, 0xd1, 0xc6, 0xc2, 0xf5, 0x12, 0x15, 0x3b, 0xb0, 0xda, ++ 0xa0, 0xfa, 0x7c, 0xb9, 0x3c, 0x84, 0xcd, 0x6a, 0x28, 0x54, 0xa2, 0x40, 0x24, 0x89, 0xe3, 0x0a, ++ 0x98, 0x38, 0x2c, 0x36, 0xf6, 0x05, 0x55, 0x22, 0x0d, 0x83, 0xbd, 0x69, 0x13, 0x22, 0x84, 0xbd, ++ 0xe1, 0x34, 0x16, 0x6c, 0x74, 0x5f, 0x56, 0x36, 0x77, 0x88, 0x58, 0x70, 0x6a, 0xf3, 0x4f, 0x1a, ++ 0x1e, 0x15, 0x8b, 0x56, 0xa7, 0xf3, 0xab, 0xca, 0xfc, 0x33, 0x97, 0x93, 0x79, 0x0d, 0x19, 0xa8, ++ 0xcc, 0x53, 0x43, 0xd7, 0xad, 0x71, 0x1d, 0xc8, 0x9f, 0x8b, 0x74, 0x0f, 0xac, 0xe0, 0xf3, 0x96, ++ 0xc7, 0x29, 0x7e, 0x0b, 0x1c, 0xf2, 0xf2, 0xd0, 0x63, 0x96, 0x9d, 0x5c, 0x29, 0xce, 0x79, 0x69, 
++ 0xe6, 0xe1, 0xfb, 0x58, 0x7f, 0x69, 0xe0, 0x31, 0xab, 0x1e, 0x44, 0x55, 0x9f, 0x3e, 0xef, 0xf0, ++ 0x9d, 0x6c, 0x3d, 0x0c, 0x2f, 0x66, 0xfc, 0x21, 0xc4, 0xb3, 0xe5, 0x7c, 0x37, 0xeb, 0xa6, 0xa1, ++ 0xc5, 0x8c, 0x3e, 0x8c, 0x68, 0x81, 0x00, 0x4e, 0x03, 0x8b, 0x19, 0x7f, 0x84, 0x70, 0x42, 0x00, ++ 0xb7, 0x0f, 0xe1, 0xd7, 0x8f, 0xad, 0xc7, 0xa6, 0x43, 0xb1, 0x1b, 0x67, 0x1b, 0x71, 0x52, 0x31, ++ 0xd3, 0x8f, 0xe2, 0x1f, 0x27, 0x82, 0xdf, 0xc4, 0x36, 0x58, 0x06, 0xfc, 0x71, 0x44, 0xf3, 0xf5, ++ 0x7c, 0x92, 0xf5, 0x6a, 0xd3, 0x89, 0x19, 0x7f, 0x02, 0x71, 0x9d, 0x82, 0xad, 0xe3, 0x74, 0x62, ++ 0x16, 0x3c, 0x49, 0x5b, 0x47, 0x02, 0xc2, 0x46, 0x83, 0x89, 0x99, 0x3e, 0x45, 0x51, 0x27, 0x84, ++ 0xef, 0x61, 0x3d, 0x45, 0xb3, 0x31, 0xf3, 0x4f, 0x21, 0xdf, 0x62, 0x20, 0x02, 0x5a, 0xb3, 0x33, ++ 0x2b, 0x9e, 0xa6, 0x08, 0x68, 0x14, 0x5c, 0xa3, 0xea, 0x00, 0x63, 0x36, 0x3d, 0x43, 0xd7, 0xa8, ++ 0x32, 0xbf, 0x40, 0x36, 0xb3, 0x9a, 0x6f, 0x56, 0x3c, 0x4b, 0xd9, 0xcc, 0xd6, 0xc3, 0x36, 0xaa, ++ 0x13, 0x81, 0xd9, 0xf1, 0x1c, 0x6d, 0xa3, 0x32, 0x10, 0xf0, 0x19, 0x36, 0xb4, 0x76, 0x1a, 0x30, ++ 0xfb, 0x9e, 0x47, 0xdf, 0xe0, 0x9a, 0x61, 0x80, 0xdf, 0xce, 0xb6, 0xb4, 0x9f, 0x04, 0xcc, 0xd6, ++ 0xd3, 0x2b, 0x95, 0xff, 0xdd, 0xf4, 0x41, 0x80, 0x1f, 0x6e, 0xb5, 0x14, 0x7d, 0x0a, 0x30, 0x6b, ++ 0xcf, 0xac, 0x94, 0x0b, 0xb7, 0x3e, 0x04, 0xf0, 0x09, 0xc6, 0x5a, 0x0d, 0xd8, 0xec, 0x3a, 0x8b, ++ 0x2e, 0x0d, 0x82, 0xab, 0x81, 0xfd, 0xd7, 0xcc, 0x9f, 0xa3, 0xab, 0x81, 0x04, 0x5c, 0x0d, 0x6a, ++ 0xbd, 0x66, 0xfa, 0x3c, 0x5d, 0x0d, 0x42, 0xe0, 0x64, 0x6b, 0xdd, 0xcd, 0x6c, 0xb8, 0x40, 0x27, ++ 0x5b, 0xa3, 0xf8, 0x41, 0x36, 0xb8, 0xa6, 0x21, 0x9a, 0x55, 0x2f, 0xa3, 0x6a, 0x73, 0xb5, 0x1f, ++ 0xea, 0xcd, 0x0b, 0x9b, 0xa1, 0xd9, 0xf6, 0x4a, 0xa5, 0x79, 0x61, 0x2f, 0xe4, 0xe3, 0xac, 0x3b, ++ 0x4c, 0x7d, 0x1f, 0x2e, 0xcf, 0xd0, 0xb5, 0x6d, 0xba, 0xa9, 0xf0, 0x9b, 0xa4, 0xf8, 0x79, 0x15, ++ 0xa3, 0x43, 0x00, 0xdf, 0xc9, 0x36, 0x88, 0xa0, 0x2e, 0x9a, 0x26, 0xf2, 0x97, 0x55, 0x2a, 0x98, ++ 0xb0, 0x9a, 
0xef, 0x61, 0x2c, 0x7f, 0x35, 0x02, 0x61, 0x36, 0xb1, 0xbf, 0xae, 0xe6, 0x6f, 0x69, ++ 0x34, 0xa4, 0x25, 0xc8, 0x92, 0x62, 0x10, 0x2c, 0x97, 0x05, 0x59, 0x46, 0x76, 0xb1, 0x8d, 0xf7, ++ 0x26, 0x32, 0x54, 0x8e, 0x6b, 0xa2, 0x7f, 0x43, 0x9a, 0xd6, 0x43, 0xc0, 0x02, 0x19, 0x0b, 0xe5, ++ 0xb8, 0x89, 0x89, 0xfd, 0x1d, 0xd9, 0x02, 0x00, 0xb8, 0xe1, 0x24, 0xca, 0xe6, 0xb9, 0xff, 0x20, ++ 0x98, 0x00, 0xd8, 0x34, 0x7c, 0x3e, 0x2a, 0x16, 0x4d, 0xec, 0x9f, 0xb4, 0x69, 0x5c, 0xcf, 0x77, ++ 0xb3, 0x1e, 0xf8, 0x98, 0xbd, 0x55, 0x32, 0xc1, 0x7f, 0x21, 0xdc, 0x22, 0xe0, 0x2f, 0x27, 0xaa, ++ 0xa9, 0x3c, 0x73, 0xb0, 0xff, 0xc6, 0x4c, 0xd3, 0x7a, 0x3e, 0xc1, 0x7a, 0x13, 0xd5, 0x6c, 0xa6, ++ 0x38, 0x9f, 0x1a, 0xf0, 0x7f, 0x56, 0x8b, 0x57, 0x16, 0x05, 0x03, 0xd9, 0xbe, 0xff, 0xa8, 0x8a, ++ 0xa4, 0x17, 0x2a, 0x11, 0x9b, 0x0c, 0x2b, 0x68, 0xd0, 0x10, 0x3e, 0xc9, 0xfa, 0xe0, 0x59, 0x62, ++ 0x11, 0x09, 0x47, 0x99, 0x4f, 0xeb, 0xbf, 0x18, 0x80, 0x12, 0xb4, 0xf7, 0x9e, 0x4b, 0x57, 0x46, ++ 0xbb, 0x2e, 0x5f, 0x19, 0xed, 0xfa, 0xf1, 0xca, 0x68, 0xd7, 0xa9, 0xa5, 0xd1, 0x75, 0x97, 0x97, ++ 0x46, 0xd7, 0xfd, 0xb0, 0x34, 0xba, 0x8e, 0x0d, 0x37, 0x64, 0x50, 0x35, 0xee, 0x65, 0xd3, 0x72, ++ 0x5a, 0xce, 0x64, 0x45, 0xec, 0xae, 0x1b, 0x5c, 0x4f, 0x2d, 0xa4, 0xf5, 0xb1, 0x86, 0x0c, 0xb2, ++ 0xd7, 0xb8, 0xad, 0xb7, 0xb5, 0xc5, 0x3f, 0x39, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x5f, 0xbe, ++ 0x0f, 0x06, 0xea, 0x15, 0x00, 0x00, ++} diff --git a/patches/proto/tendermint/mempool/message.go.patch b/patches/proto/tendermint/mempool/message.go.patch new file mode 100644 index 00000000000..fa6929f55b6 --- /dev/null +++ b/patches/proto/tendermint/mempool/message.go.patch @@ -0,0 +1,53 @@ +diff --git a/proto/tendermint/mempool/message.go b/proto/tendermint/mempool/message.go +index e2e6c42a7..319453a1b 100644 +--- a/proto/tendermint/mempool/message.go ++++ b/proto/tendermint/mempool/message.go +@@ -7,8 +7,12 @@ import ( + "github.com/tendermint/tendermint/p2p" + ) + +-var _ p2p.Wrapper = &Txs{} +-var _ 
p2p.Unwrapper = &Message{} ++var ( ++ _ p2p.Wrapper = &Txs{} ++ _ p2p.Wrapper = &SeenTx{} ++ _ p2p.Wrapper = &WantTx{} ++ _ p2p.Unwrapper = &Message{} ++) + + // Wrap implements the p2p Wrapper interface and wraps a mempool message. + func (m *Txs) Wrap() proto.Message { +@@ -17,6 +21,20 @@ func (m *Txs) Wrap() proto.Message { + return mm + } + ++// Wrap implements the p2p Wrapper interface and wraps a mempool seen tx message. ++func (m *SeenTx) Wrap() proto.Message { ++ mm := &Message{} ++ mm.Sum = &Message_SeenTx{SeenTx: m} ++ return mm ++} ++ ++// Wrap implements the p2p Wrapper interface and wraps a mempool want tx message. ++func (m *WantTx) Wrap() proto.Message { ++ mm := &Message{} ++ mm.Sum = &Message_WantTx{WantTx: m} ++ return mm ++} ++ + // Unwrap implements the p2p Wrapper interface and unwraps a wrapped mempool + // message. + func (m *Message) Unwrap() (proto.Message, error) { +@@ -24,6 +42,12 @@ func (m *Message) Unwrap() (proto.Message, error) { + case *Message_Txs: + return m.GetTxs(), nil + ++ case *Message_SeenTx: ++ return m.GetSeenTx(), nil ++ ++ case *Message_WantTx: ++ return m.GetWantTx(), nil ++ + default: + return nil, fmt.Errorf("unknown message: %T", msg) + } diff --git a/patches/proto/tendermint/mempool/types.pb.go.patch b/patches/proto/tendermint/mempool/types.pb.go.patch new file mode 100644 index 00000000000..630cc58de8d --- /dev/null +++ b/patches/proto/tendermint/mempool/types.pb.go.patch @@ -0,0 +1,631 @@ +diff --git a/proto/tendermint/mempool/types.pb.go b/proto/tendermint/mempool/types.pb.go +index ffe5ab359..14d1197ef 100644 +--- a/proto/tendermint/mempool/types.pb.go ++++ b/proto/tendermint/mempool/types.pb.go +@@ -66,10 +66,100 @@ func (m *Txs) GetTxs() [][]byte { + return nil + } + ++type SeenTx struct { ++ TxKey []byte `protobuf:"bytes,1,opt,name=tx_key,json=txKey,proto3" json:"tx_key,omitempty"` ++} ++ ++func (m *SeenTx) Reset() { *m = SeenTx{} } ++func (m *SeenTx) String() string { return proto.CompactTextString(m) } 
++func (*SeenTx) ProtoMessage() {} ++func (*SeenTx) Descriptor() ([]byte, []int) { ++ return fileDescriptor_2af51926fdbcbc05, []int{1} ++} ++func (m *SeenTx) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *SeenTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_SeenTx.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *SeenTx) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_SeenTx.Merge(m, src) ++} ++func (m *SeenTx) XXX_Size() int { ++ return m.Size() ++} ++func (m *SeenTx) XXX_DiscardUnknown() { ++ xxx_messageInfo_SeenTx.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_SeenTx proto.InternalMessageInfo ++ ++func (m *SeenTx) GetTxKey() []byte { ++ if m != nil { ++ return m.TxKey ++ } ++ return nil ++} ++ ++type WantTx struct { ++ TxKey []byte `protobuf:"bytes,1,opt,name=tx_key,json=txKey,proto3" json:"tx_key,omitempty"` ++} ++ ++func (m *WantTx) Reset() { *m = WantTx{} } ++func (m *WantTx) String() string { return proto.CompactTextString(m) } ++func (*WantTx) ProtoMessage() {} ++func (*WantTx) Descriptor() ([]byte, []int) { ++ return fileDescriptor_2af51926fdbcbc05, []int{2} ++} ++func (m *WantTx) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *WantTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_WantTx.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *WantTx) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_WantTx.Merge(m, src) ++} ++func (m *WantTx) XXX_Size() int { ++ return m.Size() ++} ++func (m *WantTx) XXX_DiscardUnknown() { ++ xxx_messageInfo_WantTx.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_WantTx proto.InternalMessageInfo ++ 
++func (m *WantTx) GetTxKey() []byte { ++ if m != nil { ++ return m.TxKey ++ } ++ return nil ++} ++ + type Message struct { + // Types that are valid to be assigned to Sum: + // + // *Message_Txs ++ // *Message_SeenTx ++ // *Message_WantTx + Sum isMessage_Sum `protobuf_oneof:"sum"` + } + +@@ -77,7 +167,7 @@ func (m *Message) Reset() { *m = Message{} } + func (m *Message) String() string { return proto.CompactTextString(m) } + func (*Message) ProtoMessage() {} + func (*Message) Descriptor() ([]byte, []int) { +- return fileDescriptor_2af51926fdbcbc05, []int{1} ++ return fileDescriptor_2af51926fdbcbc05, []int{3} + } + func (m *Message) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +@@ -115,8 +205,16 @@ type isMessage_Sum interface { + type Message_Txs struct { + Txs *Txs `protobuf:"bytes,1,opt,name=txs,proto3,oneof" json:"txs,omitempty"` + } ++type Message_SeenTx struct { ++ SeenTx *SeenTx `protobuf:"bytes,2,opt,name=seen_tx,json=seenTx,proto3,oneof" json:"seen_tx,omitempty"` ++} ++type Message_WantTx struct { ++ WantTx *WantTx `protobuf:"bytes,3,opt,name=want_tx,json=wantTx,proto3,oneof" json:"want_tx,omitempty"` ++} + +-func (*Message_Txs) isMessage_Sum() {} ++func (*Message_Txs) isMessage_Sum() {} ++func (*Message_SeenTx) isMessage_Sum() {} ++func (*Message_WantTx) isMessage_Sum() {} + + func (m *Message) GetSum() isMessage_Sum { + if m != nil { +@@ -132,34 +230,57 @@ func (m *Message) GetTxs() *Txs { + return nil + } + ++func (m *Message) GetSeenTx() *SeenTx { ++ if x, ok := m.GetSum().(*Message_SeenTx); ok { ++ return x.SeenTx ++ } ++ return nil ++} ++ ++func (m *Message) GetWantTx() *WantTx { ++ if x, ok := m.GetSum().(*Message_WantTx); ok { ++ return x.WantTx ++ } ++ return nil ++} ++ + // XXX_OneofWrappers is for the internal use of the proto package. 
+ func (*Message) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Message_Txs)(nil), ++ (*Message_SeenTx)(nil), ++ (*Message_WantTx)(nil), + } + } + + func init() { + proto.RegisterType((*Txs)(nil), "tendermint.mempool.Txs") ++ proto.RegisterType((*SeenTx)(nil), "tendermint.mempool.SeenTx") ++ proto.RegisterType((*WantTx)(nil), "tendermint.mempool.WantTx") + proto.RegisterType((*Message)(nil), "tendermint.mempool.Message") + } + + func init() { proto.RegisterFile("tendermint/mempool/types.proto", fileDescriptor_2af51926fdbcbc05) } + + var fileDescriptor_2af51926fdbcbc05 = []byte{ +- // 179 bytes of a gzipped FileDescriptorProto ++ // 268 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2b, 0x49, 0xcd, 0x4b, + 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0xcf, 0x4d, 0xcd, 0x2d, 0xc8, 0xcf, 0xcf, 0xd1, 0x2f, + 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x42, 0xc8, 0xeb, 0x41, + 0xe5, 0x95, 0xc4, 0xb9, 0x98, 0x43, 0x2a, 0x8a, 0x85, 0x04, 0xb8, 0x98, 0x4b, 0x2a, 0x8a, 0x25, +- 0x18, 0x15, 0x98, 0x35, 0x78, 0x82, 0x40, 0x4c, 0x25, 0x5b, 0x2e, 0x76, 0xdf, 0xd4, 0xe2, 0xe2, +- 0xc4, 0xf4, 0x54, 0x21, 0x6d, 0x98, 0x24, 0xa3, 0x06, 0xb7, 0x91, 0xb8, 0x1e, 0xa6, 0x29, 0x7a, +- 0x21, 0x15, 0xc5, 0x1e, 0x0c, 0x60, 0x7d, 0x4e, 0xac, 0x5c, 0xcc, 0xc5, 0xa5, 0xb9, 0x4e, 0xc1, +- 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, +- 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0x65, 0x99, 0x9e, 0x59, 0x92, 0x51, +- 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x8f, 0xe4, 0x60, 0x24, 0x26, 0xd8, 0xb5, 0xfa, 0x98, 0x9e, +- 0x49, 0x62, 0x03, 0xcb, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xca, 0xc3, 0xa0, 0xfc, 0xe9, +- 0x00, 0x00, 0x00, ++ 0x18, 0x15, 0x98, 0x35, 0x78, 0x82, 0x40, 0x4c, 0x25, 0x79, 0x2e, 0xb6, 0xe0, 0xd4, 0xd4, 0xbc, ++ 0x90, 0x0a, 0x21, 0x51, 0x2e, 0xb6, 0x92, 0x8a, 0xf8, 0xec, 0xd4, 0x4a, 0x09, 0x46, 0x05, 
0x46, ++ 0x0d, 0x9e, 0x20, 0xd6, 0x92, 0x0a, 0xef, 0xd4, 0x4a, 0x90, 0x82, 0xf0, 0xc4, 0xbc, 0x12, 0xdc, ++ 0x0a, 0x56, 0x33, 0x72, 0xb1, 0xfb, 0xa6, 0x16, 0x17, 0x27, 0xa6, 0xa7, 0x0a, 0x69, 0xc3, 0xcc, ++ 0x67, 0xd4, 0xe0, 0x36, 0x12, 0xd7, 0xc3, 0x74, 0x88, 0x5e, 0x48, 0x45, 0xb1, 0x07, 0x03, 0xd8, ++ 0x6a, 0x21, 0x53, 0x2e, 0xf6, 0xe2, 0xd4, 0xd4, 0xbc, 0xf8, 0x92, 0x0a, 0x09, 0x26, 0xb0, 0x06, ++ 0x29, 0x6c, 0x1a, 0x20, 0xae, 0xf3, 0x60, 0x08, 0x62, 0x2b, 0x86, 0xb8, 0xd3, 0x94, 0x8b, 0xbd, ++ 0x3c, 0x31, 0xaf, 0x04, 0xa4, 0x8d, 0x19, 0xb7, 0x36, 0x88, 0x9b, 0x41, 0xda, 0xca, 0xc1, 0x2c, ++ 0x27, 0x56, 0x2e, 0xe6, 0xe2, 0xd2, 0x5c, 0xa7, 0xe0, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, ++ 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, ++ 0x96, 0x63, 0x88, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x47, ++ 0x0a, 0x61, 0x24, 0x26, 0x38, 0x78, 0xf5, 0x31, 0x43, 0x3f, 0x89, 0x0d, 0x2c, 0x63, 0x0c, 0x08, ++ 0x00, 0x00, 0xff, 0xff, 0x3b, 0xd2, 0x5d, 0x18, 0x9a, 0x01, 0x00, 0x00, + } + + func (m *Txs) Marshal() (dAtA []byte, err error) { +@@ -194,6 +315,66 @@ func (m *Txs) MarshalToSizedBuffer(dAtA []byte) (int, error) { + return len(dAtA) - i, nil + } + ++func (m *SeenTx) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *SeenTx) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *SeenTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if len(m.TxKey) > 0 { ++ i -= len(m.TxKey) ++ copy(dAtA[i:], m.TxKey) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.TxKey))) ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *WantTx) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ 
dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *WantTx) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *WantTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if len(m.TxKey) > 0 { ++ i -= len(m.TxKey) ++ copy(dAtA[i:], m.TxKey) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.TxKey))) ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil ++} ++ + func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) +@@ -247,6 +428,48 @@ func (m *Message_Txs) MarshalToSizedBuffer(dAtA []byte) (int, error) { + } + return len(dAtA) - i, nil + } ++func (m *Message_SeenTx) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *Message_SeenTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ if m.SeenTx != nil { ++ { ++ size, err := m.SeenTx.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x12 ++ } ++ return len(dAtA) - i, nil ++} ++func (m *Message_WantTx) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *Message_WantTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ if m.WantTx != nil { ++ { ++ size, err := m.WantTx.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x1a ++ } ++ return len(dAtA) - i, nil ++} + func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset +@@ -273,6 +496,32 @@ func (m *Txs) Size() (n int) { + return n + } + ++func (m *SeenTx) Size() 
(n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ l = len(m.TxKey) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ ++func (m *WantTx) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ l = len(m.TxKey) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ + func (m *Message) Size() (n int) { + if m == nil { + return 0 +@@ -297,6 +546,30 @@ func (m *Message_Txs) Size() (n int) { + } + return n + } ++func (m *Message_SeenTx) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.SeenTx != nil { ++ l = m.SeenTx.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++func (m *Message_WantTx) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.WantTx != nil { ++ l = m.WantTx.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} + + func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +@@ -386,6 +659,174 @@ func (m *Txs) Unmarshal(dAtA []byte) error { + } + return nil + } ++func (m *SeenTx) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: SeenTx: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: SeenTx: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field TxKey", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { 
++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.TxKey = append(m.TxKey[:0], dAtA[iNdEx:postIndex]...) ++ if m.TxKey == nil { ++ m.TxKey = []byte{} ++ } ++ iNdEx = postIndex ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *WantTx) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: WantTx: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: WantTx: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field TxKey", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if 
postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.TxKey = append(m.TxKey[:0], dAtA[iNdEx:postIndex]...) ++ if m.TxKey == nil { ++ m.TxKey = []byte{} ++ } ++ iNdEx = postIndex ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} + func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 +@@ -450,6 +891,76 @@ func (m *Message) Unmarshal(dAtA []byte) error { + } + m.Sum = &Message_Txs{v} + iNdEx = postIndex ++ case 2: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field SeenTx", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ v := &SeenTx{} ++ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ m.Sum = &Message_SeenTx{v} ++ iNdEx = postIndex ++ case 3: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field WantTx", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx 
+ msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ v := &WantTx{} ++ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ m.Sum = &Message_WantTx{v} ++ iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/patches/proto/tendermint/rpc/grpc/types.pb.go.patch b/patches/proto/tendermint/rpc/grpc/types.pb.go.patch new file mode 100644 index 00000000000..18a48e2fa74 --- /dev/null +++ b/patches/proto/tendermint/rpc/grpc/types.pb.go.patch @@ -0,0 +1,4744 @@ +diff --git a/proto/tendermint/rpc/grpc/types.pb.go b/proto/tendermint/rpc/grpc/types.pb.go +index b9cbee03f..9f50d2cd1 100644 +--- a/proto/tendermint/rpc/grpc/types.pb.go ++++ b/proto/tendermint/rpc/grpc/types.pb.go +@@ -6,20 +6,28 @@ package coregrpc + import ( + context "context" + fmt "fmt" ++ _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" ++ _ "github.com/gogo/protobuf/types" ++ github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + types "github.com/tendermint/tendermint/abci/types" ++ crypto "github.com/tendermint/tendermint/proto/tendermint/crypto" ++ p2p "github.com/tendermint/tendermint/proto/tendermint/p2p" ++ types1 "github.com/tendermint/tendermint/proto/tendermint/types" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" ++ time "time" + ) + + // Reference imports to suppress errors if they are not otherwise used. + var _ = proto.Marshal + var _ = fmt.Errorf + var _ = math.Inf ++var _ = time.Kitchen + + // This is a compile-time assertion to ensure that this generated file + // is compatible with the proto package it is being compiled against. 
+@@ -107,6 +115,280 @@ func (m *RequestBroadcastTx) GetTx() []byte { + return nil + } + ++type BlockByHashRequest struct { ++ Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` ++ Prove bool `protobuf:"varint,2,opt,name=prove,proto3" json:"prove,omitempty"` ++} ++ ++func (m *BlockByHashRequest) Reset() { *m = BlockByHashRequest{} } ++func (m *BlockByHashRequest) String() string { return proto.CompactTextString(m) } ++func (*BlockByHashRequest) ProtoMessage() {} ++func (*BlockByHashRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{2} ++} ++func (m *BlockByHashRequest) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *BlockByHashRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_BlockByHashRequest.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *BlockByHashRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_BlockByHashRequest.Merge(m, src) ++} ++func (m *BlockByHashRequest) XXX_Size() int { ++ return m.Size() ++} ++func (m *BlockByHashRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_BlockByHashRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_BlockByHashRequest proto.InternalMessageInfo ++ ++func (m *BlockByHashRequest) GetHash() []byte { ++ if m != nil { ++ return m.Hash ++ } ++ return nil ++} ++ ++func (m *BlockByHashRequest) GetProve() bool { ++ if m != nil { ++ return m.Prove ++ } ++ return false ++} ++ ++type BlockByHeightRequest struct { ++ // Height the requested block height. ++ // If height is equal to 0, the latest height stored in the block store ++ // will be used. ++ Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` ++ // Prove set to true to return the parts proofs. 
++ Prove bool `protobuf:"varint,2,opt,name=prove,proto3" json:"prove,omitempty"` ++} ++ ++func (m *BlockByHeightRequest) Reset() { *m = BlockByHeightRequest{} } ++func (m *BlockByHeightRequest) String() string { return proto.CompactTextString(m) } ++func (*BlockByHeightRequest) ProtoMessage() {} ++func (*BlockByHeightRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{3} ++} ++func (m *BlockByHeightRequest) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *BlockByHeightRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_BlockByHeightRequest.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *BlockByHeightRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_BlockByHeightRequest.Merge(m, src) ++} ++func (m *BlockByHeightRequest) XXX_Size() int { ++ return m.Size() ++} ++func (m *BlockByHeightRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_BlockByHeightRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_BlockByHeightRequest proto.InternalMessageInfo ++ ++func (m *BlockByHeightRequest) GetHeight() int64 { ++ if m != nil { ++ return m.Height ++ } ++ return 0 ++} ++ ++func (m *BlockByHeightRequest) GetProve() bool { ++ if m != nil { ++ return m.Prove ++ } ++ return false ++} ++ ++type CommitRequest struct { ++ // Height the requested block commit height. ++ // If height is equal to 0, the latest height stored in the block store ++ // will be used. 
++ Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` ++} ++ ++func (m *CommitRequest) Reset() { *m = CommitRequest{} } ++func (m *CommitRequest) String() string { return proto.CompactTextString(m) } ++func (*CommitRequest) ProtoMessage() {} ++func (*CommitRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{4} ++} ++func (m *CommitRequest) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *CommitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_CommitRequest.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *CommitRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_CommitRequest.Merge(m, src) ++} ++func (m *CommitRequest) XXX_Size() int { ++ return m.Size() ++} ++func (m *CommitRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_CommitRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_CommitRequest proto.InternalMessageInfo ++ ++func (m *CommitRequest) GetHeight() int64 { ++ if m != nil { ++ return m.Height ++ } ++ return 0 ++} ++ ++type ValidatorSetRequest struct { ++ // Height the requested validator set height. ++ // If height is equal to 0, the latest height stored in the block store ++ // will be used. 
++ Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` ++} ++ ++func (m *ValidatorSetRequest) Reset() { *m = ValidatorSetRequest{} } ++func (m *ValidatorSetRequest) String() string { return proto.CompactTextString(m) } ++func (*ValidatorSetRequest) ProtoMessage() {} ++func (*ValidatorSetRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{5} ++} ++func (m *ValidatorSetRequest) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *ValidatorSetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_ValidatorSetRequest.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *ValidatorSetRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ValidatorSetRequest.Merge(m, src) ++} ++func (m *ValidatorSetRequest) XXX_Size() int { ++ return m.Size() ++} ++func (m *ValidatorSetRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_ValidatorSetRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_ValidatorSetRequest proto.InternalMessageInfo ++ ++func (m *ValidatorSetRequest) GetHeight() int64 { ++ if m != nil { ++ return m.Height ++ } ++ return 0 ++} ++ ++type SubscribeNewHeightsRequest struct { ++} ++ ++func (m *SubscribeNewHeightsRequest) Reset() { *m = SubscribeNewHeightsRequest{} } ++func (m *SubscribeNewHeightsRequest) String() string { return proto.CompactTextString(m) } ++func (*SubscribeNewHeightsRequest) ProtoMessage() {} ++func (*SubscribeNewHeightsRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{6} ++} ++func (m *SubscribeNewHeightsRequest) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *SubscribeNewHeightsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return 
xxx_messageInfo_SubscribeNewHeightsRequest.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *SubscribeNewHeightsRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_SubscribeNewHeightsRequest.Merge(m, src) ++} ++func (m *SubscribeNewHeightsRequest) XXX_Size() int { ++ return m.Size() ++} ++func (m *SubscribeNewHeightsRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_SubscribeNewHeightsRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_SubscribeNewHeightsRequest proto.InternalMessageInfo ++ ++type StatusRequest struct { ++} ++ ++func (m *StatusRequest) Reset() { *m = StatusRequest{} } ++func (m *StatusRequest) String() string { return proto.CompactTextString(m) } ++func (*StatusRequest) ProtoMessage() {} ++func (*StatusRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{7} ++} ++func (m *StatusRequest) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *StatusRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_StatusRequest.Merge(m, src) ++} ++func (m *StatusRequest) XXX_Size() int { ++ return m.Size() ++} ++func (m *StatusRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_StatusRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_StatusRequest proto.InternalMessageInfo ++ + type ResponsePing struct { + } + +@@ -114,7 +396,7 @@ func (m *ResponsePing) Reset() { *m = ResponsePing{} } + func (m *ResponsePing) String() string { return proto.CompactTextString(m) } + func (*ResponsePing) ProtoMessage() {} + func (*ResponsePing) Descriptor() ([]byte, 
[]int) { +- return fileDescriptor_0ffff5682c662b95, []int{2} ++ return fileDescriptor_0ffff5682c662b95, []int{8} + } + func (m *ResponsePing) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +@@ -152,7 +434,7 @@ func (m *ResponseBroadcastTx) Reset() { *m = ResponseBroadcastTx{} } + func (m *ResponseBroadcastTx) String() string { return proto.CompactTextString(m) } + func (*ResponseBroadcastTx) ProtoMessage() {} + func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { +- return fileDescriptor_0ffff5682c662b95, []int{3} ++ return fileDescriptor_0ffff5682c662b95, []int{9} + } + func (m *ResponseBroadcastTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +@@ -195,344 +477,3675 @@ func (m *ResponseBroadcastTx) GetDeliverTx() *types.ResponseDeliverTx { + return nil + } + +-func init() { +- proto.RegisterType((*RequestPing)(nil), "tendermint.rpc.grpc.RequestPing") +- proto.RegisterType((*RequestBroadcastTx)(nil), "tendermint.rpc.grpc.RequestBroadcastTx") +- proto.RegisterType((*ResponsePing)(nil), "tendermint.rpc.grpc.ResponsePing") +- proto.RegisterType((*ResponseBroadcastTx)(nil), "tendermint.rpc.grpc.ResponseBroadcastTx") ++type StreamedBlockByHashResponse struct { ++ BlockPart *types1.Part `protobuf:"bytes,1,opt,name=block_part,json=blockPart,proto3" json:"block_part,omitempty"` ++ // Commit is only set in the first part, and ++ // it stays nil in the remaining ones. ++ Commit *types1.Commit `protobuf:"bytes,2,opt,name=commit,proto3" json:"commit,omitempty"` ++ // ValidatorSet is only set in the first part, and ++ // it stays nil in the remaining ones. 
++ ValidatorSet *types1.ValidatorSet `protobuf:"bytes,3,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` ++ IsLast bool `protobuf:"varint,4,opt,name=is_last,json=isLast,proto3" json:"is_last,omitempty"` + } + +-func init() { proto.RegisterFile("tendermint/rpc/grpc/types.proto", fileDescriptor_0ffff5682c662b95) } +- +-var fileDescriptor_0ffff5682c662b95 = []byte{ +- // 316 bytes of a gzipped FileDescriptorProto +- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2f, 0x49, 0xcd, 0x4b, +- 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x2a, 0x48, 0xd6, 0x4f, 0x07, 0x11, 0x25, 0x95, +- 0x05, 0xa9, 0xc5, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0xc2, 0x08, 0x05, 0x7a, 0x45, 0x05, +- 0xc9, 0x7a, 0x20, 0x05, 0x52, 0xd2, 0x48, 0xba, 0x12, 0x93, 0x92, 0x33, 0x91, 0x75, 0x28, 0xf1, +- 0x72, 0x71, 0x07, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x04, 0x64, 0xe6, 0xa5, 0x2b, 0xa9, 0x70, +- 0x09, 0x41, 0xb9, 0x4e, 0x45, 0xf9, 0x89, 0x29, 0xc9, 0x89, 0xc5, 0x25, 0x21, 0x15, 0x42, 0x7c, +- 0x5c, 0x4c, 0x25, 0x15, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x4c, 0x25, 0x15, 0x4a, 0x7c, +- 0x5c, 0x3c, 0x41, 0xa9, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x60, 0x5d, 0x53, 0x19, 0xb9, 0x84, +- 0x61, 0x02, 0xc8, 0xfa, 0xac, 0xb9, 0x38, 0x92, 0x33, 0x52, 0x93, 0xb3, 0xe3, 0xa1, 0xba, 0xb9, +- 0x8d, 0x14, 0xf4, 0x90, 0x5c, 0x08, 0x72, 0x8c, 0x1e, 0x4c, 0x9f, 0x33, 0x48, 0x61, 0x48, 0x45, +- 0x10, 0x7b, 0x32, 0x84, 0x21, 0xe4, 0xc8, 0xc5, 0x95, 0x92, 0x9a, 0x93, 0x59, 0x96, 0x5a, 0x04, +- 0xd2, 0xce, 0x04, 0xd6, 0xae, 0x84, 0x53, 0xbb, 0x0b, 0x44, 0x69, 0x48, 0x45, 0x10, 0x67, 0x0a, +- 0x8c, 0x69, 0xb4, 0x97, 0x91, 0x8b, 0x07, 0xee, 0x1e, 0xc7, 0x00, 0x4f, 0x21, 0x6f, 0x2e, 0x16, +- 0x90, 0x83, 0x85, 0x50, 0x9c, 0x01, 0x0b, 0x28, 0x3d, 0xa4, 0x80, 0x90, 0x52, 0xc4, 0xa1, 0x02, +- 0xe1, 0x6b, 0xa1, 0x04, 0x2e, 0x6e, 0x64, 0xcf, 0xaa, 0xe3, 0x33, 0x13, 0x49, 0xa1, 0x94, 0x06, +- 0x5e, 0xa3, 0x91, 0x54, 0x3a, 0xf9, 0x9c, 0x78, 0x24, 0xc7, 0x78, 
0xe1, 0x91, 0x1c, 0xe3, 0x83, +- 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, +- 0x94, 0x51, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x3e, 0x52, 0xf4, 0x62, +- 0x49, 0x1f, 0xd6, 0xc9, 0xf9, 0x45, 0xa9, 0x20, 0x46, 0x12, 0x1b, 0x38, 0xc6, 0x8d, 0x01, 0x01, +- 0x00, 0x00, 0xff, 0xff, 0xf6, 0x4b, 0x02, 0xd8, 0x46, 0x02, 0x00, 0x00, ++func (m *StreamedBlockByHashResponse) Reset() { *m = StreamedBlockByHashResponse{} } ++func (m *StreamedBlockByHashResponse) String() string { return proto.CompactTextString(m) } ++func (*StreamedBlockByHashResponse) ProtoMessage() {} ++func (*StreamedBlockByHashResponse) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{10} + } +- +-// Reference imports to suppress errors if they are not otherwise used. +-var _ context.Context +-var _ grpc.ClientConn +- +-// This is a compile-time assertion to ensure that this generated file +-// is compatible with the grpc package it is being compiled against. +-const _ = grpc.SupportPackageIsVersion4 +- +-// BroadcastAPIClient is the client API for BroadcastAPI service. +-// +-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+-type BroadcastAPIClient interface { +- Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) +- BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) ++func (m *StreamedBlockByHashResponse) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) + } +- +-type broadcastAPIClient struct { +- cc *grpc.ClientConn ++func (m *StreamedBlockByHashResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_StreamedBlockByHashResponse.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *StreamedBlockByHashResponse) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_StreamedBlockByHashResponse.Merge(m, src) ++} ++func (m *StreamedBlockByHashResponse) XXX_Size() int { ++ return m.Size() ++} ++func (m *StreamedBlockByHashResponse) XXX_DiscardUnknown() { ++ xxx_messageInfo_StreamedBlockByHashResponse.DiscardUnknown(m) + } + +-func NewBroadcastAPIClient(cc *grpc.ClientConn) BroadcastAPIClient { +- return &broadcastAPIClient{cc} ++var xxx_messageInfo_StreamedBlockByHashResponse proto.InternalMessageInfo ++ ++func (m *StreamedBlockByHashResponse) GetBlockPart() *types1.Part { ++ if m != nil { ++ return m.BlockPart ++ } ++ return nil + } + +-func (c *broadcastAPIClient) Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) { +- out := new(ResponsePing) +- err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/Ping", in, out, opts...) 
+- if err != nil { +- return nil, err ++func (m *StreamedBlockByHashResponse) GetCommit() *types1.Commit { ++ if m != nil { ++ return m.Commit + } +- return out, nil ++ return nil + } + +-func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) { +- out := new(ResponseBroadcastTx) +- err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx", in, out, opts...) +- if err != nil { +- return nil, err ++func (m *StreamedBlockByHashResponse) GetValidatorSet() *types1.ValidatorSet { ++ if m != nil { ++ return m.ValidatorSet + } +- return out, nil ++ return nil + } + +-// BroadcastAPIServer is the server API for BroadcastAPI service. +-type BroadcastAPIServer interface { +- Ping(context.Context, *RequestPing) (*ResponsePing, error) +- BroadcastTx(context.Context, *RequestBroadcastTx) (*ResponseBroadcastTx, error) ++func (m *StreamedBlockByHashResponse) GetIsLast() bool { ++ if m != nil { ++ return m.IsLast ++ } ++ return false + } + +-// UnimplementedBroadcastAPIServer can be embedded to have forward compatible implementations. +-type UnimplementedBroadcastAPIServer struct { ++type StreamedBlockByHeightResponse struct { ++ BlockPart *types1.Part `protobuf:"bytes,1,opt,name=block_part,json=blockPart,proto3" json:"block_part,omitempty"` ++ // Commit is only set in the first part, and ++ // it stays nil in the remaining ones. ++ Commit *types1.Commit `protobuf:"bytes,2,opt,name=commit,proto3" json:"commit,omitempty"` ++ // ValidatorSet is only set in the first part, and ++ // it stays nil in the remaining ones. 
++ ValidatorSet *types1.ValidatorSet `protobuf:"bytes,3,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` ++ IsLast bool `protobuf:"varint,4,opt,name=is_last,json=isLast,proto3" json:"is_last,omitempty"` + } + +-func (*UnimplementedBroadcastAPIServer) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) { +- return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") ++func (m *StreamedBlockByHeightResponse) Reset() { *m = StreamedBlockByHeightResponse{} } ++func (m *StreamedBlockByHeightResponse) String() string { return proto.CompactTextString(m) } ++func (*StreamedBlockByHeightResponse) ProtoMessage() {} ++func (*StreamedBlockByHeightResponse) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{11} + } +-func (*UnimplementedBroadcastAPIServer) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { +- return nil, status.Errorf(codes.Unimplemented, "method BroadcastTx not implemented") ++func (m *StreamedBlockByHeightResponse) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *StreamedBlockByHeightResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_StreamedBlockByHeightResponse.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *StreamedBlockByHeightResponse) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_StreamedBlockByHeightResponse.Merge(m, src) ++} ++func (m *StreamedBlockByHeightResponse) XXX_Size() int { ++ return m.Size() ++} ++func (m *StreamedBlockByHeightResponse) XXX_DiscardUnknown() { ++ xxx_messageInfo_StreamedBlockByHeightResponse.DiscardUnknown(m) + } + +-func RegisterBroadcastAPIServer(s *grpc.Server, srv BroadcastAPIServer) { +- s.RegisterService(&_BroadcastAPI_serviceDesc, srv) ++var 
xxx_messageInfo_StreamedBlockByHeightResponse proto.InternalMessageInfo ++ ++func (m *StreamedBlockByHeightResponse) GetBlockPart() *types1.Part { ++ if m != nil { ++ return m.BlockPart ++ } ++ return nil + } + +-func _BroadcastAPI_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +- in := new(RequestPing) +- if err := dec(in); err != nil { +- return nil, err ++func (m *StreamedBlockByHeightResponse) GetCommit() *types1.Commit { ++ if m != nil { ++ return m.Commit + } +- if interceptor == nil { +- return srv.(BroadcastAPIServer).Ping(ctx, in) +- } +- info := &grpc.UnaryServerInfo{ +- Server: srv, +- FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/Ping", +- } +- handler := func(ctx context.Context, req interface{}) (interface{}, error) { +- return srv.(BroadcastAPIServer).Ping(ctx, req.(*RequestPing)) +- } +- return interceptor(ctx, in, info, handler) ++ return nil + } + +-func _BroadcastAPI_BroadcastTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +- in := new(RequestBroadcastTx) +- if err := dec(in); err != nil { +- return nil, err +- } +- if interceptor == nil { +- return srv.(BroadcastAPIServer).BroadcastTx(ctx, in) +- } +- info := &grpc.UnaryServerInfo{ +- Server: srv, +- FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx", ++func (m *StreamedBlockByHeightResponse) GetValidatorSet() *types1.ValidatorSet { ++ if m != nil { ++ return m.ValidatorSet + } +- handler := func(ctx context.Context, req interface{}) (interface{}, error) { +- return srv.(BroadcastAPIServer).BroadcastTx(ctx, req.(*RequestBroadcastTx)) ++ return nil ++} ++ ++func (m *StreamedBlockByHeightResponse) GetIsLast() bool { ++ if m != nil { ++ return m.IsLast + } +- return interceptor(ctx, in, info, handler) ++ return false + } + +-var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{ +- ServiceName: 
"tendermint.rpc.grpc.BroadcastAPI", +- HandlerType: (*BroadcastAPIServer)(nil), +- Methods: []grpc.MethodDesc{ +- { +- MethodName: "Ping", +- Handler: _BroadcastAPI_Ping_Handler, +- }, +- { +- MethodName: "BroadcastTx", +- Handler: _BroadcastAPI_BroadcastTx_Handler, +- }, +- }, +- Streams: []grpc.StreamDesc{}, +- Metadata: "tendermint/rpc/grpc/types.proto", ++type CommitResponse struct { ++ Commit *types1.Commit `protobuf:"bytes,1,opt,name=commit,proto3" json:"commit,omitempty"` + } + +-func (m *RequestPing) Marshal() (dAtA []byte, err error) { +- size := m.Size() +- dAtA = make([]byte, size) +- n, err := m.MarshalToSizedBuffer(dAtA[:size]) +- if err != nil { +- return nil, err ++func (m *CommitResponse) Reset() { *m = CommitResponse{} } ++func (m *CommitResponse) String() string { return proto.CompactTextString(m) } ++func (*CommitResponse) ProtoMessage() {} ++func (*CommitResponse) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{12} ++} ++func (m *CommitResponse) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil + } +- return dAtA[:n], nil + } +- +-func (m *RequestPing) MarshalTo(dAtA []byte) (int, error) { +- size := m.Size() +- return m.MarshalToSizedBuffer(dAtA[:size]) ++func (m *CommitResponse) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_CommitResponse.Merge(m, src) + } +- +-func (m *RequestPing) MarshalToSizedBuffer(dAtA []byte) (int, error) { +- i := len(dAtA) +- _ = i +- var l int +- _ = l +- return len(dAtA) - i, nil ++func (m *CommitResponse) XXX_Size() int { ++ return m.Size() ++} ++func (m *CommitResponse) XXX_DiscardUnknown() { ++ xxx_messageInfo_CommitResponse.DiscardUnknown(m) + } + +-func (m 
*RequestBroadcastTx) Marshal() (dAtA []byte, err error) { +- size := m.Size() +- dAtA = make([]byte, size) +- n, err := m.MarshalToSizedBuffer(dAtA[:size]) +- if err != nil { +- return nil, err ++var xxx_messageInfo_CommitResponse proto.InternalMessageInfo ++ ++func (m *CommitResponse) GetCommit() *types1.Commit { ++ if m != nil { ++ return m.Commit + } +- return dAtA[:n], nil ++ return nil + } + +-func (m *RequestBroadcastTx) MarshalTo(dAtA []byte) (int, error) { +- size := m.Size() +- return m.MarshalToSizedBuffer(dAtA[:size]) ++type ValidatorSetResponse struct { ++ // ValidatorSet the requested validator set. ++ ValidatorSet *types1.ValidatorSet `protobuf:"bytes,1,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` ++ // Height the height corresponding to the returned ++ // validator set. ++ Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` + } + +-func (m *RequestBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { +- i := len(dAtA) +- _ = i +- var l int +- _ = l +- if len(m.Tx) > 0 { +- i -= len(m.Tx) +- copy(dAtA[i:], m.Tx) +- i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) +- i-- +- dAtA[i] = 0xa ++func (m *ValidatorSetResponse) Reset() { *m = ValidatorSetResponse{} } ++func (m *ValidatorSetResponse) String() string { return proto.CompactTextString(m) } ++func (*ValidatorSetResponse) ProtoMessage() {} ++func (*ValidatorSetResponse) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{13} ++} ++func (m *ValidatorSetResponse) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *ValidatorSetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_ValidatorSetResponse.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil + } +- return len(dAtA) - i, nil ++} ++func (m 
*ValidatorSetResponse) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ValidatorSetResponse.Merge(m, src) ++} ++func (m *ValidatorSetResponse) XXX_Size() int { ++ return m.Size() ++} ++func (m *ValidatorSetResponse) XXX_DiscardUnknown() { ++ xxx_messageInfo_ValidatorSetResponse.DiscardUnknown(m) + } + +-func (m *ResponsePing) Marshal() (dAtA []byte, err error) { +- size := m.Size() +- dAtA = make([]byte, size) +- n, err := m.MarshalToSizedBuffer(dAtA[:size]) +- if err != nil { +- return nil, err ++var xxx_messageInfo_ValidatorSetResponse proto.InternalMessageInfo ++ ++func (m *ValidatorSetResponse) GetValidatorSet() *types1.ValidatorSet { ++ if m != nil { ++ return m.ValidatorSet + } +- return dAtA[:n], nil ++ return nil + } + +-func (m *ResponsePing) MarshalTo(dAtA []byte) (int, error) { +- size := m.Size() +- return m.MarshalToSizedBuffer(dAtA[:size]) ++func (m *ValidatorSetResponse) GetHeight() int64 { ++ if m != nil { ++ return m.Height ++ } ++ return 0 + } + +-func (m *ResponsePing) MarshalToSizedBuffer(dAtA []byte) (int, error) { +- i := len(dAtA) +- _ = i +- var l int +- _ = l +- return len(dAtA) - i, nil ++type NewHeightEvent struct { ++ Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` ++ Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` + } + +-func (m *ResponseBroadcastTx) Marshal() (dAtA []byte, err error) { +- size := m.Size() +- dAtA = make([]byte, size) +- n, err := m.MarshalToSizedBuffer(dAtA[:size]) +- if err != nil { +- return nil, err ++func (m *NewHeightEvent) Reset() { *m = NewHeightEvent{} } ++func (m *NewHeightEvent) String() string { return proto.CompactTextString(m) } ++func (*NewHeightEvent) ProtoMessage() {} ++func (*NewHeightEvent) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{14} ++} ++func (m *NewHeightEvent) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *NewHeightEvent) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_NewHeightEvent.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil + } +- return dAtA[:n], nil ++} ++func (m *NewHeightEvent) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_NewHeightEvent.Merge(m, src) ++} ++func (m *NewHeightEvent) XXX_Size() int { ++ return m.Size() ++} ++func (m *NewHeightEvent) XXX_DiscardUnknown() { ++ xxx_messageInfo_NewHeightEvent.DiscardUnknown(m) + } + +-func (m *ResponseBroadcastTx) MarshalTo(dAtA []byte) (int, error) { +- size := m.Size() +- return m.MarshalToSizedBuffer(dAtA[:size]) ++var xxx_messageInfo_NewHeightEvent proto.InternalMessageInfo ++ ++func (m *NewHeightEvent) GetHeight() int64 { ++ if m != nil { ++ return m.Height ++ } ++ return 0 + } + +-func (m *ResponseBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { +- i := len(dAtA) +- _ = i +- var l int +- _ = l +- if m.DeliverTx != nil { +- { +- size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) +- if err != nil { +- return 0, err +- } +- i -= size +- i = encodeVarintTypes(dAtA, i, uint64(size)) +- } +- i-- +- dAtA[i] = 0x12 ++func (m *NewHeightEvent) GetHash() []byte { ++ if m != nil { ++ return m.Hash + } +- if m.CheckTx != nil { +- { +- size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) +- if err != nil { +- return 0, err +- } +- i -= size +- i = encodeVarintTypes(dAtA, i, uint64(size)) ++ return nil ++} ++ ++type StatusResponse struct { ++ NodeInfo *p2p.DefaultNodeInfo `protobuf:"bytes,1,opt,name=node_info,json=nodeInfo,proto3" json:"node_info,omitempty"` ++ SyncInfo *SyncInfo `protobuf:"bytes,2,opt,name=sync_info,json=syncInfo,proto3" json:"sync_info,omitempty"` ++ ValidatorInfo *ValidatorInfo `protobuf:"bytes,3,opt,name=validator_info,json=validatorInfo,proto3" json:"validator_info,omitempty"` ++} ++ ++func (m *StatusResponse) Reset() { *m = StatusResponse{} } ++func (m 
*StatusResponse) String() string { return proto.CompactTextString(m) } ++func (*StatusResponse) ProtoMessage() {} ++func (*StatusResponse) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{15} ++} ++func (m *StatusResponse) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err + } +- i-- +- dAtA[i] = 0xa ++ return b[:n], nil + } +- return len(dAtA) - i, nil ++} ++func (m *StatusResponse) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_StatusResponse.Merge(m, src) ++} ++func (m *StatusResponse) XXX_Size() int { ++ return m.Size() ++} ++func (m *StatusResponse) XXX_DiscardUnknown() { ++ xxx_messageInfo_StatusResponse.DiscardUnknown(m) + } + +-func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { +- offset -= sovTypes(v) +- base := offset +- for v >= 1<<7 { +- dAtA[offset] = uint8(v&0x7f | 0x80) +- v >>= 7 +- offset++ ++var xxx_messageInfo_StatusResponse proto.InternalMessageInfo ++ ++func (m *StatusResponse) GetNodeInfo() *p2p.DefaultNodeInfo { ++ if m != nil { ++ return m.NodeInfo + } +- dAtA[offset] = uint8(v) +- return base ++ return nil + } +-func (m *RequestPing) Size() (n int) { +- if m == nil { +- return 0 ++ ++func (m *StatusResponse) GetSyncInfo() *SyncInfo { ++ if m != nil { ++ return m.SyncInfo + } +- var l int +- _ = l +- return n ++ return nil + } + +-func (m *RequestBroadcastTx) Size() (n int) { +- if m == nil { +- return 0 +- } +- var l int +- _ = l +- l = len(m.Tx) +- if l > 0 { +- n += 1 + l + sovTypes(uint64(l)) ++func (m *StatusResponse) GetValidatorInfo() *ValidatorInfo { ++ if m != nil { ++ return m.ValidatorInfo + } +- return n ++ return nil + } + +-func (m *ResponsePing) Size() (n int) { +- if m == nil { +- return 0 
+- } +- var l int +- _ = l +- return n ++type SyncInfo struct { ++ LatestBlockHash []byte `protobuf:"bytes,1,opt,name=latest_block_hash,json=latestBlockHash,proto3" json:"latest_block_hash,omitempty"` ++ LatestAppHash []byte `protobuf:"bytes,2,opt,name=latest_app_hash,json=latestAppHash,proto3" json:"latest_app_hash,omitempty"` ++ LatestBlockHeight int64 `protobuf:"varint,3,opt,name=latest_block_height,json=latestBlockHeight,proto3" json:"latest_block_height,omitempty"` ++ LatestBlockTime time.Time `protobuf:"bytes,4,opt,name=latest_block_time,json=latestBlockTime,proto3,stdtime" json:"latest_block_time"` ++ EarliestBlockHash []byte `protobuf:"bytes,5,opt,name=earliest_block_hash,json=earliestBlockHash,proto3" json:"earliest_block_hash,omitempty"` ++ EarliestAppHash []byte `protobuf:"bytes,6,opt,name=earliest_app_hash,json=earliestAppHash,proto3" json:"earliest_app_hash,omitempty"` ++ EarliestBlockHeight int64 `protobuf:"varint,7,opt,name=earliest_block_height,json=earliestBlockHeight,proto3" json:"earliest_block_height,omitempty"` ++ EarliestBlockTime time.Time `protobuf:"bytes,8,opt,name=earliest_block_time,json=earliestBlockTime,proto3,stdtime" json:"earliest_block_time"` ++ CatchingUp bool `protobuf:"varint,9,opt,name=catching_up,json=catchingUp,proto3" json:"catching_up,omitempty"` + } + +-func (m *ResponseBroadcastTx) Size() (n int) { +- if m == nil { +- return 0 +- } ++func (m *SyncInfo) Reset() { *m = SyncInfo{} } ++func (m *SyncInfo) String() string { return proto.CompactTextString(m) } ++func (*SyncInfo) ProtoMessage() {} ++func (*SyncInfo) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{16} ++} ++func (m *SyncInfo) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *SyncInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_SyncInfo.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil 
{ ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *SyncInfo) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_SyncInfo.Merge(m, src) ++} ++func (m *SyncInfo) XXX_Size() int { ++ return m.Size() ++} ++func (m *SyncInfo) XXX_DiscardUnknown() { ++ xxx_messageInfo_SyncInfo.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_SyncInfo proto.InternalMessageInfo ++ ++func (m *SyncInfo) GetLatestBlockHash() []byte { ++ if m != nil { ++ return m.LatestBlockHash ++ } ++ return nil ++} ++ ++func (m *SyncInfo) GetLatestAppHash() []byte { ++ if m != nil { ++ return m.LatestAppHash ++ } ++ return nil ++} ++ ++func (m *SyncInfo) GetLatestBlockHeight() int64 { ++ if m != nil { ++ return m.LatestBlockHeight ++ } ++ return 0 ++} ++ ++func (m *SyncInfo) GetLatestBlockTime() time.Time { ++ if m != nil { ++ return m.LatestBlockTime ++ } ++ return time.Time{} ++} ++ ++func (m *SyncInfo) GetEarliestBlockHash() []byte { ++ if m != nil { ++ return m.EarliestBlockHash ++ } ++ return nil ++} ++ ++func (m *SyncInfo) GetEarliestAppHash() []byte { ++ if m != nil { ++ return m.EarliestAppHash ++ } ++ return nil ++} ++ ++func (m *SyncInfo) GetEarliestBlockHeight() int64 { ++ if m != nil { ++ return m.EarliestBlockHeight ++ } ++ return 0 ++} ++ ++func (m *SyncInfo) GetEarliestBlockTime() time.Time { ++ if m != nil { ++ return m.EarliestBlockTime ++ } ++ return time.Time{} ++} ++ ++func (m *SyncInfo) GetCatchingUp() bool { ++ if m != nil { ++ return m.CatchingUp ++ } ++ return false ++} ++ ++type ValidatorInfo struct { ++ Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` ++ PubKey *crypto.PublicKey `protobuf:"bytes,2,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` ++ VotingPower int64 `protobuf:"varint,3,opt,name=voting_power,json=votingPower,proto3" json:"voting_power,omitempty"` ++} ++ ++func (m *ValidatorInfo) Reset() { *m = ValidatorInfo{} } ++func (m *ValidatorInfo) String() string { return proto.CompactTextString(m) } ++func 
(*ValidatorInfo) ProtoMessage() {} ++func (*ValidatorInfo) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{17} ++} ++func (m *ValidatorInfo) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *ValidatorInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_ValidatorInfo.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *ValidatorInfo) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ValidatorInfo.Merge(m, src) ++} ++func (m *ValidatorInfo) XXX_Size() int { ++ return m.Size() ++} ++func (m *ValidatorInfo) XXX_DiscardUnknown() { ++ xxx_messageInfo_ValidatorInfo.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_ValidatorInfo proto.InternalMessageInfo ++ ++func (m *ValidatorInfo) GetAddress() []byte { ++ if m != nil { ++ return m.Address ++ } ++ return nil ++} ++ ++func (m *ValidatorInfo) GetPubKey() *crypto.PublicKey { ++ if m != nil { ++ return m.PubKey ++ } ++ return nil ++} ++ ++func (m *ValidatorInfo) GetVotingPower() int64 { ++ if m != nil { ++ return m.VotingPower ++ } ++ return 0 ++} ++ ++func init() { ++ proto.RegisterType((*RequestPing)(nil), "tendermint.rpc.grpc.RequestPing") ++ proto.RegisterType((*RequestBroadcastTx)(nil), "tendermint.rpc.grpc.RequestBroadcastTx") ++ proto.RegisterType((*BlockByHashRequest)(nil), "tendermint.rpc.grpc.BlockByHashRequest") ++ proto.RegisterType((*BlockByHeightRequest)(nil), "tendermint.rpc.grpc.BlockByHeightRequest") ++ proto.RegisterType((*CommitRequest)(nil), "tendermint.rpc.grpc.CommitRequest") ++ proto.RegisterType((*ValidatorSetRequest)(nil), "tendermint.rpc.grpc.ValidatorSetRequest") ++ proto.RegisterType((*SubscribeNewHeightsRequest)(nil), "tendermint.rpc.grpc.SubscribeNewHeightsRequest") ++ proto.RegisterType((*StatusRequest)(nil), "tendermint.rpc.grpc.StatusRequest") ++ 
proto.RegisterType((*ResponsePing)(nil), "tendermint.rpc.grpc.ResponsePing") ++ proto.RegisterType((*ResponseBroadcastTx)(nil), "tendermint.rpc.grpc.ResponseBroadcastTx") ++ proto.RegisterType((*StreamedBlockByHashResponse)(nil), "tendermint.rpc.grpc.StreamedBlockByHashResponse") ++ proto.RegisterType((*StreamedBlockByHeightResponse)(nil), "tendermint.rpc.grpc.StreamedBlockByHeightResponse") ++ proto.RegisterType((*CommitResponse)(nil), "tendermint.rpc.grpc.CommitResponse") ++ proto.RegisterType((*ValidatorSetResponse)(nil), "tendermint.rpc.grpc.ValidatorSetResponse") ++ proto.RegisterType((*NewHeightEvent)(nil), "tendermint.rpc.grpc.NewHeightEvent") ++ proto.RegisterType((*StatusResponse)(nil), "tendermint.rpc.grpc.StatusResponse") ++ proto.RegisterType((*SyncInfo)(nil), "tendermint.rpc.grpc.SyncInfo") ++ proto.RegisterType((*ValidatorInfo)(nil), "tendermint.rpc.grpc.ValidatorInfo") ++} ++ ++func init() { proto.RegisterFile("tendermint/rpc/grpc/types.proto", fileDescriptor_0ffff5682c662b95) } ++ ++var fileDescriptor_0ffff5682c662b95 = []byte{ ++ // 1102 bytes of a gzipped FileDescriptorProto ++ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x57, 0x4f, 0x6f, 0x1b, 0x45, ++ 0x14, 0xcf, 0x3a, 0xa9, 0xe3, 0x3c, 0xff, 0xa9, 0x32, 0x0e, 0xc5, 0xda, 0xa6, 0x76, 0xba, 0x20, ++ 0x9a, 0x56, 0x62, 0x1d, 0x19, 0xf5, 0x42, 0x2b, 0xa4, 0x38, 0x41, 0x22, 0x4a, 0x55, 0x19, 0x27, ++ 0x70, 0xe0, 0x62, 0xc6, 0xeb, 0x89, 0xbd, 0xc4, 0xde, 0x19, 0x76, 0xc6, 0x6e, 0x7c, 0xe6, 0x0b, ++ 0xf4, 0xc2, 0xc7, 0xe1, 0xde, 0x63, 0x2f, 0x48, 0x9c, 0x00, 0x25, 0x08, 0xbe, 0x06, 0xda, 0x99, ++ 0x59, 0x7b, 0x36, 0xf6, 0xa6, 0x81, 0x63, 0x2f, 0xd6, 0x9b, 0x37, 0xef, 0xbd, 0xfd, 0xbd, 0x37, ++ 0xef, 0x9f, 0xa1, 0x26, 0x48, 0xd0, 0x23, 0xe1, 0xc8, 0x0f, 0x44, 0x3d, 0x64, 0x5e, 0xbd, 0x1f, ++ 0xfd, 0x88, 0x29, 0x23, 0xdc, 0x65, 0x21, 0x15, 0x14, 0x95, 0xe7, 0x02, 0x6e, 0xc8, 0x3c, 0x37, ++ 0x12, 0xb0, 0xef, 0x1b, 0x5a, 0xb8, 0xeb, 0xf9, 0xa6, 0x86, 0xbd, 0x6d, 0x5c, 0x4a, 0x7e, 
0xe2, ++ 0xd6, 0x36, 0x6e, 0x59, 0x83, 0xa5, 0x6a, 0x7a, 0xe1, 0x94, 0x09, 0x5a, 0x3f, 0x27, 0xd3, 0xf8, ++ 0x76, 0x67, 0xc1, 0xee, 0x04, 0x0f, 0xfd, 0x1e, 0x16, 0x34, 0xd4, 0x12, 0xb5, 0x3e, 0xa5, 0xfd, ++ 0x21, 0xa9, 0xcb, 0x53, 0x77, 0x7c, 0x56, 0x17, 0xfe, 0x88, 0x70, 0x81, 0x47, 0x4c, 0x0b, 0x6c, ++ 0xf5, 0x69, 0x9f, 0x4a, 0xb2, 0x1e, 0x51, 0x8a, 0xeb, 0x14, 0x21, 0xdf, 0x26, 0x3f, 0x8e, 0x09, ++ 0x17, 0x2d, 0x3f, 0xe8, 0x3b, 0x1f, 0x03, 0xd2, 0xc7, 0x66, 0x48, 0x71, 0xcf, 0xc3, 0x5c, 0x9c, ++ 0x5e, 0xa0, 0x12, 0x64, 0xc4, 0x45, 0xc5, 0xda, 0xb1, 0x76, 0x0b, 0xed, 0x8c, 0xb8, 0x70, 0xbe, ++ 0x00, 0xd4, 0x1c, 0x52, 0xef, 0xbc, 0x39, 0xfd, 0x0a, 0xf3, 0x81, 0x56, 0x40, 0x08, 0xd6, 0x06, ++ 0x98, 0x0f, 0xb4, 0x9c, 0xa4, 0xd1, 0x16, 0xdc, 0x61, 0x21, 0x9d, 0x90, 0x4a, 0x66, 0xc7, 0xda, ++ 0xcd, 0xb5, 0xd5, 0xc1, 0x39, 0x84, 0xad, 0x58, 0x9f, 0xf8, 0xfd, 0x81, 0x88, 0x2d, 0xdc, 0x83, ++ 0xec, 0x40, 0x32, 0xa4, 0x8d, 0xd5, 0xb6, 0x3e, 0xa5, 0x58, 0x79, 0x04, 0xc5, 0x03, 0x3a, 0x1a, ++ 0xf9, 0xef, 0x52, 0x77, 0x3e, 0x85, 0xf2, 0xb7, 0x71, 0xb4, 0x4e, 0xc8, 0x3b, 0xc5, 0xb7, 0xc1, ++ 0x3e, 0x19, 0x77, 0xb9, 0x17, 0xfa, 0x5d, 0xf2, 0x92, 0xbc, 0x52, 0x10, 0xb9, 0xd6, 0x72, 0xee, ++ 0x42, 0xf1, 0x44, 0x60, 0x31, 0x9e, 0x31, 0x4a, 0x50, 0x68, 0x13, 0xce, 0x68, 0xc0, 0x89, 0x0c, ++ 0xe1, 0xcf, 0x16, 0x94, 0x63, 0x86, 0x19, 0xc4, 0x67, 0x90, 0xf3, 0x06, 0xc4, 0x3b, 0xef, 0xe8, ++ 0x50, 0xe6, 0x1b, 0x3b, 0xae, 0x91, 0x5f, 0x51, 0x2a, 0xb9, 0xb1, 0xde, 0x41, 0x24, 0x78, 0x7a, ++ 0xd1, 0x5e, 0xf7, 0x14, 0x81, 0xf6, 0x01, 0x7a, 0x64, 0xe8, 0x4f, 0x48, 0x18, 0xa9, 0x67, 0xa4, ++ 0xba, 0x93, 0xaa, 0x7e, 0xa8, 0x44, 0x4f, 0x2f, 0xda, 0x1b, 0xbd, 0x98, 0x74, 0xfe, 0xb2, 0xe0, ++ 0xfe, 0x89, 0x08, 0x09, 0x1e, 0x91, 0x5e, 0xe2, 0xf5, 0x94, 0x0e, 0x7a, 0x0a, 0xd0, 0x8d, 0xd8, ++ 0x1d, 0x86, 0x43, 0xa1, 0x11, 0xde, 0x33, 0x3f, 0xa1, 0xb2, 0xb5, 0x85, 0x43, 0xd1, 0xde, 0x90, ++ 0x92, 0x11, 0x89, 0xf6, 0x20, 0xeb, 0xc9, 0x57, 0xd0, 0xa8, 0x2a, 0x8b, 0x2a, 0xfa, 0x95, 0xb4, ++ 0x1c, 
0x3a, 0x80, 0xe2, 0x2c, 0x79, 0x3b, 0x9c, 0x88, 0xca, 0xaa, 0x54, 0xac, 0x2e, 0x2a, 0x26, ++ 0x5e, 0xad, 0x30, 0x31, 0x4e, 0xe8, 0x43, 0x58, 0xf7, 0x79, 0x67, 0x88, 0xb9, 0xa8, 0xac, 0xc9, ++ 0xa4, 0xc8, 0xfa, 0xfc, 0x05, 0xe6, 0xc2, 0xf9, 0xdb, 0x82, 0x07, 0xd7, 0xdd, 0xd4, 0x49, 0xf6, ++ 0x7e, 0x39, 0xda, 0x84, 0x52, 0x9c, 0xfe, 0xda, 0xb1, 0x39, 0x42, 0xeb, 0x76, 0x08, 0x1d, 0x0e, ++ 0x5b, 0xc9, 0xca, 0xd0, 0x96, 0x16, 0x90, 0x5b, 0xff, 0x03, 0xf9, 0xbc, 0xbe, 0x32, 0x89, 0xfa, ++ 0x7a, 0x0e, 0xa5, 0x59, 0x59, 0x7d, 0x39, 0x21, 0x41, 0x7a, 0xdd, 0xc7, 0x1d, 0x25, 0x33, 0xef, ++ 0x28, 0xce, 0xaf, 0x16, 0x94, 0xe2, 0x02, 0xd4, 0x68, 0x9f, 0xc3, 0x46, 0x40, 0x7b, 0xa4, 0xe3, ++ 0x07, 0x67, 0x54, 0x23, 0xad, 0x99, 0x48, 0x59, 0x83, 0xb9, 0x87, 0xe4, 0x0c, 0x8f, 0x87, 0xe2, ++ 0x25, 0xed, 0x91, 0xa3, 0xe0, 0x8c, 0xb6, 0x73, 0x81, 0xa6, 0xd0, 0xe7, 0xb0, 0xc1, 0xa7, 0x81, ++ 0xa7, 0xb4, 0xd5, 0xd3, 0x3e, 0x70, 0x97, 0x34, 0x7e, 0xf7, 0x64, 0x1a, 0x78, 0x4a, 0x97, 0x6b, ++ 0x0a, 0x1d, 0x41, 0x69, 0x1e, 0x27, 0x69, 0x60, 0x75, 0xb1, 0x34, 0x67, 0x06, 0x66, 0xb1, 0x92, ++ 0x56, 0xe6, 0x11, 0x8e, 0x8e, 0xce, 0x3f, 0xab, 0x90, 0x8b, 0xbf, 0x80, 0x9e, 0xc0, 0xe6, 0x10, ++ 0x0b, 0xc2, 0x45, 0x47, 0x65, 0xaa, 0xd1, 0x57, 0xef, 0xaa, 0x0b, 0x99, 0xda, 0x51, 0xfd, 0xa2, ++ 0x4f, 0x40, 0xb3, 0x3a, 0x98, 0xb1, 0x8e, 0x11, 0xaf, 0xa2, 0x62, 0xef, 0x33, 0x26, 0xe5, 0x5c, ++ 0x28, 0x27, 0x6d, 0xaa, 0x88, 0xaf, 0xca, 0x88, 0x6f, 0x9a, 0x56, 0x55, 0xf0, 0x5b, 0xd7, 0x30, ++ 0x44, 0xf3, 0x44, 0xa6, 0x60, 0xbe, 0x61, 0xbb, 0x6a, 0xd8, 0xb8, 0xf1, 0xb0, 0x71, 0x4f, 0xe3, ++ 0x61, 0xd3, 0xcc, 0xbd, 0xf9, 0xbd, 0xb6, 0xf2, 0xfa, 0x8f, 0x9a, 0x95, 0x40, 0x1a, 0xdd, 0x47, ++ 0x08, 0x08, 0x0e, 0x87, 0xfe, 0x35, 0xbf, 0xee, 0x48, 0xb4, 0x9b, 0xf1, 0xd5, 0xdc, 0xb3, 0x27, ++ 0x30, 0x63, 0xce, 0x7d, 0xcb, 0xaa, 0x28, 0xc4, 0x17, 0xb1, 0x77, 0x0d, 0xf8, 0xe0, 0xba, 0x6d, ++ 0xe5, 0xdf, 0xba, 0xf4, 0xaf, 0x9c, 0xb4, 0xae, 0x3c, 0x3c, 0x5d, 0xc0, 0x23, 0x7d, 0xcc, 0xfd, ++ 0x07, 0x1f, 0x93, 0xa8, 
0xa5, 0x97, 0x35, 0xc8, 0x7b, 0x58, 0x78, 0x03, 0x3f, 0xe8, 0x77, 0xc6, ++ 0xac, 0xb2, 0x21, 0x8b, 0x16, 0x62, 0xd6, 0x37, 0xcc, 0xf9, 0xc9, 0x82, 0x62, 0x22, 0x15, 0x50, ++ 0x05, 0xd6, 0x71, 0xaf, 0x17, 0x12, 0xce, 0xf5, 0x23, 0xc7, 0x47, 0xf4, 0x14, 0xd6, 0xd9, 0xb8, ++ 0xdb, 0x39, 0x27, 0x53, 0x9d, 0x9a, 0xdb, 0x66, 0x66, 0xa9, 0x3d, 0xc1, 0x6d, 0x8d, 0xbb, 0x43, ++ 0xdf, 0x3b, 0x26, 0xd3, 0x76, 0x96, 0x8d, 0xbb, 0xc7, 0x64, 0x8a, 0x1e, 0x42, 0x61, 0x42, 0x45, ++ 0x84, 0x80, 0xd1, 0x57, 0x24, 0xd4, 0x8f, 0x9c, 0x57, 0xbc, 0x56, 0xc4, 0x6a, 0xfc, 0x62, 0x41, ++ 0x61, 0x36, 0x9e, 0xf6, 0x5b, 0x47, 0xe8, 0x18, 0xd6, 0xa2, 0xf9, 0x85, 0x76, 0x96, 0xe6, 0xae, ++ 0xb1, 0x24, 0xd8, 0x0f, 0x53, 0x24, 0xe6, 0x43, 0x10, 0x7d, 0x0f, 0x79, 0x73, 0xf6, 0x3d, 0xba, ++ 0xc9, 0xa6, 0x21, 0x68, 0xef, 0xde, 0x68, 0xda, 0x90, 0x6c, 0x5c, 0xae, 0x41, 0x4e, 0x06, 0x3d, ++ 0xc2, 0xfe, 0x03, 0xe4, 0x8d, 0x91, 0x96, 0xf2, 0xb9, 0xc5, 0x95, 0xc5, 0xde, 0x5b, 0x5e, 0xe8, ++ 0xe9, 0x53, 0x72, 0xcf, 0x42, 0x0c, 0x8a, 0x89, 0xb9, 0x82, 0x1e, 0xdf, 0xf8, 0x35, 0x73, 0xc1, ++ 0xb1, 0x1b, 0xb7, 0xfa, 0x5e, 0x62, 0x5c, 0xed, 0x59, 0xe8, 0x6b, 0xc8, 0xaa, 0xbe, 0x8d, 0x96, ++ 0xf7, 0x95, 0xc4, 0x16, 0x64, 0x7f, 0x74, 0xa3, 0x8c, 0x6e, 0x99, 0x1e, 0x14, 0xcc, 0xce, 0x8d, ++ 0x76, 0x6f, 0x6e, 0x58, 0xf3, 0xad, 0xc9, 0x7e, 0x7c, 0x0b, 0x49, 0xfd, 0x91, 0x11, 0x94, 0x97, ++ 0x2c, 0x52, 0xa8, 0xbe, 0x3c, 0x08, 0xa9, 0x2b, 0x57, 0x8a, 0x47, 0xc9, 0x19, 0xa2, 0xc2, 0xa4, ++ 0x06, 0x43, 0x4a, 0x98, 0x12, 0x6b, 0x5b, 0x8a, 0xd1, 0xe4, 0x64, 0x69, 0xbe, 0x78, 0x73, 0x59, ++ 0xb5, 0xde, 0x5e, 0x56, 0xad, 0x3f, 0x2f, 0xab, 0xd6, 0xeb, 0xab, 0xea, 0xca, 0xdb, 0xab, 0xea, ++ 0xca, 0x6f, 0x57, 0xd5, 0x95, 0xef, 0x1a, 0x7d, 0x5f, 0x0c, 0xc6, 0x5d, 0xd7, 0xa3, 0xa3, 0xba, ++ 0xb9, 0x9b, 0x2f, 0xfe, 0xa3, 0x78, 0xe6, 0xd1, 0x90, 0x44, 0x44, 0x37, 0x2b, 0x5b, 0xc9, 0x67, ++ 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xe7, 0x4a, 0xdc, 0x8a, 0x78, 0x0c, 0x00, 0x00, ++} ++ ++// Reference imports to suppress errors if they 
are not otherwise used. ++var _ context.Context ++var _ grpc.ClientConn ++ ++// This is a compile-time assertion to ensure that this generated file ++// is compatible with the grpc package it is being compiled against. ++const _ = grpc.SupportPackageIsVersion4 ++ ++// BroadcastAPIClient is the client API for BroadcastAPI service. ++// ++// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. ++type BroadcastAPIClient interface { ++ Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) ++ BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) ++} ++ ++type broadcastAPIClient struct { ++ cc *grpc.ClientConn ++} ++ ++func NewBroadcastAPIClient(cc *grpc.ClientConn) BroadcastAPIClient { ++ return &broadcastAPIClient{cc} ++} ++ ++func (c *broadcastAPIClient) Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) { ++ out := new(ResponsePing) ++ err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/Ping", in, out, opts...) ++ if err != nil { ++ return nil, err ++ } ++ return out, nil ++} ++ ++func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) { ++ out := new(ResponseBroadcastTx) ++ err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx", in, out, opts...) ++ if err != nil { ++ return nil, err ++ } ++ return out, nil ++} ++ ++// BroadcastAPIServer is the server API for BroadcastAPI service. ++type BroadcastAPIServer interface { ++ Ping(context.Context, *RequestPing) (*ResponsePing, error) ++ BroadcastTx(context.Context, *RequestBroadcastTx) (*ResponseBroadcastTx, error) ++} ++ ++// UnimplementedBroadcastAPIServer can be embedded to have forward compatible implementations. 
++type UnimplementedBroadcastAPIServer struct { ++} ++ ++func (*UnimplementedBroadcastAPIServer) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) { ++ return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") ++} ++func (*UnimplementedBroadcastAPIServer) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { ++ return nil, status.Errorf(codes.Unimplemented, "method BroadcastTx not implemented") ++} ++ ++func RegisterBroadcastAPIServer(s *grpc.Server, srv BroadcastAPIServer) { ++ s.RegisterService(&_BroadcastAPI_serviceDesc, srv) ++} ++ ++func _BroadcastAPI_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { ++ in := new(RequestPing) ++ if err := dec(in); err != nil { ++ return nil, err ++ } ++ if interceptor == nil { ++ return srv.(BroadcastAPIServer).Ping(ctx, in) ++ } ++ info := &grpc.UnaryServerInfo{ ++ Server: srv, ++ FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/Ping", ++ } ++ handler := func(ctx context.Context, req interface{}) (interface{}, error) { ++ return srv.(BroadcastAPIServer).Ping(ctx, req.(*RequestPing)) ++ } ++ return interceptor(ctx, in, info, handler) ++} ++ ++func _BroadcastAPI_BroadcastTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { ++ in := new(RequestBroadcastTx) ++ if err := dec(in); err != nil { ++ return nil, err ++ } ++ if interceptor == nil { ++ return srv.(BroadcastAPIServer).BroadcastTx(ctx, in) ++ } ++ info := &grpc.UnaryServerInfo{ ++ Server: srv, ++ FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx", ++ } ++ handler := func(ctx context.Context, req interface{}) (interface{}, error) { ++ return srv.(BroadcastAPIServer).BroadcastTx(ctx, req.(*RequestBroadcastTx)) ++ } ++ return interceptor(ctx, in, info, handler) ++} ++ ++var _BroadcastAPI_serviceDesc = 
grpc.ServiceDesc{ ++ ServiceName: "tendermint.rpc.grpc.BroadcastAPI", ++ HandlerType: (*BroadcastAPIServer)(nil), ++ Methods: []grpc.MethodDesc{ ++ { ++ MethodName: "Ping", ++ Handler: _BroadcastAPI_Ping_Handler, ++ }, ++ { ++ MethodName: "BroadcastTx", ++ Handler: _BroadcastAPI_BroadcastTx_Handler, ++ }, ++ }, ++ Streams: []grpc.StreamDesc{}, ++ Metadata: "tendermint/rpc/grpc/types.proto", ++} ++ ++// BlockAPIClient is the client API for BlockAPI service. ++// ++// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. ++type BlockAPIClient interface { ++ BlockByHash(ctx context.Context, in *BlockByHashRequest, opts ...grpc.CallOption) (BlockAPI_BlockByHashClient, error) ++ BlockByHeight(ctx context.Context, in *BlockByHeightRequest, opts ...grpc.CallOption) (BlockAPI_BlockByHeightClient, error) ++ Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error) ++ ValidatorSet(ctx context.Context, in *ValidatorSetRequest, opts ...grpc.CallOption) (*ValidatorSetResponse, error) ++ SubscribeNewHeights(ctx context.Context, in *SubscribeNewHeightsRequest, opts ...grpc.CallOption) (BlockAPI_SubscribeNewHeightsClient, error) ++ Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) ++} ++ ++type blockAPIClient struct { ++ cc *grpc.ClientConn ++} ++ ++func NewBlockAPIClient(cc *grpc.ClientConn) BlockAPIClient { ++ return &blockAPIClient{cc} ++} ++ ++func (c *blockAPIClient) BlockByHash(ctx context.Context, in *BlockByHashRequest, opts ...grpc.CallOption) (BlockAPI_BlockByHashClient, error) { ++ stream, err := c.cc.NewStream(ctx, &_BlockAPI_serviceDesc.Streams[0], "/tendermint.rpc.grpc.BlockAPI/BlockByHash", opts...) 
++ if err != nil { ++ return nil, err ++ } ++ x := &blockAPIBlockByHashClient{stream} ++ if err := x.ClientStream.SendMsg(in); err != nil { ++ return nil, err ++ } ++ if err := x.ClientStream.CloseSend(); err != nil { ++ return nil, err ++ } ++ return x, nil ++} ++ ++type BlockAPI_BlockByHashClient interface { ++ Recv() (*StreamedBlockByHashResponse, error) ++ grpc.ClientStream ++} ++ ++type blockAPIBlockByHashClient struct { ++ grpc.ClientStream ++} ++ ++func (x *blockAPIBlockByHashClient) Recv() (*StreamedBlockByHashResponse, error) { ++ m := new(StreamedBlockByHashResponse) ++ if err := x.ClientStream.RecvMsg(m); err != nil { ++ return nil, err ++ } ++ return m, nil ++} ++ ++func (c *blockAPIClient) BlockByHeight(ctx context.Context, in *BlockByHeightRequest, opts ...grpc.CallOption) (BlockAPI_BlockByHeightClient, error) { ++ stream, err := c.cc.NewStream(ctx, &_BlockAPI_serviceDesc.Streams[1], "/tendermint.rpc.grpc.BlockAPI/BlockByHeight", opts...) ++ if err != nil { ++ return nil, err ++ } ++ x := &blockAPIBlockByHeightClient{stream} ++ if err := x.ClientStream.SendMsg(in); err != nil { ++ return nil, err ++ } ++ if err := x.ClientStream.CloseSend(); err != nil { ++ return nil, err ++ } ++ return x, nil ++} ++ ++type BlockAPI_BlockByHeightClient interface { ++ Recv() (*StreamedBlockByHeightResponse, error) ++ grpc.ClientStream ++} ++ ++type blockAPIBlockByHeightClient struct { ++ grpc.ClientStream ++} ++ ++func (x *blockAPIBlockByHeightClient) Recv() (*StreamedBlockByHeightResponse, error) { ++ m := new(StreamedBlockByHeightResponse) ++ if err := x.ClientStream.RecvMsg(m); err != nil { ++ return nil, err ++ } ++ return m, nil ++} ++ ++func (c *blockAPIClient) Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error) { ++ out := new(CommitResponse) ++ err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BlockAPI/Commit", in, out, opts...) 
++ if err != nil { ++ return nil, err ++ } ++ return out, nil ++} ++ ++func (c *blockAPIClient) ValidatorSet(ctx context.Context, in *ValidatorSetRequest, opts ...grpc.CallOption) (*ValidatorSetResponse, error) { ++ out := new(ValidatorSetResponse) ++ err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BlockAPI/ValidatorSet", in, out, opts...) ++ if err != nil { ++ return nil, err ++ } ++ return out, nil ++} ++ ++func (c *blockAPIClient) SubscribeNewHeights(ctx context.Context, in *SubscribeNewHeightsRequest, opts ...grpc.CallOption) (BlockAPI_SubscribeNewHeightsClient, error) { ++ stream, err := c.cc.NewStream(ctx, &_BlockAPI_serviceDesc.Streams[2], "/tendermint.rpc.grpc.BlockAPI/SubscribeNewHeights", opts...) ++ if err != nil { ++ return nil, err ++ } ++ x := &blockAPISubscribeNewHeightsClient{stream} ++ if err := x.ClientStream.SendMsg(in); err != nil { ++ return nil, err ++ } ++ if err := x.ClientStream.CloseSend(); err != nil { ++ return nil, err ++ } ++ return x, nil ++} ++ ++type BlockAPI_SubscribeNewHeightsClient interface { ++ Recv() (*NewHeightEvent, error) ++ grpc.ClientStream ++} ++ ++type blockAPISubscribeNewHeightsClient struct { ++ grpc.ClientStream ++} ++ ++func (x *blockAPISubscribeNewHeightsClient) Recv() (*NewHeightEvent, error) { ++ m := new(NewHeightEvent) ++ if err := x.ClientStream.RecvMsg(m); err != nil { ++ return nil, err ++ } ++ return m, nil ++} ++ ++func (c *blockAPIClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { ++ out := new(StatusResponse) ++ err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BlockAPI/Status", in, out, opts...) ++ if err != nil { ++ return nil, err ++ } ++ return out, nil ++} ++ ++// BlockAPIServer is the server API for BlockAPI service. 
++type BlockAPIServer interface { ++ BlockByHash(*BlockByHashRequest, BlockAPI_BlockByHashServer) error ++ BlockByHeight(*BlockByHeightRequest, BlockAPI_BlockByHeightServer) error ++ Commit(context.Context, *CommitRequest) (*CommitResponse, error) ++ ValidatorSet(context.Context, *ValidatorSetRequest) (*ValidatorSetResponse, error) ++ SubscribeNewHeights(*SubscribeNewHeightsRequest, BlockAPI_SubscribeNewHeightsServer) error ++ Status(context.Context, *StatusRequest) (*StatusResponse, error) ++} ++ ++// UnimplementedBlockAPIServer can be embedded to have forward compatible implementations. ++type UnimplementedBlockAPIServer struct { ++} ++ ++func (*UnimplementedBlockAPIServer) BlockByHash(req *BlockByHashRequest, srv BlockAPI_BlockByHashServer) error { ++ return status.Errorf(codes.Unimplemented, "method BlockByHash not implemented") ++} ++func (*UnimplementedBlockAPIServer) BlockByHeight(req *BlockByHeightRequest, srv BlockAPI_BlockByHeightServer) error { ++ return status.Errorf(codes.Unimplemented, "method BlockByHeight not implemented") ++} ++func (*UnimplementedBlockAPIServer) Commit(ctx context.Context, req *CommitRequest) (*CommitResponse, error) { ++ return nil, status.Errorf(codes.Unimplemented, "method Commit not implemented") ++} ++func (*UnimplementedBlockAPIServer) ValidatorSet(ctx context.Context, req *ValidatorSetRequest) (*ValidatorSetResponse, error) { ++ return nil, status.Errorf(codes.Unimplemented, "method ValidatorSet not implemented") ++} ++func (*UnimplementedBlockAPIServer) SubscribeNewHeights(req *SubscribeNewHeightsRequest, srv BlockAPI_SubscribeNewHeightsServer) error { ++ return status.Errorf(codes.Unimplemented, "method SubscribeNewHeights not implemented") ++} ++func (*UnimplementedBlockAPIServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) { ++ return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") ++} ++ ++func RegisterBlockAPIServer(s *grpc.Server, srv BlockAPIServer) { ++ 
s.RegisterService(&_BlockAPI_serviceDesc, srv) ++} ++ ++func _BlockAPI_BlockByHash_Handler(srv interface{}, stream grpc.ServerStream) error { ++ m := new(BlockByHashRequest) ++ if err := stream.RecvMsg(m); err != nil { ++ return err ++ } ++ return srv.(BlockAPIServer).BlockByHash(m, &blockAPIBlockByHashServer{stream}) ++} ++ ++type BlockAPI_BlockByHashServer interface { ++ Send(*StreamedBlockByHashResponse) error ++ grpc.ServerStream ++} ++ ++type blockAPIBlockByHashServer struct { ++ grpc.ServerStream ++} ++ ++func (x *blockAPIBlockByHashServer) Send(m *StreamedBlockByHashResponse) error { ++ return x.ServerStream.SendMsg(m) ++} ++ ++func _BlockAPI_BlockByHeight_Handler(srv interface{}, stream grpc.ServerStream) error { ++ m := new(BlockByHeightRequest) ++ if err := stream.RecvMsg(m); err != nil { ++ return err ++ } ++ return srv.(BlockAPIServer).BlockByHeight(m, &blockAPIBlockByHeightServer{stream}) ++} ++ ++type BlockAPI_BlockByHeightServer interface { ++ Send(*StreamedBlockByHeightResponse) error ++ grpc.ServerStream ++} ++ ++type blockAPIBlockByHeightServer struct { ++ grpc.ServerStream ++} ++ ++func (x *blockAPIBlockByHeightServer) Send(m *StreamedBlockByHeightResponse) error { ++ return x.ServerStream.SendMsg(m) ++} ++ ++func _BlockAPI_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { ++ in := new(CommitRequest) ++ if err := dec(in); err != nil { ++ return nil, err ++ } ++ if interceptor == nil { ++ return srv.(BlockAPIServer).Commit(ctx, in) ++ } ++ info := &grpc.UnaryServerInfo{ ++ Server: srv, ++ FullMethod: "/tendermint.rpc.grpc.BlockAPI/Commit", ++ } ++ handler := func(ctx context.Context, req interface{}) (interface{}, error) { ++ return srv.(BlockAPIServer).Commit(ctx, req.(*CommitRequest)) ++ } ++ return interceptor(ctx, in, info, handler) ++} ++ ++func _BlockAPI_ValidatorSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { ++ in := new(ValidatorSetRequest) ++ if err := dec(in); err != nil { ++ return nil, err ++ } ++ if interceptor == nil { ++ return srv.(BlockAPIServer).ValidatorSet(ctx, in) ++ } ++ info := &grpc.UnaryServerInfo{ ++ Server: srv, ++ FullMethod: "/tendermint.rpc.grpc.BlockAPI/ValidatorSet", ++ } ++ handler := func(ctx context.Context, req interface{}) (interface{}, error) { ++ return srv.(BlockAPIServer).ValidatorSet(ctx, req.(*ValidatorSetRequest)) ++ } ++ return interceptor(ctx, in, info, handler) ++} ++ ++func _BlockAPI_SubscribeNewHeights_Handler(srv interface{}, stream grpc.ServerStream) error { ++ m := new(SubscribeNewHeightsRequest) ++ if err := stream.RecvMsg(m); err != nil { ++ return err ++ } ++ return srv.(BlockAPIServer).SubscribeNewHeights(m, &blockAPISubscribeNewHeightsServer{stream}) ++} ++ ++type BlockAPI_SubscribeNewHeightsServer interface { ++ Send(*NewHeightEvent) error ++ grpc.ServerStream ++} ++ ++type blockAPISubscribeNewHeightsServer struct { ++ grpc.ServerStream ++} ++ ++func (x *blockAPISubscribeNewHeightsServer) Send(m *NewHeightEvent) error { ++ return x.ServerStream.SendMsg(m) ++} ++ ++func _BlockAPI_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { ++ in := new(StatusRequest) ++ if err := dec(in); err != nil { ++ return nil, err ++ } ++ if interceptor == nil { ++ return srv.(BlockAPIServer).Status(ctx, in) ++ } ++ info := &grpc.UnaryServerInfo{ ++ Server: srv, ++ FullMethod: "/tendermint.rpc.grpc.BlockAPI/Status", ++ } ++ handler := func(ctx context.Context, req interface{}) (interface{}, error) { ++ return srv.(BlockAPIServer).Status(ctx, req.(*StatusRequest)) ++ } ++ return interceptor(ctx, in, info, handler) ++} ++ ++var _BlockAPI_serviceDesc = grpc.ServiceDesc{ ++ ServiceName: "tendermint.rpc.grpc.BlockAPI", ++ HandlerType: (*BlockAPIServer)(nil), ++ Methods: []grpc.MethodDesc{ 
++ { ++ MethodName: "Commit", ++ Handler: _BlockAPI_Commit_Handler, ++ }, ++ { ++ MethodName: "ValidatorSet", ++ Handler: _BlockAPI_ValidatorSet_Handler, ++ }, ++ { ++ MethodName: "Status", ++ Handler: _BlockAPI_Status_Handler, ++ }, ++ }, ++ Streams: []grpc.StreamDesc{ ++ { ++ StreamName: "BlockByHash", ++ Handler: _BlockAPI_BlockByHash_Handler, ++ ServerStreams: true, ++ }, ++ { ++ StreamName: "BlockByHeight", ++ Handler: _BlockAPI_BlockByHeight_Handler, ++ ServerStreams: true, ++ }, ++ { ++ StreamName: "SubscribeNewHeights", ++ Handler: _BlockAPI_SubscribeNewHeights_Handler, ++ ServerStreams: true, ++ }, ++ }, ++ Metadata: "tendermint/rpc/grpc/types.proto", ++} ++ ++func (m *RequestPing) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *RequestPing) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *RequestPing) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ return len(dAtA) - i, nil ++} ++ ++func (m *RequestBroadcastTx) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *RequestBroadcastTx) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *RequestBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if len(m.Tx) > 0 { ++ i -= len(m.Tx) ++ copy(dAtA[i:], m.Tx) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *BlockByHashRequest) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = 
make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *BlockByHashRequest) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *BlockByHashRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if m.Prove { ++ i-- ++ if m.Prove { ++ dAtA[i] = 1 ++ } else { ++ dAtA[i] = 0 ++ } ++ i-- ++ dAtA[i] = 0x10 ++ } ++ if len(m.Hash) > 0 { ++ i -= len(m.Hash) ++ copy(dAtA[i:], m.Hash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *BlockByHeightRequest) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *BlockByHeightRequest) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *BlockByHeightRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if m.Prove { ++ i-- ++ if m.Prove { ++ dAtA[i] = 1 ++ } else { ++ dAtA[i] = 0 ++ } ++ i-- ++ dAtA[i] = 0x10 ++ } ++ if m.Height != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Height)) ++ i-- ++ dAtA[i] = 0x8 ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *CommitRequest) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *CommitRequest) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *CommitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if 
m.Height != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Height)) ++ i-- ++ dAtA[i] = 0x8 ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *ValidatorSetRequest) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *ValidatorSetRequest) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *ValidatorSetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if m.Height != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Height)) ++ i-- ++ dAtA[i] = 0x8 ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *SubscribeNewHeightsRequest) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *SubscribeNewHeightsRequest) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *SubscribeNewHeightsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ return len(dAtA) - i, nil ++} ++ ++func (m *StatusRequest) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ return len(dAtA) - i, nil ++} ++ ++func (m *ResponsePing) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) 
++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *ResponsePing) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *ResponsePing) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ return len(dAtA) - i, nil ++} ++ ++func (m *ResponseBroadcastTx) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *ResponseBroadcastTx) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *ResponseBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if m.DeliverTx != nil { ++ { ++ size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x12 ++ } ++ if m.CheckTx != nil { ++ { ++ size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *StreamedBlockByHashResponse) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *StreamedBlockByHashResponse) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *StreamedBlockByHashResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if m.IsLast { ++ i-- ++ if m.IsLast { 
++ dAtA[i] = 1 ++ } else { ++ dAtA[i] = 0 ++ } ++ i-- ++ dAtA[i] = 0x20 ++ } ++ if m.ValidatorSet != nil { ++ { ++ size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x1a ++ } ++ if m.Commit != nil { ++ { ++ size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x12 ++ } ++ if m.BlockPart != nil { ++ { ++ size, err := m.BlockPart.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *StreamedBlockByHeightResponse) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *StreamedBlockByHeightResponse) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *StreamedBlockByHeightResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if m.IsLast { ++ i-- ++ if m.IsLast { ++ dAtA[i] = 1 ++ } else { ++ dAtA[i] = 0 ++ } ++ i-- ++ dAtA[i] = 0x20 ++ } ++ if m.ValidatorSet != nil { ++ { ++ size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x1a ++ } ++ if m.Commit != nil { ++ { ++ size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x12 ++ } ++ if m.BlockPart != nil { ++ { ++ size, err := 
m.BlockPart.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *CommitResponse) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *CommitResponse) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *CommitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if m.Commit != nil { ++ { ++ size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *ValidatorSetResponse) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *ValidatorSetResponse) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *ValidatorSetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if m.Height != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Height)) ++ i-- ++ dAtA[i] = 0x10 ++ } ++ if m.ValidatorSet != nil { ++ { ++ size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *NewHeightEvent) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *NewHeightEvent) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *NewHeightEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if len(m.Hash) > 0 { ++ i -= len(m.Hash) ++ copy(dAtA[i:], m.Hash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) ++ i-- ++ dAtA[i] = 0x12 ++ } ++ if m.Height != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Height)) ++ i-- ++ dAtA[i] = 0x8 ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *StatusResponse) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if m.ValidatorInfo != nil { ++ { ++ size, err := m.ValidatorInfo.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x1a ++ } ++ if m.SyncInfo != nil { ++ { ++ size, err := m.SyncInfo.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x12 ++ } ++ if m.NodeInfo != nil { ++ { ++ size, err := m.NodeInfo.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *SyncInfo) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ 
n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *SyncInfo) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *SyncInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if m.CatchingUp { ++ i-- ++ if m.CatchingUp { ++ dAtA[i] = 1 ++ } else { ++ dAtA[i] = 0 ++ } ++ i-- ++ dAtA[i] = 0x48 ++ } ++ n14, err14 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EarliestBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EarliestBlockTime):]) ++ if err14 != nil { ++ return 0, err14 ++ } ++ i -= n14 ++ i = encodeVarintTypes(dAtA, i, uint64(n14)) ++ i-- ++ dAtA[i] = 0x42 ++ if m.EarliestBlockHeight != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.EarliestBlockHeight)) ++ i-- ++ dAtA[i] = 0x38 ++ } ++ if len(m.EarliestAppHash) > 0 { ++ i -= len(m.EarliestAppHash) ++ copy(dAtA[i:], m.EarliestAppHash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.EarliestAppHash))) ++ i-- ++ dAtA[i] = 0x32 ++ } ++ if len(m.EarliestBlockHash) > 0 { ++ i -= len(m.EarliestBlockHash) ++ copy(dAtA[i:], m.EarliestBlockHash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.EarliestBlockHash))) ++ i-- ++ dAtA[i] = 0x2a ++ } ++ n15, err15 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LatestBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LatestBlockTime):]) ++ if err15 != nil { ++ return 0, err15 ++ } ++ i -= n15 ++ i = encodeVarintTypes(dAtA, i, uint64(n15)) ++ i-- ++ dAtA[i] = 0x22 ++ if m.LatestBlockHeight != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.LatestBlockHeight)) ++ i-- ++ dAtA[i] = 0x18 ++ } ++ if len(m.LatestAppHash) > 0 { ++ i -= len(m.LatestAppHash) ++ copy(dAtA[i:], m.LatestAppHash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.LatestAppHash))) ++ i-- ++ dAtA[i] = 0x12 ++ } ++ if len(m.LatestBlockHash) > 0 { ++ i -= len(m.LatestBlockHash) ++ 
copy(dAtA[i:], m.LatestBlockHash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.LatestBlockHash))) ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *ValidatorInfo) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *ValidatorInfo) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *ValidatorInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if m.VotingPower != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.VotingPower)) ++ i-- ++ dAtA[i] = 0x18 ++ } ++ if m.PubKey != nil { ++ { ++ size, err := m.PubKey.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x12 ++ } ++ if len(m.Address) > 0 { ++ i -= len(m.Address) ++ copy(dAtA[i:], m.Address) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.Address))) ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil ++} ++ ++func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { ++ offset -= sovTypes(v) ++ base := offset ++ for v >= 1<<7 { ++ dAtA[offset] = uint8(v&0x7f | 0x80) ++ v >>= 7 ++ offset++ ++ } ++ dAtA[offset] = uint8(v) ++ return base ++} ++func (m *RequestPing) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ return n ++} ++ ++func (m *RequestBroadcastTx) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ l = len(m.Tx) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ ++func (m *BlockByHashRequest) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ l = len(m.Hash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.Prove { ++ n += 2 ++ } ++ return n ++} ++ ++func (m 
*BlockByHeightRequest) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.Height != 0 { ++ n += 1 + sovTypes(uint64(m.Height)) ++ } ++ if m.Prove { ++ n += 2 ++ } ++ return n ++} ++ ++func (m *CommitRequest) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.Height != 0 { ++ n += 1 + sovTypes(uint64(m.Height)) ++ } ++ return n ++} ++ ++func (m *ValidatorSetRequest) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.Height != 0 { ++ n += 1 + sovTypes(uint64(m.Height)) ++ } ++ return n ++} ++ ++func (m *SubscribeNewHeightsRequest) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ return n ++} ++ ++func (m *StatusRequest) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ return n ++} ++ ++func (m *ResponsePing) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ return n ++} ++ ++func (m *ResponseBroadcastTx) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.CheckTx != nil { ++ l = m.CheckTx.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.DeliverTx != nil { ++ l = m.DeliverTx.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ ++func (m *StreamedBlockByHashResponse) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.BlockPart != nil { ++ l = m.BlockPart.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.Commit != nil { ++ l = m.Commit.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.ValidatorSet != nil { ++ l = m.ValidatorSet.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.IsLast { ++ n += 2 ++ } ++ return n ++} ++ ++func (m *StreamedBlockByHeightResponse) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.BlockPart != nil { ++ l = m.BlockPart.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.Commit != nil { ++ l = m.Commit.Size() ++ n += 1 + l + sovTypes(uint64(l)) 
++ } ++ if m.ValidatorSet != nil { ++ l = m.ValidatorSet.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.IsLast { ++ n += 2 ++ } ++ return n ++} ++ ++func (m *CommitResponse) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.Commit != nil { ++ l = m.Commit.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ ++func (m *ValidatorSetResponse) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.ValidatorSet != nil { ++ l = m.ValidatorSet.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.Height != 0 { ++ n += 1 + sovTypes(uint64(m.Height)) ++ } ++ return n ++} ++ ++func (m *NewHeightEvent) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.Height != 0 { ++ n += 1 + sovTypes(uint64(m.Height)) ++ } ++ l = len(m.Hash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ ++func (m *StatusResponse) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.NodeInfo != nil { ++ l = m.NodeInfo.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.SyncInfo != nil { ++ l = m.SyncInfo.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.ValidatorInfo != nil { ++ l = m.ValidatorInfo.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ ++func (m *SyncInfo) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ l = len(m.LatestBlockHash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ l = len(m.LatestAppHash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.LatestBlockHeight != 0 { ++ n += 1 + sovTypes(uint64(m.LatestBlockHeight)) ++ } ++ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.LatestBlockTime) ++ n += 1 + l + sovTypes(uint64(l)) ++ l = len(m.EarliestBlockHash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ l = len(m.EarliestAppHash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.EarliestBlockHeight != 0 { ++ n += 1 + 
sovTypes(uint64(m.EarliestBlockHeight)) ++ } ++ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.EarliestBlockTime) ++ n += 1 + l + sovTypes(uint64(l)) ++ if m.CatchingUp { ++ n += 2 ++ } ++ return n ++} ++ ++func (m *ValidatorInfo) Size() (n int) { ++ if m == nil { ++ return 0 ++ } + var l int + _ = l +- if m.CheckTx != nil { +- l = m.CheckTx.Size() ++ l = len(m.Address) ++ if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } +- if m.DeliverTx != nil { +- l = m.DeliverTx.Size() +- n += 1 + l + sovTypes(uint64(l)) ++ if m.PubKey != nil { ++ l = m.PubKey.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.VotingPower != 0 { ++ n += 1 + sovTypes(uint64(m.VotingPower)) ++ } ++ return n ++} ++ ++func sovTypes(x uint64) (n int) { ++ return (math_bits.Len64(x|1) + 6) / 7 ++} ++func sozTypes(x uint64) (n int) { ++ return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) ++} ++func (m *RequestPing) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: RequestPing: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: RequestPing: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *RequestBroadcastTx) Unmarshal(dAtA 
[]byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: RequestBroadcastTx: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: RequestBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) 
++ if m.Tx == nil { ++ m.Tx = []byte{} ++ } ++ iNdEx = postIndex ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *BlockByHashRequest) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: BlockByHashRequest: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: BlockByHashRequest: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
++ if m.Hash == nil { ++ m.Hash = []byte{} ++ } ++ iNdEx = postIndex ++ case 2: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) ++ } ++ var v int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ v |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ m.Prove = bool(v != 0) ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *BlockByHeightRequest) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: BlockByHeightRequest: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: BlockByHeightRequest: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) ++ } ++ m.Height = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.Height |= int64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ case 2: ++ if wireType != 0 { ++ return 
fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) ++ } ++ var v int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ v |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ m.Prove = bool(v != 0) ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *CommitRequest) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: CommitRequest: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: CommitRequest: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) ++ } ++ m.Height = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.Height |= int64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ 
} ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *ValidatorSetRequest) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: ValidatorSetRequest: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: ValidatorSetRequest: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) ++ } ++ m.Height = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.Height |= int64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *SubscribeNewHeightsRequest) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= 
uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: SubscribeNewHeightsRequest: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: SubscribeNewHeightsRequest: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *StatusRequest) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *ResponsePing) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; 
shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: ResponsePing: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: ResponsePing: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: ResponseBroadcastTx: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: ResponseBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ 
} ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.CheckTx == nil { ++ m.CheckTx = &types.ResponseCheckTx{} ++ } ++ if err := m.CheckTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 2: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.DeliverTx == nil { ++ m.DeliverTx = &types.ResponseDeliverTx{} ++ } ++ if err := m.DeliverTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *StreamedBlockByHashResponse) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) 
++ if wireType == 4 { ++ return fmt.Errorf("proto: StreamedBlockByHashResponse: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: StreamedBlockByHashResponse: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field BlockPart", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.BlockPart == nil { ++ m.BlockPart = &types1.Part{} ++ } ++ if err := m.BlockPart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 2: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.Commit == nil { ++ m.Commit = &types1.Commit{} ++ } ++ if err := m.Commit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 3: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ 
return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.ValidatorSet == nil { ++ m.ValidatorSet = &types1.ValidatorSet{} ++ } ++ if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 4: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field IsLast", wireType) ++ } ++ var v int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ v |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ m.IsLast = bool(v != 0) ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *StreamedBlockByHeightResponse) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: StreamedBlockByHeightResponse: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: 
StreamedBlockByHeightResponse: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field BlockPart", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.BlockPart == nil { ++ m.BlockPart = &types1.Part{} ++ } ++ if err := m.BlockPart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 2: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.Commit == nil { ++ m.Commit = &types1.Commit{} ++ } ++ if err := m.Commit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 3: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { 
++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.ValidatorSet == nil { ++ m.ValidatorSet = &types1.ValidatorSet{} ++ } ++ if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 4: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field IsLast", wireType) ++ } ++ var v int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ v |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ m.IsLast = bool(v != 0) ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *CommitResponse) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: CommitResponse: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: CommitResponse: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) ++ 
} ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.Commit == nil { ++ m.Commit = &types1.Commit{} ++ } ++ if err := m.Commit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *ValidatorSetResponse) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: ValidatorSetResponse: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: ValidatorSetResponse: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return 
io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.ValidatorSet == nil { ++ m.ValidatorSet = &types1.ValidatorSet{} ++ } ++ if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 2: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) ++ } ++ m.Height = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.Height |= int64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *NewHeightEvent) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: NewHeightEvent: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: NewHeightEvent: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if 
wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) ++ } ++ m.Height = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.Height |= int64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ case 2: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
++ if m.Hash == nil { ++ m.Hash = []byte{} ++ } ++ iNdEx = postIndex ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF + } +- return n ++ return nil + } ++func (m *StatusResponse) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field NodeInfo", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.NodeInfo == nil { ++ m.NodeInfo = &p2p.DefaultNodeInfo{} ++ } ++ if err := m.NodeInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 2: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = 
%d for field SyncInfo", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.SyncInfo == nil { ++ m.SyncInfo = &SyncInfo{} ++ } ++ if err := m.SyncInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 3: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field ValidatorInfo", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.ValidatorInfo == nil { ++ m.ValidatorInfo = &ValidatorInfo{} ++ } ++ if err := m.ValidatorInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } + +-func sovTypes(x uint64) (n int) { +- return (math_bits.Len64(x|1) + 6) / 7 +-} +-func sozTypes(x uint64) (n int) { +- return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil 
+ } +-func (m *RequestPing) Unmarshal(dAtA []byte) error { ++func (m *SyncInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { +@@ -542,78 +4155,182 @@ func (m *RequestPing) Unmarshal(dAtA []byte) error { + if shift >= 64 { + return ErrIntOverflowTypes + } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: SyncInfo: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: SyncInfo: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field LatestBlockHash", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.LatestBlockHash = append(m.LatestBlockHash[:0], dAtA[iNdEx:postIndex]...) 
++ if m.LatestBlockHash == nil { ++ m.LatestBlockHash = []byte{} ++ } ++ iNdEx = postIndex ++ case 2: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field LatestAppHash", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.LatestAppHash = append(m.LatestAppHash[:0], dAtA[iNdEx:postIndex]...) ++ if m.LatestAppHash == nil { ++ m.LatestAppHash = []byte{} ++ } ++ iNdEx = postIndex ++ case 3: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field LatestBlockHeight", wireType) ++ } ++ m.LatestBlockHeight = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.LatestBlockHeight |= int64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } + } +- b := dAtA[iNdEx] +- iNdEx++ +- wire |= uint64(b&0x7F) << shift +- if b < 0x80 { +- break ++ case 4: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field LatestBlockTime", wireType) + } +- } +- fieldNum := int32(wire >> 3) +- wireType := int(wire & 0x7) +- if wireType == 4 { +- return fmt.Errorf("proto: RequestPing: wiretype end group for non-group") +- } +- if fieldNum <= 0 { +- return fmt.Errorf("proto: RequestPing: illegal tag %d (wire type %d)", fieldNum, wire) +- } +- switch fieldNum { +- default: +- iNdEx = preIndex +- skippy, err := skipTypes(dAtA[iNdEx:]) +- if err != nil { +- return err ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return 
ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } + } +- if (skippy < 0) || (iNdEx+skippy) < 0 { ++ if msglen < 0 { + return ErrInvalidLengthTypes + } +- if (iNdEx + skippy) > l { ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { + return io.ErrUnexpectedEOF + } +- iNdEx += skippy +- } +- } +- +- if iNdEx > l { +- return io.ErrUnexpectedEOF +- } +- return nil +-} +-func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) error { +- l := len(dAtA) +- iNdEx := 0 +- for iNdEx < l { +- preIndex := iNdEx +- var wire uint64 +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowTypes ++ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.LatestBlockTime, dAtA[iNdEx:postIndex]); err != nil { ++ return err + } +- if iNdEx >= l { ++ iNdEx = postIndex ++ case 5: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field EarliestBlockHash", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { + return io.ErrUnexpectedEOF + } +- b := dAtA[iNdEx] +- iNdEx++ +- wire |= uint64(b&0x7F) << shift +- if b < 0x80 { +- break ++ m.EarliestBlockHash = append(m.EarliestBlockHash[:0], dAtA[iNdEx:postIndex]...) 
++ if m.EarliestBlockHash == nil { ++ m.EarliestBlockHash = []byte{} + } +- } +- fieldNum := int32(wire >> 3) +- wireType := int(wire & 0x7) +- if wireType == 4 { +- return fmt.Errorf("proto: RequestBroadcastTx: wiretype end group for non-group") +- } +- if fieldNum <= 0 { +- return fmt.Errorf("proto: RequestBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) +- } +- switch fieldNum { +- case 1: ++ iNdEx = postIndex ++ case 6: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field EarliestAppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { +@@ -640,61 +4357,83 @@ func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) error { + if postIndex > l { + return io.ErrUnexpectedEOF + } +- m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) +- if m.Tx == nil { +- m.Tx = []byte{} ++ m.EarliestAppHash = append(m.EarliestAppHash[:0], dAtA[iNdEx:postIndex]...) ++ if m.EarliestAppHash == nil { ++ m.EarliestAppHash = []byte{} + } + iNdEx = postIndex +- default: +- iNdEx = preIndex +- skippy, err := skipTypes(dAtA[iNdEx:]) +- if err != nil { +- return err ++ case 7: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field EarliestBlockHeight", wireType) + } +- if (skippy < 0) || (iNdEx+skippy) < 0 { ++ m.EarliestBlockHeight = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.EarliestBlockHeight |= int64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ case 8: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field EarliestBlockTime", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= 
int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { + return ErrInvalidLengthTypes + } +- if (iNdEx + skippy) > l { ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { + return io.ErrUnexpectedEOF + } +- iNdEx += skippy +- } +- } +- +- if iNdEx > l { +- return io.ErrUnexpectedEOF +- } +- return nil +-} +-func (m *ResponsePing) Unmarshal(dAtA []byte) error { +- l := len(dAtA) +- iNdEx := 0 +- for iNdEx < l { +- preIndex := iNdEx +- var wire uint64 +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowTypes ++ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.EarliestBlockTime, dAtA[iNdEx:postIndex]); err != nil { ++ return err + } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF ++ iNdEx = postIndex ++ case 9: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field CatchingUp", wireType) + } +- b := dAtA[iNdEx] +- iNdEx++ +- wire |= uint64(b&0x7F) << shift +- if b < 0x80 { +- break ++ var v int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ v |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } + } +- } +- fieldNum := int32(wire >> 3) +- wireType := int(wire & 0x7) +- if wireType == 4 { +- return fmt.Errorf("proto: ResponsePing: wiretype end group for non-group") +- } +- if fieldNum <= 0 { +- return fmt.Errorf("proto: ResponsePing: illegal tag %d (wire type %d)", fieldNum, wire) +- } +- switch fieldNum { ++ m.CatchingUp = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) +@@ -716,7 +4455,7 @@ func (m *ResponsePing) Unmarshal(dAtA []byte) error { + } + return nil + } +-func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { ++func (m *ValidatorInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { +@@ -739,17 
+4478,17 @@ func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { +- return fmt.Errorf("proto: ResponseBroadcastTx: wiretype end group for non-group") ++ return fmt.Errorf("proto: ValidatorInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { +- return fmt.Errorf("proto: ResponseBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) ++ return fmt.Errorf("proto: ValidatorInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } +- var msglen int ++ var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -759,31 +4498,29 @@ func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- msglen |= int(b&0x7F) << shift ++ byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } +- if msglen < 0 { ++ if byteLen < 0 { + return ErrInvalidLengthTypes + } +- postIndex := iNdEx + msglen ++ postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } +- if m.CheckTx == nil { +- m.CheckTx = &types.ResponseCheckTx{} +- } +- if err := m.CheckTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { +- return err ++ m.Address = append(m.Address[:0], dAtA[iNdEx:postIndex]...) 
++ if m.Address == nil { ++ m.Address = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { +@@ -810,13 +4547,32 @@ func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { + if postIndex > l { + return io.ErrUnexpectedEOF + } +- if m.DeliverTx == nil { +- m.DeliverTx = &types.ResponseDeliverTx{} ++ if m.PubKey == nil { ++ m.PubKey = &crypto.PublicKey{} + } +- if err := m.DeliverTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ if err := m.PubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex ++ case 3: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field VotingPower", wireType) ++ } ++ m.VotingPower = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.VotingPower |= int64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/patches/proto/tendermint/state/types.pb.go.patch b/patches/proto/tendermint/state/types.pb.go.patch new file mode 100644 index 00000000000..ca95e2eea46 --- /dev/null +++ b/patches/proto/tendermint/state/types.pb.go.patch @@ -0,0 +1,223 @@ +diff --git a/proto/tendermint/state/types.pb.go b/proto/tendermint/state/types.pb.go +index 6b57ca1ae..122cda8eb 100644 +--- a/proto/tendermint/state/types.pb.go ++++ b/proto/tendermint/state/types.pb.go +@@ -330,6 +330,8 @@ type State struct { + LastResultsHash []byte `protobuf:"bytes,12,opt,name=last_results_hash,json=lastResultsHash,proto3" json:"last_results_hash,omitempty"` + // the latest AppHash we've received from calling abci.Commit() + AppHash []byte 
`protobuf:"bytes,13,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` ++ // timeouts to be used for the next block height ++ Timeouts types.TimeoutsInfo `protobuf:"bytes,15,opt,name=timeouts,proto3" json:"timeouts"` + } + + func (m *State) Reset() { *m = State{} } +@@ -463,6 +465,13 @@ func (m *State) GetAppHash() []byte { + return nil + } + ++func (m *State) GetTimeouts() types.TimeoutsInfo { ++ if m != nil { ++ return m.Timeouts ++ } ++ return types.TimeoutsInfo{} ++} ++ + func init() { + proto.RegisterType((*ABCIResponses)(nil), "tendermint.state.ABCIResponses") + proto.RegisterType((*ValidatorsInfo)(nil), "tendermint.state.ValidatorsInfo") +@@ -475,58 +484,59 @@ func init() { + func init() { proto.RegisterFile("tendermint/state/types.proto", fileDescriptor_ccfacf933f22bf93) } + + var fileDescriptor_ccfacf933f22bf93 = []byte{ +- // 805 bytes of a gzipped FileDescriptorProto +- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcd, 0x8e, 0xe3, 0x44, +- 0x10, 0x8e, 0xc9, 0x6e, 0x7e, 0xca, 0x93, 0x64, 0xb7, 0x07, 0x21, 0x6f, 0x96, 0x75, 0xb2, 0xe1, +- 0x47, 0x23, 0x0e, 0x8e, 0xb4, 0x1c, 0x10, 0x97, 0x95, 0xd6, 0x09, 0xb0, 0x91, 0x56, 0x08, 0x3c, +- 0xa3, 0x39, 0x70, 0xb1, 0x3a, 0x71, 0x8f, 0x6d, 0x91, 0xd8, 0x96, 0xbb, 0x13, 0x86, 0x07, 0xe0, +- 0x3e, 0x57, 0xde, 0x68, 0x8e, 0x73, 0x44, 0x1c, 0x06, 0xc8, 0xbc, 0x08, 0xea, 0x1f, 0xdb, 0x9d, +- 0x84, 0x91, 0x06, 0xed, 0xad, 0x5d, 0xf5, 0xd5, 0x57, 0x5f, 0x55, 0x57, 0xb5, 0xe1, 0x63, 0x46, +- 0x92, 0x80, 0xe4, 0xab, 0x38, 0x61, 0x63, 0xca, 0x30, 0x23, 0x63, 0xf6, 0x6b, 0x46, 0xa8, 0x93, +- 0xe5, 0x29, 0x4b, 0xd1, 0x93, 0xca, 0xeb, 0x08, 0x6f, 0xff, 0xc3, 0x30, 0x0d, 0x53, 0xe1, 0x1c, +- 0xf3, 0x93, 0xc4, 0xf5, 0x9f, 0x6b, 0x2c, 0x78, 0xbe, 0x88, 0x75, 0x92, 0xbe, 0x9e, 0x42, 0xd8, +- 0x77, 0xbc, 0xc3, 0x03, 0xef, 0x06, 0x2f, 0xe3, 0x00, 0xb3, 0x34, 0x57, 0x88, 0x17, 0x07, 0x88, +- 0x0c, 0xe7, 0x78, 0x55, 0x10, 0xd8, 0x9a, 0x7b, 0x43, 0x72, 0x1a, 0xa7, 0xc9, 0x4e, 0x82, 0x41, +- 
0x98, 0xa6, 0xe1, 0x92, 0x8c, 0xc5, 0xd7, 0x7c, 0x7d, 0x31, 0x66, 0xf1, 0x8a, 0x50, 0x86, 0x57, +- 0x99, 0x04, 0x8c, 0xfe, 0x34, 0xa0, 0xf3, 0xc6, 0x9d, 0xcc, 0x3c, 0x42, 0xb3, 0x34, 0xa1, 0x84, +- 0xa2, 0x09, 0x98, 0x01, 0x59, 0xc6, 0x1b, 0x92, 0xfb, 0xec, 0x92, 0x5a, 0xc6, 0xb0, 0x7e, 0x62, +- 0xbe, 0x1a, 0x39, 0x5a, 0x33, 0x78, 0x91, 0x4e, 0x11, 0x30, 0x95, 0xd8, 0xb3, 0x4b, 0x0f, 0x82, +- 0xe2, 0x48, 0xd1, 0x6b, 0x68, 0x93, 0x24, 0xf0, 0xe7, 0xcb, 0x74, 0xf1, 0xb3, 0xf5, 0xc1, 0xd0, +- 0x38, 0x31, 0x5f, 0xbd, 0xbc, 0x97, 0xe2, 0x9b, 0x24, 0x70, 0x39, 0xd0, 0x6b, 0x11, 0x75, 0x42, +- 0x53, 0x30, 0xe7, 0x24, 0x8c, 0x13, 0xc5, 0x50, 0x17, 0x0c, 0x9f, 0xdc, 0xcb, 0xe0, 0x72, 0xac, +- 0xe4, 0x80, 0x79, 0x79, 0x1e, 0xfd, 0x66, 0x40, 0xf7, 0xbc, 0x68, 0x28, 0x9d, 0x25, 0x17, 0x29, +- 0x9a, 0x40, 0xa7, 0x6c, 0xb1, 0x4f, 0x09, 0xb3, 0x0c, 0x41, 0x6d, 0xeb, 0xd4, 0xb2, 0x81, 0x65, +- 0xe0, 0x29, 0x61, 0xde, 0xd1, 0x46, 0xfb, 0x42, 0x0e, 0x1c, 0x2f, 0x31, 0x65, 0x7e, 0x44, 0xe2, +- 0x30, 0x62, 0xfe, 0x22, 0xc2, 0x49, 0x48, 0x02, 0x51, 0x67, 0xdd, 0x7b, 0xca, 0x5d, 0x6f, 0x85, +- 0x67, 0x22, 0x1d, 0xa3, 0xdf, 0x0d, 0x38, 0x9e, 0x70, 0x9d, 0x09, 0x5d, 0xd3, 0x1f, 0xc4, 0xfd, +- 0x09, 0x31, 0x1e, 0x3c, 0x59, 0x14, 0x66, 0x5f, 0xde, 0xab, 0xd2, 0xf3, 0xf2, 0x50, 0xcf, 0x1e, +- 0x81, 0xfb, 0xe8, 0xfa, 0x76, 0x50, 0xf3, 0x7a, 0x8b, 0x5d, 0xf3, 0xff, 0xd6, 0x46, 0xe1, 0xe9, +- 0xce, 0xfd, 0x0b, 0x61, 0xdf, 0x42, 0x97, 0xf7, 0xd7, 0xcf, 0x0b, 0xab, 0x92, 0x35, 0x70, 0xf6, +- 0x77, 0xc2, 0xd9, 0x09, 0xf6, 0x3a, 0x3c, 0xac, 0x9a, 0xa5, 0x8f, 0xa0, 0x21, 0x75, 0xa8, 0xfc, +- 0xea, 0x6b, 0x14, 0x41, 0xf3, 0x5c, 0x4e, 0x2b, 0x7a, 0x03, 0xed, 0xb2, 0x04, 0x95, 0xe5, 0x85, +- 0x9e, 0x45, 0x4d, 0x75, 0x55, 0xbe, 0x2a, 0xbc, 0x8a, 0x42, 0x7d, 0x68, 0xd1, 0xf4, 0x82, 0xfd, +- 0x82, 0x73, 0x22, 0xf2, 0xb4, 0xbd, 0xf2, 0x7b, 0xf4, 0x4f, 0x03, 0x1e, 0x9f, 0x72, 0xa1, 0xe8, +- 0x6b, 0x68, 0x2a, 0x2e, 0x95, 0xe6, 0xd9, 0x61, 0x31, 0x4a, 0x94, 0x4a, 0x51, 0xe0, 0xd1, 0xe7, +- 0xd0, 0x5a, 0x44, 
0x38, 0x4e, 0xfc, 0x58, 0x36, 0xb2, 0xed, 0x9a, 0xdb, 0xdb, 0x41, 0x73, 0xc2, +- 0x6d, 0xb3, 0xa9, 0xd7, 0x14, 0xce, 0x59, 0x80, 0x3e, 0x83, 0x6e, 0x9c, 0xc4, 0x2c, 0xc6, 0x4b, +- 0xd5, 0x7e, 0xab, 0x2b, 0xca, 0xee, 0x28, 0xab, 0xec, 0x3c, 0xfa, 0x02, 0xc4, 0x3d, 0xc8, 0xd9, +- 0x2e, 0x90, 0x75, 0x81, 0xec, 0x71, 0x87, 0x18, 0x5e, 0x85, 0xf5, 0xa0, 0xa3, 0x61, 0xe3, 0xc0, +- 0x7a, 0x74, 0xa8, 0x5d, 0xce, 0x87, 0x88, 0x9a, 0x4d, 0xdd, 0x63, 0xae, 0x7d, 0x7b, 0x3b, 0x30, +- 0xdf, 0x15, 0x54, 0xb3, 0xa9, 0x67, 0x96, 0xbc, 0xb3, 0x00, 0xbd, 0x83, 0x9e, 0xc6, 0xc9, 0x5f, +- 0x04, 0xeb, 0xb1, 0x60, 0xed, 0x3b, 0xf2, 0xb9, 0x70, 0x8a, 0xe7, 0xc2, 0x39, 0x2b, 0x9e, 0x0b, +- 0xb7, 0xc5, 0x69, 0xaf, 0xfe, 0x1a, 0x18, 0x5e, 0xa7, 0xe4, 0xe2, 0x5e, 0xf4, 0x1d, 0xf4, 0x12, +- 0x72, 0xc9, 0xfc, 0x72, 0x43, 0xa8, 0xd5, 0x78, 0xd0, 0x4e, 0x75, 0x79, 0x58, 0xb5, 0x9e, 0xe8, +- 0x35, 0x80, 0xc6, 0xd1, 0x7c, 0x10, 0x87, 0x16, 0xc1, 0x85, 0x88, 0xb2, 0x34, 0x92, 0xd6, 0xc3, +- 0x84, 0xf0, 0x30, 0x4d, 0xc8, 0x04, 0x6c, 0x7d, 0x85, 0x2a, 0xbe, 0x72, 0x9b, 0xda, 0xe2, 0xb2, +- 0x9e, 0x57, 0xdb, 0x54, 0x45, 0xab, 0xbd, 0xfa, 0xcf, 0xdd, 0x86, 0xf7, 0xdc, 0xed, 0xef, 0xe1, +- 0xd3, 0x9d, 0xdd, 0xde, 0xe3, 0x2f, 0xe5, 0x99, 0x42, 0xde, 0x50, 0x5b, 0xf6, 0x5d, 0xa2, 0x42, +- 0x63, 0x31, 0x88, 0x39, 0xa1, 0xeb, 0x25, 0xa3, 0x7e, 0x84, 0x69, 0x64, 0x1d, 0x0d, 0x8d, 0x93, +- 0x23, 0x39, 0x88, 0x9e, 0xb4, 0xbf, 0xc5, 0x34, 0x42, 0xcf, 0xa0, 0x85, 0xb3, 0x4c, 0x42, 0x3a, +- 0x02, 0xd2, 0xc4, 0x59, 0xc6, 0x5d, 0xee, 0x8f, 0xd7, 0x5b, 0xdb, 0xb8, 0xd9, 0xda, 0xc6, 0xdf, +- 0x5b, 0xdb, 0xb8, 0xba, 0xb3, 0x6b, 0x37, 0x77, 0x76, 0xed, 0x8f, 0x3b, 0xbb, 0xf6, 0xd3, 0x57, +- 0x61, 0xcc, 0xa2, 0xf5, 0xdc, 0x59, 0xa4, 0xab, 0xb1, 0xfe, 0x23, 0xab, 0x8e, 0xf2, 0x6f, 0xba, +- 0xff, 0x1f, 0x9e, 0x37, 0x84, 0xfd, 0xcb, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x1a, 0xb9, +- 0x2e, 0xa2, 0x07, 0x00, 0x00, ++ // 829 bytes of a gzipped FileDescriptorProto ++ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 
0xff, 0xa4, 0x55, 0x4f, 0x6f, 0xe3, 0x44, ++ 0x14, 0x8f, 0xc9, 0x6e, 0x93, 0x3c, 0x37, 0xc9, 0xee, 0x14, 0x21, 0x6f, 0x96, 0x75, 0xb2, 0xe1, ++ 0x8f, 0x2a, 0x0e, 0x8e, 0xb4, 0x1c, 0x10, 0x97, 0x45, 0xeb, 0x04, 0x68, 0xa4, 0x0a, 0x81, 0x5b, ++ 0xf5, 0xc0, 0xc5, 0x9a, 0xc4, 0x53, 0xdb, 0x22, 0xb1, 0x2d, 0xcf, 0x24, 0x94, 0x0f, 0xc0, 0xbd, ++ 0x57, 0xae, 0x7c, 0x9a, 0x1e, 0x7b, 0x44, 0x1c, 0x0a, 0x4a, 0xbf, 0x08, 0x9a, 0x3f, 0xb6, 0x27, ++ 0x09, 0x95, 0x8a, 0xb8, 0x8d, 0xdf, 0xfb, 0xbd, 0xdf, 0xfb, 0xbd, 0x37, 0xef, 0x8d, 0xe1, 0x43, ++ 0x46, 0x92, 0x80, 0xe4, 0xcb, 0x38, 0x61, 0x23, 0xca, 0x30, 0x23, 0x23, 0xf6, 0x4b, 0x46, 0xa8, ++ 0x93, 0xe5, 0x29, 0x4b, 0xd1, 0xb3, 0xca, 0xeb, 0x08, 0x6f, 0xef, 0xfd, 0x30, 0x0d, 0x53, 0xe1, ++ 0x1c, 0xf1, 0x93, 0xc4, 0xf5, 0x5e, 0x6a, 0x2c, 0x78, 0x36, 0x8f, 0x75, 0x92, 0x9e, 0x9e, 0x42, ++ 0xd8, 0xb7, 0xbc, 0x83, 0x3d, 0xef, 0x1a, 0x2f, 0xe2, 0x00, 0xb3, 0x34, 0x57, 0x88, 0x57, 0x7b, ++ 0x88, 0x0c, 0xe7, 0x78, 0x59, 0x10, 0xd8, 0x9a, 0x7b, 0x4d, 0x72, 0x1a, 0xa7, 0xc9, 0x56, 0x82, ++ 0x7e, 0x98, 0xa6, 0xe1, 0x82, 0x8c, 0xc4, 0xd7, 0x6c, 0x75, 0x39, 0x62, 0xf1, 0x92, 0x50, 0x86, ++ 0x97, 0x99, 0x04, 0x0c, 0xff, 0x34, 0xa0, 0xfd, 0xce, 0x1d, 0x4f, 0x3d, 0x42, 0xb3, 0x34, 0xa1, ++ 0x84, 0xa2, 0x31, 0x98, 0x01, 0x59, 0xc4, 0x6b, 0x92, 0xfb, 0xec, 0x8a, 0x5a, 0xc6, 0xa0, 0x7e, ++ 0x6c, 0xbe, 0x19, 0x3a, 0x5a, 0x33, 0x78, 0x91, 0x4e, 0x11, 0x30, 0x91, 0xd8, 0xf3, 0x2b, 0x0f, ++ 0x82, 0xe2, 0x48, 0xd1, 0x5b, 0x68, 0x91, 0x24, 0xf0, 0x67, 0x8b, 0x74, 0xfe, 0x93, 0xf5, 0xde, ++ 0xc0, 0x38, 0x36, 0xdf, 0xbc, 0x7e, 0x90, 0xe2, 0xeb, 0x24, 0x70, 0x39, 0xd0, 0x6b, 0x12, 0x75, ++ 0x42, 0x13, 0x30, 0x67, 0x24, 0x8c, 0x13, 0xc5, 0x50, 0x17, 0x0c, 0x1f, 0x3d, 0xc8, 0xe0, 0x72, ++ 0xac, 0xe4, 0x80, 0x59, 0x79, 0x1e, 0xfe, 0x6a, 0x40, 0xe7, 0xa2, 0x68, 0x28, 0x9d, 0x26, 0x97, ++ 0x29, 0x1a, 0x43, 0xbb, 0x6c, 0xb1, 0x4f, 0x09, 0xb3, 0x0c, 0x41, 0x6d, 0xeb, 0xd4, 0xb2, 0x81, ++ 0x65, 0xe0, 0x19, 0x61, 0xde, 0xe1, 0x5a, 0xfb, 0x42, 0x0e, 0x1c, 0x2d, 
0x30, 0x65, 0x7e, 0x44, ++ 0xe2, 0x30, 0x62, 0xfe, 0x3c, 0xc2, 0x49, 0x48, 0x02, 0x51, 0x67, 0xdd, 0x7b, 0xce, 0x5d, 0x27, ++ 0xc2, 0x33, 0x96, 0x8e, 0xe1, 0x6f, 0x06, 0x1c, 0x8d, 0xb9, 0xce, 0x84, 0xae, 0xe8, 0xf7, 0xe2, ++ 0xfe, 0x84, 0x18, 0x0f, 0x9e, 0xcd, 0x0b, 0xb3, 0x2f, 0xef, 0x55, 0xe9, 0x79, 0xbd, 0xaf, 0x67, ++ 0x87, 0xc0, 0x7d, 0x72, 0x73, 0xd7, 0xaf, 0x79, 0xdd, 0xf9, 0xb6, 0xf9, 0x3f, 0x6b, 0xa3, 0xf0, ++ 0x7c, 0xeb, 0xfe, 0x85, 0xb0, 0x6f, 0xa0, 0xc3, 0xfb, 0xeb, 0xe7, 0x85, 0x55, 0xc9, 0xea, 0x3b, ++ 0xbb, 0x3b, 0xe1, 0x6c, 0x05, 0x7b, 0x6d, 0x1e, 0x56, 0xcd, 0xd2, 0x07, 0x70, 0x20, 0x75, 0xa8, ++ 0xfc, 0xea, 0x6b, 0x18, 0x41, 0xe3, 0x42, 0x4e, 0x2b, 0x7a, 0x07, 0xad, 0xb2, 0x04, 0x95, 0xe5, ++ 0x95, 0x9e, 0x45, 0x4d, 0x75, 0x55, 0xbe, 0x2a, 0xbc, 0x8a, 0x42, 0x3d, 0x68, 0xd2, 0xf4, 0x92, ++ 0xfd, 0x8c, 0x73, 0x22, 0xf2, 0xb4, 0xbc, 0xf2, 0x7b, 0xf8, 0x7b, 0x03, 0x9e, 0x9e, 0x71, 0xa1, ++ 0xe8, 0x4b, 0x68, 0x28, 0x2e, 0x95, 0xe6, 0xc5, 0x7e, 0x31, 0x4a, 0x94, 0x4a, 0x51, 0xe0, 0xd1, ++ 0xa7, 0xd0, 0x9c, 0x47, 0x38, 0x4e, 0xfc, 0x58, 0x36, 0xb2, 0xe5, 0x9a, 0x9b, 0xbb, 0x7e, 0x63, ++ 0xcc, 0x6d, 0xd3, 0x89, 0xd7, 0x10, 0xce, 0x69, 0x80, 0x3e, 0x81, 0x4e, 0x9c, 0xc4, 0x2c, 0xc6, ++ 0x0b, 0xd5, 0x7e, 0xab, 0x23, 0xca, 0x6e, 0x2b, 0xab, 0xec, 0x3c, 0xfa, 0x0c, 0xc4, 0x3d, 0xc8, ++ 0xd9, 0x2e, 0x90, 0x75, 0x81, 0xec, 0x72, 0x87, 0x18, 0x5e, 0x85, 0xf5, 0xa0, 0xad, 0x61, 0xe3, ++ 0xc0, 0x7a, 0xb2, 0xaf, 0x5d, 0xce, 0x87, 0x88, 0x9a, 0x4e, 0xdc, 0x23, 0xae, 0x7d, 0x73, 0xd7, ++ 0x37, 0x4f, 0x0b, 0xaa, 0xe9, 0xc4, 0x33, 0x4b, 0xde, 0x69, 0x80, 0x4e, 0xa1, 0xab, 0x71, 0xf2, ++ 0x17, 0xc1, 0x7a, 0x2a, 0x58, 0x7b, 0x8e, 0x7c, 0x2e, 0x9c, 0xe2, 0xb9, 0x70, 0xce, 0x8b, 0xe7, ++ 0xc2, 0x6d, 0x72, 0xda, 0xeb, 0xbf, 0xfa, 0x86, 0xd7, 0x2e, 0xb9, 0xb8, 0x17, 0x7d, 0x0b, 0xdd, ++ 0x84, 0x5c, 0x31, 0xbf, 0xdc, 0x10, 0x6a, 0x1d, 0x3c, 0x6a, 0xa7, 0x3a, 0x3c, 0xac, 0x5a, 0x4f, ++ 0xf4, 0x16, 0x40, 0xe3, 0x68, 0x3c, 0x8a, 0x43, 0x8b, 0xe0, 0x42, 0x44, 0x59, 0x1a, 0x49, 
0xf3, ++ 0x71, 0x42, 0x78, 0x98, 0x26, 0x64, 0x0c, 0xb6, 0xbe, 0x42, 0x15, 0x5f, 0xb9, 0x4d, 0x2d, 0x71, ++ 0x59, 0x2f, 0xab, 0x6d, 0xaa, 0xa2, 0xd5, 0x5e, 0xfd, 0xeb, 0x6e, 0xc3, 0xff, 0xdc, 0xed, 0xef, ++ 0xe0, 0xe3, 0xad, 0xdd, 0xde, 0xe1, 0x2f, 0xe5, 0x99, 0x42, 0xde, 0x40, 0x5b, 0xf6, 0x6d, 0xa2, ++ 0x42, 0x63, 0x31, 0x88, 0x39, 0xa1, 0xab, 0x05, 0xa3, 0x7e, 0x84, 0x69, 0x64, 0x1d, 0x0e, 0x8c, ++ 0xe3, 0x43, 0x39, 0x88, 0x9e, 0xb4, 0x9f, 0x60, 0x1a, 0xa1, 0x17, 0xd0, 0xc4, 0x59, 0x26, 0x21, ++ 0x6d, 0x01, 0x69, 0xe0, 0x2c, 0x13, 0xae, 0xaf, 0xa0, 0xc9, 0x87, 0x28, 0x5d, 0x31, 0x6a, 0x75, ++ 0xf7, 0x37, 0x58, 0xbc, 0xd4, 0xe7, 0x0a, 0xc0, 0x9f, 0x17, 0x55, 0x5e, 0x19, 0xe4, 0xfe, 0x70, ++ 0xb3, 0xb1, 0x8d, 0xdb, 0x8d, 0x6d, 0xfc, 0xbd, 0xb1, 0x8d, 0xeb, 0x7b, 0xbb, 0x76, 0x7b, 0x6f, ++ 0xd7, 0xfe, 0xb8, 0xb7, 0x6b, 0x3f, 0x7e, 0x11, 0xc6, 0x2c, 0x5a, 0xcd, 0x9c, 0x79, 0xba, 0x1c, ++ 0xe9, 0x7f, 0xc2, 0xea, 0x28, 0x7f, 0xc7, 0xbb, 0x3f, 0xf2, 0xd9, 0x81, 0xb0, 0x7f, 0xfe, 0x4f, ++ 0x00, 0x00, 0x00, 0xff, 0xff, 0x8e, 0x97, 0xe8, 0x4f, 0xe3, 0x07, 0x00, 0x00, + } + + func (m *ABCIResponses) Marshal() (dAtA []byte, err error) { +@@ -768,6 +778,16 @@ func (m *State) MarshalToSizedBuffer(dAtA []byte) (int, error) { + _ = i + var l int + _ = l ++ { ++ size, err := m.Timeouts.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x7a + if m.InitialHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.InitialHeight)) + i-- +@@ -843,12 +863,12 @@ func (m *State) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i-- + dAtA[i] = 0x32 + } +- n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):]) +- if err11 != nil { +- return 0, err11 ++ n12, err12 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, 
dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):]) ++ if err12 != nil { ++ return 0, err12 + } +- i -= n11 +- i = encodeVarintTypes(dAtA, i, uint64(n11)) ++ i -= n12 ++ i = encodeVarintTypes(dAtA, i, uint64(n12)) + i-- + dAtA[i] = 0x2a + { +@@ -1031,6 +1051,8 @@ func (m *State) Size() (n int) { + if m.InitialHeight != 0 { + n += 1 + sovTypes(uint64(m.InitialHeight)) + } ++ l = m.Timeouts.Size() ++ n += 1 + l + sovTypes(uint64(l)) + return n + } + +@@ -2068,6 +2090,39 @@ func (m *State) Unmarshal(dAtA []byte) error { + break + } + } ++ case 15: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Timeouts", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if err := m.Timeouts.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/patches/proto/tendermint/store/types.pb.go.patch b/patches/proto/tendermint/store/types.pb.go.patch new file mode 100644 index 00000000000..0f3055dc8ed --- /dev/null +++ b/patches/proto/tendermint/store/types.pb.go.patch @@ -0,0 +1,340 @@ +diff --git a/proto/tendermint/store/types.pb.go b/proto/tendermint/store/types.pb.go +index 384fbe52a..f89978654 100644 +--- a/proto/tendermint/store/types.pb.go ++++ b/proto/tendermint/store/types.pb.go +@@ -74,25 +74,102 @@ func (m *BlockStoreState) GetHeight() int64 { + return 0 + } + ++// TxInfo describes the location of a tx inside a committed block ++// as well as the result of executing the transaction 
and the error log output. ++type TxInfo struct { ++ Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` ++ Index uint32 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` ++ // The response code of executing the tx. 0 means ++ // successfully executed, all others are error codes. ++ Code uint32 `protobuf:"varint,3,opt,name=code,proto3" json:"code,omitempty"` ++ // The error log output generated if the transaction execution fails. ++ Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` ++} ++ ++func (m *TxInfo) Reset() { *m = TxInfo{} } ++func (m *TxInfo) String() string { return proto.CompactTextString(m) } ++func (*TxInfo) ProtoMessage() {} ++func (*TxInfo) Descriptor() ([]byte, []int) { ++ return fileDescriptor_ff9e53a0a74267f7, []int{1} ++} ++func (m *TxInfo) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *TxInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_TxInfo.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *TxInfo) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_TxInfo.Merge(m, src) ++} ++func (m *TxInfo) XXX_Size() int { ++ return m.Size() ++} ++func (m *TxInfo) XXX_DiscardUnknown() { ++ xxx_messageInfo_TxInfo.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_TxInfo proto.InternalMessageInfo ++ ++func (m *TxInfo) GetHeight() int64 { ++ if m != nil { ++ return m.Height ++ } ++ return 0 ++} ++ ++func (m *TxInfo) GetIndex() uint32 { ++ if m != nil { ++ return m.Index ++ } ++ return 0 ++} ++ ++func (m *TxInfo) GetCode() uint32 { ++ if m != nil { ++ return m.Code ++ } ++ return 0 ++} ++ ++func (m *TxInfo) GetError() string { ++ if m != nil { ++ return m.Error ++ } ++ return "" ++} ++ + func init() { + proto.RegisterType((*BlockStoreState)(nil), 
"tendermint.store.BlockStoreState") ++ proto.RegisterType((*TxInfo)(nil), "tendermint.store.TxInfo") + } + + func init() { proto.RegisterFile("tendermint/store/types.proto", fileDescriptor_ff9e53a0a74267f7) } + + var fileDescriptor_ff9e53a0a74267f7 = []byte{ +- // 165 bytes of a gzipped FileDescriptorProto ++ // 223 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x29, 0x49, 0xcd, 0x4b, + 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x2e, 0xc9, 0x2f, 0x4a, 0xd5, 0x2f, 0xa9, 0x2c, + 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x40, 0xc8, 0xea, 0x81, 0x65, 0x95, + 0x6c, 0xb9, 0xf8, 0x9d, 0x72, 0xf2, 0x93, 0xb3, 0x83, 0x41, 0xbc, 0xe0, 0x92, 0xc4, 0x92, 0x54, + 0x21, 0x21, 0x2e, 0x96, 0xa4, 0xc4, 0xe2, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xe6, 0x20, 0x30, + 0x5b, 0x48, 0x8c, 0x8b, 0x2d, 0x23, 0x35, 0x33, 0x3d, 0xa3, 0x44, 0x82, 0x09, 0x2c, 0x0a, 0xe5, +- 0x39, 0x05, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, +- 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x94, 0x79, 0x7a, 0x66, +- 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x3e, 0x92, 0x9b, 0x90, 0x98, 0x60, 0x27, 0xe9, +- 0xa3, 0xbb, 0x37, 0x89, 0x0d, 0x2c, 0x6e, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xef, 0xa6, 0x30, +- 0x63, 0xca, 0x00, 0x00, 0x00, ++ 0x29, 0x25, 0x70, 0xb1, 0x85, 0x54, 0x78, 0xe6, 0xa5, 0xe5, 0x23, 0xa9, 0x60, 0x44, 0x56, 0x21, ++ 0x24, 0xc2, 0xc5, 0x9a, 0x99, 0x97, 0x92, 0x5a, 0x01, 0xd6, 0xc8, 0x1b, 0x04, 0xe1, 0x80, 0xec, ++ 0x48, 0xce, 0x4f, 0x49, 0x95, 0x60, 0x06, 0x0b, 0x82, 0xd9, 0x20, 0x95, 0xa9, 0x45, 0x45, 0xf9, ++ 0x45, 0x12, 0x2c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x10, 0x8e, 0x53, 0xe0, 0x89, 0x47, 0x72, 0x8c, ++ 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, ++ 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x99, 0xa7, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, ++ 0xe7, 0xea, 0x23, 0xf9, 
0x1a, 0x89, 0x09, 0xf6, 0xb4, 0x3e, 0x7a, 0x88, 0x24, 0xb1, 0x81, 0xc5, ++ 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x62, 0x47, 0xdb, 0x2c, 0x01, 0x00, 0x00, + } + + func (m *BlockStoreState) Marshal() (dAtA []byte, err error) { +@@ -128,6 +205,51 @@ func (m *BlockStoreState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + return len(dAtA) - i, nil + } + ++func (m *TxInfo) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *TxInfo) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *TxInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if len(m.Error) > 0 { ++ i -= len(m.Error) ++ copy(dAtA[i:], m.Error) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.Error))) ++ i-- ++ dAtA[i] = 0x22 ++ } ++ if m.Code != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Code)) ++ i-- ++ dAtA[i] = 0x18 ++ } ++ if m.Index != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Index)) ++ i-- ++ dAtA[i] = 0x10 ++ } ++ if m.Height != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Height)) ++ i-- ++ dAtA[i] = 0x8 ++ } ++ return len(dAtA) - i, nil ++} ++ + func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset +@@ -154,6 +276,28 @@ func (m *BlockStoreState) Size() (n int) { + return n + } + ++func (m *TxInfo) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.Height != 0 { ++ n += 1 + sovTypes(uint64(m.Height)) ++ } ++ if m.Index != 0 { ++ n += 1 + sovTypes(uint64(m.Index)) ++ } ++ if m.Code != 0 { ++ n += 1 + sovTypes(uint64(m.Code)) ++ } ++ l = len(m.Error) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ + func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 + } +@@ -248,6 
+392,145 @@ func (m *BlockStoreState) Unmarshal(dAtA []byte) error { + } + return nil + } ++func (m *TxInfo) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: TxInfo: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: TxInfo: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) ++ } ++ m.Height = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.Height |= int64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ case 2: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) ++ } ++ m.Index = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.Index |= uint32(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ case 3: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) ++ } ++ m.Code = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.Code |= uint32(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ case 4: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong 
wireType = %d for field Error", wireType) ++ } ++ var stringLen uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ stringLen |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ intStringLen := int(stringLen) ++ if intStringLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + intStringLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.Error = string(dAtA[iNdEx:postIndex]) ++ iNdEx = postIndex ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} + func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 diff --git a/patches/proto/tendermint/types/types.pb.go.patch b/patches/proto/tendermint/types/types.pb.go.patch new file mode 100644 index 00000000000..acb07a6c11b --- /dev/null +++ b/patches/proto/tendermint/types/types.pb.go.patch @@ -0,0 +1,6454 @@ +diff --git a/proto/tendermint/types/types.pb.go b/proto/tendermint/types/types.pb.go +index 6bf382394..3e778caf5 100644 +--- a/proto/tendermint/types/types.pb.go ++++ b/proto/tendermint/types/types.pb.go +@@ -415,12 +415,19 @@ func (m *Header) GetProposerAddress() []byte { + return nil + } + +-// Data contains the set of transactions included in the block ++// Data contains all the information needed for a consensus full node to ++// reconstruct an extended data square. + type Data struct { +- // Txs that will be applied by state @ block.Height+1. +- // NOTE: not all txs here are valid. We're just agreeing on the order first. 
+- // This means that block.AppHash does not include these txs. ++ // Txs that will be applied to state in block.Height + 1 because deferred execution. ++ // This means that the block.AppHash of this block does not include these txs. ++ // NOTE: not all txs here are valid. We're just agreeing on the order first. + Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` ++ // SquareSize is the number of rows or columns in the original data square. ++ SquareSize uint64 `protobuf:"varint,5,opt,name=square_size,json=squareSize,proto3" json:"square_size,omitempty"` ++ // Hash is the root of a binary Merkle tree where the leaves of the tree are ++ // the row and column roots of an extended data square. Hash is often referred ++ // to as the "data root". ++ Hash []byte `protobuf:"bytes,6,opt,name=hash,proto3" json:"hash,omitempty"` + } + + func (m *Data) Reset() { *m = Data{} } +@@ -463,6 +470,92 @@ func (m *Data) GetTxs() [][]byte { + return nil + } + ++func (m *Data) GetSquareSize() uint64 { ++ if m != nil { ++ return m.SquareSize ++ } ++ return 0 ++} ++ ++func (m *Data) GetHash() []byte { ++ if m != nil { ++ return m.Hash ++ } ++ return nil ++} ++ ++// Blob (named after binary large object) is a chunk of data submitted by a user ++// to be published to the Celestia blockchain. The data of a Blob is published ++// to a namespace and is encoded into shares based on the format specified by ++// share_version. 
++type Blob struct { ++ NamespaceId []byte `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` ++ Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` ++ ShareVersion uint32 `protobuf:"varint,3,opt,name=share_version,json=shareVersion,proto3" json:"share_version,omitempty"` ++ NamespaceVersion uint32 `protobuf:"varint,4,opt,name=namespace_version,json=namespaceVersion,proto3" json:"namespace_version,omitempty"` ++} ++ ++func (m *Blob) Reset() { *m = Blob{} } ++func (m *Blob) String() string { return proto.CompactTextString(m) } ++func (*Blob) ProtoMessage() {} ++func (*Blob) Descriptor() ([]byte, []int) { ++ return fileDescriptor_d3a6e55e2345de56, []int{5} ++} ++func (m *Blob) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *Blob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_Blob.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *Blob) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_Blob.Merge(m, src) ++} ++func (m *Blob) XXX_Size() int { ++ return m.Size() ++} ++func (m *Blob) XXX_DiscardUnknown() { ++ xxx_messageInfo_Blob.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_Blob proto.InternalMessageInfo ++ ++func (m *Blob) GetNamespaceId() []byte { ++ if m != nil { ++ return m.NamespaceId ++ } ++ return nil ++} ++ ++func (m *Blob) GetData() []byte { ++ if m != nil { ++ return m.Data ++ } ++ return nil ++} ++ ++func (m *Blob) GetShareVersion() uint32 { ++ if m != nil { ++ return m.ShareVersion ++ } ++ return 0 ++} ++ ++func (m *Blob) GetNamespaceVersion() uint32 { ++ if m != nil { ++ return m.NamespaceVersion ++ } ++ return 0 ++} ++ + // Vote represents a prevote, precommit, or commit vote from validators for + // consensus. 
+ type Vote struct { +@@ -480,7 +573,7 @@ func (m *Vote) Reset() { *m = Vote{} } + func (m *Vote) String() string { return proto.CompactTextString(m) } + func (*Vote) ProtoMessage() {} + func (*Vote) Descriptor() ([]byte, []int) { +- return fileDescriptor_d3a6e55e2345de56, []int{5} ++ return fileDescriptor_d3a6e55e2345de56, []int{6} + } + func (m *Vote) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +@@ -577,7 +670,7 @@ func (m *Commit) Reset() { *m = Commit{} } + func (m *Commit) String() string { return proto.CompactTextString(m) } + func (*Commit) ProtoMessage() {} + func (*Commit) Descriptor() ([]byte, []int) { +- return fileDescriptor_d3a6e55e2345de56, []int{6} ++ return fileDescriptor_d3a6e55e2345de56, []int{7} + } + func (m *Commit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +@@ -646,7 +739,7 @@ func (m *CommitSig) Reset() { *m = CommitSig{} } + func (m *CommitSig) String() string { return proto.CompactTextString(m) } + func (*CommitSig) ProtoMessage() {} + func (*CommitSig) Descriptor() ([]byte, []int) { +- return fileDescriptor_d3a6e55e2345de56, []int{7} ++ return fileDescriptor_d3a6e55e2345de56, []int{8} + } + func (m *CommitSig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +@@ -717,7 +810,7 @@ func (m *Proposal) Reset() { *m = Proposal{} } + func (m *Proposal) String() string { return proto.CompactTextString(m) } + func (*Proposal) ProtoMessage() {} + func (*Proposal) Descriptor() ([]byte, []int) { +- return fileDescriptor_d3a6e55e2345de56, []int{8} ++ return fileDescriptor_d3a6e55e2345de56, []int{9} + } + func (m *Proposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +@@ -804,7 +897,7 @@ func (m *SignedHeader) Reset() { *m = SignedHeader{} } + func (m *SignedHeader) String() string { return proto.CompactTextString(m) } + func (*SignedHeader) ProtoMessage() {} + func (*SignedHeader) Descriptor() ([]byte, []int) { +- return fileDescriptor_d3a6e55e2345de56, []int{9} ++ return fileDescriptor_d3a6e55e2345de56, 
[]int{10} + } + func (m *SignedHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +@@ -856,7 +949,7 @@ func (m *LightBlock) Reset() { *m = LightBlock{} } + func (m *LightBlock) String() string { return proto.CompactTextString(m) } + func (*LightBlock) ProtoMessage() {} + func (*LightBlock) Descriptor() ([]byte, []int) { +- return fileDescriptor_d3a6e55e2345de56, []int{10} ++ return fileDescriptor_d3a6e55e2345de56, []int{11} + } + func (m *LightBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +@@ -910,7 +1003,7 @@ func (m *BlockMeta) Reset() { *m = BlockMeta{} } + func (m *BlockMeta) String() string { return proto.CompactTextString(m) } + func (*BlockMeta) ProtoMessage() {} + func (*BlockMeta) Descriptor() ([]byte, []int) { +- return fileDescriptor_d3a6e55e2345de56, []int{11} ++ return fileDescriptor_d3a6e55e2345de56, []int{12} + } + func (m *BlockMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +@@ -967,7 +1060,17 @@ func (m *BlockMeta) GetNumTxs() int64 { + return 0 + } + +-// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. ++// TxProof represents a Merkle proof of the presence of a transaction in the ++// Merkle tree. ++// ++// Note: TxProof is not used in celestia-core because of modifications to the ++// data root. In a normal Cosmos chain, the data root is the root of a Merkle ++// tree of transactions in the block. However, in Celestia the data root is the ++// root of the row and column roots in the extended data square. See ++// https://github.com/celestiaorg/celestia-app/blob/852a229f11f0f269021b36f7621609f432bb858b/pkg/da/data_availability_header.go ++// for more details. Therefore, TxProof isn't sufficient to prove the existence ++// of a transaction in a Celestia block and ShareProof was defined instead. See ++// ShareProof for more details. 
+ type TxProof struct { + RootHash []byte `protobuf:"bytes,1,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +@@ -978,7 +1081,7 @@ func (m *TxProof) Reset() { *m = TxProof{} } + func (m *TxProof) String() string { return proto.CompactTextString(m) } + func (*TxProof) ProtoMessage() {} + func (*TxProof) Descriptor() ([]byte, []int) { +- return fileDescriptor_d3a6e55e2345de56, []int{12} ++ return fileDescriptor_d3a6e55e2345de56, []int{13} + } + func (m *TxProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +@@ -1028,360 +1131,505 @@ func (m *TxProof) GetProof() *crypto.Proof { + return nil + } + +-func init() { +- proto.RegisterEnum("tendermint.types.BlockIDFlag", BlockIDFlag_name, BlockIDFlag_value) +- proto.RegisterEnum("tendermint.types.SignedMsgType", SignedMsgType_name, SignedMsgType_value) +- proto.RegisterType((*PartSetHeader)(nil), "tendermint.types.PartSetHeader") +- proto.RegisterType((*Part)(nil), "tendermint.types.Part") +- proto.RegisterType((*BlockID)(nil), "tendermint.types.BlockID") +- proto.RegisterType((*Header)(nil), "tendermint.types.Header") +- proto.RegisterType((*Data)(nil), "tendermint.types.Data") +- proto.RegisterType((*Vote)(nil), "tendermint.types.Vote") +- proto.RegisterType((*Commit)(nil), "tendermint.types.Commit") +- proto.RegisterType((*CommitSig)(nil), "tendermint.types.CommitSig") +- proto.RegisterType((*Proposal)(nil), "tendermint.types.Proposal") +- proto.RegisterType((*SignedHeader)(nil), "tendermint.types.SignedHeader") +- proto.RegisterType((*LightBlock)(nil), "tendermint.types.LightBlock") +- proto.RegisterType((*BlockMeta)(nil), "tendermint.types.BlockMeta") +- proto.RegisterType((*TxProof)(nil), "tendermint.types.TxProof") ++// IndexWrapper adds index metadata to a transaction. This is used to track ++// transactions that pay for blobs, and where the blobs start in the square. 
++type IndexWrapper struct { ++ Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` ++ ShareIndexes []uint32 `protobuf:"varint,2,rep,packed,name=share_indexes,json=shareIndexes,proto3" json:"share_indexes,omitempty"` ++ TypeId string `protobuf:"bytes,3,opt,name=type_id,json=typeId,proto3" json:"type_id,omitempty"` + } + +-func init() { proto.RegisterFile("tendermint/types/types.proto", fileDescriptor_d3a6e55e2345de56) } +- +-var fileDescriptor_d3a6e55e2345de56 = []byte{ +- // 1314 bytes of a gzipped FileDescriptorProto +- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4f, 0x6f, 0x1b, 0x45, +- 0x14, 0xcf, 0xda, 0x9b, 0xd8, 0x7e, 0xb6, 0x13, 0x67, 0x95, 0xb6, 0xae, 0xdb, 0x38, 0x2b, 0x23, +- 0x20, 0x2d, 0x68, 0x53, 0x52, 0xc4, 0x9f, 0x03, 0x07, 0xdb, 0x49, 0x5b, 0xab, 0x89, 0x63, 0xd6, +- 0x6e, 0x11, 0x5c, 0x56, 0x6b, 0xef, 0xd4, 0x5e, 0xba, 0xde, 0x59, 0xed, 0x8c, 0x43, 0xd2, 0x4f, +- 0x80, 0x72, 0xea, 0x89, 0x5b, 0x4e, 0x70, 0xe0, 0xce, 0x17, 0x40, 0x9c, 0x7a, 0xec, 0x0d, 0x2e, +- 0x14, 0x94, 0x4a, 0x88, 0x8f, 0x81, 0xe6, 0x8f, 0xd7, 0xeb, 0x38, 0x86, 0xaa, 0xaa, 0xb8, 0x58, +- 0x3b, 0xef, 0xfd, 0xde, 0xcc, 0x7b, 0xbf, 0xf7, 0x9b, 0x3f, 0x86, 0xeb, 0x14, 0xf9, 0x0e, 0x0a, +- 0x87, 0xae, 0x4f, 0xb7, 0xe8, 0x71, 0x80, 0x88, 0xf8, 0x35, 0x82, 0x10, 0x53, 0xac, 0x15, 0x26, +- 0x5e, 0x83, 0xdb, 0x4b, 0x6b, 0x7d, 0xdc, 0xc7, 0xdc, 0xb9, 0xc5, 0xbe, 0x04, 0xae, 0xb4, 0xd1, +- 0xc7, 0xb8, 0xef, 0xa1, 0x2d, 0x3e, 0xea, 0x8e, 0x1e, 0x6d, 0x51, 0x77, 0x88, 0x08, 0xb5, 0x87, +- 0x81, 0x04, 0xac, 0xc7, 0x96, 0xe9, 0x85, 0xc7, 0x01, 0xc5, 0x0c, 0x8b, 0x1f, 0x49, 0x77, 0x39, +- 0xe6, 0x3e, 0x44, 0x21, 0x71, 0xb1, 0x1f, 0xcf, 0xa3, 0xa4, 0xcf, 0x64, 0x79, 0x68, 0x7b, 0xae, +- 0x63, 0x53, 0x1c, 0x0a, 0x44, 0xe5, 0x53, 0xc8, 0xb7, 0xec, 0x90, 0xb6, 0x11, 0xbd, 0x87, 0x6c, +- 0x07, 0x85, 0xda, 0x1a, 0x2c, 0x52, 0x4c, 0x6d, 0xaf, 0xa8, 0xe8, 0xca, 0x66, 0xde, 0x14, 0x03, +- 0x4d, 0x03, 0x75, 0x60, 0x93, 0x41, 0x31, 0xa1, 0x2b, 0x9b, 0x39, 
0x93, 0x7f, 0x57, 0x06, 0xa0, +- 0xb2, 0x50, 0x16, 0xe1, 0xfa, 0x0e, 0x3a, 0x1a, 0x47, 0xf0, 0x01, 0xb3, 0x76, 0x8f, 0x29, 0x22, +- 0x32, 0x44, 0x0c, 0xb4, 0x0f, 0x61, 0x91, 0xe7, 0x5f, 0x4c, 0xea, 0xca, 0x66, 0x76, 0xbb, 0x68, +- 0xc4, 0x88, 0x12, 0xf5, 0x19, 0x2d, 0xe6, 0xaf, 0xa9, 0xcf, 0x5e, 0x6c, 0x2c, 0x98, 0x02, 0x5c, +- 0xf1, 0x20, 0x55, 0xf3, 0x70, 0xef, 0x71, 0x63, 0x27, 0x4a, 0x44, 0x99, 0x24, 0xa2, 0xed, 0xc3, +- 0x4a, 0x60, 0x87, 0xd4, 0x22, 0x88, 0x5a, 0x03, 0x5e, 0x05, 0x5f, 0x34, 0xbb, 0xbd, 0x61, 0x9c, +- 0xef, 0x83, 0x31, 0x55, 0xac, 0x5c, 0x25, 0x1f, 0xc4, 0x8d, 0x95, 0xbf, 0x54, 0x58, 0x92, 0x64, +- 0x7c, 0x06, 0x29, 0x49, 0x2b, 0x5f, 0x30, 0xbb, 0xbd, 0x1e, 0x9f, 0x51, 0xba, 0x8c, 0x3a, 0xf6, +- 0x09, 0xf2, 0xc9, 0x88, 0xc8, 0xf9, 0xc6, 0x31, 0xda, 0x3b, 0x90, 0xee, 0x0d, 0x6c, 0xd7, 0xb7, +- 0x5c, 0x87, 0x67, 0x94, 0xa9, 0x65, 0xcf, 0x5e, 0x6c, 0xa4, 0xea, 0xcc, 0xd6, 0xd8, 0x31, 0x53, +- 0xdc, 0xd9, 0x70, 0xb4, 0xcb, 0xb0, 0x34, 0x40, 0x6e, 0x7f, 0x40, 0x39, 0x2d, 0x49, 0x53, 0x8e, +- 0xb4, 0x4f, 0x40, 0x65, 0x82, 0x28, 0xaa, 0x7c, 0xed, 0x92, 0x21, 0xd4, 0x62, 0x8c, 0xd5, 0x62, +- 0x74, 0xc6, 0x6a, 0xa9, 0xa5, 0xd9, 0xc2, 0x4f, 0xff, 0xd8, 0x50, 0x4c, 0x1e, 0xa1, 0xd5, 0x21, +- 0xef, 0xd9, 0x84, 0x5a, 0x5d, 0x46, 0x1b, 0x5b, 0x7e, 0x91, 0x4f, 0x71, 0x75, 0x96, 0x10, 0x49, +- 0xac, 0x4c, 0x3d, 0xcb, 0xa2, 0x84, 0xc9, 0xd1, 0x36, 0xa1, 0xc0, 0x27, 0xe9, 0xe1, 0xe1, 0xd0, +- 0xa5, 0x16, 0xe7, 0x7d, 0x89, 0xf3, 0xbe, 0xcc, 0xec, 0x75, 0x6e, 0xbe, 0xc7, 0x3a, 0x70, 0x0d, +- 0x32, 0x8e, 0x4d, 0x6d, 0x01, 0x49, 0x71, 0x48, 0x9a, 0x19, 0xb8, 0xf3, 0x5d, 0x58, 0x89, 0x54, +- 0x47, 0x04, 0x24, 0x2d, 0x66, 0x99, 0x98, 0x39, 0xf0, 0x16, 0xac, 0xf9, 0xe8, 0x88, 0x5a, 0xe7, +- 0xd1, 0x19, 0x8e, 0xd6, 0x98, 0xef, 0xe1, 0x74, 0xc4, 0xdb, 0xb0, 0xdc, 0x1b, 0x93, 0x2f, 0xb0, +- 0xc0, 0xb1, 0xf9, 0xc8, 0xca, 0x61, 0x57, 0x21, 0x6d, 0x07, 0x81, 0x00, 0x64, 0x39, 0x20, 0x65, +- 0x07, 0x01, 0x77, 0xdd, 0x84, 0x55, 0x5e, 0x63, 0x88, 0xc8, 0xc8, 0xa3, 0x72, 0x92, 
0x1c, 0xc7, +- 0xac, 0x30, 0x87, 0x29, 0xec, 0x1c, 0xfb, 0x16, 0xe4, 0xd1, 0xa1, 0xeb, 0x20, 0xbf, 0x87, 0x04, +- 0x2e, 0xcf, 0x71, 0xb9, 0xb1, 0x91, 0x83, 0x6e, 0x40, 0x21, 0x08, 0x71, 0x80, 0x09, 0x0a, 0x2d, +- 0xdb, 0x71, 0x42, 0x44, 0x48, 0x71, 0x59, 0xcc, 0x37, 0xb6, 0x57, 0x85, 0xb9, 0x52, 0x04, 0x75, +- 0xc7, 0xa6, 0xb6, 0x56, 0x80, 0x24, 0x3d, 0x22, 0x45, 0x45, 0x4f, 0x6e, 0xe6, 0x4c, 0xf6, 0x59, +- 0xf9, 0x3b, 0x01, 0xea, 0x43, 0x4c, 0x91, 0x76, 0x1b, 0x54, 0xd6, 0x26, 0xae, 0xbe, 0xe5, 0x8b, +- 0xf4, 0xdc, 0x76, 0xfb, 0x3e, 0x72, 0xf6, 0x49, 0xbf, 0x73, 0x1c, 0x20, 0x93, 0x83, 0x63, 0x72, +- 0x4a, 0x4c, 0xc9, 0x69, 0x0d, 0x16, 0x43, 0x3c, 0xf2, 0x1d, 0xae, 0xb2, 0x45, 0x53, 0x0c, 0xb4, +- 0x5d, 0x48, 0x47, 0x2a, 0x51, 0xff, 0x4b, 0x25, 0x2b, 0x4c, 0x25, 0x4c, 0xc3, 0xd2, 0x60, 0xa6, +- 0xba, 0x52, 0x2c, 0x35, 0xc8, 0x44, 0x87, 0x97, 0x54, 0xdb, 0xab, 0x09, 0x76, 0x12, 0xa6, 0xbd, +- 0x07, 0xab, 0x51, 0xef, 0x23, 0xf2, 0x84, 0xe2, 0x0a, 0x91, 0x43, 0xb2, 0x37, 0x25, 0x2b, 0x4b, +- 0x1c, 0x40, 0x29, 0x5e, 0xd7, 0x44, 0x56, 0x0d, 0x7e, 0x12, 0x5d, 0x87, 0x0c, 0x71, 0xfb, 0xbe, +- 0x4d, 0x47, 0x21, 0x92, 0xca, 0x9b, 0x18, 0x2a, 0x3f, 0x2b, 0xb0, 0x24, 0x94, 0x1c, 0xe3, 0x4d, +- 0xb9, 0x98, 0xb7, 0xc4, 0x3c, 0xde, 0x92, 0xaf, 0xcf, 0x5b, 0x15, 0x20, 0x4a, 0x86, 0x14, 0x55, +- 0x3d, 0xb9, 0x99, 0xdd, 0xbe, 0x36, 0x3b, 0x91, 0x48, 0xb1, 0xed, 0xf6, 0xe5, 0x46, 0x8d, 0x05, +- 0x55, 0x7e, 0x57, 0x20, 0x13, 0xf9, 0xb5, 0x2a, 0xe4, 0xc7, 0x79, 0x59, 0x8f, 0x3c, 0xbb, 0x2f, +- 0xb5, 0xb3, 0x3e, 0x37, 0xb9, 0x3b, 0x9e, 0xdd, 0x37, 0xb3, 0x32, 0x1f, 0x36, 0xb8, 0xb8, 0x0f, +- 0x89, 0x39, 0x7d, 0x98, 0x6a, 0x7c, 0xf2, 0xf5, 0x1a, 0x3f, 0xd5, 0x22, 0xf5, 0x7c, 0x8b, 0x7e, +- 0x4a, 0x40, 0xba, 0xc5, 0xf7, 0x8e, 0xed, 0xfd, 0x1f, 0x3b, 0xe2, 0x1a, 0x64, 0x02, 0xec, 0x59, +- 0xc2, 0xa3, 0x72, 0x4f, 0x3a, 0xc0, 0x9e, 0x39, 0xd3, 0xf6, 0xc5, 0x37, 0xb4, 0x5d, 0x96, 0xde, +- 0x00, 0x6b, 0xa9, 0xf3, 0xac, 0x85, 0x90, 0x13, 0x54, 0xc8, 0xbb, 0xec, 0x16, 0xe3, 0x80, 0x5f, +- 
0x8e, 0xca, 0xec, 0xdd, 0x2b, 0xd2, 0x16, 0x48, 0x53, 0xe2, 0x58, 0x84, 0x38, 0xfa, 0xe5, 0x75, +- 0x5a, 0x9c, 0x27, 0x4b, 0x53, 0xe2, 0x2a, 0xdf, 0x29, 0x00, 0x7b, 0x8c, 0x59, 0x5e, 0x2f, 0xbb, +- 0x85, 0x08, 0x4f, 0xc1, 0x9a, 0x5a, 0xb9, 0x3c, 0xaf, 0x69, 0x72, 0xfd, 0x1c, 0x89, 0xe7, 0x5d, +- 0x87, 0xfc, 0x44, 0x8c, 0x04, 0x8d, 0x93, 0xb9, 0x60, 0x92, 0xe8, 0x72, 0x68, 0x23, 0x6a, 0xe6, +- 0x0e, 0x63, 0xa3, 0xca, 0x2f, 0x0a, 0x64, 0x78, 0x4e, 0xfb, 0x88, 0xda, 0x53, 0x3d, 0x54, 0x5e, +- 0xbf, 0x87, 0xeb, 0x00, 0x62, 0x1a, 0xe2, 0x3e, 0x41, 0x52, 0x59, 0x19, 0x6e, 0x69, 0xbb, 0x4f, +- 0x90, 0xf6, 0x51, 0x44, 0x78, 0xf2, 0xdf, 0x09, 0x97, 0x5b, 0x7a, 0x4c, 0xfb, 0x15, 0x48, 0xf9, +- 0xa3, 0xa1, 0xc5, 0xae, 0x04, 0x55, 0xa8, 0xd5, 0x1f, 0x0d, 0x3b, 0x47, 0xa4, 0xf2, 0x35, 0xa4, +- 0x3a, 0x47, 0xfc, 0x79, 0xc4, 0x24, 0x1a, 0x62, 0x2c, 0xef, 0x64, 0xf1, 0x16, 0x4a, 0x33, 0x03, +- 0xbf, 0x82, 0x34, 0x50, 0xd9, 0xe5, 0x3b, 0x7e, 0xac, 0xb1, 0x6f, 0xcd, 0x78, 0xc5, 0x87, 0x97, +- 0x7c, 0x72, 0xdd, 0xfc, 0x55, 0x81, 0x6c, 0xec, 0x7c, 0xd0, 0x3e, 0x80, 0x4b, 0xb5, 0xbd, 0x83, +- 0xfa, 0x7d, 0xab, 0xb1, 0x63, 0xdd, 0xd9, 0xab, 0xde, 0xb5, 0x1e, 0x34, 0xef, 0x37, 0x0f, 0xbe, +- 0x68, 0x16, 0x16, 0x4a, 0x97, 0x4f, 0x4e, 0x75, 0x2d, 0x86, 0x7d, 0xe0, 0x3f, 0xf6, 0xf1, 0x37, +- 0xbe, 0xb6, 0x05, 0x6b, 0xd3, 0x21, 0xd5, 0x5a, 0x7b, 0xb7, 0xd9, 0x29, 0x28, 0xa5, 0x4b, 0x27, +- 0xa7, 0xfa, 0x6a, 0x2c, 0xa2, 0xda, 0x25, 0xc8, 0xa7, 0xb3, 0x01, 0xf5, 0x83, 0xfd, 0xfd, 0x46, +- 0xa7, 0x90, 0x98, 0x09, 0x90, 0x07, 0xf6, 0x0d, 0x58, 0x9d, 0x0e, 0x68, 0x36, 0xf6, 0x0a, 0xc9, +- 0x92, 0x76, 0x72, 0xaa, 0x2f, 0xc7, 0xd0, 0x4d, 0xd7, 0x2b, 0xa5, 0xbf, 0xfd, 0xbe, 0xbc, 0xf0, +- 0xe3, 0x0f, 0x65, 0x85, 0x55, 0x96, 0x9f, 0x3a, 0x23, 0xb4, 0xf7, 0xe1, 0x4a, 0xbb, 0x71, 0xb7, +- 0xb9, 0xbb, 0x63, 0xed, 0xb7, 0xef, 0x5a, 0x9d, 0x2f, 0x5b, 0xbb, 0xb1, 0xea, 0x56, 0x4e, 0x4e, +- 0xf5, 0xac, 0x2c, 0x69, 0x1e, 0xba, 0x65, 0xee, 0x3e, 0x3c, 0xe8, 0xec, 0x16, 0x14, 0x81, 0x6e, +- 0x85, 0xe8, 0x10, 
0x53, 0xc4, 0xd1, 0xb7, 0xe0, 0xea, 0x05, 0xe8, 0xa8, 0xb0, 0xd5, 0x93, 0x53, +- 0x3d, 0xdf, 0x0a, 0x91, 0xd8, 0x3f, 0x3c, 0xc2, 0x80, 0xe2, 0x6c, 0xc4, 0x41, 0xeb, 0xa0, 0x5d, +- 0xdd, 0x2b, 0xe8, 0xa5, 0xc2, 0xc9, 0xa9, 0x9e, 0x1b, 0x1f, 0x86, 0x0c, 0x3f, 0xa9, 0xac, 0xf6, +- 0xf9, 0xb3, 0xb3, 0xb2, 0xf2, 0xfc, 0xac, 0xac, 0xfc, 0x79, 0x56, 0x56, 0x9e, 0xbe, 0x2c, 0x2f, +- 0x3c, 0x7f, 0x59, 0x5e, 0xf8, 0xed, 0x65, 0x79, 0xe1, 0xab, 0x8f, 0xfb, 0x2e, 0x1d, 0x8c, 0xba, +- 0x46, 0x0f, 0x0f, 0xb7, 0xe2, 0x7f, 0x09, 0x26, 0x9f, 0xe2, 0xaf, 0xc9, 0xf9, 0xbf, 0x0b, 0xdd, +- 0x25, 0x6e, 0xbf, 0xfd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4c, 0x78, 0x43, 0xdf, 0xef, 0x0c, +- 0x00, 0x00, ++func (m *IndexWrapper) Reset() { *m = IndexWrapper{} } ++func (m *IndexWrapper) String() string { return proto.CompactTextString(m) } ++func (*IndexWrapper) ProtoMessage() {} ++func (*IndexWrapper) Descriptor() ([]byte, []int) { ++ return fileDescriptor_d3a6e55e2345de56, []int{14} + } +- +-func (m *PartSetHeader) Marshal() (dAtA []byte, err error) { +- size := m.Size() +- dAtA = make([]byte, size) +- n, err := m.MarshalToSizedBuffer(dAtA[:size]) +- if err != nil { +- return nil, err ++func (m *IndexWrapper) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *IndexWrapper) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_IndexWrapper.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil + } +- return dAtA[:n], nil + } +- +-func (m *PartSetHeader) MarshalTo(dAtA []byte) (int, error) { +- size := m.Size() +- return m.MarshalToSizedBuffer(dAtA[:size]) ++func (m *IndexWrapper) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_IndexWrapper.Merge(m, src) ++} ++func (m *IndexWrapper) XXX_Size() int { ++ return m.Size() ++} ++func (m *IndexWrapper) XXX_DiscardUnknown() { ++ 
xxx_messageInfo_IndexWrapper.DiscardUnknown(m) + } + +-func (m *PartSetHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { +- i := len(dAtA) +- _ = i +- var l int +- _ = l +- if len(m.Hash) > 0 { +- i -= len(m.Hash) +- copy(dAtA[i:], m.Hash) +- i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) +- i-- +- dAtA[i] = 0x12 ++var xxx_messageInfo_IndexWrapper proto.InternalMessageInfo ++ ++func (m *IndexWrapper) GetTx() []byte { ++ if m != nil { ++ return m.Tx + } +- if m.Total != 0 { +- i = encodeVarintTypes(dAtA, i, uint64(m.Total)) +- i-- +- dAtA[i] = 0x8 ++ return nil ++} ++ ++func (m *IndexWrapper) GetShareIndexes() []uint32 { ++ if m != nil { ++ return m.ShareIndexes + } +- return len(dAtA) - i, nil ++ return nil + } + +-func (m *Part) Marshal() (dAtA []byte, err error) { +- size := m.Size() +- dAtA = make([]byte, size) +- n, err := m.MarshalToSizedBuffer(dAtA[:size]) +- if err != nil { +- return nil, err ++func (m *IndexWrapper) GetTypeId() string { ++ if m != nil { ++ return m.TypeId + } +- return dAtA[:n], nil ++ return "" + } + +-func (m *Part) MarshalTo(dAtA []byte) (int, error) { +- size := m.Size() +- return m.MarshalToSizedBuffer(dAtA[:size]) ++// BlobTx wraps an encoded sdk.Tx with a second field to contain blobs of data. ++// The raw bytes of the blobs are not signed over, instead we verify each blob ++// using the relevant MsgPayForBlobs that is signed over in the encoded sdk.Tx. 
++type BlobTx struct { ++ Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` ++ Blobs []*Blob `protobuf:"bytes,2,rep,name=blobs,proto3" json:"blobs,omitempty"` ++ TypeId string `protobuf:"bytes,3,opt,name=type_id,json=typeId,proto3" json:"type_id,omitempty"` + } + +-func (m *Part) MarshalToSizedBuffer(dAtA []byte) (int, error) { +- i := len(dAtA) +- _ = i +- var l int +- _ = l +- { +- size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) ++func (m *BlobTx) Reset() { *m = BlobTx{} } ++func (m *BlobTx) String() string { return proto.CompactTextString(m) } ++func (*BlobTx) ProtoMessage() {} ++func (*BlobTx) Descriptor() ([]byte, []int) { ++ return fileDescriptor_d3a6e55e2345de56, []int{15} ++} ++func (m *BlobTx) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *BlobTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_BlobTx.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) + if err != nil { +- return 0, err ++ return nil, err + } +- i -= size +- i = encodeVarintTypes(dAtA, i, uint64(size)) ++ return b[:n], nil + } +- i-- +- dAtA[i] = 0x1a +- if len(m.Bytes) > 0 { +- i -= len(m.Bytes) +- copy(dAtA[i:], m.Bytes) +- i = encodeVarintTypes(dAtA, i, uint64(len(m.Bytes))) +- i-- +- dAtA[i] = 0x12 ++} ++func (m *BlobTx) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_BlobTx.Merge(m, src) ++} ++func (m *BlobTx) XXX_Size() int { ++ return m.Size() ++} ++func (m *BlobTx) XXX_DiscardUnknown() { ++ xxx_messageInfo_BlobTx.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_BlobTx proto.InternalMessageInfo ++ ++func (m *BlobTx) GetTx() []byte { ++ if m != nil { ++ return m.Tx + } +- if m.Index != 0 { +- i = encodeVarintTypes(dAtA, i, uint64(m.Index)) +- i-- +- dAtA[i] = 0x8 ++ return nil ++} ++ ++func (m *BlobTx) GetBlobs() []*Blob { ++ if m != nil { ++ return m.Blobs + } +- return len(dAtA) - i, nil ++ return nil + } + +-func (m *BlockID) 
Marshal() (dAtA []byte, err error) { +- size := m.Size() +- dAtA = make([]byte, size) +- n, err := m.MarshalToSizedBuffer(dAtA[:size]) +- if err != nil { +- return nil, err ++func (m *BlobTx) GetTypeId() string { ++ if m != nil { ++ return m.TypeId + } +- return dAtA[:n], nil ++ return "" + } + +-func (m *BlockID) MarshalTo(dAtA []byte) (int, error) { +- size := m.Size() +- return m.MarshalToSizedBuffer(dAtA[:size]) ++// ShareProof is an NMT proof that a set of shares exist in a set of rows and a ++// Merkle proof that those rows exist in a Merkle tree with a given data root. ++type ShareProof struct { ++ Data [][]byte `protobuf:"bytes,1,rep,name=data,proto3" json:"data,omitempty"` ++ ShareProofs []*NMTProof `protobuf:"bytes,2,rep,name=share_proofs,json=shareProofs,proto3" json:"share_proofs,omitempty"` ++ NamespaceId []byte `protobuf:"bytes,3,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` ++ RowProof *RowProof `protobuf:"bytes,4,opt,name=row_proof,json=rowProof,proto3" json:"row_proof,omitempty"` ++ NamespaceVersion uint32 `protobuf:"varint,5,opt,name=namespace_version,json=namespaceVersion,proto3" json:"namespace_version,omitempty"` + } + +-func (m *BlockID) MarshalToSizedBuffer(dAtA []byte) (int, error) { +- i := len(dAtA) +- _ = i +- var l int +- _ = l +- { +- size, err := m.PartSetHeader.MarshalToSizedBuffer(dAtA[:i]) ++func (m *ShareProof) Reset() { *m = ShareProof{} } ++func (m *ShareProof) String() string { return proto.CompactTextString(m) } ++func (*ShareProof) ProtoMessage() {} ++func (*ShareProof) Descriptor() ([]byte, []int) { ++ return fileDescriptor_d3a6e55e2345de56, []int{16} ++} ++func (m *ShareProof) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *ShareProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_ShareProof.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) + if err != nil { +- 
return 0, err ++ return nil, err + } +- i -= size +- i = encodeVarintTypes(dAtA, i, uint64(size)) +- } +- i-- +- dAtA[i] = 0x12 +- if len(m.Hash) > 0 { +- i -= len(m.Hash) +- copy(dAtA[i:], m.Hash) +- i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) +- i-- +- dAtA[i] = 0xa ++ return b[:n], nil + } +- return len(dAtA) - i, nil ++} ++func (m *ShareProof) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ShareProof.Merge(m, src) ++} ++func (m *ShareProof) XXX_Size() int { ++ return m.Size() ++} ++func (m *ShareProof) XXX_DiscardUnknown() { ++ xxx_messageInfo_ShareProof.DiscardUnknown(m) + } + +-func (m *Header) Marshal() (dAtA []byte, err error) { +- size := m.Size() +- dAtA = make([]byte, size) +- n, err := m.MarshalToSizedBuffer(dAtA[:size]) +- if err != nil { +- return nil, err ++var xxx_messageInfo_ShareProof proto.InternalMessageInfo ++ ++func (m *ShareProof) GetData() [][]byte { ++ if m != nil { ++ return m.Data + } +- return dAtA[:n], nil ++ return nil + } + +-func (m *Header) MarshalTo(dAtA []byte) (int, error) { +- size := m.Size() +- return m.MarshalToSizedBuffer(dAtA[:size]) ++func (m *ShareProof) GetShareProofs() []*NMTProof { ++ if m != nil { ++ return m.ShareProofs ++ } ++ return nil + } + +-func (m *Header) MarshalToSizedBuffer(dAtA []byte) (int, error) { +- i := len(dAtA) +- _ = i +- var l int +- _ = l +- if len(m.ProposerAddress) > 0 { +- i -= len(m.ProposerAddress) +- copy(dAtA[i:], m.ProposerAddress) +- i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerAddress))) +- i-- +- dAtA[i] = 0x72 ++func (m *ShareProof) GetNamespaceId() []byte { ++ if m != nil { ++ return m.NamespaceId + } +- if len(m.EvidenceHash) > 0 { +- i -= len(m.EvidenceHash) +- copy(dAtA[i:], m.EvidenceHash) +- i = encodeVarintTypes(dAtA, i, uint64(len(m.EvidenceHash))) +- i-- +- dAtA[i] = 0x6a ++ return nil ++} ++ ++func (m *ShareProof) GetRowProof() *RowProof { ++ if m != nil { ++ return m.RowProof + } +- if len(m.LastResultsHash) > 0 { +- i -= len(m.LastResultsHash) +- 
copy(dAtA[i:], m.LastResultsHash) +- i = encodeVarintTypes(dAtA, i, uint64(len(m.LastResultsHash))) +- i-- +- dAtA[i] = 0x62 ++ return nil ++} ++ ++func (m *ShareProof) GetNamespaceVersion() uint32 { ++ if m != nil { ++ return m.NamespaceVersion + } +- if len(m.AppHash) > 0 { +- i -= len(m.AppHash) +- copy(dAtA[i:], m.AppHash) +- i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) +- i-- +- dAtA[i] = 0x5a ++ return 0 ++} ++ ++// RowProof is a Merkle proof that a set of rows exist in a Merkle tree with a ++// given data root. ++type RowProof struct { ++ RowRoots [][]byte `protobuf:"bytes,1,rep,name=row_roots,json=rowRoots,proto3" json:"row_roots,omitempty"` ++ Proofs []*crypto.Proof `protobuf:"bytes,2,rep,name=proofs,proto3" json:"proofs,omitempty"` ++ Root []byte `protobuf:"bytes,3,opt,name=root,proto3" json:"root,omitempty"` ++ StartRow uint32 `protobuf:"varint,4,opt,name=start_row,json=startRow,proto3" json:"start_row,omitempty"` ++ EndRow uint32 `protobuf:"varint,5,opt,name=end_row,json=endRow,proto3" json:"end_row,omitempty"` ++} ++ ++func (m *RowProof) Reset() { *m = RowProof{} } ++func (m *RowProof) String() string { return proto.CompactTextString(m) } ++func (*RowProof) ProtoMessage() {} ++func (*RowProof) Descriptor() ([]byte, []int) { ++ return fileDescriptor_d3a6e55e2345de56, []int{17} ++} ++func (m *RowProof) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *RowProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_RowProof.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil + } +- if len(m.ConsensusHash) > 0 { +- i -= len(m.ConsensusHash) +- copy(dAtA[i:], m.ConsensusHash) +- i = encodeVarintTypes(dAtA, i, uint64(len(m.ConsensusHash))) +- i-- +- dAtA[i] = 0x52 ++} ++func (m *RowProof) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_RowProof.Merge(m, src) ++} 
++func (m *RowProof) XXX_Size() int { ++ return m.Size() ++} ++func (m *RowProof) XXX_DiscardUnknown() { ++ xxx_messageInfo_RowProof.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_RowProof proto.InternalMessageInfo ++ ++func (m *RowProof) GetRowRoots() [][]byte { ++ if m != nil { ++ return m.RowRoots + } +- if len(m.NextValidatorsHash) > 0 { +- i -= len(m.NextValidatorsHash) +- copy(dAtA[i:], m.NextValidatorsHash) +- i = encodeVarintTypes(dAtA, i, uint64(len(m.NextValidatorsHash))) +- i-- +- dAtA[i] = 0x4a ++ return nil ++} ++ ++func (m *RowProof) GetProofs() []*crypto.Proof { ++ if m != nil { ++ return m.Proofs + } +- if len(m.ValidatorsHash) > 0 { +- i -= len(m.ValidatorsHash) +- copy(dAtA[i:], m.ValidatorsHash) +- i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorsHash))) +- i-- +- dAtA[i] = 0x42 ++ return nil ++} ++ ++func (m *RowProof) GetRoot() []byte { ++ if m != nil { ++ return m.Root + } +- if len(m.DataHash) > 0 { +- i -= len(m.DataHash) +- copy(dAtA[i:], m.DataHash) +- i = encodeVarintTypes(dAtA, i, uint64(len(m.DataHash))) +- i-- +- dAtA[i] = 0x3a ++ return nil ++} ++ ++func (m *RowProof) GetStartRow() uint32 { ++ if m != nil { ++ return m.StartRow + } +- if len(m.LastCommitHash) > 0 { +- i -= len(m.LastCommitHash) +- copy(dAtA[i:], m.LastCommitHash) +- i = encodeVarintTypes(dAtA, i, uint64(len(m.LastCommitHash))) +- i-- +- dAtA[i] = 0x32 ++ return 0 ++} ++ ++func (m *RowProof) GetEndRow() uint32 { ++ if m != nil { ++ return m.EndRow + } +- { +- size, err := m.LastBlockId.MarshalToSizedBuffer(dAtA[:i]) ++ return 0 ++} ++ ++// NMTProof is a proof of a namespace.ID in an NMT. ++// In case this proof proves the absence of a namespace.ID ++// in a tree it also contains the leaf hashes of the range ++// where that namespace would be. ++type NMTProof struct { ++ // Start index of this proof. ++ Start int32 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` ++ // End index of this proof. 
++ End int32 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` ++ // Nodes that together with the corresponding leaf values can be used to ++ // recompute the root and verify this proof. Nodes should consist of the max ++ // and min namespaces along with the actual hash, resulting in each being 48 ++ // bytes each ++ Nodes [][]byte `protobuf:"bytes,3,rep,name=nodes,proto3" json:"nodes,omitempty"` ++ // leafHash are nil if the namespace is present in the NMT. In case the ++ // namespace to be proved is in the min/max range of the tree but absent, this ++ // will contain the leaf hash necessary to verify the proof of absence. Leaf ++ // hashes should consist of the namespace along with the actual hash, ++ // resulting 40 bytes total. ++ LeafHash []byte `protobuf:"bytes,4,opt,name=leaf_hash,json=leafHash,proto3" json:"leaf_hash,omitempty"` ++} ++ ++func (m *NMTProof) Reset() { *m = NMTProof{} } ++func (m *NMTProof) String() string { return proto.CompactTextString(m) } ++func (*NMTProof) ProtoMessage() {} ++func (*NMTProof) Descriptor() ([]byte, []int) { ++ return fileDescriptor_d3a6e55e2345de56, []int{18} ++} ++func (m *NMTProof) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *NMTProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_NMTProof.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) + if err != nil { +- return 0, err ++ return nil, err + } +- i -= size +- i = encodeVarintTypes(dAtA, i, uint64(size)) ++ return b[:n], nil + } +- i-- +- dAtA[i] = 0x2a +- n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) +- if err4 != nil { +- return 0, err4 ++} ++func (m *NMTProof) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_NMTProof.Merge(m, src) ++} ++func (m *NMTProof) XXX_Size() int { ++ return m.Size() ++} ++func (m *NMTProof) 
XXX_DiscardUnknown() { ++ xxx_messageInfo_NMTProof.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_NMTProof proto.InternalMessageInfo ++ ++func (m *NMTProof) GetStart() int32 { ++ if m != nil { ++ return m.Start + } +- i -= n4 +- i = encodeVarintTypes(dAtA, i, uint64(n4)) +- i-- +- dAtA[i] = 0x22 +- if m.Height != 0 { +- i = encodeVarintTypes(dAtA, i, uint64(m.Height)) +- i-- +- dAtA[i] = 0x18 ++ return 0 ++} ++ ++func (m *NMTProof) GetEnd() int32 { ++ if m != nil { ++ return m.End + } +- if len(m.ChainID) > 0 { +- i -= len(m.ChainID) +- copy(dAtA[i:], m.ChainID) +- i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainID))) +- i-- +- dAtA[i] = 0x12 ++ return 0 ++} ++ ++func (m *NMTProof) GetNodes() [][]byte { ++ if m != nil { ++ return m.Nodes + } +- { +- size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) +- if err != nil { +- return 0, err +- } +- i -= size +- i = encodeVarintTypes(dAtA, i, uint64(size)) ++ return nil ++} ++ ++func (m *NMTProof) GetLeafHash() []byte { ++ if m != nil { ++ return m.LeafHash + } +- i-- +- dAtA[i] = 0xa +- return len(dAtA) - i, nil ++ return nil + } + +-func (m *Data) Marshal() (dAtA []byte, err error) { ++func init() { ++ proto.RegisterEnum("tendermint.types.BlockIDFlag", BlockIDFlag_name, BlockIDFlag_value) ++ proto.RegisterEnum("tendermint.types.SignedMsgType", SignedMsgType_name, SignedMsgType_value) ++ proto.RegisterType((*PartSetHeader)(nil), "tendermint.types.PartSetHeader") ++ proto.RegisterType((*Part)(nil), "tendermint.types.Part") ++ proto.RegisterType((*BlockID)(nil), "tendermint.types.BlockID") ++ proto.RegisterType((*Header)(nil), "tendermint.types.Header") ++ proto.RegisterType((*Data)(nil), "tendermint.types.Data") ++ proto.RegisterType((*Blob)(nil), "tendermint.types.Blob") ++ proto.RegisterType((*Vote)(nil), "tendermint.types.Vote") ++ proto.RegisterType((*Commit)(nil), "tendermint.types.Commit") ++ proto.RegisterType((*CommitSig)(nil), "tendermint.types.CommitSig") ++ proto.RegisterType((*Proposal)(nil), 
"tendermint.types.Proposal") ++ proto.RegisterType((*SignedHeader)(nil), "tendermint.types.SignedHeader") ++ proto.RegisterType((*LightBlock)(nil), "tendermint.types.LightBlock") ++ proto.RegisterType((*BlockMeta)(nil), "tendermint.types.BlockMeta") ++ proto.RegisterType((*TxProof)(nil), "tendermint.types.TxProof") ++ proto.RegisterType((*IndexWrapper)(nil), "tendermint.types.IndexWrapper") ++ proto.RegisterType((*BlobTx)(nil), "tendermint.types.BlobTx") ++ proto.RegisterType((*ShareProof)(nil), "tendermint.types.ShareProof") ++ proto.RegisterType((*RowProof)(nil), "tendermint.types.RowProof") ++ proto.RegisterType((*NMTProof)(nil), "tendermint.types.NMTProof") ++} ++ ++func init() { proto.RegisterFile("tendermint/types/types.proto", fileDescriptor_d3a6e55e2345de56) } ++ ++var fileDescriptor_d3a6e55e2345de56 = []byte{ ++ // 1661 bytes of a gzipped FileDescriptorProto ++ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4f, 0x6f, 0xe3, 0xc6, ++ 0x15, 0x37, 0x25, 0x4a, 0xa2, 0x9e, 0x24, 0x9b, 0x26, 0x9c, 0x8d, 0x56, 0x9b, 0x95, 0x55, 0x15, ++ 0x6d, 0x9d, 0x34, 0x90, 0xb7, 0x4e, 0xd1, 0xb4, 0x87, 0x1c, 0x2c, 0xdb, 0xd9, 0x68, 0xe3, 0x7f, ++ 0xa5, 0x94, 0x0d, 0x5a, 0x14, 0x20, 0x28, 0x73, 0x56, 0x62, 0x43, 0x71, 0x58, 0xce, 0xc8, 0xf6, ++ 0xe6, 0x13, 0x14, 0xbe, 0x34, 0xa7, 0xde, 0x7c, 0x4a, 0x0f, 0xbd, 0xf7, 0x0b, 0x14, 0x3d, 0xe5, ++ 0x98, 0x5b, 0x7b, 0x69, 0x5a, 0xec, 0x02, 0x45, 0x3e, 0x46, 0x31, 0x6f, 0x86, 0x14, 0x65, 0x49, ++ 0x6d, 0xb0, 0x08, 0x72, 0x31, 0x38, 0xef, 0xfd, 0xde, 0x9b, 0xf7, 0x77, 0xde, 0x93, 0xe1, 0x0d, ++ 0x4e, 0x42, 0x8f, 0xc4, 0x13, 0x3f, 0xe4, 0xbb, 0xfc, 0x79, 0x44, 0x98, 0xfc, 0xdb, 0x89, 0x62, ++ 0xca, 0xa9, 0x65, 0xce, 0xb8, 0x1d, 0xa4, 0x37, 0xb6, 0x46, 0x74, 0x44, 0x91, 0xb9, 0x2b, 0xbe, ++ 0x24, 0xae, 0xb1, 0x3d, 0xa2, 0x74, 0x14, 0x90, 0x5d, 0x3c, 0x0d, 0xa7, 0xcf, 0x76, 0xb9, 0x3f, ++ 0x21, 0x8c, 0xbb, 0x93, 0x48, 0x01, 0x1e, 0x66, 0xae, 0xb9, 0x88, 0x9f, 0x47, 0x9c, 0x0a, 0x2c, ++ 0x7d, 0xa6, 0xd8, 0xcd, 
0x0c, 0xfb, 0x92, 0xc4, 0xcc, 0xa7, 0x61, 0xd6, 0x8e, 0x46, 0x6b, 0xc1, ++ 0xca, 0x4b, 0x37, 0xf0, 0x3d, 0x97, 0xd3, 0x58, 0x22, 0xda, 0xbf, 0x80, 0xda, 0xb9, 0x1b, 0xf3, ++ 0x3e, 0xe1, 0x1f, 0x10, 0xd7, 0x23, 0xb1, 0xb5, 0x05, 0x05, 0x4e, 0xb9, 0x1b, 0xd4, 0xb5, 0x96, ++ 0xb6, 0x53, 0xb3, 0xe5, 0xc1, 0xb2, 0x40, 0x1f, 0xbb, 0x6c, 0x5c, 0xcf, 0xb5, 0xb4, 0x9d, 0xaa, ++ 0x8d, 0xdf, 0xed, 0x31, 0xe8, 0x42, 0x54, 0x48, 0xf8, 0xa1, 0x47, 0xae, 0x13, 0x09, 0x3c, 0x08, ++ 0xea, 0xf0, 0x39, 0x27, 0x4c, 0x89, 0xc8, 0x83, 0xf5, 0x53, 0x28, 0xa0, 0xfd, 0xf5, 0x7c, 0x4b, ++ 0xdb, 0xa9, 0xec, 0xd5, 0x3b, 0x99, 0x40, 0x49, 0xff, 0x3a, 0xe7, 0x82, 0xdf, 0xd5, 0xbf, 0xf8, ++ 0x6a, 0x7b, 0xcd, 0x96, 0xe0, 0x76, 0x00, 0xa5, 0x6e, 0x40, 0x2f, 0x3e, 0xe9, 0x1d, 0xa6, 0x86, ++ 0x68, 0x33, 0x43, 0xac, 0x13, 0xd8, 0x88, 0xdc, 0x98, 0x3b, 0x8c, 0x70, 0x67, 0x8c, 0x5e, 0xe0, ++ 0xa5, 0x95, 0xbd, 0xed, 0xce, 0xdd, 0x3c, 0x74, 0xe6, 0x9c, 0x55, 0xb7, 0xd4, 0xa2, 0x2c, 0xb1, ++ 0xfd, 0x1f, 0x1d, 0x8a, 0x2a, 0x18, 0xef, 0x41, 0x49, 0x85, 0x15, 0x2f, 0xac, 0xec, 0x3d, 0xcc, ++ 0x6a, 0x54, 0xac, 0xce, 0x01, 0x0d, 0x19, 0x09, 0xd9, 0x94, 0x29, 0x7d, 0x89, 0x8c, 0xf5, 0x43, ++ 0x30, 0x2e, 0xc6, 0xae, 0x1f, 0x3a, 0xbe, 0x87, 0x16, 0x95, 0xbb, 0x95, 0x17, 0x5f, 0x6d, 0x97, ++ 0x0e, 0x04, 0xad, 0x77, 0x68, 0x97, 0x90, 0xd9, 0xf3, 0xac, 0x7b, 0x50, 0x1c, 0x13, 0x7f, 0x34, ++ 0xe6, 0x18, 0x96, 0xbc, 0xad, 0x4e, 0xd6, 0xcf, 0x41, 0x17, 0x05, 0x51, 0xd7, 0xf1, 0xee, 0x46, ++ 0x47, 0x56, 0x4b, 0x27, 0xa9, 0x96, 0xce, 0x20, 0xa9, 0x96, 0xae, 0x21, 0x2e, 0xfe, 0xec, 0x5f, ++ 0xdb, 0x9a, 0x8d, 0x12, 0xd6, 0x01, 0xd4, 0x02, 0x97, 0x71, 0x67, 0x28, 0xc2, 0x26, 0xae, 0x2f, ++ 0xa0, 0x8a, 0xfb, 0x8b, 0x01, 0x51, 0x81, 0x55, 0xa6, 0x57, 0x84, 0x94, 0x24, 0x79, 0xd6, 0x0e, ++ 0x98, 0xa8, 0xe4, 0x82, 0x4e, 0x26, 0x3e, 0x77, 0x30, 0xee, 0x45, 0x8c, 0xfb, 0xba, 0xa0, 0x1f, ++ 0x20, 0xf9, 0x03, 0x91, 0x81, 0x07, 0x50, 0xf6, 0x5c, 0xee, 0x4a, 0x48, 0x09, 0x21, 0x86, 0x20, ++ 0x20, 0xf3, 0x47, 0xb0, 0x91, 0x56, 0x1d, 
0x93, 0x10, 0x43, 0x6a, 0x99, 0x91, 0x11, 0xf8, 0x08, ++ 0xb6, 0x42, 0x72, 0xcd, 0x9d, 0xbb, 0xe8, 0x32, 0xa2, 0x2d, 0xc1, 0x7b, 0x3a, 0x2f, 0xf1, 0x03, ++ 0x58, 0xbf, 0x48, 0x82, 0x2f, 0xb1, 0x80, 0xd8, 0x5a, 0x4a, 0x45, 0xd8, 0x7d, 0x30, 0xdc, 0x28, ++ 0x92, 0x80, 0x0a, 0x02, 0x4a, 0x6e, 0x14, 0x21, 0xeb, 0x2d, 0xd8, 0x44, 0x1f, 0x63, 0xc2, 0xa6, ++ 0x01, 0x57, 0x4a, 0xaa, 0x88, 0xd9, 0x10, 0x0c, 0x5b, 0xd2, 0x11, 0xfb, 0x7d, 0xa8, 0x91, 0x4b, ++ 0xdf, 0x23, 0xe1, 0x05, 0x91, 0xb8, 0x1a, 0xe2, 0xaa, 0x09, 0x11, 0x41, 0x6f, 0x82, 0x19, 0xc5, ++ 0x34, 0xa2, 0x8c, 0xc4, 0x8e, 0xeb, 0x79, 0x31, 0x61, 0xac, 0xbe, 0x2e, 0xf5, 0x25, 0xf4, 0x7d, ++ 0x49, 0x6e, 0x3b, 0xa0, 0x1f, 0xba, 0xdc, 0xb5, 0x4c, 0xc8, 0xf3, 0x6b, 0x56, 0xd7, 0x5a, 0xf9, ++ 0x9d, 0xaa, 0x2d, 0x3e, 0xad, 0x6d, 0xa8, 0xb0, 0xdf, 0x4d, 0xdd, 0x98, 0x38, 0xcc, 0xff, 0x94, ++ 0x60, 0xf2, 0x74, 0x1b, 0x24, 0xa9, 0xef, 0x7f, 0x4a, 0xd2, 0x36, 0x28, 0xce, 0xda, 0xe0, 0x89, ++ 0x6e, 0xe4, 0xcc, 0xfc, 0x13, 0xdd, 0xc8, 0x9b, 0xfa, 0x13, 0xdd, 0xd0, 0xcd, 0x42, 0xfb, 0x0f, ++ 0x1a, 0xe8, 0xdd, 0x80, 0x0e, 0xad, 0xef, 0x41, 0x35, 0x74, 0x27, 0x84, 0x45, 0xee, 0x05, 0x11, ++ 0xd5, 0x20, 0xbb, 0xa7, 0x92, 0xd2, 0x7a, 0x9e, 0xd0, 0x28, 0x32, 0x96, 0x74, 0xb8, 0xf8, 0x16, ++ 0x0e, 0xb3, 0xb1, 0xb0, 0x22, 0x69, 0x82, 0x3c, 0x76, 0x78, 0x15, 0x89, 0x4f, 0x55, 0x91, 0xff, ++ 0x18, 0x36, 0x67, 0xba, 0x13, 0xa0, 0x8e, 0x40, 0x33, 0x65, 0x28, 0x70, 0xfb, 0xeb, 0x1c, 0xe8, ++ 0x4f, 0x29, 0x27, 0xd6, 0x3b, 0xa0, 0x8b, 0xfa, 0x43, 0x4b, 0xd6, 0x97, 0x35, 0x6a, 0xdf, 0x1f, ++ 0x85, 0xc4, 0x3b, 0x61, 0xa3, 0xc1, 0xf3, 0x88, 0xd8, 0x08, 0xce, 0xf4, 0x49, 0x6e, 0xae, 0x4f, ++ 0xb6, 0xa0, 0x10, 0xd3, 0x69, 0xe8, 0xa1, 0x7d, 0x05, 0x5b, 0x1e, 0xac, 0x23, 0x30, 0xd2, 0xf2, ++ 0xd7, 0xff, 0x5f, 0xf9, 0x6f, 0x88, 0xf2, 0x17, 0xcd, 0xa9, 0x08, 0x76, 0x69, 0xa8, 0xba, 0xa0, ++ 0x0b, 0xe5, 0xf4, 0x55, 0x56, 0x6d, 0xf4, 0xcd, 0x3a, 0x71, 0x26, 0x26, 0x62, 0x94, 0x16, 0x75, ++ 0x5a, 0x15, 0x32, 0x77, 0x66, 0xca, 0x50, 0x65, 0x31, 0xd7, 
0x2f, 0x8e, 0x7c, 0x59, 0x4b, 0xe8, ++ 0xd7, 0xac, 0x5f, 0x7a, 0xf8, 0xc4, 0xbe, 0x01, 0x65, 0xe6, 0x8f, 0x42, 0x97, 0x4f, 0x63, 0xa2, ++ 0x5a, 0x6a, 0x46, 0x68, 0xff, 0x55, 0x83, 0xa2, 0x6c, 0xd1, 0x4c, 0xdc, 0xb4, 0xe5, 0x71, 0xcb, ++ 0xad, 0x8a, 0x5b, 0xfe, 0xd5, 0xe3, 0xb6, 0x0f, 0x90, 0x1a, 0xc3, 0xea, 0x7a, 0x2b, 0xbf, 0x53, ++ 0xd9, 0x7b, 0xb0, 0xa8, 0x48, 0x9a, 0xd8, 0xf7, 0x47, 0xea, 0x05, 0xca, 0x08, 0xb5, 0xff, 0xa9, ++ 0x41, 0x39, 0xe5, 0x5b, 0xfb, 0x50, 0x4b, 0xec, 0x72, 0x9e, 0x05, 0xee, 0x48, 0xd5, 0xce, 0xc3, ++ 0x95, 0xc6, 0xbd, 0x1f, 0xb8, 0x23, 0xbb, 0xa2, 0xec, 0x11, 0x87, 0xe5, 0x79, 0xc8, 0xad, 0xc8, ++ 0xc3, 0x5c, 0xe2, 0xf3, 0xaf, 0x96, 0xf8, 0xb9, 0x14, 0xe9, 0x77, 0x53, 0xf4, 0x97, 0x1c, 0x18, ++ 0xe7, 0xf8, 0x28, 0xb8, 0xc1, 0x77, 0xd1, 0x11, 0x0f, 0xa0, 0x1c, 0xd1, 0xc0, 0x91, 0x1c, 0x1d, ++ 0x39, 0x46, 0x44, 0x03, 0x7b, 0x21, 0xed, 0x85, 0x6f, 0xa9, 0x5d, 0x8a, 0xdf, 0x42, 0xd4, 0x4a, ++ 0x77, 0xa3, 0x16, 0x43, 0x55, 0x86, 0x42, 0x0d, 0xe9, 0x47, 0x22, 0x06, 0x38, 0xf5, 0xb5, 0xc5, ++ 0xa5, 0x42, 0x9a, 0x2d, 0x91, 0xb6, 0xc2, 0x09, 0x09, 0x39, 0xd3, 0xd4, 0x9e, 0x50, 0x5f, 0x55, ++ 0x96, 0xb6, 0xc2, 0xb5, 0xff, 0xa8, 0x01, 0x1c, 0x8b, 0xc8, 0xa2, 0xbf, 0x62, 0xbc, 0x32, 0x34, ++ 0xc1, 0x99, 0xbb, 0xb9, 0xb9, 0x2a, 0x69, 0xea, 0xfe, 0x2a, 0xcb, 0xda, 0x7d, 0x00, 0xb5, 0x59, ++ 0x31, 0x32, 0x92, 0x18, 0xb3, 0x44, 0x49, 0x3a, 0xf5, 0xfa, 0x84, 0xdb, 0xd5, 0xcb, 0xcc, 0xa9, ++ 0xfd, 0x37, 0x0d, 0xca, 0x68, 0xd3, 0x09, 0xe1, 0xee, 0x5c, 0x0e, 0xb5, 0x57, 0xcf, 0xe1, 0x43, ++ 0x00, 0xa9, 0x06, 0xa7, 0x8f, 0xac, 0xac, 0x32, 0x52, 0x70, 0xf8, 0xfc, 0x2c, 0x0d, 0x78, 0xfe, ++ 0x7f, 0x07, 0x5c, 0xb5, 0x74, 0x12, 0xf6, 0xd7, 0xa1, 0x14, 0x4e, 0x27, 0x8e, 0x98, 0x75, 0xba, ++ 0xac, 0xd6, 0x70, 0x3a, 0x19, 0x5c, 0xb3, 0xf6, 0x6f, 0xa1, 0x34, 0xb8, 0xc6, 0xbd, 0x4f, 0x94, ++ 0x68, 0x4c, 0xa9, 0x5a, 0x36, 0xe4, 0x98, 0x32, 0x04, 0x01, 0x67, 0xeb, 0xb2, 0x19, 0xd5, 0xf9, ++ 0x86, 0x1b, 0x65, 0xb2, 0x4b, 0xfe, 0x06, 0xaa, 0xf8, 0x7a, 0x7e, 0x1c, 0xbb, 
0x51, 0x44, 0x62, ++ 0x6b, 0x1d, 0x72, 0xfc, 0x5a, 0xdd, 0x94, 0xe3, 0xd7, 0xb3, 0x99, 0x87, 0x2f, 0x2f, 0xee, 0xaf, ++ 0xf9, 0x74, 0xe6, 0xf5, 0x24, 0x4d, 0x78, 0x22, 0xfc, 0x4c, 0x5e, 0xc8, 0xb2, 0x5d, 0x14, 0xc7, ++ 0x9e, 0xd7, 0x76, 0xa0, 0x28, 0x06, 0xee, 0xe0, 0x7a, 0x41, 0xef, 0xdb, 0x50, 0x18, 0x06, 0x74, ++ 0x28, 0xf5, 0x55, 0xf6, 0xee, 0x2d, 0xcd, 0xcb, 0xd0, 0x96, 0xa0, 0xd5, 0x17, 0x7c, 0xad, 0x01, ++ 0xf4, 0x85, 0x29, 0x32, 0x5c, 0x49, 0x44, 0xe4, 0xee, 0x20, 0x23, 0xf2, 0x1e, 0x48, 0x63, 0x1d, ++ 0x74, 0x38, 0xb9, 0xb0, 0xb1, 0x78, 0xe1, 0xe9, 0xc9, 0x40, 0x86, 0xa6, 0xc2, 0x52, 0x8d, 0x6c, ++ 0x61, 0x57, 0xc8, 0x2f, 0xee, 0x0a, 0xef, 0x8a, 0x24, 0x5d, 0x49, 0xfd, 0xe9, 0x72, 0xba, 0xa0, ++ 0xde, 0xa6, 0x57, 0x52, 0xbd, 0x11, 0xab, 0xaf, 0xe5, 0xbb, 0x42, 0x61, 0xc5, 0xae, 0xf0, 0xb9, ++ 0x06, 0x46, 0xa2, 0x43, 0xd6, 0xc5, 0x95, 0x23, 0x4a, 0x21, 0xd9, 0x94, 0x84, 0x5a, 0x5b, 0x9c, ++ 0x45, 0x3f, 0xcf, 0xf9, 0xba, 0xba, 0x08, 0x14, 0x4e, 0xc4, 0x4d, 0xa8, 0x52, 0xce, 0xe1, 0xb7, ++ 0xb8, 0x82, 0x71, 0xf1, 0x3b, 0x22, 0xa6, 0x57, 0x6a, 0x81, 0x31, 0x90, 0x60, 0xd3, 0x2b, 0x91, ++ 0x10, 0x12, 0x7a, 0xc8, 0x92, 0xf6, 0x16, 0x49, 0xe8, 0xd9, 0xf4, 0xaa, 0x4d, 0xc0, 0x48, 0xe2, ++ 0x28, 0x5e, 0x5d, 0x14, 0xc0, 0xb4, 0x17, 0x6c, 0x79, 0x10, 0xeb, 0x1d, 0x49, 0x67, 0xac, 0xf8, ++ 0x14, 0xb8, 0x90, 0x7a, 0x84, 0xd5, 0xf3, 0xe8, 0x88, 0x3c, 0x88, 0xfb, 0x03, 0xe2, 0x3e, 0x93, ++ 0xa5, 0x2f, 0x67, 0x85, 0x21, 0x08, 0xa2, 0xf4, 0xdf, 0xfa, 0xbb, 0x06, 0x95, 0xcc, 0x58, 0xb3, ++ 0x7e, 0x02, 0xaf, 0x75, 0x8f, 0xcf, 0x0e, 0x3e, 0x74, 0x7a, 0x87, 0xce, 0xfb, 0xc7, 0xfb, 0x8f, ++ 0x9d, 0x8f, 0x4e, 0x3f, 0x3c, 0x3d, 0xfb, 0xf8, 0xd4, 0x5c, 0x6b, 0xdc, 0xbb, 0xb9, 0x6d, 0x59, ++ 0x19, 0xec, 0x47, 0xe1, 0x27, 0x21, 0xbd, 0x0a, 0xad, 0x5d, 0xd8, 0x9a, 0x17, 0xd9, 0xef, 0xf6, ++ 0x8f, 0x4e, 0x07, 0xa6, 0xd6, 0x78, 0xed, 0xe6, 0xb6, 0xb5, 0x99, 0x91, 0xd8, 0x1f, 0x32, 0x12, ++ 0xf2, 0x45, 0x81, 0x83, 0xb3, 0x93, 0x93, 0xde, 0xc0, 0xcc, 0x2d, 0x08, 0xa8, 0x3d, 0xe3, 0x4d, 
++ 0xd8, 0x9c, 0x17, 0x38, 0xed, 0x1d, 0x9b, 0xf9, 0x86, 0x75, 0x73, 0xdb, 0x5a, 0xcf, 0xa0, 0x4f, ++ 0xfd, 0xa0, 0x61, 0xfc, 0xfe, 0xf3, 0xe6, 0xda, 0x9f, 0xff, 0xd4, 0xd4, 0x84, 0x67, 0xb5, 0xb9, ++ 0xd1, 0x66, 0xbd, 0x0d, 0xaf, 0xf7, 0x7b, 0x8f, 0x4f, 0x8f, 0x0e, 0x9d, 0x93, 0xfe, 0x63, 0x67, ++ 0xf0, 0xab, 0xf3, 0xa3, 0x8c, 0x77, 0x1b, 0x37, 0xb7, 0xad, 0x8a, 0x72, 0x69, 0x15, 0xfa, 0xdc, ++ 0x3e, 0x7a, 0x7a, 0x36, 0x38, 0x32, 0x35, 0x89, 0x3e, 0x8f, 0xc9, 0x25, 0xe5, 0x04, 0xd1, 0x8f, ++ 0xe0, 0xfe, 0x12, 0x74, 0xea, 0xd8, 0xe6, 0xcd, 0x6d, 0xab, 0x76, 0x1e, 0x13, 0xf9, 0xec, 0xa3, ++ 0x44, 0x07, 0xea, 0x8b, 0x12, 0x67, 0xe7, 0x67, 0xfd, 0xfd, 0x63, 0xb3, 0xd5, 0x30, 0x6f, 0x6e, ++ 0x5b, 0xd5, 0x64, 0x86, 0x0b, 0xfc, 0xcc, 0xb3, 0xee, 0x2f, 0xbf, 0x78, 0xd1, 0xd4, 0xbe, 0x7c, ++ 0xd1, 0xd4, 0xfe, 0xfd, 0xa2, 0xa9, 0x7d, 0xf6, 0xb2, 0xb9, 0xf6, 0xe5, 0xcb, 0xe6, 0xda, 0x3f, ++ 0x5e, 0x36, 0xd7, 0x7e, 0xfd, 0xee, 0xc8, 0xe7, 0xe3, 0xe9, 0xb0, 0x73, 0x41, 0x27, 0xbb, 0xd9, ++ 0x9f, 0xe8, 0xb3, 0x4f, 0xf9, 0xaf, 0x82, 0xbb, 0x3f, 0xdf, 0x87, 0x45, 0xa4, 0xbf, 0xf3, 0xdf, ++ 0x00, 0x00, 0x00, 0xff, 0xff, 0xea, 0x3f, 0x8f, 0x46, 0x7f, 0x10, 0x00, 0x00, ++} ++ ++func (m *PartSetHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) +@@ -1391,29 +1639,32 @@ func (m *Data) Marshal() (dAtA []byte, err error) { + return dAtA[:n], nil + } + +-func (m *Data) MarshalTo(dAtA []byte) (int, error) { ++func (m *PartSetHeader) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) + } + +-func (m *Data) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++func (m *PartSetHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l +- if len(m.Txs) > 0 { +- for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { +- i -= len(m.Txs[iNdEx]) +- copy(dAtA[i:], m.Txs[iNdEx]) +- i = encodeVarintTypes(dAtA, i, 
uint64(len(m.Txs[iNdEx]))) +- i-- +- dAtA[i] = 0xa +- } ++ if len(m.Hash) > 0 { ++ i -= len(m.Hash) ++ copy(dAtA[i:], m.Hash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) ++ i-- ++ dAtA[i] = 0x12 ++ } ++ if m.Total != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Total)) ++ i-- ++ dAtA[i] = 0x8 + } + return len(dAtA) - i, nil + } + +-func (m *Vote) Marshal() (dAtA []byte, err error) { ++func (m *Part) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) +@@ -1423,45 +1674,18 @@ func (m *Vote) Marshal() (dAtA []byte, err error) { + return dAtA[:n], nil + } + +-func (m *Vote) MarshalTo(dAtA []byte) (int, error) { ++func (m *Part) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) + } + +-func (m *Vote) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++func (m *Part) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l +- if len(m.Signature) > 0 { +- i -= len(m.Signature) +- copy(dAtA[i:], m.Signature) +- i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) +- i-- +- dAtA[i] = 0x42 +- } +- if m.ValidatorIndex != 0 { +- i = encodeVarintTypes(dAtA, i, uint64(m.ValidatorIndex)) +- i-- +- dAtA[i] = 0x38 +- } +- if len(m.ValidatorAddress) > 0 { +- i -= len(m.ValidatorAddress) +- copy(dAtA[i:], m.ValidatorAddress) +- i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress))) +- i-- +- dAtA[i] = 0x32 +- } +- n6, err6 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) +- if err6 != nil { +- return 0, err6 +- } +- i -= n6 +- i = encodeVarintTypes(dAtA, i, uint64(n6)) +- i-- +- dAtA[i] = 0x2a + { +- size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) ++ size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } +@@ -1469,26 +1693,23 @@ func (m *Vote) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- +- dAtA[i] = 0x22 +- if m.Round != 0 { +- i = encodeVarintTypes(dAtA, i, uint64(m.Round)) +- i-- +- dAtA[i] = 0x18 +- } +- if m.Height != 0 { +- i = encodeVarintTypes(dAtA, i, uint64(m.Height)) ++ dAtA[i] = 0x1a ++ if len(m.Bytes) > 0 { ++ i -= len(m.Bytes) ++ copy(dAtA[i:], m.Bytes) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.Bytes))) + i-- +- dAtA[i] = 0x10 ++ dAtA[i] = 0x12 + } +- if m.Type != 0 { +- i = encodeVarintTypes(dAtA, i, uint64(m.Type)) ++ if m.Index != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil + } + +-func (m *Commit) Marshal() (dAtA []byte, err error) { ++func (m *BlockID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) +@@ -1498,32 +1719,18 @@ func (m *Commit) Marshal() (dAtA []byte, err error) { + return dAtA[:n], nil + } + +-func (m *Commit) MarshalTo(dAtA []byte) (int, error) { ++func (m *BlockID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) + } + +-func (m *Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++func (m *BlockID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l +- if len(m.Signatures) > 0 { +- for iNdEx := len(m.Signatures) - 1; iNdEx >= 0; iNdEx-- { +- { +- size, err := m.Signatures[iNdEx].MarshalToSizedBuffer(dAtA[:i]) +- if err != nil { +- return 0, err +- } +- i -= size +- i = encodeVarintTypes(dAtA, i, uint64(size)) +- } +- i-- +- dAtA[i] = 0x22 +- } +- } + { +- size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) ++ size, err := m.PartSetHeader.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } +@@ -1531,21 +1738,18 @@ func (m *Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- +- dAtA[i] = 0x1a +- if m.Round 
!= 0 { +- i = encodeVarintTypes(dAtA, i, uint64(m.Round)) +- i-- +- dAtA[i] = 0x10 +- } +- if m.Height != 0 { +- i = encodeVarintTypes(dAtA, i, uint64(m.Height)) ++ dAtA[i] = 0x12 ++ if len(m.Hash) > 0 { ++ i -= len(m.Hash) ++ copy(dAtA[i:], m.Hash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- +- dAtA[i] = 0x8 ++ dAtA[i] = 0xa + } + return len(dAtA) - i, nil + } + +-func (m *CommitSig) Marshal() (dAtA []byte, err error) { ++func (m *Header) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) +@@ -1555,83 +1759,81 @@ func (m *CommitSig) Marshal() (dAtA []byte, err error) { + return dAtA[:n], nil + } + +-func (m *CommitSig) MarshalTo(dAtA []byte) (int, error) { ++func (m *Header) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) + } + +-func (m *CommitSig) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++func (m *Header) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l +- if len(m.Signature) > 0 { +- i -= len(m.Signature) +- copy(dAtA[i:], m.Signature) +- i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) ++ if len(m.ProposerAddress) > 0 { ++ i -= len(m.ProposerAddress) ++ copy(dAtA[i:], m.ProposerAddress) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerAddress))) + i-- +- dAtA[i] = 0x22 ++ dAtA[i] = 0x72 + } +- n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) +- if err9 != nil { +- return 0, err9 ++ if len(m.EvidenceHash) > 0 { ++ i -= len(m.EvidenceHash) ++ copy(dAtA[i:], m.EvidenceHash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.EvidenceHash))) ++ i-- ++ dAtA[i] = 0x6a + } +- i -= n9 +- i = encodeVarintTypes(dAtA, i, uint64(n9)) +- i-- +- dAtA[i] = 0x1a +- if len(m.ValidatorAddress) > 0 { +- i -= len(m.ValidatorAddress) +- copy(dAtA[i:], m.ValidatorAddress) +- i = 
encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress))) ++ if len(m.LastResultsHash) > 0 { ++ i -= len(m.LastResultsHash) ++ copy(dAtA[i:], m.LastResultsHash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.LastResultsHash))) + i-- +- dAtA[i] = 0x12 ++ dAtA[i] = 0x62 + } +- if m.BlockIdFlag != 0 { +- i = encodeVarintTypes(dAtA, i, uint64(m.BlockIdFlag)) ++ if len(m.AppHash) > 0 { ++ i -= len(m.AppHash) ++ copy(dAtA[i:], m.AppHash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i-- +- dAtA[i] = 0x8 ++ dAtA[i] = 0x5a + } +- return len(dAtA) - i, nil +-} +- +-func (m *Proposal) Marshal() (dAtA []byte, err error) { +- size := m.Size() +- dAtA = make([]byte, size) +- n, err := m.MarshalToSizedBuffer(dAtA[:size]) +- if err != nil { +- return nil, err ++ if len(m.ConsensusHash) > 0 { ++ i -= len(m.ConsensusHash) ++ copy(dAtA[i:], m.ConsensusHash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.ConsensusHash))) ++ i-- ++ dAtA[i] = 0x52 + } +- return dAtA[:n], nil +-} +- +-func (m *Proposal) MarshalTo(dAtA []byte) (int, error) { +- size := m.Size() +- return m.MarshalToSizedBuffer(dAtA[:size]) +-} +- +-func (m *Proposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { +- i := len(dAtA) +- _ = i +- var l int +- _ = l +- if len(m.Signature) > 0 { +- i -= len(m.Signature) +- copy(dAtA[i:], m.Signature) +- i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) ++ if len(m.NextValidatorsHash) > 0 { ++ i -= len(m.NextValidatorsHash) ++ copy(dAtA[i:], m.NextValidatorsHash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.NextValidatorsHash))) ++ i-- ++ dAtA[i] = 0x4a ++ } ++ if len(m.ValidatorsHash) > 0 { ++ i -= len(m.ValidatorsHash) ++ copy(dAtA[i:], m.ValidatorsHash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorsHash))) ++ i-- ++ dAtA[i] = 0x42 ++ } ++ if len(m.DataHash) > 0 { ++ i -= len(m.DataHash) ++ copy(dAtA[i:], m.DataHash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.DataHash))) + i-- + dAtA[i] = 0x3a + } +- n10, err10 := 
github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) +- if err10 != nil { +- return 0, err10 ++ if len(m.LastCommitHash) > 0 { ++ i -= len(m.LastCommitHash) ++ copy(dAtA[i:], m.LastCommitHash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.LastCommitHash))) ++ i-- ++ dAtA[i] = 0x32 + } +- i -= n10 +- i = encodeVarintTypes(dAtA, i, uint64(n10)) +- i-- +- dAtA[i] = 0x32 + { +- size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) ++ size, err := m.LastBlockId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } +@@ -1640,30 +1842,40 @@ func (m *Proposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + } + i-- + dAtA[i] = 0x2a +- if m.PolRound != 0 { +- i = encodeVarintTypes(dAtA, i, uint64(m.PolRound)) +- i-- +- dAtA[i] = 0x20 +- } +- if m.Round != 0 { +- i = encodeVarintTypes(dAtA, i, uint64(m.Round)) +- i-- +- dAtA[i] = 0x18 ++ n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) ++ if err4 != nil { ++ return 0, err4 + } ++ i -= n4 ++ i = encodeVarintTypes(dAtA, i, uint64(n4)) ++ i-- ++ dAtA[i] = 0x22 + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- +- dAtA[i] = 0x10 ++ dAtA[i] = 0x18 + } +- if m.Type != 0 { +- i = encodeVarintTypes(dAtA, i, uint64(m.Type)) ++ if len(m.ChainID) > 0 { ++ i -= len(m.ChainID) ++ copy(dAtA[i:], m.ChainID) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainID))) + i-- +- dAtA[i] = 0x8 ++ dAtA[i] = 0x12 ++ } ++ { ++ size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) + } ++ i-- ++ dAtA[i] = 0xa + return len(dAtA) - i, nil + } + +-func (m *SignedHeader) Marshal() (dAtA []byte, err error) { ++func (m *Data) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) 
+@@ -1673,44 +1885,41 @@ func (m *SignedHeader) Marshal() (dAtA []byte, err error) { + return dAtA[:n], nil + } + +-func (m *SignedHeader) MarshalTo(dAtA []byte) (int, error) { ++func (m *Data) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) + } + +-func (m *SignedHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++func (m *Data) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l +- if m.Commit != nil { +- { +- size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) +- if err != nil { +- return 0, err +- } +- i -= size +- i = encodeVarintTypes(dAtA, i, uint64(size)) +- } ++ if len(m.Hash) > 0 { ++ i -= len(m.Hash) ++ copy(dAtA[i:], m.Hash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- +- dAtA[i] = 0x12 ++ dAtA[i] = 0x32 + } +- if m.Header != nil { +- { +- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) +- if err != nil { +- return 0, err +- } +- i -= size +- i = encodeVarintTypes(dAtA, i, uint64(size)) +- } ++ if m.SquareSize != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.SquareSize)) + i-- +- dAtA[i] = 0xa ++ dAtA[i] = 0x28 ++ } ++ if len(m.Txs) > 0 { ++ for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { ++ i -= len(m.Txs[iNdEx]) ++ copy(dAtA[i:], m.Txs[iNdEx]) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) ++ i-- ++ dAtA[i] = 0xa ++ } + } + return len(dAtA) - i, nil + } + +-func (m *LightBlock) Marshal() (dAtA []byte, err error) { ++func (m *Blob) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) +@@ -1720,44 +1929,44 @@ func (m *LightBlock) Marshal() (dAtA []byte, err error) { + return dAtA[:n], nil + } + +-func (m *LightBlock) MarshalTo(dAtA []byte) (int, error) { ++func (m *Blob) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) + } + +-func (m *LightBlock) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { ++func (m *Blob) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l +- if m.ValidatorSet != nil { +- { +- size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) +- if err != nil { +- return 0, err +- } +- i -= size +- i = encodeVarintTypes(dAtA, i, uint64(size)) +- } ++ if m.NamespaceVersion != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.NamespaceVersion)) ++ i-- ++ dAtA[i] = 0x20 ++ } ++ if m.ShareVersion != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.ShareVersion)) ++ i-- ++ dAtA[i] = 0x18 ++ } ++ if len(m.Data) > 0 { ++ i -= len(m.Data) ++ copy(dAtA[i:], m.Data) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + } +- if m.SignedHeader != nil { +- { +- size, err := m.SignedHeader.MarshalToSizedBuffer(dAtA[:i]) +- if err != nil { +- return 0, err +- } +- i -= size +- i = encodeVarintTypes(dAtA, i, uint64(size)) +- } ++ if len(m.NamespaceId) > 0 { ++ i -= len(m.NamespaceId) ++ copy(dAtA[i:], m.NamespaceId) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.NamespaceId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil + } + +-func (m *BlockMeta) Marshal() (dAtA []byte, err error) { ++func (m *Vote) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) +@@ -1767,23 +1976,45 @@ func (m *BlockMeta) Marshal() (dAtA []byte, err error) { + return dAtA[:n], nil + } + +-func (m *BlockMeta) MarshalTo(dAtA []byte) (int, error) { ++func (m *Vote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) + } + +-func (m *BlockMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++func (m *Vote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l +- if m.NumTxs != 0 { +- i = encodeVarintTypes(dAtA, i, uint64(m.NumTxs)) ++ if len(m.Signature) > 0 { ++ i -= len(m.Signature) ++ copy(dAtA[i:], m.Signature) ++ i = 
encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) + i-- +- dAtA[i] = 0x20 ++ dAtA[i] = 0x42 ++ } ++ if m.ValidatorIndex != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.ValidatorIndex)) ++ i-- ++ dAtA[i] = 0x38 ++ } ++ if len(m.ValidatorAddress) > 0 { ++ i -= len(m.ValidatorAddress) ++ copy(dAtA[i:], m.ValidatorAddress) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress))) ++ i-- ++ dAtA[i] = 0x32 ++ } ++ n6, err6 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) ++ if err6 != nil { ++ return 0, err6 + } ++ i -= n6 ++ i = encodeVarintTypes(dAtA, i, uint64(n6)) ++ i-- ++ dAtA[i] = 0x2a + { +- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) ++ size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } +@@ -1791,12 +2022,59 @@ func (m *BlockMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- +- dAtA[i] = 0x1a +- if m.BlockSize != 0 { +- i = encodeVarintTypes(dAtA, i, uint64(m.BlockSize)) ++ dAtA[i] = 0x22 ++ if m.Round != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Round)) ++ i-- ++ dAtA[i] = 0x18 ++ } ++ if m.Height != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 + } ++ if m.Type != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Type)) ++ i-- ++ dAtA[i] = 0x8 ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *Commit) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *Commit) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if len(m.Signatures) > 0 { ++ for iNdEx := len(m.Signatures) - 1; 
iNdEx >= 0; iNdEx-- { ++ { ++ size, err := m.Signatures[iNdEx].MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x22 ++ } ++ } + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { +@@ -1806,11 +2084,21 @@ func (m *BlockMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- +- dAtA[i] = 0xa ++ dAtA[i] = 0x1a ++ if m.Round != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Round)) ++ i-- ++ dAtA[i] = 0x10 ++ } ++ if m.Height != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Height)) ++ i-- ++ dAtA[i] = 0x8 ++ } + return len(dAtA) - i, nil + } + +-func (m *TxProof) Marshal() (dAtA []byte, err error) { ++func (m *CommitSig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) +@@ -1820,366 +2108,1946 @@ func (m *TxProof) Marshal() (dAtA []byte, err error) { + return dAtA[:n], nil + } + +-func (m *TxProof) MarshalTo(dAtA []byte) (int, error) { ++func (m *CommitSig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) + } + +-func (m *TxProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++func (m *CommitSig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l +- if m.Proof != nil { +- { +- size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) +- if err != nil { +- return 0, err +- } +- i -= size +- i = encodeVarintTypes(dAtA, i, uint64(size)) +- } ++ if len(m.Signature) > 0 { ++ i -= len(m.Signature) ++ copy(dAtA[i:], m.Signature) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) + i-- +- dAtA[i] = 0x1a ++ dAtA[i] = 0x22 + } +- if len(m.Data) > 0 { +- i -= len(m.Data) +- copy(dAtA[i:], m.Data) +- i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) ++ n9, err9 := 
github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) ++ if err9 != nil { ++ return 0, err9 ++ } ++ i -= n9 ++ i = encodeVarintTypes(dAtA, i, uint64(n9)) ++ i-- ++ dAtA[i] = 0x1a ++ if len(m.ValidatorAddress) > 0 { ++ i -= len(m.ValidatorAddress) ++ copy(dAtA[i:], m.ValidatorAddress) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress))) + i-- + dAtA[i] = 0x12 + } +- if len(m.RootHash) > 0 { +- i -= len(m.RootHash) +- copy(dAtA[i:], m.RootHash) +- i = encodeVarintTypes(dAtA, i, uint64(len(m.RootHash))) ++ if m.BlockIdFlag != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.BlockIdFlag)) + i-- +- dAtA[i] = 0xa ++ dAtA[i] = 0x8 + } + return len(dAtA) - i, nil + } + +-func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { +- offset -= sovTypes(v) +- base := offset +- for v >= 1<<7 { +- dAtA[offset] = uint8(v&0x7f | 0x80) +- v >>= 7 +- offset++ ++func (m *Proposal) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err + } +- dAtA[offset] = uint8(v) +- return base ++ return dAtA[:n], nil + } +-func (m *PartSetHeader) Size() (n int) { +- if m == nil { +- return 0 +- } ++ ++func (m *Proposal) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *Proposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i + var l int + _ = l +- if m.Total != 0 { +- n += 1 + sovTypes(uint64(m.Total)) ++ if len(m.Signature) > 0 { ++ i -= len(m.Signature) ++ copy(dAtA[i:], m.Signature) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) ++ i-- ++ dAtA[i] = 0x3a + } +- l = len(m.Hash) +- if l > 0 { +- n += 1 + l + sovTypes(uint64(l)) ++ n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) ++ if 
err10 != nil { ++ return 0, err10 + } +- return n +-} +- +-func (m *Part) Size() (n int) { +- if m == nil { +- return 0 ++ i -= n10 ++ i = encodeVarintTypes(dAtA, i, uint64(n10)) ++ i-- ++ dAtA[i] = 0x32 ++ { ++ size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) + } +- var l int +- _ = l +- if m.Index != 0 { +- n += 1 + sovTypes(uint64(m.Index)) ++ i-- ++ dAtA[i] = 0x2a ++ if m.PolRound != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.PolRound)) ++ i-- ++ dAtA[i] = 0x20 + } +- l = len(m.Bytes) +- if l > 0 { +- n += 1 + l + sovTypes(uint64(l)) ++ if m.Round != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Round)) ++ i-- ++ dAtA[i] = 0x18 + } +- l = m.Proof.Size() +- n += 1 + l + sovTypes(uint64(l)) +- return n ++ if m.Height != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Height)) ++ i-- ++ dAtA[i] = 0x10 ++ } ++ if m.Type != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Type)) ++ i-- ++ dAtA[i] = 0x8 ++ } ++ return len(dAtA) - i, nil + } + +-func (m *BlockID) Size() (n int) { +- if m == nil { +- return 0 +- } +- var l int +- _ = l +- l = len(m.Hash) +- if l > 0 { +- n += 1 + l + sovTypes(uint64(l)) ++func (m *SignedHeader) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err + } +- l = m.PartSetHeader.Size() +- n += 1 + l + sovTypes(uint64(l)) +- return n ++ return dAtA[:n], nil + } + +-func (m *Header) Size() (n int) { +- if m == nil { +- return 0 +- } +- var l int +- _ = l +- l = m.Version.Size() +- n += 1 + l + sovTypes(uint64(l)) +- l = len(m.ChainID) +- if l > 0 { +- n += 1 + l + sovTypes(uint64(l)) +- } +- if m.Height != 0 { +- n += 1 + sovTypes(uint64(m.Height)) +- } +- l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time) +- n += 1 + l + sovTypes(uint64(l)) +- l = m.LastBlockId.Size() +- n += 1 + l + sovTypes(uint64(l)) +- l = 
len(m.LastCommitHash) +- if l > 0 { +- n += 1 + l + sovTypes(uint64(l)) +- } +- l = len(m.DataHash) +- if l > 0 { +- n += 1 + l + sovTypes(uint64(l)) +- } +- l = len(m.ValidatorsHash) +- if l > 0 { +- n += 1 + l + sovTypes(uint64(l)) +- } +- l = len(m.NextValidatorsHash) +- if l > 0 { +- n += 1 + l + sovTypes(uint64(l)) +- } +- l = len(m.ConsensusHash) +- if l > 0 { +- n += 1 + l + sovTypes(uint64(l)) +- } +- l = len(m.AppHash) +- if l > 0 { +- n += 1 + l + sovTypes(uint64(l)) +- } +- l = len(m.LastResultsHash) +- if l > 0 { +- n += 1 + l + sovTypes(uint64(l)) +- } +- l = len(m.EvidenceHash) +- if l > 0 { +- n += 1 + l + sovTypes(uint64(l)) +- } +- l = len(m.ProposerAddress) +- if l > 0 { +- n += 1 + l + sovTypes(uint64(l)) +- } +- return n ++func (m *SignedHeader) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) + } + +-func (m *Data) Size() (n int) { +- if m == nil { +- return 0 +- } ++func (m *SignedHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i + var l int + _ = l +- if len(m.Txs) > 0 { +- for _, b := range m.Txs { +- l = len(b) +- n += 1 + l + sovTypes(uint64(l)) ++ if m.Commit != nil { ++ { ++ size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) + } ++ i-- ++ dAtA[i] = 0x12 + } +- return n ++ if m.Header != nil { ++ { ++ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil + } + +-func (m *Vote) Size() (n int) { +- if m == nil { +- return 0 +- } +- var l int +- _ = l +- if m.Type != 0 { +- n += 1 + sovTypes(uint64(m.Type)) +- } +- if m.Height != 0 { +- n += 1 + sovTypes(uint64(m.Height)) +- } +- if m.Round != 0 { +- n += 1 + sovTypes(uint64(m.Round)) +- } +- l = m.BlockID.Size() +- n += 1 + l + 
sovTypes(uint64(l)) +- l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) +- n += 1 + l + sovTypes(uint64(l)) +- l = len(m.ValidatorAddress) +- if l > 0 { +- n += 1 + l + sovTypes(uint64(l)) +- } +- if m.ValidatorIndex != 0 { +- n += 1 + sovTypes(uint64(m.ValidatorIndex)) +- } +- l = len(m.Signature) +- if l > 0 { +- n += 1 + l + sovTypes(uint64(l)) ++func (m *LightBlock) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err + } +- return n ++ return dAtA[:n], nil + } + +-func (m *Commit) Size() (n int) { +- if m == nil { +- return 0 +- } ++func (m *LightBlock) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *LightBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i + var l int + _ = l +- if m.Height != 0 { +- n += 1 + sovTypes(uint64(m.Height)) +- } +- if m.Round != 0 { +- n += 1 + sovTypes(uint64(m.Round)) ++ if m.ValidatorSet != nil { ++ { ++ size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x12 + } +- l = m.BlockID.Size() +- n += 1 + l + sovTypes(uint64(l)) +- if len(m.Signatures) > 0 { +- for _, e := range m.Signatures { +- l = e.Size() +- n += 1 + l + sovTypes(uint64(l)) ++ if m.SignedHeader != nil { ++ { ++ size, err := m.SignedHeader.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) + } ++ i-- ++ dAtA[i] = 0xa + } +- return n ++ return len(dAtA) - i, nil + } + +-func (m *CommitSig) Size() (n int) { +- if m == nil { +- return 0 ++func (m *BlockMeta) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return 
nil, err + } ++ return dAtA[:n], nil ++} ++ ++func (m *BlockMeta) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *BlockMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i + var l int + _ = l +- if m.BlockIdFlag != 0 { +- n += 1 + sovTypes(uint64(m.BlockIdFlag)) ++ if m.NumTxs != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.NumTxs)) ++ i-- ++ dAtA[i] = 0x20 + } +- l = len(m.ValidatorAddress) +- if l > 0 { +- n += 1 + l + sovTypes(uint64(l)) ++ { ++ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) + } +- l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) +- n += 1 + l + sovTypes(uint64(l)) +- l = len(m.Signature) +- if l > 0 { +- n += 1 + l + sovTypes(uint64(l)) ++ i-- ++ dAtA[i] = 0x1a ++ if m.BlockSize != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.BlockSize)) ++ i-- ++ dAtA[i] = 0x10 + } +- return n ++ { ++ size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0xa ++ return len(dAtA) - i, nil + } + +-func (m *Proposal) Size() (n int) { +- if m == nil { +- return 0 ++func (m *TxProof) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err + } ++ return dAtA[:n], nil ++} ++ ++func (m *TxProof) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *TxProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i + var l int + _ = l +- if m.Type != 0 { +- n += 1 + sovTypes(uint64(m.Type)) ++ if m.Proof != nil { ++ { ++ size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= 
size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x1a + } +- if m.Height != 0 { +- n += 1 + sovTypes(uint64(m.Height)) ++ if len(m.Data) > 0 { ++ i -= len(m.Data) ++ copy(dAtA[i:], m.Data) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) ++ i-- ++ dAtA[i] = 0x12 + } +- if m.Round != 0 { +- n += 1 + sovTypes(uint64(m.Round)) ++ if len(m.RootHash) > 0 { ++ i -= len(m.RootHash) ++ copy(dAtA[i:], m.RootHash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.RootHash))) ++ i-- ++ dAtA[i] = 0xa + } +- if m.PolRound != 0 { +- n += 1 + sovTypes(uint64(m.PolRound)) ++ return len(dAtA) - i, nil ++} ++ ++func (m *IndexWrapper) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err + } +- l = m.BlockID.Size() +- n += 1 + l + sovTypes(uint64(l)) +- l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) +- n += 1 + l + sovTypes(uint64(l)) +- l = len(m.Signature) +- if l > 0 { +- n += 1 + l + sovTypes(uint64(l)) ++ return dAtA[:n], nil ++} ++ ++func (m *IndexWrapper) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *IndexWrapper) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if len(m.TypeId) > 0 { ++ i -= len(m.TypeId) ++ copy(dAtA[i:], m.TypeId) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.TypeId))) ++ i-- ++ dAtA[i] = 0x1a + } +- return n ++ if len(m.ShareIndexes) > 0 { ++ dAtA20 := make([]byte, len(m.ShareIndexes)*10) ++ var j19 int ++ for _, num := range m.ShareIndexes { ++ for num >= 1<<7 { ++ dAtA20[j19] = uint8(uint64(num)&0x7f | 0x80) ++ num >>= 7 ++ j19++ ++ } ++ dAtA20[j19] = uint8(num) ++ j19++ ++ } ++ i -= j19 ++ copy(dAtA[i:], dAtA20[:j19]) ++ i = encodeVarintTypes(dAtA, i, uint64(j19)) ++ i-- ++ dAtA[i] = 0x12 ++ } ++ if len(m.Tx) > 0 { ++ i -= len(m.Tx) ++ copy(dAtA[i:], m.Tx) ++ i = 
encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil + } + +-func (m *SignedHeader) Size() (n int) { +- if m == nil { +- return 0 ++func (m *BlobTx) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err + } ++ return dAtA[:n], nil ++} ++ ++func (m *BlobTx) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *BlobTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i + var l int + _ = l +- if m.Header != nil { +- l = m.Header.Size() +- n += 1 + l + sovTypes(uint64(l)) ++ if len(m.TypeId) > 0 { ++ i -= len(m.TypeId) ++ copy(dAtA[i:], m.TypeId) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.TypeId))) ++ i-- ++ dAtA[i] = 0x1a + } +- if m.Commit != nil { +- l = m.Commit.Size() +- n += 1 + l + sovTypes(uint64(l)) ++ if len(m.Blobs) > 0 { ++ for iNdEx := len(m.Blobs) - 1; iNdEx >= 0; iNdEx-- { ++ { ++ size, err := m.Blobs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x12 ++ } + } +- return n ++ if len(m.Tx) > 0 { ++ i -= len(m.Tx) ++ copy(dAtA[i:], m.Tx) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil + } + +-func (m *LightBlock) Size() (n int) { +- if m == nil { +- return 0 ++func (m *ShareProof) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err + } ++ return dAtA[:n], nil ++} ++ ++func (m *ShareProof) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *ShareProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i + 
var l int + _ = l +- if m.SignedHeader != nil { +- l = m.SignedHeader.Size() +- n += 1 + l + sovTypes(uint64(l)) ++ if m.NamespaceVersion != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.NamespaceVersion)) ++ i-- ++ dAtA[i] = 0x28 + } +- if m.ValidatorSet != nil { +- l = m.ValidatorSet.Size() +- n += 1 + l + sovTypes(uint64(l)) ++ if m.RowProof != nil { ++ { ++ size, err := m.RowProof.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x22 + } +- return n ++ if len(m.NamespaceId) > 0 { ++ i -= len(m.NamespaceId) ++ copy(dAtA[i:], m.NamespaceId) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.NamespaceId))) ++ i-- ++ dAtA[i] = 0x1a ++ } ++ if len(m.ShareProofs) > 0 { ++ for iNdEx := len(m.ShareProofs) - 1; iNdEx >= 0; iNdEx-- { ++ { ++ size, err := m.ShareProofs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x12 ++ } ++ } ++ if len(m.Data) > 0 { ++ for iNdEx := len(m.Data) - 1; iNdEx >= 0; iNdEx-- { ++ i -= len(m.Data[iNdEx]) ++ copy(dAtA[i:], m.Data[iNdEx]) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.Data[iNdEx]))) ++ i-- ++ dAtA[i] = 0xa ++ } ++ } ++ return len(dAtA) - i, nil + } + +-func (m *BlockMeta) Size() (n int) { +- if m == nil { +- return 0 ++func (m *RowProof) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *RowProof) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *RowProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if m.EndRow != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.EndRow)) ++ i-- ++ dAtA[i] = 0x28 ++ } ++ if m.StartRow 
!= 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.StartRow)) ++ i-- ++ dAtA[i] = 0x20 ++ } ++ if len(m.Root) > 0 { ++ i -= len(m.Root) ++ copy(dAtA[i:], m.Root) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.Root))) ++ i-- ++ dAtA[i] = 0x1a ++ } ++ if len(m.Proofs) > 0 { ++ for iNdEx := len(m.Proofs) - 1; iNdEx >= 0; iNdEx-- { ++ { ++ size, err := m.Proofs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x12 ++ } ++ } ++ if len(m.RowRoots) > 0 { ++ for iNdEx := len(m.RowRoots) - 1; iNdEx >= 0; iNdEx-- { ++ i -= len(m.RowRoots[iNdEx]) ++ copy(dAtA[i:], m.RowRoots[iNdEx]) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.RowRoots[iNdEx]))) ++ i-- ++ dAtA[i] = 0xa ++ } ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *NMTProof) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err + } ++ return dAtA[:n], nil ++} ++ ++func (m *NMTProof) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *NMTProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i + var l int + _ = l +- l = m.BlockID.Size() +- n += 1 + l + sovTypes(uint64(l)) +- if m.BlockSize != 0 { +- n += 1 + sovTypes(uint64(m.BlockSize)) ++ if len(m.LeafHash) > 0 { ++ i -= len(m.LeafHash) ++ copy(dAtA[i:], m.LeafHash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.LeafHash))) ++ i-- ++ dAtA[i] = 0x22 + } +- l = m.Header.Size() +- n += 1 + l + sovTypes(uint64(l)) +- if m.NumTxs != 0 { +- n += 1 + sovTypes(uint64(m.NumTxs)) ++ if len(m.Nodes) > 0 { ++ for iNdEx := len(m.Nodes) - 1; iNdEx >= 0; iNdEx-- { ++ i -= len(m.Nodes[iNdEx]) ++ copy(dAtA[i:], m.Nodes[iNdEx]) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.Nodes[iNdEx]))) ++ i-- ++ dAtA[i] = 0x1a ++ } ++ } ++ if m.End != 0 { ++ i = 
encodeVarintTypes(dAtA, i, uint64(m.End)) ++ i-- ++ dAtA[i] = 0x10 ++ } ++ if m.Start != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Start)) ++ i-- ++ dAtA[i] = 0x8 ++ } ++ return len(dAtA) - i, nil ++} ++ ++func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { ++ offset -= sovTypes(v) ++ base := offset ++ for v >= 1<<7 { ++ dAtA[offset] = uint8(v&0x7f | 0x80) ++ v >>= 7 ++ offset++ ++ } ++ dAtA[offset] = uint8(v) ++ return base ++} ++func (m *PartSetHeader) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.Total != 0 { ++ n += 1 + sovTypes(uint64(m.Total)) ++ } ++ l = len(m.Hash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ ++func (m *Part) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.Index != 0 { ++ n += 1 + sovTypes(uint64(m.Index)) ++ } ++ l = len(m.Bytes) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ l = m.Proof.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ return n ++} ++ ++func (m *BlockID) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ l = len(m.Hash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ l = m.PartSetHeader.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ return n ++} ++ ++func (m *Header) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ l = m.Version.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ l = len(m.ChainID) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.Height != 0 { ++ n += 1 + sovTypes(uint64(m.Height)) ++ } ++ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time) ++ n += 1 + l + sovTypes(uint64(l)) ++ l = m.LastBlockId.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ l = len(m.LastCommitHash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ l = len(m.DataHash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ l = len(m.ValidatorsHash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ l = len(m.NextValidatorsHash) 
++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ l = len(m.ConsensusHash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ l = len(m.AppHash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ l = len(m.LastResultsHash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ l = len(m.EvidenceHash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ l = len(m.ProposerAddress) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ ++func (m *Data) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if len(m.Txs) > 0 { ++ for _, b := range m.Txs { ++ l = len(b) ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ } ++ if m.SquareSize != 0 { ++ n += 1 + sovTypes(uint64(m.SquareSize)) ++ } ++ l = len(m.Hash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ ++func (m *Blob) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ l = len(m.NamespaceId) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ l = len(m.Data) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.ShareVersion != 0 { ++ n += 1 + sovTypes(uint64(m.ShareVersion)) ++ } ++ if m.NamespaceVersion != 0 { ++ n += 1 + sovTypes(uint64(m.NamespaceVersion)) ++ } ++ return n ++} ++ ++func (m *Vote) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.Type != 0 { ++ n += 1 + sovTypes(uint64(m.Type)) ++ } ++ if m.Height != 0 { ++ n += 1 + sovTypes(uint64(m.Height)) ++ } ++ if m.Round != 0 { ++ n += 1 + sovTypes(uint64(m.Round)) ++ } ++ l = m.BlockID.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) ++ n += 1 + l + sovTypes(uint64(l)) ++ l = len(m.ValidatorAddress) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.ValidatorIndex != 0 { ++ n += 1 + sovTypes(uint64(m.ValidatorIndex)) ++ } ++ l = len(m.Signature) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ ++func 
(m *Commit) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.Height != 0 { ++ n += 1 + sovTypes(uint64(m.Height)) ++ } ++ if m.Round != 0 { ++ n += 1 + sovTypes(uint64(m.Round)) ++ } ++ l = m.BlockID.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ if len(m.Signatures) > 0 { ++ for _, e := range m.Signatures { ++ l = e.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ } ++ return n ++} ++ ++func (m *CommitSig) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.BlockIdFlag != 0 { ++ n += 1 + sovTypes(uint64(m.BlockIdFlag)) ++ } ++ l = len(m.ValidatorAddress) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) ++ n += 1 + l + sovTypes(uint64(l)) ++ l = len(m.Signature) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ ++func (m *Proposal) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.Type != 0 { ++ n += 1 + sovTypes(uint64(m.Type)) ++ } ++ if m.Height != 0 { ++ n += 1 + sovTypes(uint64(m.Height)) ++ } ++ if m.Round != 0 { ++ n += 1 + sovTypes(uint64(m.Round)) ++ } ++ if m.PolRound != 0 { ++ n += 1 + sovTypes(uint64(m.PolRound)) ++ } ++ l = m.BlockID.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) ++ n += 1 + l + sovTypes(uint64(l)) ++ l = len(m.Signature) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ ++func (m *SignedHeader) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.Header != nil { ++ l = m.Header.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.Commit != nil { ++ l = m.Commit.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ ++func (m *LightBlock) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.SignedHeader != nil { ++ l = m.SignedHeader.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if 
m.ValidatorSet != nil { ++ l = m.ValidatorSet.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ ++func (m *BlockMeta) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ l = m.BlockID.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ if m.BlockSize != 0 { ++ n += 1 + sovTypes(uint64(m.BlockSize)) ++ } ++ l = m.Header.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ if m.NumTxs != 0 { ++ n += 1 + sovTypes(uint64(m.NumTxs)) ++ } ++ return n ++} ++ ++func (m *TxProof) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ l = len(m.RootHash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ l = len(m.Data) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.Proof != nil { ++ l = m.Proof.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ ++func (m *IndexWrapper) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ l = len(m.Tx) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if len(m.ShareIndexes) > 0 { ++ l = 0 ++ for _, e := range m.ShareIndexes { ++ l += sovTypes(uint64(e)) ++ } ++ n += 1 + sovTypes(uint64(l)) + l ++ } ++ l = len(m.TypeId) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ ++func (m *BlobTx) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ l = len(m.Tx) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if len(m.Blobs) > 0 { ++ for _, e := range m.Blobs { ++ l = e.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ } ++ l = len(m.TypeId) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ ++func (m *ShareProof) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if len(m.Data) > 0 { ++ for _, b := range m.Data { ++ l = len(b) ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ } ++ if len(m.ShareProofs) > 0 { ++ for _, e := range m.ShareProofs { ++ l = e.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ } ++ l = len(m.NamespaceId) ++ 
if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.RowProof != nil { ++ l = m.RowProof.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.NamespaceVersion != 0 { ++ n += 1 + sovTypes(uint64(m.NamespaceVersion)) ++ } ++ return n ++} ++ ++func (m *RowProof) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if len(m.RowRoots) > 0 { ++ for _, b := range m.RowRoots { ++ l = len(b) ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ } ++ if len(m.Proofs) > 0 { ++ for _, e := range m.Proofs { ++ l = e.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ } ++ l = len(m.Root) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.StartRow != 0 { ++ n += 1 + sovTypes(uint64(m.StartRow)) ++ } ++ if m.EndRow != 0 { ++ n += 1 + sovTypes(uint64(m.EndRow)) ++ } ++ return n ++} ++ ++func (m *NMTProof) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.Start != 0 { ++ n += 1 + sovTypes(uint64(m.Start)) ++ } ++ if m.End != 0 { ++ n += 1 + sovTypes(uint64(m.End)) ++ } ++ if len(m.Nodes) > 0 { ++ for _, b := range m.Nodes { ++ l = len(b) ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ } ++ l = len(m.LeafHash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ ++func sovTypes(x uint64) (n int) { ++ return (math_bits.Len64(x|1) + 6) / 7 ++} ++func sozTypes(x uint64) (n int) { ++ return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) ++} ++func (m *PartSetHeader) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: PartSetHeader: wiretype end group for non-group") ++ } 
++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: PartSetHeader: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) ++ } ++ m.Total = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.Total |= uint32(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ case 2: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
++ if m.Hash == nil { ++ m.Hash = []byte{} ++ } ++ iNdEx = postIndex ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *Part) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: Part: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: Part: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) ++ } ++ m.Index = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.Index |= uint32(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ case 2: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Bytes", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return 
ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.Bytes = append(m.Bytes[:0], dAtA[iNdEx:postIndex]...) ++ if m.Bytes == nil { ++ m.Bytes = []byte{} ++ } ++ iNdEx = postIndex ++ case 3: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *BlockID) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: BlockID: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: BlockID: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 2 { ++ return 
fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) ++ if m.Hash == nil { ++ m.Hash = []byte{} ++ } ++ iNdEx = postIndex ++ case 2: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field PartSetHeader", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if err := m.PartSetHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *Header) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ 
return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: Header: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 2: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) ++ } ++ var stringLen uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ stringLen |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ intStringLen := int(stringLen) ++ if intStringLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + intStringLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.ChainID = string(dAtA[iNdEx:postIndex]) ++ iNdEx = postIndex ++ case 3: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) ++ } ++ m.Height = 0 ++ for shift := uint(0); ; 
shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.Height |= int64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ case 4: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 5: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field LastBlockId", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if err := m.LastBlockId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 6: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field LastCommitHash", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen 
|= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.LastCommitHash = append(m.LastCommitHash[:0], dAtA[iNdEx:postIndex]...) ++ if m.LastCommitHash == nil { ++ m.LastCommitHash = []byte{} ++ } ++ iNdEx = postIndex ++ case 7: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field DataHash", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.DataHash = append(m.DataHash[:0], dAtA[iNdEx:postIndex]...) ++ if m.DataHash == nil { ++ m.DataHash = []byte{} ++ } ++ iNdEx = postIndex ++ case 8: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field ValidatorsHash", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.ValidatorsHash = append(m.ValidatorsHash[:0], dAtA[iNdEx:postIndex]...) 
++ if m.ValidatorsHash == nil { ++ m.ValidatorsHash = []byte{} ++ } ++ iNdEx = postIndex ++ case 9: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.NextValidatorsHash = append(m.NextValidatorsHash[:0], dAtA[iNdEx:postIndex]...) ++ if m.NextValidatorsHash == nil { ++ m.NextValidatorsHash = []byte{} ++ } ++ iNdEx = postIndex ++ case 10: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field ConsensusHash", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.ConsensusHash = append(m.ConsensusHash[:0], dAtA[iNdEx:postIndex]...) 
++ if m.ConsensusHash == nil { ++ m.ConsensusHash = []byte{} ++ } ++ iNdEx = postIndex ++ case 11: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) ++ if m.AppHash == nil { ++ m.AppHash = []byte{} ++ } ++ iNdEx = postIndex ++ case 12: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field LastResultsHash", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.LastResultsHash = append(m.LastResultsHash[:0], dAtA[iNdEx:postIndex]...) 
++ if m.LastResultsHash == nil { ++ m.LastResultsHash = []byte{} ++ } ++ iNdEx = postIndex ++ case 13: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field EvidenceHash", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.EvidenceHash = append(m.EvidenceHash[:0], dAtA[iNdEx:postIndex]...) ++ if m.EvidenceHash == nil { ++ m.EvidenceHash = []byte{} ++ } ++ iNdEx = postIndex ++ case 14: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field ProposerAddress", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.ProposerAddress = append(m.ProposerAddress[:0], dAtA[iNdEx:postIndex]...) 
++ if m.ProposerAddress == nil { ++ m.ProposerAddress = []byte{} ++ } ++ iNdEx = postIndex ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } + } +- return n +-} + +-func (m *TxProof) Size() (n int) { +- if m == nil { +- return 0 +- } +- var l int +- _ = l +- l = len(m.RootHash) +- if l > 0 { +- n += 1 + l + sovTypes(uint64(l)) +- } +- l = len(m.Data) +- if l > 0 { +- n += 1 + l + sovTypes(uint64(l)) +- } +- if m.Proof != nil { +- l = m.Proof.Size() +- n += 1 + l + sovTypes(uint64(l)) ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF + } +- return n +-} +- +-func sovTypes(x uint64) (n int) { +- return (math_bits.Len64(x|1) + 6) / 7 +-} +-func sozTypes(x uint64) (n int) { +- return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) ++ return nil + } +-func (m *PartSetHeader) Unmarshal(dAtA []byte) error { ++func (m *Data) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { +@@ -2202,17 +4070,49 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { +- return fmt.Errorf("proto: PartSetHeader: wiretype end group for non-group") ++ return fmt.Errorf("proto: Data: wiretype end group for non-group") + } + if fieldNum <= 0 { +- return fmt.Errorf("proto: PartSetHeader: illegal tag %d (wire type %d)", fieldNum, wire) ++ return fmt.Errorf("proto: Data: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen 
|= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) ++ copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) ++ iNdEx = postIndex ++ case 5: + if wireType != 0 { +- return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field SquareSize", wireType) + } +- m.Total = 0 ++ m.SquareSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -2222,12 +4122,12 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- m.Total |= uint32(b&0x7F) << shift ++ m.SquareSize |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } +- case 2: ++ case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } +@@ -2282,7 +4182,163 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { + } + return nil + } +-func (m *Part) Unmarshal(dAtA []byte) error { ++func (m *Blob) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: Blob: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: Blob: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field NamespaceId", 
wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.NamespaceId = append(m.NamespaceId[:0], dAtA[iNdEx:postIndex]...) ++ if m.NamespaceId == nil { ++ m.NamespaceId = []byte{} ++ } ++ iNdEx = postIndex ++ case 2: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
++ if m.Data == nil { ++ m.Data = []byte{} ++ } ++ iNdEx = postIndex ++ case 3: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field ShareVersion", wireType) ++ } ++ m.ShareVersion = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.ShareVersion |= uint32(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ case 4: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field NamespaceVersion", wireType) ++ } ++ m.NamespaceVersion = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.NamespaceVersion |= uint32(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *Vote) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { +@@ -2295,27 +4351,65 @@ func (m *Part) Unmarshal(dAtA []byte) error { + if iNdEx >= l { + return io.ErrUnexpectedEOF + } +- b := dAtA[iNdEx] +- iNdEx++ +- wire |= uint64(b&0x7F) << shift +- if b < 0x80 { +- break ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: Vote: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: Vote: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 0 { ++ return 
fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) ++ } ++ m.Type = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.Type |= SignedMsgType(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ case 2: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) ++ } ++ m.Height = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.Height |= int64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } + } +- } +- fieldNum := int32(wire >> 3) +- wireType := int(wire & 0x7) +- if wireType == 4 { +- return fmt.Errorf("proto: Part: wiretype end group for non-group") +- } +- if fieldNum <= 0 { +- return fmt.Errorf("proto: Part: illegal tag %d (wire type %d)", fieldNum, wire) +- } +- switch fieldNum { +- case 1: ++ case 3: + if wireType != 0 { +- return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } +- m.Index = 0 ++ m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -2325,16 +4419,16 @@ func (m *Part) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- m.Index |= uint32(b&0x7F) << shift ++ m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } +- case 2: ++ case 4: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Bytes", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + } +- var byteLen int ++ var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -2344,29 +4438,28 @@ func (m *Part) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- byteLen |= 
int(b&0x7F) << shift ++ msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } +- if byteLen < 0 { ++ if msglen < 0 { + return ErrInvalidLengthTypes + } +- postIndex := iNdEx + byteLen ++ postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } +- m.Bytes = append(m.Bytes[:0], dAtA[iNdEx:postIndex]...) +- if m.Bytes == nil { +- m.Bytes = []byte{} ++ if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err + } + iNdEx = postIndex +- case 3: ++ case 5: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { +@@ -2393,63 +4486,13 @@ func (m *Part) Unmarshal(dAtA []byte) error { + if postIndex > l { + return io.ErrUnexpectedEOF + } +- if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex +- default: +- iNdEx = preIndex +- skippy, err := skipTypes(dAtA[iNdEx:]) +- if err != nil { +- return err +- } +- if (skippy < 0) || (iNdEx+skippy) < 0 { +- return ErrInvalidLengthTypes +- } +- if (iNdEx + skippy) > l { +- return io.ErrUnexpectedEOF +- } +- iNdEx += skippy +- } +- } +- +- if iNdEx > l { +- return io.ErrUnexpectedEOF +- } +- return nil +-} +-func (m *BlockID) Unmarshal(dAtA []byte) error { +- l := len(dAtA) +- iNdEx := 0 +- for iNdEx < l { +- preIndex := iNdEx +- var wire uint64 +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowTypes +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- wire |= uint64(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- fieldNum := int32(wire >> 3) +- wireType := int(wire & 0x7) +- if wireType == 4 { +- return 
fmt.Errorf("proto: BlockID: wiretype end group for non-group") +- } +- if fieldNum <= 0 { +- return fmt.Errorf("proto: BlockID: illegal tag %d (wire type %d)", fieldNum, wire) +- } +- switch fieldNum { +- case 1: ++ case 6: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field ValidatorAddress", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { +@@ -2476,16 +4519,35 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { + if postIndex > l { + return io.ErrUnexpectedEOF + } +- m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) +- if m.Hash == nil { +- m.Hash = []byte{} ++ m.ValidatorAddress = append(m.ValidatorAddress[:0], dAtA[iNdEx:postIndex]...) ++ if m.ValidatorAddress == nil { ++ m.ValidatorAddress = []byte{} + } + iNdEx = postIndex +- case 2: ++ case 7: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field ValidatorIndex", wireType) ++ } ++ m.ValidatorIndex = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.ValidatorIndex |= int32(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ case 8: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field PartSetHeader", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + } +- var msglen int ++ var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -2495,23 +4557,24 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- msglen |= int(b&0x7F) << shift ++ byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } +- if msglen < 0 { ++ if byteLen < 0 { + return ErrInvalidLengthTypes + } +- postIndex := iNdEx + msglen ++ postIndex := iNdEx + byteLen + if postIndex < 0 { + return 
ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } +- if err := m.PartSetHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { +- return err ++ m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) ++ if m.Signature == nil { ++ m.Signature = []byte{} + } + iNdEx = postIndex + default: +@@ -2535,7 +4598,7 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { + } + return nil + } +-func (m *Header) Unmarshal(dAtA []byte) error { ++func (m *Commit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { +@@ -2558,15 +4621,53 @@ func (m *Header) Unmarshal(dAtA []byte) error { + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { +- return fmt.Errorf("proto: Header: wiretype end group for non-group") ++ return fmt.Errorf("proto: Commit: wiretype end group for non-group") + } + if fieldNum <= 0 { +- return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire) ++ return fmt.Errorf("proto: Commit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) ++ } ++ m.Height = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.Height |= int64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ case 2: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) ++ } ++ m.Round = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.Round |= int32(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ case 3: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) ++ return fmt.Errorf("proto: wrong 
wireType = %d for field BlockID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { +@@ -2593,15 +4694,15 @@ func (m *Header) Unmarshal(dAtA []byte) error { + if postIndex > l { + return io.ErrUnexpectedEOF + } +- if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex +- case 2: ++ case 4: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field Signatures", wireType) + } +- var stringLen uint64 ++ var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -2611,29 +4712,81 @@ func (m *Header) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- stringLen |= uint64(b&0x7F) << shift ++ msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } +- intStringLen := int(stringLen) +- if intStringLen < 0 { ++ if msglen < 0 { + return ErrInvalidLengthTypes + } +- postIndex := iNdEx + intStringLen ++ postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } +- if postIndex > l { ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.Signatures = append(m.Signatures, CommitSig{}) ++ if err := m.Signatures[len(m.Signatures)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *CommitSig) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 
{ ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { + return io.ErrUnexpectedEOF + } +- m.ChainID = string(dAtA[iNdEx:postIndex]) +- iNdEx = postIndex +- case 3: ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: CommitSig: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: CommitSig: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: + if wireType != 0 { +- return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field BlockIdFlag", wireType) + } +- m.Height = 0 ++ m.BlockIdFlag = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -2643,16 +4796,16 @@ func (m *Header) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- m.Height |= int64(b&0x7F) << shift ++ m.BlockIdFlag |= BlockIDFlag(b&0x7F) << shift + if b < 0x80 { + break + } + } +- case 4: ++ case 2: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field ValidatorAddress", wireType) + } +- var msglen int ++ var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -2662,28 +4815,29 @@ func (m *Header) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- msglen |= int(b&0x7F) << shift ++ byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } +- if msglen < 0 { ++ if byteLen < 0 { + return ErrInvalidLengthTypes + } +- postIndex := iNdEx + msglen ++ postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } +- if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, 
dAtA[iNdEx:postIndex]); err != nil { +- return err ++ m.ValidatorAddress = append(m.ValidatorAddress[:0], dAtA[iNdEx:postIndex]...) ++ if m.ValidatorAddress == nil { ++ m.ValidatorAddress = []byte{} + } + iNdEx = postIndex +- case 5: ++ case 3: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field LastBlockId", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { +@@ -2710,13 +4864,13 @@ func (m *Header) Unmarshal(dAtA []byte) error { + if postIndex > l { + return io.ErrUnexpectedEOF + } +- if err := m.LastBlockId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex +- case 6: ++ case 4: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field LastCommitHash", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { +@@ -2743,50 +4897,66 @@ func (m *Header) Unmarshal(dAtA []byte) error { + if postIndex > l { + return io.ErrUnexpectedEOF + } +- m.LastCommitHash = append(m.LastCommitHash[:0], dAtA[iNdEx:postIndex]...) +- if m.LastCommitHash == nil { +- m.LastCommitHash = []byte{} ++ m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) 
++ if m.Signature == nil { ++ m.Signature = []byte{} + } + iNdEx = postIndex +- case 7: +- if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field DataHash", wireType) +- } +- var byteLen int +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowTypes +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- byteLen |= int(b&0x7F) << shift +- if b < 0x80 { +- break +- } ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err + } +- if byteLen < 0 { ++ if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } +- postIndex := iNdEx + byteLen +- if postIndex < 0 { +- return ErrInvalidLengthTypes ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF + } +- if postIndex > l { ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *Proposal) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { + return io.ErrUnexpectedEOF + } +- m.DataHash = append(m.DataHash[:0], dAtA[iNdEx:postIndex]...) 
+- if m.DataHash == nil { +- m.DataHash = []byte{} ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break + } +- iNdEx = postIndex +- case 8: +- if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field ValidatorsHash", wireType) ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: Proposal: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: Proposal: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } +- var byteLen int ++ m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -2796,31 +4966,16 @@ func (m *Header) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- byteLen |= int(b&0x7F) << shift ++ m.Type |= SignedMsgType(b&0x7F) << shift + if b < 0x80 { + break + } + } +- if byteLen < 0 { +- return ErrInvalidLengthTypes +- } +- postIndex := iNdEx + byteLen +- if postIndex < 0 { +- return ErrInvalidLengthTypes +- } +- if postIndex > l { +- return io.ErrUnexpectedEOF +- } +- m.ValidatorsHash = append(m.ValidatorsHash[:0], dAtA[iNdEx:postIndex]...) 
+- if m.ValidatorsHash == nil { +- m.ValidatorsHash = []byte{} +- } +- iNdEx = postIndex +- case 9: +- if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType) ++ case 2: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } +- var byteLen int ++ m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -2830,31 +4985,16 @@ func (m *Header) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- byteLen |= int(b&0x7F) << shift ++ m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } +- if byteLen < 0 { +- return ErrInvalidLengthTypes +- } +- postIndex := iNdEx + byteLen +- if postIndex < 0 { +- return ErrInvalidLengthTypes +- } +- if postIndex > l { +- return io.ErrUnexpectedEOF +- } +- m.NextValidatorsHash = append(m.NextValidatorsHash[:0], dAtA[iNdEx:postIndex]...) +- if m.NextValidatorsHash == nil { +- m.NextValidatorsHash = []byte{} +- } +- iNdEx = postIndex +- case 10: +- if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field ConsensusHash", wireType) ++ case 3: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } +- var byteLen int ++ m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -2864,31 +5004,16 @@ func (m *Header) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- byteLen |= int(b&0x7F) << shift ++ m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } +- if byteLen < 0 { +- return ErrInvalidLengthTypes +- } +- postIndex := iNdEx + byteLen +- if postIndex < 0 { +- return ErrInvalidLengthTypes +- } +- if postIndex > l { +- return io.ErrUnexpectedEOF +- } +- m.ConsensusHash = append(m.ConsensusHash[:0], dAtA[iNdEx:postIndex]...) 
+- if m.ConsensusHash == nil { +- m.ConsensusHash = []byte{} +- } +- iNdEx = postIndex +- case 11: +- if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) ++ case 4: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field PolRound", wireType) + } +- var byteLen int ++ m.PolRound = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -2898,31 +5023,16 @@ func (m *Header) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- byteLen |= int(b&0x7F) << shift ++ m.PolRound |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } +- if byteLen < 0 { +- return ErrInvalidLengthTypes +- } +- postIndex := iNdEx + byteLen +- if postIndex < 0 { +- return ErrInvalidLengthTypes +- } +- if postIndex > l { +- return io.ErrUnexpectedEOF +- } +- m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) +- if m.AppHash == nil { +- m.AppHash = []byte{} +- } +- iNdEx = postIndex +- case 12: ++ case 5: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field LastResultsHash", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + } +- var byteLen int ++ var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -2932,31 +5042,30 @@ func (m *Header) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- byteLen |= int(b&0x7F) << shift ++ msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } +- if byteLen < 0 { ++ if msglen < 0 { + return ErrInvalidLengthTypes + } +- postIndex := iNdEx + byteLen ++ postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } +- m.LastResultsHash = append(m.LastResultsHash[:0], dAtA[iNdEx:postIndex]...) 
+- if m.LastResultsHash == nil { +- m.LastResultsHash = []byte{} ++ if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err + } + iNdEx = postIndex +- case 13: ++ case 6: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field EvidenceHash", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } +- var byteLen int ++ var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -2966,29 +5075,28 @@ func (m *Header) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- byteLen |= int(b&0x7F) << shift ++ msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } +- if byteLen < 0 { ++ if msglen < 0 { + return ErrInvalidLengthTypes + } +- postIndex := iNdEx + byteLen ++ postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } +- m.EvidenceHash = append(m.EvidenceHash[:0], dAtA[iNdEx:postIndex]...) +- if m.EvidenceHash == nil { +- m.EvidenceHash = []byte{} ++ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { ++ return err + } + iNdEx = postIndex +- case 14: ++ case 7: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field ProposerAddress", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { +@@ -3015,9 +5123,9 @@ func (m *Header) Unmarshal(dAtA []byte) error { + if postIndex > l { + return io.ErrUnexpectedEOF + } +- m.ProposerAddress = append(m.ProposerAddress[:0], dAtA[iNdEx:postIndex]...) +- if m.ProposerAddress == nil { +- m.ProposerAddress = []byte{} ++ m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) 
++ if m.Signature == nil { ++ m.Signature = []byte{} + } + iNdEx = postIndex + default: +@@ -3041,7 +5149,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { + } + return nil + } +-func (m *Data) Unmarshal(dAtA []byte) error { ++func (m *SignedHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { +@@ -3064,17 +5172,17 @@ func (m *Data) Unmarshal(dAtA []byte) error { + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { +- return fmt.Errorf("proto: Data: wiretype end group for non-group") ++ return fmt.Errorf("proto: SignedHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { +- return fmt.Errorf("proto: Data: illegal tag %d (wire type %d)", fieldNum, wire) ++ return fmt.Errorf("proto: SignedHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } +- var byteLen int ++ var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -3084,23 +5192,63 @@ func (m *Data) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- byteLen |= int(b&0x7F) << shift ++ msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } +- if byteLen < 0 { ++ if msglen < 0 { + return ErrInvalidLengthTypes + } +- postIndex := iNdEx + byteLen ++ postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } +- m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) +- copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) ++ if m.Header == nil { ++ m.Header = &Header{} ++ } ++ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 2: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Commit", 
wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.Commit == nil { ++ m.Commit = &Commit{} ++ } ++ if err := m.Commit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } + iNdEx = postIndex + default: + iNdEx = preIndex +@@ -3123,7 +5271,7 @@ func (m *Data) Unmarshal(dAtA []byte) error { + } + return nil + } +-func (m *Vote) Unmarshal(dAtA []byte) error { ++func (m *LightBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { +@@ -3146,72 +5294,15 @@ func (m *Vote) Unmarshal(dAtA []byte) error { + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { +- return fmt.Errorf("proto: Vote: wiretype end group for non-group") ++ return fmt.Errorf("proto: LightBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { +- return fmt.Errorf("proto: Vote: illegal tag %d (wire type %d)", fieldNum, wire) ++ return fmt.Errorf("proto: LightBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: +- if wireType != 0 { +- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) +- } +- m.Type = 0 +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowTypes +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- m.Type |= SignedMsgType(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- case 2: +- if wireType != 0 { +- return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) +- } +- m.Height = 0 +- for shift := uint(0); ; shift += 7 { +- 
if shift >= 64 { +- return ErrIntOverflowTypes +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- m.Height |= int64(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- case 3: +- if wireType != 0 { +- return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) +- } +- m.Round = 0 +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowTypes +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- m.Round |= int32(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- case 4: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field SignedHeader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { +@@ -3238,13 +5329,16 @@ func (m *Vote) Unmarshal(dAtA []byte) error { + if postIndex > l { + return io.ErrUnexpectedEOF + } +- if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ if m.SignedHeader == nil { ++ m.SignedHeader = &SignedHeader{} ++ } ++ if err := m.SignedHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex +- case 5: ++ case 2: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { +@@ -3271,95 +5365,11 @@ func (m *Vote) Unmarshal(dAtA []byte) error { + if postIndex > l { + return io.ErrUnexpectedEOF + } +- if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { +- return err +- } +- iNdEx = postIndex +- case 6: +- if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field ValidatorAddress", wireType) +- } +- var byteLen int +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return 
ErrIntOverflowTypes +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- byteLen |= int(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- if byteLen < 0 { +- return ErrInvalidLengthTypes +- } +- postIndex := iNdEx + byteLen +- if postIndex < 0 { +- return ErrInvalidLengthTypes +- } +- if postIndex > l { +- return io.ErrUnexpectedEOF +- } +- m.ValidatorAddress = append(m.ValidatorAddress[:0], dAtA[iNdEx:postIndex]...) +- if m.ValidatorAddress == nil { +- m.ValidatorAddress = []byte{} +- } +- iNdEx = postIndex +- case 7: +- if wireType != 0 { +- return fmt.Errorf("proto: wrong wireType = %d for field ValidatorIndex", wireType) +- } +- m.ValidatorIndex = 0 +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowTypes +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- m.ValidatorIndex |= int32(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- case 8: +- if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) +- } +- var byteLen int +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowTypes +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- byteLen |= int(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- if byteLen < 0 { +- return ErrInvalidLengthTypes +- } +- postIndex := iNdEx + byteLen +- if postIndex < 0 { +- return ErrInvalidLengthTypes +- } +- if postIndex > l { +- return io.ErrUnexpectedEOF ++ if m.ValidatorSet == nil { ++ m.ValidatorSet = &ValidatorSet{} + } +- m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) 
+- if m.Signature == nil { +- m.Signature = []byte{} ++ if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err + } + iNdEx = postIndex + default: +@@ -3383,7 +5393,7 @@ func (m *Vote) Unmarshal(dAtA []byte) error { + } + return nil + } +-func (m *Commit) Unmarshal(dAtA []byte) error { ++func (m *BlockMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { +@@ -3406,17 +5416,17 @@ func (m *Commit) Unmarshal(dAtA []byte) error { + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { +- return fmt.Errorf("proto: Commit: wiretype end group for non-group") ++ return fmt.Errorf("proto: BlockMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { +- return fmt.Errorf("proto: Commit: illegal tag %d (wire type %d)", fieldNum, wire) ++ return fmt.Errorf("proto: BlockMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { +- case 1: +- if wireType != 0 { +- return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) ++ case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + } +- m.Height = 0 ++ var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -3426,16 +5436,30 @@ func (m *Commit) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- m.Height |= int64(b&0x7F) << shift ++ msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex + case 2: + if wireType != 0 { +- return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field BlockSize", 
wireType) + } +- m.Round = 0 ++ m.BlockSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -3445,14 +5469,14 @@ func (m *Commit) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- m.Round |= int32(b&0x7F) << shift ++ m.BlockSize |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { +@@ -3479,15 +5503,15 @@ func (m *Commit) Unmarshal(dAtA []byte) error { + if postIndex > l { + return io.ErrUnexpectedEOF + } +- if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: +- if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Signatures", wireType) ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field NumTxs", wireType) + } +- var msglen int ++ m.NumTxs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -3497,26 +5521,11 @@ func (m *Commit) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- msglen |= int(b&0x7F) << shift ++ m.NumTxs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } +- if msglen < 0 { +- return ErrInvalidLengthTypes +- } +- postIndex := iNdEx + msglen +- if postIndex < 0 { +- return ErrInvalidLengthTypes +- } +- if postIndex > l { +- return io.ErrUnexpectedEOF +- } +- m.Signatures = append(m.Signatures, CommitSig{}) +- if err := m.Signatures[len(m.Signatures)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { +- return err +- } +- iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) +@@ -3538,7 +5547,7 @@ func (m *Commit) Unmarshal(dAtA []byte) error { + } + return nil 
+ } +-func (m *CommitSig) Unmarshal(dAtA []byte) error { ++func (m *TxProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { +@@ -3561,34 +5570,15 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { +- return fmt.Errorf("proto: CommitSig: wiretype end group for non-group") ++ return fmt.Errorf("proto: TxProof: wiretype end group for non-group") + } + if fieldNum <= 0 { +- return fmt.Errorf("proto: CommitSig: illegal tag %d (wire type %d)", fieldNum, wire) ++ return fmt.Errorf("proto: TxProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: +- if wireType != 0 { +- return fmt.Errorf("proto: wrong wireType = %d for field BlockIdFlag", wireType) +- } +- m.BlockIdFlag = 0 +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowTypes +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- m.BlockIdFlag |= BlockIDFlag(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- case 2: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field ValidatorAddress", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field RootHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { +@@ -3615,16 +5605,16 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { + if postIndex > l { + return io.ErrUnexpectedEOF + } +- m.ValidatorAddress = append(m.ValidatorAddress[:0], dAtA[iNdEx:postIndex]...) +- if m.ValidatorAddress == nil { +- m.ValidatorAddress = []byte{} ++ m.RootHash = append(m.RootHash[:0], dAtA[iNdEx:postIndex]...) 
++ if m.RootHash == nil { ++ m.RootHash = []byte{} + } + iNdEx = postIndex +- case 3: ++ case 2: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } +- var msglen int ++ var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -3634,30 +5624,31 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- msglen |= int(b&0x7F) << shift ++ byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } +- if msglen < 0 { ++ if byteLen < 0 { + return ErrInvalidLengthTypes + } +- postIndex := iNdEx + msglen ++ postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } +- if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { +- return err ++ m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) ++ if m.Data == nil { ++ m.Data = []byte{} + } + iNdEx = postIndex +- case 4: ++ case 3: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } +- var byteLen int ++ var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -3667,24 +5658,26 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- byteLen |= int(b&0x7F) << shift ++ msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } +- if byteLen < 0 { ++ if msglen < 0 { + return ErrInvalidLengthTypes + } +- postIndex := iNdEx + byteLen ++ postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } +- m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) 
+- if m.Signature == nil { +- m.Signature = []byte{} ++ if m.Proof == nil { ++ m.Proof = &crypto.Proof{} ++ } ++ if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err + } + iNdEx = postIndex + default: +@@ -3708,7 +5701,7 @@ func (m *CommitSig) Unmarshal(dAtA []byte) error { + } + return nil + } +-func (m *Proposal) Unmarshal(dAtA []byte) error { ++func (m *IndexWrapper) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { +@@ -3731,17 +5724,17 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { +- return fmt.Errorf("proto: Proposal: wiretype end group for non-group") ++ return fmt.Errorf("proto: IndexWrapper: wiretype end group for non-group") + } + if fieldNum <= 0 { +- return fmt.Errorf("proto: Proposal: illegal tag %d (wire type %d)", fieldNum, wire) ++ return fmt.Errorf("proto: IndexWrapper: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: +- if wireType != 0 { +- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + } +- m.Type = 0 ++ var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -3751,35 +5744,107 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- m.Type |= SignedMsgType(b&0x7F) << shift ++ byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } +- case 2: +- if wireType != 0 { +- return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes + } +- m.Height = 0 +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowTypes +- } +- if iNdEx >= l { ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return 
io.ErrUnexpectedEOF ++ } ++ m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) ++ if m.Tx == nil { ++ m.Tx = []byte{} ++ } ++ iNdEx = postIndex ++ case 2: ++ if wireType == 0 { ++ var v uint32 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ v |= uint32(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ m.ShareIndexes = append(m.ShareIndexes, v) ++ } else if wireType == 2 { ++ var packedLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ packedLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if packedLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + packedLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { + return io.ErrUnexpectedEOF + } +- b := dAtA[iNdEx] +- iNdEx++ +- m.Height |= int64(b&0x7F) << shift +- if b < 0x80 { +- break +- } ++ var elementCount int ++ var count int ++ for _, integer := range dAtA[iNdEx:postIndex] { ++ if integer < 128 { ++ count++ ++ } ++ } ++ elementCount = count ++ if elementCount != 0 && len(m.ShareIndexes) == 0 { ++ m.ShareIndexes = make([]uint32, 0, elementCount) ++ } ++ for iNdEx < postIndex { ++ var v uint32 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ v |= uint32(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ m.ShareIndexes = append(m.ShareIndexes, v) ++ } ++ } else { ++ return fmt.Errorf("proto: wrong wireType = %d for field ShareIndexes", wireType) + } + case 3: +- if wireType != 0 { +- return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field 
TypeId", wireType) + } +- m.Round = 0 ++ var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -3789,35 +5854,79 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- m.Round |= int32(b&0x7F) << shift ++ stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } +- case 4: +- if wireType != 0 { +- return fmt.Errorf("proto: wrong wireType = %d for field PolRound", wireType) ++ intStringLen := int(stringLen) ++ if intStringLen < 0 { ++ return ErrInvalidLengthTypes + } +- m.PolRound = 0 +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowTypes +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- m.PolRound |= int32(b&0x7F) << shift +- if b < 0x80 { +- break +- } ++ postIndex := iNdEx + intStringLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes + } +- case 5: ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.TypeId = string(dAtA[iNdEx:postIndex]) ++ iNdEx = postIndex ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *BlobTx) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: BlobTx: wiretype end group for non-group") ++ } ++ if fieldNum 
<= 0 { ++ return fmt.Errorf("proto: BlobTx: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + } +- var msglen int ++ var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -3827,28 +5936,29 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- msglen |= int(b&0x7F) << shift ++ byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } +- if msglen < 0 { ++ if byteLen < 0 { + return ErrInvalidLengthTypes + } +- postIndex := iNdEx + msglen ++ postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } +- if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { +- return err +- } ++ m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) 
++ if m.Tx == nil { ++ m.Tx = []byte{} ++ } + iNdEx = postIndex +- case 6: ++ case 2: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field Blobs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { +@@ -3875,15 +5985,16 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { + if postIndex > l { + return io.ErrUnexpectedEOF + } +- if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { ++ m.Blobs = append(m.Blobs, &Blob{}) ++ if err := m.Blobs[len(m.Blobs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex +- case 7: ++ case 3: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field TypeId", wireType) + } +- var byteLen int ++ var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -3893,25 +6004,23 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- byteLen |= int(b&0x7F) << shift ++ stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } +- if byteLen < 0 { ++ intStringLen := int(stringLen) ++ if intStringLen < 0 { + return ErrInvalidLengthTypes + } +- postIndex := iNdEx + byteLen ++ postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } +- m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) 
+- if m.Signature == nil { +- m.Signature = []byte{} +- } ++ m.TypeId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex +@@ -3934,7 +6043,7 @@ func (m *Proposal) Unmarshal(dAtA []byte) error { + } + return nil + } +-func (m *SignedHeader) Unmarshal(dAtA []byte) error { ++func (m *ShareProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { +@@ -3957,17 +6066,17 @@ func (m *SignedHeader) Unmarshal(dAtA []byte) error { + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { +- return fmt.Errorf("proto: SignedHeader: wiretype end group for non-group") ++ return fmt.Errorf("proto: ShareProof: wiretype end group for non-group") + } + if fieldNum <= 0 { +- return fmt.Errorf("proto: SignedHeader: illegal tag %d (wire type %d)", fieldNum, wire) ++ return fmt.Errorf("proto: ShareProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } +- var msglen int ++ var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -3977,31 +6086,27 @@ func (m *SignedHeader) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- msglen |= int(b&0x7F) << shift ++ byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } +- if msglen < 0 { ++ if byteLen < 0 { + return ErrInvalidLengthTypes + } +- postIndex := iNdEx + msglen ++ postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } +- if m.Header == nil { +- m.Header = &Header{} +- } +- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { +- return err +- } ++ m.Data = append(m.Data, make([]byte, postIndex-iNdEx)) ++ copy(m.Data[len(m.Data)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + 
case 2: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field ShareProofs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { +@@ -4028,68 +6133,16 @@ func (m *SignedHeader) Unmarshal(dAtA []byte) error { + if postIndex > l { + return io.ErrUnexpectedEOF + } +- if m.Commit == nil { +- m.Commit = &Commit{} +- } +- if err := m.Commit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ m.ShareProofs = append(m.ShareProofs, &NMTProof{}) ++ if err := m.ShareProofs[len(m.ShareProofs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex +- default: +- iNdEx = preIndex +- skippy, err := skipTypes(dAtA[iNdEx:]) +- if err != nil { +- return err +- } +- if (skippy < 0) || (iNdEx+skippy) < 0 { +- return ErrInvalidLengthTypes +- } +- if (iNdEx + skippy) > l { +- return io.ErrUnexpectedEOF +- } +- iNdEx += skippy +- } +- } +- +- if iNdEx > l { +- return io.ErrUnexpectedEOF +- } +- return nil +-} +-func (m *LightBlock) Unmarshal(dAtA []byte) error { +- l := len(dAtA) +- iNdEx := 0 +- for iNdEx < l { +- preIndex := iNdEx +- var wire uint64 +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowTypes +- } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF +- } +- b := dAtA[iNdEx] +- iNdEx++ +- wire |= uint64(b&0x7F) << shift +- if b < 0x80 { +- break +- } +- } +- fieldNum := int32(wire >> 3) +- wireType := int(wire & 0x7) +- if wireType == 4 { +- return fmt.Errorf("proto: LightBlock: wiretype end group for non-group") +- } +- if fieldNum <= 0 { +- return fmt.Errorf("proto: LightBlock: illegal tag %d (wire type %d)", fieldNum, wire) +- } +- switch fieldNum { +- case 1: ++ case 3: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field SignedHeader", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field NamespaceId", wireType) + } +- var msglen int ++ var byteLen 
int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -4099,31 +6152,29 @@ func (m *LightBlock) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- msglen |= int(b&0x7F) << shift ++ byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } +- if msglen < 0 { ++ if byteLen < 0 { + return ErrInvalidLengthTypes + } +- postIndex := iNdEx + msglen ++ postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } +- if m.SignedHeader == nil { +- m.SignedHeader = &SignedHeader{} +- } +- if err := m.SignedHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { +- return err ++ m.NamespaceId = append(m.NamespaceId[:0], dAtA[iNdEx:postIndex]...) ++ if m.NamespaceId == nil { ++ m.NamespaceId = []byte{} + } + iNdEx = postIndex +- case 2: ++ case 4: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field RowProof", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { +@@ -4150,13 +6201,32 @@ func (m *LightBlock) Unmarshal(dAtA []byte) error { + if postIndex > l { + return io.ErrUnexpectedEOF + } +- if m.ValidatorSet == nil { +- m.ValidatorSet = &ValidatorSet{} ++ if m.RowProof == nil { ++ m.RowProof = &RowProof{} + } +- if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ if err := m.RowProof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex ++ case 5: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field NamespaceVersion", wireType) ++ } ++ m.NamespaceVersion = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.NamespaceVersion |= uint32(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } + default: + 
iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) +@@ -4178,7 +6248,7 @@ func (m *LightBlock) Unmarshal(dAtA []byte) error { + } + return nil + } +-func (m *BlockMeta) Unmarshal(dAtA []byte) error { ++func (m *RowProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { +@@ -4201,17 +6271,17 @@ func (m *BlockMeta) Unmarshal(dAtA []byte) error { + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { +- return fmt.Errorf("proto: BlockMeta: wiretype end group for non-group") ++ return fmt.Errorf("proto: RowProof: wiretype end group for non-group") + } + if fieldNum <= 0 { +- return fmt.Errorf("proto: BlockMeta: illegal tag %d (wire type %d)", fieldNum, wire) ++ return fmt.Errorf("proto: RowProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field RowRoots", wireType) + } +- var msglen int ++ var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -4221,30 +6291,29 @@ func (m *BlockMeta) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- msglen |= int(b&0x7F) << shift ++ byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } +- if msglen < 0 { ++ if byteLen < 0 { + return ErrInvalidLengthTypes + } +- postIndex := iNdEx + msglen ++ postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } +- if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { +- return err +- } ++ m.RowRoots = append(m.RowRoots, make([]byte, postIndex-iNdEx)) ++ copy(m.RowRoots[len(m.RowRoots)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: +- if wireType != 0 { +- return fmt.Errorf("proto: wrong wireType = %d for field BlockSize", wireType) ++ if wireType != 2 { ++ return 
fmt.Errorf("proto: wrong wireType = %d for field Proofs", wireType) + } +- m.BlockSize = 0 ++ var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -4254,16 +6323,31 @@ func (m *BlockMeta) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- m.BlockSize |= int64(b&0x7F) << shift ++ msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.Proofs = append(m.Proofs, &crypto.Proof{}) ++ if err := m.Proofs[len(m.Proofs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex + case 3: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType) + } +- var msglen int ++ var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -4273,30 +6357,31 @@ func (m *BlockMeta) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- msglen |= int(b&0x7F) << shift ++ byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } +- if msglen < 0 { ++ if byteLen < 0 { + return ErrInvalidLengthTypes + } +- postIndex := iNdEx + msglen ++ postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } +- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { +- return err ++ m.Root = append(m.Root[:0], dAtA[iNdEx:postIndex]...) 
++ if m.Root == nil { ++ m.Root = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { +- return fmt.Errorf("proto: wrong wireType = %d for field NumTxs", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field StartRow", wireType) + } +- m.NumTxs = 0 ++ m.StartRow = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -4306,7 +6391,26 @@ func (m *BlockMeta) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- m.NumTxs |= int64(b&0x7F) << shift ++ m.StartRow |= uint32(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ case 5: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field EndRow", wireType) ++ } ++ m.EndRow = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.EndRow |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } +@@ -4332,7 +6436,7 @@ func (m *BlockMeta) Unmarshal(dAtA []byte) error { + } + return nil + } +-func (m *TxProof) Unmarshal(dAtA []byte) error { ++func (m *NMTProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { +@@ -4355,17 +6459,17 @@ func (m *TxProof) Unmarshal(dAtA []byte) error { + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { +- return fmt.Errorf("proto: TxProof: wiretype end group for non-group") ++ return fmt.Errorf("proto: NMTProof: wiretype end group for non-group") + } + if fieldNum <= 0 { +- return fmt.Errorf("proto: TxProof: illegal tag %d (wire type %d)", fieldNum, wire) ++ return fmt.Errorf("proto: NMTProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: +- if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field RootHash", wireType) ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } +- var byteLen int ++ m.Start 
= 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -4375,29 +6479,33 @@ func (m *TxProof) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- byteLen |= int(b&0x7F) << shift ++ m.Start |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } +- if byteLen < 0 { +- return ErrInvalidLengthTypes +- } +- postIndex := iNdEx + byteLen +- if postIndex < 0 { +- return ErrInvalidLengthTypes +- } +- if postIndex > l { +- return io.ErrUnexpectedEOF ++ case 2: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } +- m.RootHash = append(m.RootHash[:0], dAtA[iNdEx:postIndex]...) +- if m.RootHash == nil { +- m.RootHash = []byte{} ++ m.End = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.End |= int32(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } + } +- iNdEx = postIndex +- case 2: ++ case 3: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { +@@ -4424,16 +6532,14 @@ func (m *TxProof) Unmarshal(dAtA []byte) error { + if postIndex > l { + return io.ErrUnexpectedEOF + } +- m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+- if m.Data == nil { +- m.Data = []byte{} +- } ++ m.Nodes = append(m.Nodes, make([]byte, postIndex-iNdEx)) ++ copy(m.Nodes[len(m.Nodes)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex +- case 3: ++ case 4: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field LeafHash", wireType) + } +- var msglen int ++ var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -4443,26 +6549,24 @@ func (m *TxProof) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- msglen |= int(b&0x7F) << shift ++ byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } +- if msglen < 0 { ++ if byteLen < 0 { + return ErrInvalidLengthTypes + } +- postIndex := iNdEx + msglen ++ postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } +- if m.Proof == nil { +- m.Proof = &crypto.Proof{} +- } +- if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { +- return err ++ m.LeafHash = append(m.LeafHash[:0], dAtA[iNdEx:postIndex]...) 
++ if m.LeafHash == nil { ++ m.LeafHash = []byte{} + } + iNdEx = postIndex + default: diff --git a/patches/proxy/app_conn.go.patch b/patches/proxy/app_conn.go.patch new file mode 100644 index 00000000000..22cc1040ed0 --- /dev/null +++ b/patches/proxy/app_conn.go.patch @@ -0,0 +1,33 @@ +diff --git a/proxy/app_conn.go b/proxy/app_conn.go +index 690c08df9..f4bb888b7 100644 +--- a/proxy/app_conn.go ++++ b/proxy/app_conn.go +@@ -20,6 +20,9 @@ type AppConnConsensus interface { + DeliverTxAsync(types.RequestDeliverTx) *abcicli.ReqRes + EndBlockSync(types.RequestEndBlock) (*types.ResponseEndBlock, error) + CommitSync() (*types.ResponseCommit, error) ++ ++ PrepareProposalSync(types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) ++ ProcessProposalSync(types.RequestProcessProposal) (*types.ResponseProcessProposal, error) + } + + type AppConnMempool interface { +@@ -93,6 +96,18 @@ func (app *appConnConsensus) CommitSync() (*types.ResponseCommit, error) { + return app.appConn.CommitSync() + } + ++func (app *appConnConsensus) PrepareProposalSync( ++ req types.RequestPrepareProposal, ++) (*types.ResponsePrepareProposal, error) { ++ return app.appConn.PrepareProposalSync(req) ++} ++ ++func (app *appConnConsensus) ProcessProposalSync( ++ req types.RequestProcessProposal, ++) (*types.ResponseProcessProposal, error) { ++ return app.appConn.ProcessProposalSync(req) ++} ++ + //------------------------------------------------ + // Implements AppConnMempool (subset of abcicli.Client) + diff --git a/patches/proxy/mocks/app_conn_consensus.go.patch b/patches/proxy/mocks/app_conn_consensus.go.patch new file mode 100644 index 00000000000..8127e935ab5 --- /dev/null +++ b/patches/proxy/mocks/app_conn_consensus.go.patch @@ -0,0 +1,191 @@ +diff --git a/proxy/mocks/app_conn_consensus.go b/proxy/mocks/app_conn_consensus.go +index d70cd7cc1..29c96a221 100644 +--- a/proxy/mocks/app_conn_consensus.go ++++ b/proxy/mocks/app_conn_consensus.go +@@ -18,7 +18,15 @@ type AppConnConsensus 
struct { + func (_m *AppConnConsensus) BeginBlockSync(_a0 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for BeginBlockSync") ++ } ++ + var r0 *types.ResponseBeginBlock ++ var r1 error ++ if rf, ok := ret.Get(0).(func(types.RequestBeginBlock) (*types.ResponseBeginBlock, error)); ok { ++ return rf(_a0) ++ } + if rf, ok := ret.Get(0).(func(types.RequestBeginBlock) *types.ResponseBeginBlock); ok { + r0 = rf(_a0) + } else { +@@ -27,7 +35,6 @@ func (_m *AppConnConsensus) BeginBlockSync(_a0 types.RequestBeginBlock) (*types. + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func(types.RequestBeginBlock) error); ok { + r1 = rf(_a0) + } else { +@@ -41,7 +48,15 @@ func (_m *AppConnConsensus) BeginBlockSync(_a0 types.RequestBeginBlock) (*types. + func (_m *AppConnConsensus) CommitSync() (*types.ResponseCommit, error) { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for CommitSync") ++ } ++ + var r0 *types.ResponseCommit ++ var r1 error ++ if rf, ok := ret.Get(0).(func() (*types.ResponseCommit, error)); ok { ++ return rf() ++ } + if rf, ok := ret.Get(0).(func() *types.ResponseCommit); ok { + r0 = rf() + } else { +@@ -50,7 +65,6 @@ func (_m *AppConnConsensus) CommitSync() (*types.ResponseCommit, error) { + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { +@@ -64,6 +78,10 @@ func (_m *AppConnConsensus) CommitSync() (*types.ResponseCommit, error) { + func (_m *AppConnConsensus) DeliverTxAsync(_a0 types.RequestDeliverTx) *abcicli.ReqRes { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for DeliverTxAsync") ++ } ++ + var r0 *abcicli.ReqRes + if rf, ok := ret.Get(0).(func(types.RequestDeliverTx) *abcicli.ReqRes); ok { + r0 = rf(_a0) +@@ -80,7 +98,15 @@ func (_m *AppConnConsensus) DeliverTxAsync(_a0 types.RequestDeliverTx) *abcicli. 
+ func (_m *AppConnConsensus) EndBlockSync(_a0 types.RequestEndBlock) (*types.ResponseEndBlock, error) { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for EndBlockSync") ++ } ++ + var r0 *types.ResponseEndBlock ++ var r1 error ++ if rf, ok := ret.Get(0).(func(types.RequestEndBlock) (*types.ResponseEndBlock, error)); ok { ++ return rf(_a0) ++ } + if rf, ok := ret.Get(0).(func(types.RequestEndBlock) *types.ResponseEndBlock); ok { + r0 = rf(_a0) + } else { +@@ -89,7 +115,6 @@ func (_m *AppConnConsensus) EndBlockSync(_a0 types.RequestEndBlock) (*types.Resp + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func(types.RequestEndBlock) error); ok { + r1 = rf(_a0) + } else { +@@ -103,6 +128,10 @@ func (_m *AppConnConsensus) EndBlockSync(_a0 types.RequestEndBlock) (*types.Resp + func (_m *AppConnConsensus) Error() error { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for Error") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() +@@ -117,7 +146,15 @@ func (_m *AppConnConsensus) Error() error { + func (_m *AppConnConsensus) InitChainSync(_a0 types.RequestInitChain) (*types.ResponseInitChain, error) { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for InitChainSync") ++ } ++ + var r0 *types.ResponseInitChain ++ var r1 error ++ if rf, ok := ret.Get(0).(func(types.RequestInitChain) (*types.ResponseInitChain, error)); ok { ++ return rf(_a0) ++ } + if rf, ok := ret.Get(0).(func(types.RequestInitChain) *types.ResponseInitChain); ok { + r0 = rf(_a0) + } else { +@@ -126,7 +163,6 @@ func (_m *AppConnConsensus) InitChainSync(_a0 types.RequestInitChain) (*types.Re + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func(types.RequestInitChain) error); ok { + r1 = rf(_a0) + } else { +@@ -136,18 +172,63 @@ func (_m *AppConnConsensus) InitChainSync(_a0 types.RequestInitChain) (*types.Re + return r0, r1 + } + ++// PrepareProposalSync provides a 
mock function with given fields: _a0 ++func (_m *AppConnConsensus) PrepareProposalSync(_a0 types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { ++ ret := _m.Called(_a0) ++ ++ var r0 *types.ResponsePrepareProposal ++ if rf, ok := ret.Get(0).(func(types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok { ++ r0 = rf(_a0) ++ } else { ++ if ret.Get(0) != nil { ++ r0 = ret.Get(0).(*types.ResponsePrepareProposal) ++ } ++ } ++ ++ var r1 error ++ if rf, ok := ret.Get(1).(func(types.RequestPrepareProposal) error); ok { ++ r1 = rf(_a0) ++ } else { ++ r1 = ret.Error(1) ++ } ++ ++ return r0, r1 ++} ++ ++// ProcessProposalSync provides a mock function with given fields: _a0 ++func (_m *AppConnConsensus) ProcessProposalSync(_a0 types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { ++ ret := _m.Called(_a0) ++ ++ var r0 *types.ResponseProcessProposal ++ if rf, ok := ret.Get(0).(func(types.RequestProcessProposal) *types.ResponseProcessProposal); ok { ++ r0 = rf(_a0) ++ } else { ++ if ret.Get(0) != nil { ++ r0 = ret.Get(0).(*types.ResponseProcessProposal) ++ } ++ } ++ ++ var r1 error ++ if rf, ok := ret.Get(1).(func(types.RequestProcessProposal) error); ok { ++ r1 = rf(_a0) ++ } else { ++ r1 = ret.Error(1) ++ } ++ ++ return r0, r1 ++} ++ + // SetResponseCallback provides a mock function with given fields: _a0 + func (_m *AppConnConsensus) SetResponseCallback(_a0 abcicli.Callback) { + _m.Called(_a0) + } + +-type mockConstructorTestingTNewAppConnConsensus interface { ++// NewAppConnConsensus creates a new instance of AppConnConsensus. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. ++// The first argument is typically a *testing.T value. ++func NewAppConnConsensus(t interface { + mock.TestingT + Cleanup(func()) +-} +- +-// NewAppConnConsensus creates a new instance of AppConnConsensus. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +-func NewAppConnConsensus(t mockConstructorTestingTNewAppConnConsensus) *AppConnConsensus { ++}) *AppConnConsensus { + mock := &AppConnConsensus{} + mock.Mock.Test(t) + diff --git a/patches/proxy/mocks/app_conn_mempool.go.patch b/patches/proxy/mocks/app_conn_mempool.go.patch new file mode 100644 index 00000000000..d5045fd84c5 --- /dev/null +++ b/patches/proxy/mocks/app_conn_mempool.go.patch @@ -0,0 +1,90 @@ +diff --git a/proxy/mocks/app_conn_mempool.go b/proxy/mocks/app_conn_mempool.go +index 05e23dd43..daf1d4315 100644 +--- a/proxy/mocks/app_conn_mempool.go ++++ b/proxy/mocks/app_conn_mempool.go +@@ -18,6 +18,10 @@ type AppConnMempool struct { + func (_m *AppConnMempool) CheckTxAsync(_a0 types.RequestCheckTx) *abcicli.ReqRes { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for CheckTxAsync") ++ } ++ + var r0 *abcicli.ReqRes + if rf, ok := ret.Get(0).(func(types.RequestCheckTx) *abcicli.ReqRes); ok { + r0 = rf(_a0) +@@ -34,7 +38,15 @@ func (_m *AppConnMempool) CheckTxAsync(_a0 types.RequestCheckTx) *abcicli.ReqRes + func (_m *AppConnMempool) CheckTxSync(_a0 types.RequestCheckTx) (*types.ResponseCheckTx, error) { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for CheckTxSync") ++ } ++ + var r0 *types.ResponseCheckTx ++ var r1 error ++ if rf, ok := ret.Get(0).(func(types.RequestCheckTx) (*types.ResponseCheckTx, error)); ok { ++ return rf(_a0) ++ } + if rf, ok := ret.Get(0).(func(types.RequestCheckTx) *types.ResponseCheckTx); ok { + r0 = rf(_a0) + } else { +@@ -43,7 +55,6 @@ func (_m *AppConnMempool) CheckTxSync(_a0 types.RequestCheckTx) (*types.Response + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func(types.RequestCheckTx) error); ok { + r1 = rf(_a0) + } else { +@@ -57,6 +68,10 @@ func (_m *AppConnMempool) CheckTxSync(_a0 types.RequestCheckTx) (*types.Response + func (_m 
*AppConnMempool) Error() error { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for Error") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() +@@ -71,6 +86,10 @@ func (_m *AppConnMempool) Error() error { + func (_m *AppConnMempool) FlushAsync() *abcicli.ReqRes { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for FlushAsync") ++ } ++ + var r0 *abcicli.ReqRes + if rf, ok := ret.Get(0).(func() *abcicli.ReqRes); ok { + r0 = rf() +@@ -87,6 +106,10 @@ func (_m *AppConnMempool) FlushAsync() *abcicli.ReqRes { + func (_m *AppConnMempool) FlushSync() error { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for FlushSync") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() +@@ -102,13 +125,12 @@ func (_m *AppConnMempool) SetResponseCallback(_a0 abcicli.Callback) { + _m.Called(_a0) + } + +-type mockConstructorTestingTNewAppConnMempool interface { ++// NewAppConnMempool creates a new instance of AppConnMempool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. ++// The first argument is typically a *testing.T value. ++func NewAppConnMempool(t interface { + mock.TestingT + Cleanup(func()) +-} +- +-// NewAppConnMempool creates a new instance of AppConnMempool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+-func NewAppConnMempool(t mockConstructorTestingTNewAppConnMempool) *AppConnMempool { ++}) *AppConnMempool { + mock := &AppConnMempool{} + mock.Mock.Test(t) + diff --git a/patches/proxy/mocks/app_conn_query.go.patch b/patches/proxy/mocks/app_conn_query.go.patch new file mode 100644 index 00000000000..9acda5c71aa --- /dev/null +++ b/patches/proxy/mocks/app_conn_query.go.patch @@ -0,0 +1,105 @@ +diff --git a/proxy/mocks/app_conn_query.go b/proxy/mocks/app_conn_query.go +index 544ab765e..f2b6185cf 100644 +--- a/proxy/mocks/app_conn_query.go ++++ b/proxy/mocks/app_conn_query.go +@@ -17,7 +17,15 @@ type AppConnQuery struct { + func (_m *AppConnQuery) EchoSync(_a0 string) (*types.ResponseEcho, error) { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for EchoSync") ++ } ++ + var r0 *types.ResponseEcho ++ var r1 error ++ if rf, ok := ret.Get(0).(func(string) (*types.ResponseEcho, error)); ok { ++ return rf(_a0) ++ } + if rf, ok := ret.Get(0).(func(string) *types.ResponseEcho); ok { + r0 = rf(_a0) + } else { +@@ -26,7 +34,6 @@ func (_m *AppConnQuery) EchoSync(_a0 string) (*types.ResponseEcho, error) { + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { +@@ -40,6 +47,10 @@ func (_m *AppConnQuery) EchoSync(_a0 string) (*types.ResponseEcho, error) { + func (_m *AppConnQuery) Error() error { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for Error") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() +@@ -54,7 +65,15 @@ func (_m *AppConnQuery) Error() error { + func (_m *AppConnQuery) InfoSync(_a0 types.RequestInfo) (*types.ResponseInfo, error) { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for InfoSync") ++ } ++ + var r0 *types.ResponseInfo ++ var r1 error ++ if rf, ok := ret.Get(0).(func(types.RequestInfo) (*types.ResponseInfo, error)); ok { ++ return rf(_a0) ++ } + if rf, ok := 
ret.Get(0).(func(types.RequestInfo) *types.ResponseInfo); ok { + r0 = rf(_a0) + } else { +@@ -63,7 +82,6 @@ func (_m *AppConnQuery) InfoSync(_a0 types.RequestInfo) (*types.ResponseInfo, er + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func(types.RequestInfo) error); ok { + r1 = rf(_a0) + } else { +@@ -77,7 +95,15 @@ func (_m *AppConnQuery) InfoSync(_a0 types.RequestInfo) (*types.ResponseInfo, er + func (_m *AppConnQuery) QuerySync(_a0 types.RequestQuery) (*types.ResponseQuery, error) { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for QuerySync") ++ } ++ + var r0 *types.ResponseQuery ++ var r1 error ++ if rf, ok := ret.Get(0).(func(types.RequestQuery) (*types.ResponseQuery, error)); ok { ++ return rf(_a0) ++ } + if rf, ok := ret.Get(0).(func(types.RequestQuery) *types.ResponseQuery); ok { + r0 = rf(_a0) + } else { +@@ -86,7 +112,6 @@ func (_m *AppConnQuery) QuerySync(_a0 types.RequestQuery) (*types.ResponseQuery, + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func(types.RequestQuery) error); ok { + r1 = rf(_a0) + } else { +@@ -96,13 +121,12 @@ func (_m *AppConnQuery) QuerySync(_a0 types.RequestQuery) (*types.ResponseQuery, + return r0, r1 + } + +-type mockConstructorTestingTNewAppConnQuery interface { ++// NewAppConnQuery creates a new instance of AppConnQuery. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. ++// The first argument is typically a *testing.T value. ++func NewAppConnQuery(t interface { + mock.TestingT + Cleanup(func()) +-} +- +-// NewAppConnQuery creates a new instance of AppConnQuery. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+-func NewAppConnQuery(t mockConstructorTestingTNewAppConnQuery) *AppConnQuery { ++}) *AppConnQuery { + mock := &AppConnQuery{} + mock.Mock.Test(t) + diff --git a/patches/proxy/mocks/app_conn_snapshot.go.patch b/patches/proxy/mocks/app_conn_snapshot.go.patch new file mode 100644 index 00000000000..3f0107272e7 --- /dev/null +++ b/patches/proxy/mocks/app_conn_snapshot.go.patch @@ -0,0 +1,129 @@ +diff --git a/proxy/mocks/app_conn_snapshot.go b/proxy/mocks/app_conn_snapshot.go +index e3d5cb6cd..26fc0157e 100644 +--- a/proxy/mocks/app_conn_snapshot.go ++++ b/proxy/mocks/app_conn_snapshot.go +@@ -17,7 +17,15 @@ type AppConnSnapshot struct { + func (_m *AppConnSnapshot) ApplySnapshotChunkSync(_a0 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for ApplySnapshotChunkSync") ++ } ++ + var r0 *types.ResponseApplySnapshotChunk ++ var r1 error ++ if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)); ok { ++ return rf(_a0) ++ } + if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok { + r0 = rf(_a0) + } else { +@@ -26,7 +34,6 @@ func (_m *AppConnSnapshot) ApplySnapshotChunkSync(_a0 types.RequestApplySnapshot + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func(types.RequestApplySnapshotChunk) error); ok { + r1 = rf(_a0) + } else { +@@ -40,6 +47,10 @@ func (_m *AppConnSnapshot) ApplySnapshotChunkSync(_a0 types.RequestApplySnapshot + func (_m *AppConnSnapshot) Error() error { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for Error") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() +@@ -54,7 +65,15 @@ func (_m *AppConnSnapshot) Error() error { + func (_m *AppConnSnapshot) ListSnapshotsSync(_a0 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { + ret := _m.Called(_a0) + ++ if 
len(ret) == 0 { ++ panic("no return value specified for ListSnapshotsSync") ++ } ++ + var r0 *types.ResponseListSnapshots ++ var r1 error ++ if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) (*types.ResponseListSnapshots, error)); ok { ++ return rf(_a0) ++ } + if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) *types.ResponseListSnapshots); ok { + r0 = rf(_a0) + } else { +@@ -63,7 +82,6 @@ func (_m *AppConnSnapshot) ListSnapshotsSync(_a0 types.RequestListSnapshots) (*t + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func(types.RequestListSnapshots) error); ok { + r1 = rf(_a0) + } else { +@@ -77,7 +95,15 @@ func (_m *AppConnSnapshot) ListSnapshotsSync(_a0 types.RequestListSnapshots) (*t + func (_m *AppConnSnapshot) LoadSnapshotChunkSync(_a0 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for LoadSnapshotChunkSync") ++ } ++ + var r0 *types.ResponseLoadSnapshotChunk ++ var r1 error ++ if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)); ok { ++ return rf(_a0) ++ } + if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok { + r0 = rf(_a0) + } else { +@@ -86,7 +112,6 @@ func (_m *AppConnSnapshot) LoadSnapshotChunkSync(_a0 types.RequestLoadSnapshotCh + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func(types.RequestLoadSnapshotChunk) error); ok { + r1 = rf(_a0) + } else { +@@ -100,7 +125,15 @@ func (_m *AppConnSnapshot) LoadSnapshotChunkSync(_a0 types.RequestLoadSnapshotCh + func (_m *AppConnSnapshot) OfferSnapshotSync(_a0 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for OfferSnapshotSync") ++ } ++ + var r0 *types.ResponseOfferSnapshot ++ var r1 error ++ if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) 
(*types.ResponseOfferSnapshot, error)); ok { ++ return rf(_a0) ++ } + if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok { + r0 = rf(_a0) + } else { +@@ -109,7 +142,6 @@ func (_m *AppConnSnapshot) OfferSnapshotSync(_a0 types.RequestOfferSnapshot) (*t + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func(types.RequestOfferSnapshot) error); ok { + r1 = rf(_a0) + } else { +@@ -119,13 +151,12 @@ func (_m *AppConnSnapshot) OfferSnapshotSync(_a0 types.RequestOfferSnapshot) (*t + return r0, r1 + } + +-type mockConstructorTestingTNewAppConnSnapshot interface { ++// NewAppConnSnapshot creates a new instance of AppConnSnapshot. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. ++// The first argument is typically a *testing.T value. ++func NewAppConnSnapshot(t interface { + mock.TestingT + Cleanup(func()) +-} +- +-// NewAppConnSnapshot creates a new instance of AppConnSnapshot. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+-func NewAppConnSnapshot(t mockConstructorTestingTNewAppConnSnapshot) *AppConnSnapshot { ++}) *AppConnSnapshot { + mock := &AppConnSnapshot{} + mock.Mock.Test(t) + diff --git a/patches/proxy/mocks/client_creator.go.patch b/patches/proxy/mocks/client_creator.go.patch new file mode 100644 index 00000000000..3197b10cacb --- /dev/null +++ b/patches/proxy/mocks/client_creator.go.patch @@ -0,0 +1,46 @@ +diff --git a/proxy/mocks/client_creator.go b/proxy/mocks/client_creator.go +index eced0aeff..778eab548 100644 +--- a/proxy/mocks/client_creator.go ++++ b/proxy/mocks/client_creator.go +@@ -16,7 +16,15 @@ type ClientCreator struct { + func (_m *ClientCreator) NewABCIClient() (abcicli.Client, error) { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for NewABCIClient") ++ } ++ + var r0 abcicli.Client ++ var r1 error ++ if rf, ok := ret.Get(0).(func() (abcicli.Client, error)); ok { ++ return rf() ++ } + if rf, ok := ret.Get(0).(func() abcicli.Client); ok { + r0 = rf() + } else { +@@ -25,7 +33,6 @@ func (_m *ClientCreator) NewABCIClient() (abcicli.Client, error) { + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { +@@ -35,13 +42,12 @@ func (_m *ClientCreator) NewABCIClient() (abcicli.Client, error) { + return r0, r1 + } + +-type mockConstructorTestingTNewClientCreator interface { ++// NewClientCreator creates a new instance of ClientCreator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. ++// The first argument is typically a *testing.T value. ++func NewClientCreator(t interface { + mock.TestingT + Cleanup(func()) +-} +- +-// NewClientCreator creates a new instance of ClientCreator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+-func NewClientCreator(t mockConstructorTestingTNewClientCreator) *ClientCreator { ++}) *ClientCreator { + mock := &ClientCreator{} + mock.Mock.Test(t) + diff --git a/patches/rpc/client/http/http.go.patch b/patches/rpc/client/http/http.go.patch new file mode 100644 index 00000000000..5112e8fd208 --- /dev/null +++ b/patches/rpc/client/http/http.go.patch @@ -0,0 +1,197 @@ +diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go +index dc3b0551b..7acb6a43c 100644 +--- a/rpc/client/http/http.go ++++ b/rpc/client/http/http.go +@@ -98,9 +98,11 @@ type baseRPCClient struct { + caller jsonrpcclient.Caller + } + +-var _ rpcClient = (*HTTP)(nil) +-var _ rpcClient = (*BatchHTTP)(nil) +-var _ rpcClient = (*baseRPCClient)(nil) ++var ( ++ _ rpcClient = (*HTTP)(nil) ++ _ rpcClient = (*BatchHTTP)(nil) ++ _ rpcClient = (*baseRPCClient)(nil) ++) + + //----------------------------------------------------------------------------- + // HTTP +@@ -116,12 +118,13 @@ func New(remote, wsEndpoint string) (*HTTP, error) { + return NewWithClient(remote, wsEndpoint, httpClient) + } + +-// Create timeout enabled http client ++// NewWithTimeout creates timeout enabled http client + func NewWithTimeout(remote, wsEndpoint string, timeout uint) (*HTTP, error) { + httpClient, err := jsonrpcclient.DefaultHTTPClient(remote) + if err != nil { + return nil, err + } ++ //nolint:gosec + httpClient.Timeout = time.Duration(timeout) * time.Second + return NewWithClient(remote, wsEndpoint, httpClient) + } +@@ -416,6 +419,19 @@ func (c *baseRPCClient) Block(ctx context.Context, height *int64) (*ctypes.Resul + return result, nil + } + ++func (c *baseRPCClient) SignedBlock(ctx context.Context, height *int64) (*ctypes.ResultSignedBlock, error) { ++ result := new(ctypes.ResultSignedBlock) ++ params := make(map[string]interface{}) ++ if height != nil { ++ params["height"] = height ++ } ++ _, err := c.caller.Call(ctx, "signed_block", params, result) ++ if err != nil { ++ return nil, err ++ } ++ return result, 
nil ++} ++ + func (c *baseRPCClient) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) { + result := new(ctypes.ResultBlock) + params := map[string]interface{}{ +@@ -444,6 +460,31 @@ func (c *baseRPCClient) BlockResults( + return result, nil + } + ++func (c *baseRPCClient) Header(ctx context.Context, height *int64) (*ctypes.ResultHeader, error) { ++ result := new(ctypes.ResultHeader) ++ params := make(map[string]interface{}) ++ if height != nil { ++ params["height"] = height ++ } ++ _, err := c.caller.Call(ctx, "header", params, result) ++ if err != nil { ++ return nil, err ++ } ++ return result, nil ++} ++ ++func (c *baseRPCClient) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) { ++ result := new(ctypes.ResultHeader) ++ params := map[string]interface{}{ ++ "hash": hash, ++ } ++ _, err := c.caller.Call(ctx, "header_by_hash", params, result) ++ if err != nil { ++ return nil, err ++ } ++ return result, nil ++} ++ + func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { + result := new(ctypes.ResultCommit) + params := make(map[string]interface{}) +@@ -457,6 +498,63 @@ func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*ctypes.Resu + return result, nil + } + ++func (c *baseRPCClient) DataCommitment( ++ ctx context.Context, ++ start uint64, ++ end uint64, ++) (*ctypes.ResultDataCommitment, error) { ++ result := new(ctypes.ResultDataCommitment) ++ params := map[string]interface{}{ ++ "start": start, ++ "end": end, ++ } ++ ++ _, err := c.caller.Call(ctx, "data_commitment", params, result) ++ if err != nil { ++ return nil, err ++ } ++ ++ return result, nil ++} ++ ++func (c *baseRPCClient) TxStatus( ++ ctx context.Context, ++ hash []byte, ++) (*ctypes.ResultTxStatus, error) { ++ result := new(ctypes.ResultTxStatus) ++ params := map[string]interface{}{ ++ "hash": hash, ++ } ++ ++ _, err := c.caller.Call(ctx, "tx_status", params, result) ++ if err != nil 
{ ++ return nil, err ++ } ++ ++ return result, nil ++} ++ ++func (c *baseRPCClient) DataRootInclusionProof( ++ ctx context.Context, ++ height uint64, ++ start uint64, ++ end uint64, ++) (*ctypes.ResultDataRootInclusionProof, error) { ++ result := new(ctypes.ResultDataRootInclusionProof) ++ params := map[string]interface{}{ ++ "height": height, ++ "start": start, ++ "end": end, ++ } ++ ++ _, err := c.caller.Call(ctx, "data_root_inclusion_proof", params, result) ++ if err != nil { ++ return nil, err ++ } ++ ++ return result, nil ++} ++ + func (c *baseRPCClient) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { + result := new(ctypes.ResultTx) + params := map[string]interface{}{ +@@ -470,6 +568,46 @@ func (c *baseRPCClient) Tx(ctx context.Context, hash []byte, prove bool) (*ctype + return result, nil + } + ++// ProveShares ++// Deprecated: Use ProveSharesV2 instead. ++func (c *baseRPCClient) ProveShares( ++ ctx context.Context, ++ height uint64, ++ startShare uint64, ++ endShare uint64, ++) (types.ShareProof, error) { ++ result := new(types.ShareProof) ++ params := map[string]interface{}{ ++ "height": height, ++ "startShare": startShare, ++ "endShare": endShare, ++ } ++ _, err := c.caller.Call(ctx, "prove_shares", params, result) ++ if err != nil { ++ return types.ShareProof{}, err ++ } ++ return *result, nil ++} ++ ++func (c *baseRPCClient) ProveSharesV2( ++ ctx context.Context, ++ height uint64, ++ startShare uint64, ++ endShare uint64, ++) (*ctypes.ResultShareProof, error) { ++ result := new(ctypes.ResultShareProof) ++ params := map[string]interface{}{ ++ "height": height, ++ "startShare": startShare, ++ "endShare": endShare, ++ } ++ _, err := c.caller.Call(ctx, "prove_shares_v2", params, result) ++ if err != nil { ++ return nil, err ++ } ++ return result, nil ++} ++ + func (c *baseRPCClient) TxSearch( + ctx context.Context, + query string, diff --git a/patches/rpc/client/interface.go.patch b/patches/rpc/client/interface.go.patch new file 
mode 100644 index 00000000000..ef944aed1c0 --- /dev/null +++ b/patches/rpc/client/interface.go.patch @@ -0,0 +1,53 @@ +diff --git a/rpc/client/interface.go b/rpc/client/interface.go +index 9c049e650..5fe6f7527 100644 +--- a/rpc/client/interface.go ++++ b/rpc/client/interface.go +@@ -7,7 +7,7 @@ to a CometBFT node, as well as higher-level functionality. + The main implementation for production code is client.HTTP, which + connects via http to the jsonrpc interface of the CometBFT node. + +-For connecting to a node running in the same process (eg. when ++For connecting to a node running in the same process (e.g., when + compiling the abci app in the same process), you can use the client.Local + implementation. + +@@ -65,12 +65,29 @@ type ABCIClient interface { + // and prove anything about the chain. + type SignClient interface { + Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) ++ SignedBlock(ctx context.Context, height *int64) (*ctypes.ResultSignedBlock, error) + BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) + BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) ++ Header(ctx context.Context, height *int64) (*ctypes.ResultHeader, error) ++ HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) + Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) ++ ++ DataCommitment(ctx context.Context, start, end uint64) (*ctypes.ResultDataCommitment, error) ++ DataRootInclusionProof( ++ ctx context.Context, ++ height uint64, ++ start, ++ end uint64, ++ ) (*ctypes.ResultDataRootInclusionProof, error) ++ + Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) + Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) + ++ // ProveShares ++ // Deprecated: Use ProveSharesV2 instead. 
++ ProveShares(_ context.Context, height uint64, startShare uint64, endShare uint64) (types.ShareProof, error) ++ ProveSharesV2(_ context.Context, height uint64, startShare uint64, endShare uint64) (*ctypes.ResultShareProof, error) ++ + // TxSearch defines a method to search for a paginated set of transactions by + // DeliverTx event search criteria. + TxSearch( +@@ -89,6 +106,9 @@ type SignClient interface { + page, perPage *int, + orderBy string, + ) (*ctypes.ResultBlockSearch, error) ++ ++ // TxStatus returns the transaction status for a given transaction hash. ++ TxStatus(ctx context.Context, hash []byte) (*ctypes.ResultTxStatus, error) + } + + // HistoryClient provides access to data from genesis to now in large chunks. diff --git a/patches/rpc/client/local/local.go.patch b/patches/rpc/client/local/local.go.patch new file mode 100644 index 00000000000..18e14b344f2 --- /dev/null +++ b/patches/rpc/client/local/local.go.patch @@ -0,0 +1,109 @@ +diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go +index 126a1267d..783f209d4 100644 +--- a/rpc/client/local/local.go ++++ b/rpc/client/local/local.go +@@ -43,7 +43,7 @@ type Local struct { + ctx *rpctypes.Context + } + +-// NewLocal configures a client that calls the Node directly. ++// New configures a client that calls the Node directly. + // + // Note that given how rpc/core works with package singletons, that + // you can only have one node per process. 
So make sure test cases +@@ -161,6 +161,10 @@ func (c *Local) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, + return core.Block(c.ctx, height) + } + ++func (c *Local) SignedBlock(ctx context.Context, height *int64) (*ctypes.ResultSignedBlock, error) { ++ return core.SignedBlock(c.ctx, height) ++} ++ + func (c *Local) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) { + return core.BlockByHash(c.ctx, hash) + } +@@ -169,10 +173,36 @@ func (c *Local) BlockResults(ctx context.Context, height *int64) (*ctypes.Result + return core.BlockResults(c.ctx, height) + } + ++func (c *Local) Header(ctx context.Context, height *int64) (*ctypes.ResultHeader, error) { ++ return core.Header(c.ctx, height) ++} ++ ++func (c *Local) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) { ++ return core.HeaderByHash(c.ctx, hash) ++} ++ + func (c *Local) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { + return core.Commit(c.ctx, height) + } + ++func (c *Local) DataCommitment( ++ _ context.Context, ++ start uint64, ++ end uint64, ++) (*ctypes.ResultDataCommitment, error) { ++ return core.DataCommitment(c.ctx, start, end) ++} ++ ++func (c *Local) DataRootInclusionProof( ++ _ context.Context, ++ height uint64, ++ start uint64, ++ end uint64, ++) (*ctypes.ResultDataRootInclusionProof, error) { ++ //nolint:gosec ++ return core.DataRootInclusionProof(c.ctx, int64(height), start, end) ++} ++ + func (c *Local) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) { + return core.Validators(c.ctx, height, page, perPage) + } +@@ -181,6 +211,28 @@ func (c *Local) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.Result + return core.Tx(c.ctx, hash, prove) + } + ++// ProveShares ++// Deprecated: Use ProveSharesV2 instead. 
++func (c *Local) ProveShares( ++ ctx context.Context, ++ height uint64, ++ startShare uint64, ++ endShare uint64, ++) (types.ShareProof, error) { ++ //nolint:gosec ++ return core.ProveShares(c.ctx, int64(height), startShare, endShare) ++} ++ ++func (c *Local) ProveSharesV2( ++ ctx context.Context, ++ height uint64, ++ startShare uint64, ++ endShare uint64, ++) (*ctypes.ResultShareProof, error) { ++ //nolint:gosec ++ return core.ProveSharesV2(c.ctx, int64(height), startShare, endShare) ++} ++ + func (c *Local) TxSearch( + _ context.Context, + query string, +@@ -201,6 +253,10 @@ func (c *Local) BlockSearch( + return core.BlockSearch(c.ctx, query, page, perPage, orderBy) + } + ++func (c *Local) TxStatus(ctx context.Context, hash []byte) (*ctypes.ResultTxStatus, error) { ++ return core.TxStatus(c.ctx, hash) ++} ++ + func (c *Local) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { + return core.BroadcastEvidence(c.ctx, ev) + } +@@ -284,6 +340,7 @@ func (c *Local) resubscribe(subscriber string, q cmtpubsub.Query) types.Subscrip + } + + attempts++ ++ //nolint:gosec + time.Sleep((10 << uint(attempts)) * time.Millisecond) // 10ms -> 20ms -> 40ms + } + } diff --git a/patches/rpc/client/main_test.go.patch b/patches/rpc/client/main_test.go.patch new file mode 100644 index 00000000000..3c0bd9296c5 --- /dev/null +++ b/patches/rpc/client/main_test.go.patch @@ -0,0 +1,13 @@ +diff --git a/rpc/client/main_test.go b/rpc/client/main_test.go +index b5bfbd718..3f72e92b2 100644 +--- a/rpc/client/main_test.go ++++ b/rpc/client/main_test.go +@@ -20,7 +20,7 @@ func TestMain(m *testing.M) { + + app := kvstore.NewPersistentKVStoreApplication(dir) + // If testing block event generation +- // app.SetGenBlockEvents() // needs to be called here (see TestBlockSearch in rpc_test.go) ++ // app.SetGenBlockEvents() needs to be called here + node = rpctest.StartTendermint(app) + + code := m.Run() diff --git a/patches/rpc/client/mock/client.go.patch 
b/patches/rpc/client/mock/client.go.patch new file mode 100644 index 00000000000..ef037aa6fbf --- /dev/null +++ b/patches/rpc/client/mock/client.go.patch @@ -0,0 +1,38 @@ +diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go +index ec31e0d9f..7566be890 100644 +--- a/rpc/client/mock/client.go ++++ b/rpc/client/mock/client.go +@@ -54,7 +54,7 @@ type Call struct { + Error error + } + +-// GetResponse will generate the apporiate response for us, when ++// GetResponse will generate the appropriate response for us, when + // using the Call struct to configure a Mock handler. + // + // When configuring a response, if only one of Response or Error is +@@ -169,6 +169,24 @@ func (c Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit + return core.Commit(&rpctypes.Context{}, height) + } + ++func (c Client) DataCommitment( ++ ctx context.Context, ++ start uint64, ++ end uint64, ++) (*ctypes.ResultDataCommitment, error) { ++ return core.DataCommitment(&rpctypes.Context{}, start, end) ++} ++ ++func (c Client) DataRootInclusionProof( ++ ctx context.Context, ++ height uint64, ++ start uint64, ++ end uint64, ++) (*ctypes.ResultDataRootInclusionProof, error) { ++ //nolint:gosec ++ return core.DataRootInclusionProof(&rpctypes.Context{}, int64(height), start, end) ++} ++ + func (c Client) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) { + return core.Validators(&rpctypes.Context{}, height, page, perPage) + } diff --git a/patches/rpc/client/mocks/client.go.patch b/patches/rpc/client/mocks/client.go.patch new file mode 100644 index 00000000000..18483fd45fa --- /dev/null +++ b/patches/rpc/client/mocks/client.go.patch @@ -0,0 +1,57 @@ +diff --git a/rpc/client/mocks/client.go b/rpc/client/mocks/client.go +index f8eb7a45c..a9709d94d 100644 +--- a/rpc/client/mocks/client.go ++++ b/rpc/client/mocks/client.go +@@ -459,6 +459,52 @@ func (_m *Client) GenesisChunked(_a0 context.Context, _a1 uint) 
(*coretypes.Resu + return r0, r1 + } + ++// Header provides a mock function with given fields: ctx, height ++func (_m *Client) Header(ctx context.Context, height *int64) (*coretypes.ResultHeader, error) { ++ ret := _m.Called(ctx, height) ++ ++ var r0 *coretypes.ResultHeader ++ if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultHeader); ok { ++ r0 = rf(ctx, height) ++ } else { ++ if ret.Get(0) != nil { ++ r0 = ret.Get(0).(*coretypes.ResultHeader) ++ } ++ } ++ ++ var r1 error ++ if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok { ++ r1 = rf(ctx, height) ++ } else { ++ r1 = ret.Error(1) ++ } ++ ++ return r0, r1 ++} ++ ++// HeaderByHash provides a mock function with given fields: ctx, hash ++func (_m *Client) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultHeader, error) { ++ ret := _m.Called(ctx, hash) ++ ++ var r0 *coretypes.ResultHeader ++ if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes) *coretypes.ResultHeader); ok { ++ r0 = rf(ctx, hash) ++ } else { ++ if ret.Get(0) != nil { ++ r0 = ret.Get(0).(*coretypes.ResultHeader) ++ } ++ } ++ ++ var r1 error ++ if rf, ok := ret.Get(1).(func(context.Context, bytes.HexBytes) error); ok { ++ r1 = rf(ctx, hash) ++ } else { ++ r1 = ret.Error(1) ++ } ++ ++ return r0, r1 ++} ++ + // Health provides a mock function with given fields: _a0 + func (_m *Client) Health(_a0 context.Context) (*coretypes.ResultHealth, error) { + ret := _m.Called(_a0) diff --git a/patches/rpc/client/rpc_test.go.patch b/patches/rpc/client/rpc_test.go.patch new file mode 100644 index 00000000000..bfd6d04c10b --- /dev/null +++ b/patches/rpc/client/rpc_test.go.patch @@ -0,0 +1,136 @@ +diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go +index 0b7d93d69..12efe7ed5 100644 +--- a/rpc/client/rpc_test.go ++++ b/rpc/client/rpc_test.go +@@ -285,6 +285,15 @@ func TestAppCalls(t *testing.T) { + require.NoError(err) + require.Equal(block, blockByHash) + ++ // check that the header matches 
the block hash ++ header, err := c.Header(context.Background(), &apph) ++ require.NoError(err) ++ require.Equal(block.Block.Header, *header.Header) ++ ++ headerByHash, err := c.HeaderByHash(context.Background(), block.BlockID.Hash) ++ require.NoError(err) ++ require.Equal(header, headerByHash) ++ + // now check the results + blockResults, err := c.BlockResults(context.Background(), &txh) + require.Nil(err, "%d: %+v", i, err) +@@ -482,12 +491,6 @@ func TestTx(t *testing.T) { + assert.Zero(t, ptx.Index) + assert.True(t, ptx.TxResult.IsOK()) + assert.EqualValues(t, txHash, ptx.Hash) +- +- // time to verify the proof +- proof := ptx.Proof +- if tc.prove && assert.EqualValues(t, tx, proof.Data) { +- assert.NoError(t, proof.Proof.Verify(proof.RootHash, txHash)) +- } + } + } + } +@@ -532,6 +535,59 @@ func TestBlockSearch(t *testing.T) { + require.Equal(t, blockCount, 0) + + } ++ ++func TestTxStatus(t *testing.T) { ++ c := getHTTPClient() ++ require := require.New(t) ++ mempool := node.Mempool() ++ ++ // Create a new transaction ++ _, _, tx := MakeTxKV() ++ ++ // Get the initial size of the mempool ++ initMempoolSize := mempool.Size() ++ ++ // Add the transaction to the mempool ++ err := mempool.CheckTx(tx, nil, mempl.TxInfo{}) ++ require.NoError(err) ++ ++ // Check if the size of the mempool has increased ++ require.Equal(initMempoolSize+1, mempool.Size()) ++ ++ // Get the tx status from the mempool ++ result, err := c.TxStatus(context.Background(), types.Tx(tx).Hash()) ++ require.NoError(err) ++ require.EqualValues(0, result.Height) ++ require.EqualValues(0, result.Index) ++ require.Equal("PENDING", result.Status) ++ ++ // Flush the mempool ++ mempool.Flush() ++ require.Equal(0, mempool.Size()) ++ ++ // Get tx status after flushing it from the mempool ++ result, err = c.TxStatus(context.Background(), types.Tx(tx).Hash()) ++ require.NoError(err) ++ require.EqualValues(0, result.Height) ++ require.EqualValues(0, result.Index) ++ require.Equal("UNKNOWN", result.Status) ++ 
++ // Broadcast the tx again ++ bres, err := c.BroadcastTxCommit(context.Background(), tx) ++ require.NoError(err) ++ require.True(bres.CheckTx.IsOK()) ++ require.True(bres.DeliverTx.IsOK()) ++ ++ // Get the tx status ++ result, err = c.TxStatus(context.Background(), types.Tx(tx).Hash()) ++ require.NoError(err) ++ require.EqualValues(bres.Height, result.Height) ++ require.EqualValues(0, result.Index) ++ require.Equal("COMMITTED", result.Status) ++ require.Equal(abci.CodeTypeOK, result.ExecutionCode) ++ require.Equal("", result.Error) ++} ++ + func TestTxSearch(t *testing.T) { + c := getHTTPClient() + +@@ -567,11 +623,6 @@ func TestTxSearch(t *testing.T) { + assert.True(t, ptx.TxResult.IsOK()) + assert.EqualValues(t, find.Hash, ptx.Hash) + +- // time to verify the proof +- if assert.EqualValues(t, find.Tx, ptx.Proof.Data) { +- assert.NoError(t, ptx.Proof.Proof.Verify(ptx.Proof.RootHash, find.Hash)) +- } +- + // query by height + result, err = c.TxSearch(context.Background(), fmt.Sprintf("tx.height=%d", find.Height), true, nil, nil, "asc") + require.Nil(t, err) +@@ -656,6 +707,30 @@ func TestTxSearch(t *testing.T) { + } + } + ++func TestDataCommitment(t *testing.T) { ++ c := getHTTPClient() ++ ++ // first we broadcast a few tx ++ expectedHeight := int64(3) ++ var bres *ctypes.ResultBroadcastTxCommit ++ var err error ++ for i := int64(0); i < expectedHeight; i++ { ++ _, _, tx := MakeTxKV() ++ bres, err = c.BroadcastTxCommit(context.Background(), tx) ++ require.Nil(t, err, "%+v when submitting tx %d", err, i) ++ } ++ ++ // check if height >= 3 ++ actualHeight := bres.Height ++ require.LessOrEqual(t, expectedHeight, actualHeight, "couldn't create enough blocks for testing the commitment.") ++ ++ // check if data commitment is not nil. ++ // Checking if the commitment is correct is done in `core/blocks_test.go`. 
++ dataCommitment, err := c.DataCommitment(ctx, 1, uint64(expectedHeight)) ++ require.NotNil(t, dataCommitment, "data commitment shouldn't be nul.") ++ require.Nil(t, err, "%+v when creating data commitment.", err) ++} ++ + func TestBatchedJSONRPCCalls(t *testing.T) { + c := getHTTPClient() + testBatchedJSONRPCCalls(t, c) diff --git a/patches/rpc/core/abci.go.patch b/patches/rpc/core/abci.go.patch new file mode 100644 index 00000000000..228a3b636eb --- /dev/null +++ b/patches/rpc/core/abci.go.patch @@ -0,0 +1,22 @@ +diff --git a/rpc/core/abci.go b/rpc/core/abci.go +index ec0e67234..50ef3444c 100644 +--- a/rpc/core/abci.go ++++ b/rpc/core/abci.go +@@ -17,7 +17,7 @@ func ABCIQuery( + height int64, + prove bool, + ) (*ctypes.ResultABCIQuery, error) { +- resQuery, err := env.ProxyAppQuery.QuerySync(abci.RequestQuery{ ++ resQuery, err := GetEnvironment().ProxyAppQuery.QuerySync(abci.RequestQuery{ + Path: path, + Data: data, + Height: height, +@@ -33,7 +33,7 @@ func ABCIQuery( + // ABCIInfo gets some info about the application. 
+ // More: https://docs.cometbft.com/v0.34/rpc/#/ABCI/abci_info + func ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { +- resInfo, err := env.ProxyAppQuery.InfoSync(proxy.RequestInfo) ++ resInfo, err := GetEnvironment().ProxyAppQuery.InfoSync(proxy.RequestInfo) + if err != nil { + return nil, err + } diff --git a/patches/rpc/core/blocks.go.patch b/patches/rpc/core/blocks.go.patch new file mode 100644 index 00000000000..e6534b401f3 --- /dev/null +++ b/patches/rpc/core/blocks.go.patch @@ -0,0 +1,444 @@ +diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go +index 05b5572b6..cef2d9330 100644 +--- a/rpc/core/blocks.go ++++ b/rpc/core/blocks.go +@@ -1,10 +1,14 @@ + package core + + import ( ++ "encoding/hex" + "errors" + "fmt" + "sort" ++ "strconv" + ++ "github.com/tendermint/tendermint/crypto/merkle" ++ "github.com/tendermint/tendermint/libs/bytes" + cmtmath "github.com/tendermint/tendermint/libs/math" + cmtquery "github.com/tendermint/tendermint/libs/pubsub/query" + ctypes "github.com/tendermint/tendermint/rpc/core/types" +@@ -27,6 +31,7 @@ func BlockchainInfo(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes. + // maximum 20 block metas + const limit int64 = 20 + var err error ++ env := GetEnvironment() + minHeight, maxHeight, err = filterMinMax( + env.BlockStore.Base(), + env.BlockStore.Height(), +@@ -82,26 +87,88 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { + return min, max, nil + } + ++// Header gets block header at a given height. ++// If no height is provided, it will fetch the latest header. 
++// More: https://docs.tendermint.com/master/rpc/#/Info/header ++func Header(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultHeader, error) { ++ height, err := getHeight(GetEnvironment().BlockStore.Height(), heightPtr) ++ if err != nil { ++ return nil, err ++ } ++ ++ blockMeta := GetEnvironment().BlockStore.LoadBlockMeta(height) ++ if blockMeta == nil { ++ return &ctypes.ResultHeader{}, nil ++ } ++ ++ return &ctypes.ResultHeader{Header: &blockMeta.Header}, nil ++} ++ ++// HeaderByHash gets header by hash. ++// More: https://docs.tendermint.com/master/rpc/#/Info/header_by_hash ++func HeaderByHash(ctx *rpctypes.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) { ++ // N.B. The hash parameter is HexBytes so that the reflective parameter ++ // decoding logic in the HTTP service will correctly translate from JSON. ++ // See https://github.com/tendermint/tendermint/issues/6802 for context. ++ ++ blockMeta := GetEnvironment().BlockStore.LoadBlockMetaByHash(hash) ++ if blockMeta == nil { ++ return &ctypes.ResultHeader{}, nil ++ } ++ ++ return &ctypes.ResultHeader{Header: &blockMeta.Header}, nil ++} ++ + // Block gets block at a given height. + // If no height is provided, it will fetch the latest block. 
+ // More: https://docs.cometbft.com/v0.34/rpc/#/Info/block + func Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) { +- height, err := getHeight(env.BlockStore.Height(), heightPtr) ++ height, err := getHeight(GetEnvironment().BlockStore.Height(), heightPtr) + if err != nil { + return nil, err + } + +- block := env.BlockStore.LoadBlock(height) +- blockMeta := env.BlockStore.LoadBlockMeta(height) ++ block := GetEnvironment().BlockStore.LoadBlock(height) ++ blockMeta := GetEnvironment().BlockStore.LoadBlockMeta(height) + if blockMeta == nil { + return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: block}, nil + } + return &ctypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil + } + ++// SignedBlock fetches the set of transactions at a specified height and all the relevant ++// data to verify the transactions (i.e. using light client verification). ++func SignedBlock(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultSignedBlock, error) { ++ height, err := getHeight(GetEnvironment().BlockStore.Height(), heightPtr) ++ if err != nil { ++ return nil, err ++ } ++ ++ block := GetEnvironment().BlockStore.LoadBlock(height) ++ if block == nil { ++ return nil, errors.New("block not found") ++ } ++ seenCommit := GetEnvironment().BlockStore.LoadSeenCommit(height) ++ if seenCommit == nil { ++ return nil, errors.New("seen commit not found") ++ } ++ validatorSet, err := GetEnvironment().StateStore.LoadValidators(height) ++ if validatorSet == nil || err != nil { ++ return nil, err ++ } ++ ++ return &ctypes.ResultSignedBlock{ ++ Header: block.Header, ++ Commit: *seenCommit, ++ ValidatorSet: *validatorSet, ++ Data: block.Data, ++ }, nil ++} ++ + // BlockByHash gets block by hash. 
+ // More: https://docs.cometbft.com/v0.34/rpc/#/Info/block_by_hash + func BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) { ++ env := GetEnvironment() + block := env.BlockStore.LoadBlockByHash(hash) + if block == nil { + return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil +@@ -115,6 +182,7 @@ func BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error + // If no height is provided, it will fetch the commit for the latest block. + // More: https://docs.cometbft.com/v0.34/rpc/#/Info/commit + func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) { ++ env := GetEnvironment() + height, err := getHeight(env.BlockStore.Height(), heightPtr) + if err != nil { + return nil, err +@@ -138,6 +206,221 @@ func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, erro + return ctypes.NewResultCommit(&header, commit, true), nil + } + ++// DataCommitment collects the data roots over a provided ordered range of blocks, ++// and then creates a new Merkle root of those data roots. The range is end exclusive. ++func DataCommitment(ctx *rpctypes.Context, start, end uint64) (*ctypes.ResultDataCommitment, error) { ++ err := validateDataCommitmentRange(start, end) ++ if err != nil { ++ return nil, err ++ } ++ tuples, err := fetchDataRootTuples(start, end) ++ if err != nil { ++ return nil, err ++ } ++ root, err := hashDataRootTuples(tuples) ++ if err != nil { ++ return nil, err ++ } ++ // Create data commitment ++ return &ctypes.ResultDataCommitment{DataCommitment: root}, nil ++} ++ ++// DataRootInclusionProof creates an inclusion proof for the data root of block ++// height `height` in the set of blocks defined by `start` and `end`. The range ++// is end exclusive. 
++func DataRootInclusionProof( ++ ctx *rpctypes.Context, ++ height int64, ++ start, ++ end uint64, ++) (*ctypes.ResultDataRootInclusionProof, error) { ++ //nolint:gosec ++ err := validateDataRootInclusionProofRequest(uint64(height), start, end) ++ if err != nil { ++ return nil, err ++ } ++ tuples, err := fetchDataRootTuples(start, end) ++ if err != nil { ++ return nil, err ++ } ++ proof, err := proveDataRootTuples(tuples, height) ++ if err != nil { ++ return nil, err ++ } ++ return &ctypes.ResultDataRootInclusionProof{Proof: *proof}, nil ++} ++ ++// padBytes Pad bytes to given length ++func padBytes(byt []byte, length int) ([]byte, error) { ++ l := len(byt) ++ if l > length { ++ return nil, fmt.Errorf( ++ "cannot pad bytes because length of bytes array: %d is greater than given length: %d", ++ l, ++ length, ++ ) ++ } ++ if l == length { ++ return byt, nil ++ } ++ tmp := make([]byte, length) ++ copy(tmp[length-l:], byt) ++ return tmp, nil ++} ++ ++// To32PaddedHexBytes takes a number and returns its hex representation padded to 32 bytes. ++// Used to mimic the result of `abi.encode(number)` in Ethereum. ++func To32PaddedHexBytes(number uint64) ([]byte, error) { ++ hexRepresentation := strconv.FormatUint(number, 16) ++ // Make sure hex representation has even length. ++ // The `strconv.FormatUint` can return odd length hex encodings. ++ // For example, `strconv.FormatUint(10, 16)` returns `a`. ++ // Thus, we need to pad it. ++ if len(hexRepresentation)%2 == 1 { ++ hexRepresentation = "0" + hexRepresentation ++ } ++ hexBytes, hexErr := hex.DecodeString(hexRepresentation) ++ if hexErr != nil { ++ return nil, hexErr ++ } ++ paddedBytes, padErr := padBytes(hexBytes, 32) ++ if padErr != nil { ++ return nil, padErr ++ } ++ return paddedBytes, nil ++} ++ ++// DataRootTuple contains the data that will be used to create the QGB commitments. ++// The commitments will be signed by orchestrators and submitted to an EVM chain via a relayer. 
++// For more information: https://github.com/celestiaorg/quantum-gravity-bridge/blob/master/src/DataRootTuple.sol ++type DataRootTuple struct { ++ height uint64 ++ dataRoot [32]byte ++} ++ ++// EncodeDataRootTuple takes a height and a data root, and returns the equivalent of ++// `abi.encode(...)` in Ethereum. ++// The encoded type is a DataRootTuple, which has the following ABI: ++// ++// { ++// "components":[ ++// { ++// "internalType":"uint256", ++// "name":"height", ++// "type":"uint256" ++// }, ++// { ++// "internalType":"bytes32", ++// "name":"dataRoot", ++// "type":"bytes32" ++// }, ++// { ++// "internalType":"structDataRootTuple", ++// "name":"_tuple", ++// "type":"tuple" ++// } ++// ] ++// } ++// ++// padding the hex representation of the height padded to 32 bytes concatenated to the data root. ++// For more information, refer to: ++// https://github.com/celestiaorg/quantum-gravity-bridge/blob/master/src/DataRootTuple.sol ++func EncodeDataRootTuple(height uint64, dataRoot [32]byte) ([]byte, error) { ++ paddedHeight, err := To32PaddedHexBytes(height) ++ if err != nil { ++ return nil, err ++ } ++ return append(paddedHeight, dataRoot[:]...), nil ++} ++ ++// dataCommitmentBlocksLimit The maximum number of blocks to be used to create a data commitment. ++// It's a local parameter to protect the API from creating unnecessarily large commitments. ++const dataCommitmentBlocksLimit = 10_000 // ~33 hours of blocks assuming 12-second blocks. ++ ++// validateDataCommitmentRange runs basic checks on the asc sorted list of ++// heights that will be used subsequently in generating data commitments over ++// the defined set of heights. 
++func validateDataCommitmentRange(start uint64, end uint64) error { ++ if start == 0 { ++ return fmt.Errorf("the first block is 0") ++ } ++ env := GetEnvironment() ++ heightsRange := end - start ++ if heightsRange > uint64(dataCommitmentBlocksLimit) { ++ return fmt.Errorf("the query exceeds the limit of allowed blocks %d", dataCommitmentBlocksLimit) ++ } ++ if heightsRange == 0 { ++ return fmt.Errorf("cannot create the data commitments for an empty set of blocks") ++ } ++ if start >= end { ++ return fmt.Errorf("last block is smaller than first block") ++ } ++ // the data commitment range is end exclusive ++ //nolint:gosec ++ if end > uint64(env.BlockStore.Height())+1 { ++ return fmt.Errorf( ++ "end block %d is higher than current chain height %d", ++ end, ++ env.BlockStore.Height(), ++ ) ++ } ++ return nil ++} ++ ++// hashDataRootTuples hashes a list of blocks data root tuples, i.e. height, data root and square size, ++// then returns their merkle root. ++func hashDataRootTuples(tuples []DataRootTuple) ([]byte, error) { ++ dataRootEncodedTuples := make([][]byte, 0, len(tuples)) ++ for _, tuple := range tuples { ++ encodedTuple, err := EncodeDataRootTuple( ++ tuple.height, ++ tuple.dataRoot, ++ ) ++ if err != nil { ++ return nil, err ++ } ++ dataRootEncodedTuples = append(dataRootEncodedTuples, encodedTuple) ++ } ++ root := merkle.HashFromByteSlices(dataRootEncodedTuples) ++ return root, nil ++} ++ ++// validateDataRootInclusionProofRequest validates the request to generate a data root ++// inclusion proof. ++func validateDataRootInclusionProofRequest(height uint64, start uint64, end uint64) error { ++ err := validateDataCommitmentRange(start, end) ++ if err != nil { ++ return err ++ } ++ if height < start || height >= end { ++ return fmt.Errorf( ++ "height %d should be in the end exclusive interval first_block %d last_block %d", ++ height, ++ start, ++ end, ++ ) ++ } ++ return nil ++} ++ ++// proveDataRootTuples returns the merkle inclusion proof for a height. 
++func proveDataRootTuples(tuples []DataRootTuple, height int64) (*merkle.Proof, error) { ++ dataRootEncodedTuples := make([][]byte, 0, len(tuples)) ++ for _, tuple := range tuples { ++ encodedTuple, err := EncodeDataRootTuple( ++ tuple.height, ++ tuple.dataRoot, ++ ) ++ if err != nil { ++ return nil, err ++ } ++ dataRootEncodedTuples = append(dataRootEncodedTuples, encodedTuple) ++ } ++ _, proofs := merkle.ProofsFromByteSlices(dataRootEncodedTuples) ++ //nolint:gosec ++ return proofs[height-int64(tuples[0].height)], nil ++} ++ + // BlockResults gets ABCIResults at a given height. + // If no height is provided, it will fetch results for the latest block. + // When DiscardABCIResponses is enabled, an error will be returned. +@@ -147,6 +430,7 @@ func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, erro + // getBlock(h).Txs[5] + // More: https://docs.cometbft.com/v0.34/rpc/#/Info/block_results + func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) { ++ env := GetEnvironment() + height, err := getHeight(env.BlockStore.Height(), heightPtr) + if err != nil { + return nil, err +@@ -192,7 +476,7 @@ func BlockSearch( + ) (*ctypes.ResultBlockSearch, error) { + + // skip if block indexing is disabled +- if _, ok := env.BlockIndexer.(*blockidxnull.BlockerIndexer); ok { ++ if _, ok := GetEnvironment().BlockIndexer.(*blockidxnull.BlockerIndexer); ok { + return nil, errors.New("block indexing is disabled") + } + q, err := cmtquery.New(query) +@@ -200,21 +484,15 @@ func BlockSearch( + return nil, err + } + +- results, err := env.BlockIndexer.Search(ctx.Context(), q) ++ results, err := GetEnvironment().BlockIndexer.Search(ctx.Context(), q) + if err != nil { + return nil, err + } + + // sort results (must be done before pagination) +- switch orderBy { +- case "desc", "": +- sort.Slice(results, func(i, j int) bool { return results[i] > results[j] }) +- +- case "asc": +- sort.Slice(results, func(i, j int) bool { return 
results[i] < results[j] }) +- +- default: +- return nil, errors.New("expected order_by to be either `asc` or `desc` or empty") ++ err = sortBlocks(results, orderBy) ++ if err != nil { ++ return nil, err + } + + // paginate results +@@ -231,9 +509,9 @@ func BlockSearch( + + apiResults := make([]*ctypes.ResultBlock, 0, pageSize) + for i := skipCount; i < skipCount+pageSize; i++ { +- block := env.BlockStore.LoadBlock(results[i]) ++ block := GetEnvironment().BlockStore.LoadBlock(results[i]) + if block != nil { +- blockMeta := env.BlockStore.LoadBlockMeta(block.Height) ++ blockMeta := GetEnvironment().BlockStore.LoadBlockMeta(block.Height) + if blockMeta != nil { + apiResults = append(apiResults, &ctypes.ResultBlock{ + Block: block, +@@ -245,3 +523,39 @@ func BlockSearch( + + return &ctypes.ResultBlockSearch{Blocks: apiResults, TotalCount: totalCount}, nil + } ++ ++// sortBlocks takes a list of block heights and sorts them according to the order: "asc" or "desc". ++// If `orderBy` is blank, then it is considered descending. ++func sortBlocks(results []int64, orderBy string) error { ++ switch orderBy { ++ case "desc", "": ++ sort.Slice(results, func(i, j int) bool { return results[i] > results[j] }) ++ ++ case "asc": ++ sort.Slice(results, func(i, j int) bool { return results[i] < results[j] }) ++ ++ default: ++ return errors.New("expected order_by to be either `asc` or `desc` or empty") ++ } ++ return nil ++} ++ ++// fetchDataRootTuples takes an end exclusive range of heights and fetches its ++// corresponding data root tuples. 
++func fetchDataRootTuples(start, end uint64) ([]DataRootTuple, error) { ++ env := GetEnvironment() ++ tuples := make([]DataRootTuple, 0, end-start) ++ for height := start; height < end; height++ { ++ //nolint:gosec ++ block := env.BlockStore.LoadBlock(int64(height)) ++ if block == nil { ++ return nil, fmt.Errorf("couldn't load block %d", height) ++ } ++ tuples = append(tuples, DataRootTuple{ ++ //nolint:gosec ++ height: uint64(block.Height), ++ dataRoot: *(*[32]byte)(block.DataHash), ++ }) ++ } ++ return tuples, nil ++} diff --git a/patches/rpc/core/blocks_test.go.patch b/patches/rpc/core/blocks_test.go.patch new file mode 100644 index 00000000000..e3eb8583715 --- /dev/null +++ b/patches/rpc/core/blocks_test.go.patch @@ -0,0 +1,326 @@ +diff --git a/rpc/core/blocks_test.go b/rpc/core/blocks_test.go +index 8ed76cde1..56e2ade3d 100644 +--- a/rpc/core/blocks_test.go ++++ b/rpc/core/blocks_test.go +@@ -1,6 +1,10 @@ + package core + + import ( ++ "bytes" ++ "context" ++ "encoding/binary" ++ "encoding/hex" + "fmt" + "testing" + +@@ -8,13 +12,17 @@ import ( + "github.com/stretchr/testify/require" + + dbm "github.com/cometbft/cometbft-db" ++ "github.com/tendermint/tendermint/crypto/merkle" ++ "github.com/tendermint/tendermint/libs/pubsub/query" ++ cmtrand "github.com/tendermint/tendermint/libs/rand" ++ "github.com/tendermint/tendermint/types" + + abci "github.com/tendermint/tendermint/abci/types" + cmtstate "github.com/tendermint/tendermint/proto/tendermint/state" ++ cmtstore "github.com/tendermint/tendermint/proto/tendermint/store" + ctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" + sm "github.com/tendermint/tendermint/state" +- "github.com/tendermint/tendermint/types" + ) + + func TestBlockchainInfo(t *testing.T) { +@@ -80,13 +88,14 @@ func TestBlockResults(t *testing.T) { + BeginBlock: &abci.ResponseBeginBlock{}, + } + +- env = &Environment{} +- env.StateStore = sm.NewStore(dbm.NewMemDB(), 
sm.StoreOptions{ ++ globalEnv = &Environment{} ++ globalEnv.StateStore = sm.NewStore(dbm.NewMemDB(), sm.StoreOptions{ + DiscardABCIResponses: false, + }) +- err := env.StateStore.SaveABCIResponses(100, results) ++ err := globalEnv.StateStore.SaveABCIResponses(100, results) + require.NoError(t, err) +- env.BlockStore = mockBlockStore{height: 100} ++ globalEnv.BlockStore = mockBlockStore{height: 100} ++ SetEnvironment(globalEnv) + + testCases := []struct { + height int64 +@@ -117,20 +126,270 @@ func TestBlockResults(t *testing.T) { + } + } + ++func TestEncodeDataRootTuple(t *testing.T) { ++ height := uint64(2) ++ dataRoot, err := hex.DecodeString("82dc1607d84557d3579ce602a45f5872e821c36dbda7ec926dfa17ebc8d5c013") ++ require.NoError(t, err) ++ ++ expectedEncoding, err := hex.DecodeString( ++ // hex representation of height padded to 32 bytes ++ "0000000000000000000000000000000000000000000000000000000000000002" + ++ // data root ++ "82dc1607d84557d3579ce602a45f5872e821c36dbda7ec926dfa17ebc8d5c013", ++ ) ++ require.NoError(t, err) ++ require.NotNil(t, expectedEncoding) ++ ++ actualEncoding, err := EncodeDataRootTuple(height, *(*[32]byte)(dataRoot)) ++ require.NoError(t, err) ++ require.NotNil(t, actualEncoding) ++ ++ // Check that the length of packed data is correct ++ assert.Equal(t, len(actualEncoding), 64) ++ assert.Equal(t, expectedEncoding, actualEncoding) ++} ++ ++func TestDataCommitmentResults(t *testing.T) { ++ env := &Environment{} ++ height := int64(2826) ++ ++ blocks := randomBlocks(height) ++ blockStore := mockBlockStore{ ++ height: height, ++ blocks: blocks, ++ } ++ env.BlockStore = blockStore ++ ++ testCases := []struct { ++ beginQuery int ++ endQuery int ++ expectPass bool ++ }{ ++ {10, 15, true}, ++ {2727, 2828, false}, ++ {10, 9, false}, ++ {0, 1000, false}, ++ {0, 10, false}, ++ {10, 8, false}, ++ // to test the end exclusive support for ranges. 
++ // the end block could be equal to (height+1), but the data commitment would only ++ // take up to height. So we should be able to send request having end block equal ++ // to (height+1). ++ {int(env.BlockStore.Height()) - 100, int(env.BlockStore.Height()) + 1, true}, ++ } ++ ++ for i, tc := range testCases { ++ env.BlockIndexer = mockBlockIndexer{ ++ height: height, ++ beginQueryBlock: tc.beginQuery, ++ endQueryBlock: tc.endQuery, ++ } ++ SetEnvironment(env) ++ ++ actualCommitment, err := DataCommitment(&rpctypes.Context{}, uint64(tc.beginQuery), uint64(tc.endQuery)) ++ if tc.expectPass { ++ require.Nil(t, err, "should generate the needed data commitment.") ++ ++ size := tc.endQuery - tc.beginQuery ++ dataRootEncodedTuples := make([][]byte, size) ++ for i := 0; i < size; i++ { ++ encodedTuple, err := EncodeDataRootTuple( ++ uint64(blocks[tc.beginQuery+i].Height), ++ *(*[32]byte)(blocks[tc.beginQuery+i].DataHash), ++ ) ++ require.NoError(t, err) ++ dataRootEncodedTuples[i] = encodedTuple ++ } ++ expectedCommitment := merkle.HashFromByteSlices(dataRootEncodedTuples) ++ ++ assert.Equal( ++ t, ++ expectedCommitment, ++ actualCommitment.DataCommitment.Bytes(), ++ i, ++ ) ++ } else { ++ require.NotNil(t, err, "couldn't generate the needed data commitment.") ++ } ++ } ++} ++ ++func TestDataRootInclusionProofResults(t *testing.T) { ++ env := &Environment{} ++ env.StateStore = sm.NewStore( ++ dbm.NewMemDB(), sm.StoreOptions{ ++ DiscardABCIResponses: false, ++ }, ++ ) ++ ++ height := int64(2826) ++ env.BlockStore = mockBlockStore{height: height} ++ SetEnvironment(env) ++ ++ blocks := randomBlocks(height) ++ blockStore := mockBlockStore{ ++ height: height, ++ blocks: blocks, ++ } ++ env.BlockStore = blockStore ++ ++ testCases := []struct { ++ height int ++ firstQuery int ++ lastQuery int ++ expectPass bool ++ }{ ++ {8, 10, 15, false}, ++ {10, 0, 15, false}, ++ {10, 10, 15, true}, ++ {13, 10, 15, true}, ++ {14, 10, 15, true}, ++ {15, 10, 15, false}, ++ {17, 10, 15, false}, 
++ } ++ ++ for i, tc := range testCases { ++ env.BlockIndexer = mockBlockIndexer{ ++ height: height, ++ beginQueryBlock: tc.firstQuery, ++ endQueryBlock: tc.lastQuery, ++ } ++ ++ proof, err := DataRootInclusionProof( ++ &rpctypes.Context{}, ++ int64(tc.height), ++ uint64(tc.firstQuery), ++ uint64(tc.lastQuery), ++ ) ++ if tc.expectPass { ++ require.Nil(t, err, "should generate block height data root inclusion proof.", i) ++ ++ size := tc.lastQuery - tc.firstQuery ++ dataRootEncodedTuples := make([][]byte, size) ++ for i := 0; i < size; i++ { ++ encodedTuple, err := EncodeDataRootTuple( ++ uint64(blocks[tc.firstQuery+i].Height), ++ *(*[32]byte)(blocks[tc.firstQuery+i].DataHash), ++ ) ++ require.NoError(t, err) ++ dataRootEncodedTuples[i] = encodedTuple ++ } ++ commitment := merkle.HashFromByteSlices(dataRootEncodedTuples) ++ ++ err = proof.Proof.Verify(commitment, dataRootEncodedTuples[tc.height-tc.firstQuery]) ++ require.NoError(t, err) ++ } else { ++ require.NotNil(t, err, "shouldn't be able to generate proof.") ++ } ++ } ++} ++ + type mockBlockStore struct { + height int64 ++ blocks []*types.Block + } + + func (mockBlockStore) Base() int64 { return 1 } + func (store mockBlockStore) Height() int64 { return store.height } + func (store mockBlockStore) Size() int64 { return store.height } + func (mockBlockStore) LoadBaseMeta() *types.BlockMeta { return nil } +-func (mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { return nil } +-func (mockBlockStore) LoadBlock(height int64) *types.Block { return nil } + func (mockBlockStore) LoadBlockByHash(hash []byte) *types.Block { return nil } + func (mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil } ++func (mockBlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { return nil } + func (mockBlockStore) LoadBlockCommit(height int64) *types.Commit { return nil } + func (mockBlockStore) LoadSeenCommit(height int64) *types.Commit { return nil } + func (mockBlockStore) 
PruneBlocks(height int64) (uint64, error) { return 0, nil } + func (mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { + } ++func (mockBlockStore) SaveTxInfo(block *types.Block, txResponseCodes []uint32, logs []string) error { ++ return nil ++} ++ ++func (store mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { ++ if height > store.height { ++ return nil ++ } ++ block := store.blocks[height] ++ return &types.BlockMeta{ ++ BlockID: types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(types.BlockPartSizeBytes).Header()}, ++ Header: block.Header, ++ } ++} ++ ++func (store mockBlockStore) LoadBlock(height int64) *types.Block { ++ if height > store.height { ++ return nil ++ } ++ return store.blocks[height] ++} ++ ++func (store mockBlockStore) LoadTxInfo(hash []byte) *cmtstore.TxInfo { ++ for _, block := range store.blocks { ++ for i, tx := range block.Data.Txs { ++ // Check if transaction hash matches ++ if bytes.Equal(tx.Hash(), hash) { ++ return &cmtstore.TxInfo{ ++ Height: block.Header.Height, ++ //nolint:gosec ++ Index: uint32(i), ++ Code: uint32(0), ++ } ++ } ++ } ++ } ++ return nil ++} ++ ++// mockBlockIndexer used to mock the set of indexed blocks and return a predefined one. ++type mockBlockIndexer struct { ++ height int64 ++ beginQueryBlock int // used not to have to parse any query ++ endQueryBlock int // used not to have to parse any query ++} ++ ++func (indexer mockBlockIndexer) Has(height int64) (bool, error) { return true, nil } ++func (indexer mockBlockIndexer) Index(types.EventDataNewBlockHeader) error { return nil } ++ ++// Search returns a list of block heights corresponding to the values of `indexer.endQueryBlock` ++// and `indexer.beginQueryBlock`. ++// Doesn't use the query parameter for anything. 
++func (indexer mockBlockIndexer) Search(ctx context.Context, _ *query.Query) ([]int64, error) { ++ size := indexer.endQueryBlock - indexer.beginQueryBlock + 1 ++ results := make([]int64, size) ++ for i := 0; i < size; i++ { ++ results[i] = int64(indexer.beginQueryBlock + i) ++ } ++ return results, nil ++} ++ ++// randomBlocks generates a set of random blocks up to (and including) the provided height. ++func randomBlocks(height int64) []*types.Block { ++ blocks := make([]*types.Block, height+1) ++ for i := int64(0); i <= height; i++ { ++ blocks[i] = randomBlock(i) ++ } ++ return blocks ++} ++ ++func makeTxs(height int64) (txs []types.Tx) { ++ for i := 0; i < 10; i++ { ++ numBytes := make([]byte, 8) ++ binary.BigEndian.PutUint64(numBytes, uint64(height)) ++ ++ txs = append(txs, types.Tx(append(numBytes, byte(i)))) ++ } ++ return txs ++} ++ ++// randomBlock generates a Block with a certain height and random data hash. ++func randomBlock(height int64) *types.Block { ++ return &types.Block{ ++ Header: types.Header{ ++ Height: height, ++ DataHash: cmtrand.Bytes(32), ++ }, ++ Data: types.Data{ ++ Txs: makeTxs(height), ++ }, ++ } ++} diff --git a/patches/rpc/core/consensus.go.patch b/patches/rpc/core/consensus.go.patch new file mode 100644 index 00000000000..4197ea9e18f --- /dev/null +++ b/patches/rpc/core/consensus.go.patch @@ -0,0 +1,49 @@ +diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go +index 936a39b7e..1f303d339 100644 +--- a/rpc/core/consensus.go ++++ b/rpc/core/consensus.go +@@ -22,7 +22,7 @@ func Validators(ctx *rpctypes.Context, heightPtr *int64, pagePtr, perPagePtr *in + return nil, err + } + +- validators, err := env.StateStore.LoadValidators(height) ++ validators, err := GetEnvironment().StateStore.LoadValidators(height) + if err != nil { + return nil, err + } +@@ -50,7 +50,7 @@ func Validators(ctx *rpctypes.Context, heightPtr *int64, pagePtr, perPagePtr *in + // More: https://docs.cometbft.com/v0.34/rpc/#/Info/dump_consensus_state + func 
DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) { + // Get Peer consensus states. +- peers := env.P2PPeers.Peers().List() ++ peers := GetEnvironment().P2PPeers.Peers().List() + peerStates := make([]ctypes.PeerStateInfo, len(peers)) + for i, peer := range peers { + peerState, ok := peer.Get(types.PeerStateKey).(*cm.PeerState) +@@ -69,7 +69,7 @@ func DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState + } + } + // Get self round state. +- roundState, err := env.ConsensusState.GetRoundStateJSON() ++ roundState, err := GetEnvironment().ConsensusState.GetRoundStateJSON() + if err != nil { + return nil, err + } +@@ -83,7 +83,7 @@ func DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState + // More: https://docs.cometbft.com/v0.34/rpc/#/Info/consensus_state + func ConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) { + // Get self round state. +- bz, err := env.ConsensusState.GetRoundStateSimpleJSON() ++ bz, err := GetEnvironment().ConsensusState.GetRoundStateSimpleJSON() + return &ctypes.ResultConsensusState{RoundState: bz}, err + } + +@@ -98,7 +98,7 @@ func ConsensusParams(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCon + return nil, err + } + +- consensusParams, err := env.StateStore.LoadConsensusParams(height) ++ consensusParams, err := GetEnvironment().StateStore.LoadConsensusParams(height) + if err != nil { + return nil, err + } diff --git a/patches/rpc/core/dev.go.patch b/patches/rpc/core/dev.go.patch new file mode 100644 index 00000000000..2461efdde43 --- /dev/null +++ b/patches/rpc/core/dev.go.patch @@ -0,0 +1,12 @@ +diff --git a/rpc/core/dev.go b/rpc/core/dev.go +index b70f5f1e1..538365352 100644 +--- a/rpc/core/dev.go ++++ b/rpc/core/dev.go +@@ -7,6 +7,6 @@ import ( + + // UnsafeFlushMempool removes all transactions from the mempool. 
+ func UnsafeFlushMempool(ctx *rpctypes.Context) (*ctypes.ResultUnsafeFlushMempool, error) { +- env.Mempool.Flush() ++ GetEnvironment().Mempool.Flush() + return &ctypes.ResultUnsafeFlushMempool{}, nil + } diff --git a/patches/rpc/core/env.go.patch b/patches/rpc/core/env.go.patch new file mode 100644 index 00000000000..68ea1b5bafe --- /dev/null +++ b/patches/rpc/core/env.go.patch @@ -0,0 +1,91 @@ +diff --git a/rpc/core/env.go b/rpc/core/env.go +index 145f62254..d71ccfbbe 100644 +--- a/rpc/core/env.go ++++ b/rpc/core/env.go +@@ -3,6 +3,7 @@ package core + import ( + "encoding/base64" + "fmt" ++ "sync" + "time" + + cfg "github.com/tendermint/tendermint/config" +@@ -35,13 +36,23 @@ const ( + + var ( + // set by Node +- env *Environment ++ mut = &sync.Mutex{} ++ globalEnv *Environment + ) + +-// SetEnvironment sets up the given Environment. +-// It will race if multiple Node call SetEnvironment. ++// SetEnvironment sets the global environment to e. The globalEnv var that this ++// function modifies is protected by a sync.Once so multiple calls within the ++// same process will not be effective. + func SetEnvironment(e *Environment) { +- env = e ++ mut.Lock() ++ defer mut.Unlock() ++ globalEnv = e ++} ++ ++func GetEnvironment() *Environment { ++ mut.Lock() ++ defer mut.Unlock() ++ return globalEnv + } + + //---------------------------------------------- +@@ -142,19 +153,20 @@ func validatePerPage(perPagePtr *int) int { + // InitGenesisChunks configures the environment and should be called on service + // startup. 
+ func InitGenesisChunks() error { +- if env.genChunks != nil { ++ if GetEnvironment().genChunks != nil { + return nil + } + +- if env.GenDoc == nil { ++ if GetEnvironment().GenDoc == nil { + return nil + } + +- data, err := cmtjson.Marshal(env.GenDoc) ++ data, err := cmtjson.Marshal(GetEnvironment().GenDoc) + if err != nil { + return err + } +- ++ mut.Lock() ++ defer mut.Unlock() + for i := 0; i < len(data); i += genesisChunkSize { + end := i + genesisChunkSize + +@@ -162,7 +174,7 @@ func InitGenesisChunks() error { + end = len(data) + } + +- env.genChunks = append(env.genChunks, base64.StdEncoding.EncodeToString(data[i:end])) ++ globalEnv.genChunks = append(globalEnv.genChunks, base64.StdEncoding.EncodeToString(data[i:end])) + } + + return nil +@@ -188,7 +200,7 @@ func getHeight(latestHeight int64, heightPtr *int64) (int64, error) { + return 0, fmt.Errorf("height %d must be less than or equal to the current blockchain height %d", + height, latestHeight) + } +- base := env.BlockStore.Base() ++ base := GetEnvironment().BlockStore.Base() + if height < base { + return 0, fmt.Errorf("height %d is not available, lowest height is %d", + height, base) +@@ -199,6 +211,7 @@ func getHeight(latestHeight int64, heightPtr *int64) (int64, error) { + } + + func latestUncommittedHeight() int64 { ++ env := GetEnvironment() + nodeIsSyncing := env.ConsensusReactor.WaitSync() + if nodeIsSyncing { + return env.BlockStore.Height() diff --git a/patches/rpc/core/events.go.patch b/patches/rpc/core/events.go.patch new file mode 100644 index 00000000000..1978c679bb4 --- /dev/null +++ b/patches/rpc/core/events.go.patch @@ -0,0 +1,37 @@ +diff --git a/rpc/core/events.go b/rpc/core/events.go +index e91021ee6..2e5f02b57 100644 +--- a/rpc/core/events.go ++++ b/rpc/core/events.go +@@ -22,6 +22,7 @@ const ( + // More: https://docs.cometbft.com/v0.34/rpc/#/Websocket/subscribe + func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) { + addr := ctx.RemoteAddr() ++ env := 
GetEnvironment() + + if env.EventBus.NumClients() >= env.Config.MaxSubscriptionClients { + return nil, fmt.Errorf("max_subscription_clients %d reached", env.Config.MaxSubscriptionClients) +@@ -51,7 +52,7 @@ func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, er + // Capture the current ID, since it can change in the future. + subscriptionID := ctx.JSONReq.ID + go func() { +- for { ++ for sub != nil { + select { + case msg := <-sub.Out(): + var ( +@@ -105,6 +106,7 @@ func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, er + // More: https://docs.cometbft.com/v0.34/rpc/#/Websocket/unsubscribe + func Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) { + addr := ctx.RemoteAddr() ++ env := GetEnvironment() + env.Logger.Info("Unsubscribe from query", "remote", addr, "query", query) + q, err := cmtquery.New(query) + if err != nil { +@@ -121,6 +123,7 @@ func Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe + // More: https://docs.cometbft.com/v0.34/rpc/#/Websocket/unsubscribe_all + func UnsubscribeAll(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) { + addr := ctx.RemoteAddr() ++ env := GetEnvironment() + env.Logger.Info("Unsubscribe from all", "remote", addr) + err := env.EventBus.UnsubscribeAll(context.Background(), addr) + if err != nil { diff --git a/patches/rpc/core/evidence.go.patch b/patches/rpc/core/evidence.go.patch new file mode 100644 index 00000000000..a5d7b5be0f2 --- /dev/null +++ b/patches/rpc/core/evidence.go.patch @@ -0,0 +1,13 @@ +diff --git a/rpc/core/evidence.go b/rpc/core/evidence.go +index bbfc1cbb5..bdb05293e 100644 +--- a/rpc/core/evidence.go ++++ b/rpc/core/evidence.go +@@ -20,7 +20,7 @@ func BroadcastEvidence(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.Result + return nil, fmt.Errorf("evidence.ValidateBasic failed: %w", err) + } + +- if err := env.EvidencePool.AddEvidence(ev); err != nil { ++ if err := 
GetEnvironment().EvidencePool.AddEvidence(ev); err != nil { + return nil, fmt.Errorf("failed to add evidence: %w", err) + } + return &ctypes.ResultBroadcastEvidence{Hash: ev.Hash()}, nil diff --git a/patches/rpc/core/mempool.go.patch b/patches/rpc/core/mempool.go.patch new file mode 100644 index 00000000000..aab66ff0d78 --- /dev/null +++ b/patches/rpc/core/mempool.go.patch @@ -0,0 +1,78 @@ +diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go +index 1eea1e8e2..0aec02cff 100644 +--- a/rpc/core/mempool.go ++++ b/rpc/core/mempool.go +@@ -13,6 +13,8 @@ import ( + "github.com/tendermint/tendermint/types" + ) + ++var ErrTimedOutWaitingForTx = errors.New("timed out waiting for tx to be included in a block") ++ + //----------------------------------------------------------------------------- + // NOTE: tx should be signed, but this is only checked at the app level (not by CometBFT!) + +@@ -20,7 +22,7 @@ import ( + // CheckTx nor DeliverTx results. + // More: https://docs.cometbft.com/v0.34/rpc/#/Tx/broadcast_tx_async + func BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +- err := env.Mempool.CheckTx(tx, nil, mempl.TxInfo{}) ++ err := GetEnvironment().Mempool.CheckTx(tx, nil, mempl.TxInfo{}) + + if err != nil { + return nil, err +@@ -33,7 +35,7 @@ func BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadca + // More: https://docs.cometbft.com/v0.34/rpc/#/Tx/broadcast_tx_sync + func BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + resCh := make(chan *abci.Response, 1) +- err := env.Mempool.CheckTx(tx, func(res *abci.Response) { ++ err := GetEnvironment().Mempool.CheckTx(tx, func(res *abci.Response) { + select { + case <-ctx.Context().Done(): + case resCh <- res: +@@ -59,10 +61,12 @@ func BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcas + } + } + ++// DEPRECATED: Use BroadcastTxSync or BroadcastTxAsync instead. 
+ // BroadcastTxCommit returns with the responses from CheckTx and DeliverTx. + // More: https://docs.cometbft.com/v0.34/rpc/#/Tx/broadcast_tx_commit + func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { + subscriber := ctx.RemoteAddr() ++ env := GetEnvironment() + + if env.EventBus.NumClients() >= env.Config.MaxSubscriptionClients { + return nil, fmt.Errorf("max_subscription_clients %d reached", env.Config.MaxSubscriptionClients) +@@ -136,7 +140,7 @@ func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadc + Hash: tx.Hash(), + }, err + case <-time.After(env.Config.TimeoutBroadcastTxCommit): +- err = errors.New("timed out waiting for tx to be included in a block") ++ err = ErrTimedOutWaitingForTx + env.Logger.Error("Error on broadcastTxCommit", "err", err) + return &ctypes.ResultBroadcastTxCommit{ + CheckTx: *checkTxRes, +@@ -153,6 +157,7 @@ func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadc + func UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*ctypes.ResultUnconfirmedTxs, error) { + // reuse per_page validator + limit := validatePerPage(limitPtr) ++ env := GetEnvironment() + + txs := env.Mempool.ReapMaxTxs(limit) + return &ctypes.ResultUnconfirmedTxs{ +@@ -165,6 +170,7 @@ func UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*ctypes.ResultUnconfi + // NumUnconfirmedTxs gets number of unconfirmed transactions. + // More: https://docs.cometbft.com/v0.34/rpc/#/Info/num_unconfirmed_txs + func NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) { ++ env := GetEnvironment() + return &ctypes.ResultUnconfirmedTxs{ + Count: env.Mempool.Size(), + Total: env.Mempool.Size(), +@@ -175,7 +181,7 @@ func NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, err + // be added to the mempool either. 
+ // More: https://docs.cometbft.com/v0.34/rpc/#/Tx/check_tx + func CheckTx(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { +- res, err := env.ProxyAppMempool.CheckTxSync(abci.RequestCheckTx{Tx: tx}) ++ res, err := GetEnvironment().ProxyAppMempool.CheckTxSync(abci.RequestCheckTx{Tx: tx}) + if err != nil { + return nil, err + } diff --git a/patches/rpc/core/mocks/mempool.go.patch b/patches/rpc/core/mocks/mempool.go.patch new file mode 100644 index 00000000000..f8e02667c9b --- /dev/null +++ b/patches/rpc/core/mocks/mempool.go.patch @@ -0,0 +1,246 @@ +diff --git a/rpc/core/mocks/mempool.go b/rpc/core/mocks/mempool.go +new file mode 100644 +index 000000000..bdf127f43 +--- /dev/null ++++ b/rpc/core/mocks/mempool.go +@@ -0,0 +1,240 @@ ++// Code generated by MockGen. DO NOT EDIT. ++// Source: ./mempool/mempool.go ++ ++// Package mock_mempool is a generated GoMock package. ++package mock_mempool ++ ++import ( ++ reflect "reflect" ++ ++ gomock "github.com/golang/mock/gomock" ++ types "github.com/tendermint/tendermint/abci/types" ++ mempool "github.com/tendermint/tendermint/mempool" ++ types0 "github.com/tendermint/tendermint/types" ++) ++ ++// MockMempool is a mock of Mempool interface. ++type MockMempool struct { ++ ctrl *gomock.Controller ++ recorder *MockMempoolMockRecorder ++} ++ ++// MockMempoolMockRecorder is the mock recorder for MockMempool. ++type MockMempoolMockRecorder struct { ++ mock *MockMempool ++} ++ ++// NewMockMempool creates a new mock instance. ++func NewMockMempool(ctrl *gomock.Controller) *MockMempool { ++ mock := &MockMempool{ctrl: ctrl} ++ mock.recorder = &MockMempoolMockRecorder{mock} ++ return mock ++} ++ ++// EXPECT returns an object that allows the caller to indicate expected use. ++func (m *MockMempool) EXPECT() *MockMempoolMockRecorder { ++ return m.recorder ++} ++ ++// CheckTx mocks base method. 
++func (m *MockMempool) CheckTx(tx types0.Tx, callback func(*types.Response), txInfo mempool.TxInfo) error { ++ m.ctrl.T.Helper() ++ ret := m.ctrl.Call(m, "CheckTx", tx, callback, txInfo) ++ ret0, _ := ret[0].(error) ++ return ret0 ++} ++ ++// CheckTx indicates an expected call of CheckTx. ++func (mr *MockMempoolMockRecorder) CheckTx(tx, callback, txInfo interface{}) *gomock.Call { ++ mr.mock.ctrl.T.Helper() ++ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckTx", reflect.TypeOf((*MockMempool)(nil).CheckTx), tx, callback, txInfo) ++} ++ ++// EnableTxsAvailable mocks base method. ++func (m *MockMempool) EnableTxsAvailable() { ++ m.ctrl.T.Helper() ++ m.ctrl.Call(m, "EnableTxsAvailable") ++} ++ ++// EnableTxsAvailable indicates an expected call of EnableTxsAvailable. ++func (mr *MockMempoolMockRecorder) EnableTxsAvailable() *gomock.Call { ++ mr.mock.ctrl.T.Helper() ++ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableTxsAvailable", reflect.TypeOf((*MockMempool)(nil).EnableTxsAvailable)) ++} ++ ++// Flush mocks base method. ++func (m *MockMempool) Flush() { ++ m.ctrl.T.Helper() ++ m.ctrl.Call(m, "Flush") ++} ++ ++// Flush indicates an expected call of Flush. ++func (mr *MockMempoolMockRecorder) Flush() *gomock.Call { ++ mr.mock.ctrl.T.Helper() ++ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Flush", reflect.TypeOf((*MockMempool)(nil).Flush)) ++} ++ ++// FlushAppConn mocks base method. ++func (m *MockMempool) FlushAppConn() error { ++ m.ctrl.T.Helper() ++ ret := m.ctrl.Call(m, "FlushAppConn") ++ ret0, _ := ret[0].(error) ++ return ret0 ++} ++ ++// FlushAppConn indicates an expected call of FlushAppConn. ++func (mr *MockMempoolMockRecorder) FlushAppConn() *gomock.Call { ++ mr.mock.ctrl.T.Helper() ++ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FlushAppConn", reflect.TypeOf((*MockMempool)(nil).FlushAppConn)) ++} ++ ++// GetTxByKey mocks base method. 
++func (m *MockMempool) GetTxByKey(key types0.TxKey) (types0.Tx, bool) { ++ m.ctrl.T.Helper() ++ ret := m.ctrl.Call(m, "GetTxByKey", key) ++ ret0, _ := ret[0].(types0.Tx) ++ ret1, _ := ret[1].(bool) ++ return ret0, ret1 ++} ++ ++// GetTxByKey indicates an expected call of GetTxByKey. ++func (mr *MockMempoolMockRecorder) GetTxByKey(key interface{}) *gomock.Call { ++ mr.mock.ctrl.T.Helper() ++ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTxByKey", reflect.TypeOf((*MockMempool)(nil).GetTxByKey), key) ++} ++ ++// Lock mocks base method. ++func (m *MockMempool) Lock() { ++ m.ctrl.T.Helper() ++ m.ctrl.Call(m, "Lock") ++} ++ ++// Lock indicates an expected call of Lock. ++func (mr *MockMempoolMockRecorder) Lock() *gomock.Call { ++ mr.mock.ctrl.T.Helper() ++ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Lock", reflect.TypeOf((*MockMempool)(nil).Lock)) ++} ++ ++// ReapMaxBytesMaxGas mocks base method. ++func (m *MockMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types0.Txs { ++ m.ctrl.T.Helper() ++ ret := m.ctrl.Call(m, "ReapMaxBytesMaxGas", maxBytes, maxGas) ++ ret0, _ := ret[0].(types0.Txs) ++ return ret0 ++} ++ ++// ReapMaxBytesMaxGas indicates an expected call of ReapMaxBytesMaxGas. ++func (mr *MockMempoolMockRecorder) ReapMaxBytesMaxGas(maxBytes, maxGas interface{}) *gomock.Call { ++ mr.mock.ctrl.T.Helper() ++ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReapMaxBytesMaxGas", reflect.TypeOf((*MockMempool)(nil).ReapMaxBytesMaxGas), maxBytes, maxGas) ++} ++ ++// ReapMaxTxs mocks base method. ++func (m *MockMempool) ReapMaxTxs(max int) types0.Txs { ++ m.ctrl.T.Helper() ++ ret := m.ctrl.Call(m, "ReapMaxTxs", max) ++ ret0, _ := ret[0].(types0.Txs) ++ return ret0 ++} ++ ++// ReapMaxTxs indicates an expected call of ReapMaxTxs. 
++func (mr *MockMempoolMockRecorder) ReapMaxTxs(max interface{}) *gomock.Call { ++ mr.mock.ctrl.T.Helper() ++ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReapMaxTxs", reflect.TypeOf((*MockMempool)(nil).ReapMaxTxs), max) ++} ++ ++// RemoveTxByKey mocks base method. ++func (m *MockMempool) RemoveTxByKey(txKey types0.TxKey) error { ++ m.ctrl.T.Helper() ++ ret := m.ctrl.Call(m, "RemoveTxByKey", txKey) ++ ret0, _ := ret[0].(error) ++ return ret0 ++} ++ ++// RemoveTxByKey indicates an expected call of RemoveTxByKey. ++func (mr *MockMempoolMockRecorder) RemoveTxByKey(txKey interface{}) *gomock.Call { ++ mr.mock.ctrl.T.Helper() ++ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveTxByKey", reflect.TypeOf((*MockMempool)(nil).RemoveTxByKey), txKey) ++} ++ ++// Size mocks base method. ++func (m *MockMempool) Size() int { ++ m.ctrl.T.Helper() ++ ret := m.ctrl.Call(m, "Size") ++ ret0, _ := ret[0].(int) ++ return ret0 ++} ++ ++// Size indicates an expected call of Size. ++func (mr *MockMempoolMockRecorder) Size() *gomock.Call { ++ mr.mock.ctrl.T.Helper() ++ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Size", reflect.TypeOf((*MockMempool)(nil).Size)) ++} ++ ++// SizeBytes mocks base method. ++func (m *MockMempool) SizeBytes() int64 { ++ m.ctrl.T.Helper() ++ ret := m.ctrl.Call(m, "SizeBytes") ++ ret0, _ := ret[0].(int64) ++ return ret0 ++} ++ ++// SizeBytes indicates an expected call of SizeBytes. ++func (mr *MockMempoolMockRecorder) SizeBytes() *gomock.Call { ++ mr.mock.ctrl.T.Helper() ++ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SizeBytes", reflect.TypeOf((*MockMempool)(nil).SizeBytes)) ++} ++ ++// TxsAvailable mocks base method. ++func (m *MockMempool) TxsAvailable() <-chan struct{} { ++ m.ctrl.T.Helper() ++ ret := m.ctrl.Call(m, "TxsAvailable") ++ ret0, _ := ret[0].(<-chan struct{}) ++ return ret0 ++} ++ ++// TxsAvailable indicates an expected call of TxsAvailable. 
++func (mr *MockMempoolMockRecorder) TxsAvailable() *gomock.Call { ++ mr.mock.ctrl.T.Helper() ++ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TxsAvailable", reflect.TypeOf((*MockMempool)(nil).TxsAvailable)) ++} ++ ++// Unlock mocks base method. ++func (m *MockMempool) Unlock() { ++ m.ctrl.T.Helper() ++ m.ctrl.Call(m, "Unlock") ++} ++ ++// Unlock indicates an expected call of Unlock. ++func (mr *MockMempoolMockRecorder) Unlock() *gomock.Call { ++ mr.mock.ctrl.T.Helper() ++ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unlock", reflect.TypeOf((*MockMempool)(nil).Unlock)) ++} ++ ++// Update mocks base method. ++func (m *MockMempool) Update(blockHeight int64, blockTxs types0.Txs, deliverTxResponses []*types.ResponseDeliverTx, newPreFn mempool.PreCheckFunc, newPostFn mempool.PostCheckFunc) error { ++ m.ctrl.T.Helper() ++ ret := m.ctrl.Call(m, "Update", blockHeight, blockTxs, deliverTxResponses, newPreFn, newPostFn) ++ ret0, _ := ret[0].(error) ++ return ret0 ++} ++ ++// Update indicates an expected call of Update. ++func (mr *MockMempoolMockRecorder) Update(blockHeight, blockTxs, deliverTxResponses, newPreFn, newPostFn interface{}) *gomock.Call { ++ mr.mock.ctrl.T.Helper() ++ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockMempool)(nil).Update), blockHeight, blockTxs, deliverTxResponses, newPreFn, newPostFn) ++} ++ ++// WasRecentlyEvicted mocks base method. ++func (m *MockMempool) WasRecentlyEvicted(key types0.TxKey) bool { ++ m.ctrl.T.Helper() ++ ret := m.ctrl.Call(m, "WasRecentlyEvicted", key) ++ ret0, _ := ret[0].(bool) ++ return ret0 ++} ++ ++// WasRecentlyEvicted indicates an expected call of WasRecentlyEvicted. 
++func (mr *MockMempoolMockRecorder) WasRecentlyEvicted(key interface{}) *gomock.Call { ++ mr.mock.ctrl.T.Helper() ++ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WasRecentlyEvicted", reflect.TypeOf((*MockMempool)(nil).WasRecentlyEvicted), key) ++} diff --git a/patches/rpc/core/net.go.patch b/patches/rpc/core/net.go.patch new file mode 100644 index 00000000000..1dccfcc3def --- /dev/null +++ b/patches/rpc/core/net.go.patch @@ -0,0 +1,52 @@ +diff --git a/rpc/core/net.go b/rpc/core/net.go +index c42016ec7..a17a3fc5b 100644 +--- a/rpc/core/net.go ++++ b/rpc/core/net.go +@@ -13,6 +13,7 @@ import ( + // NetInfo returns network info. + // More: https://docs.cometbft.com/v0.34/rpc/#/Info/net_info + func NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) { ++ env := GetEnvironment() + peersList := env.P2PPeers.Peers().List() + peers := make([]ctypes.Peer, 0, len(peersList)) + for _, peer := range peersList { +@@ -43,6 +44,7 @@ func UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*ctypes.ResultDialS + if len(seeds) == 0 { + return &ctypes.ResultDialSeeds{}, errors.New("no seeds provided") + } ++ env := GetEnvironment() + env.Logger.Info("DialSeeds", "seeds", seeds) + if err := env.P2PPeers.DialPeersAsync(seeds); err != nil { + return &ctypes.ResultDialSeeds{}, err +@@ -63,6 +65,7 @@ func UnsafeDialPeers(ctx *rpctypes.Context, peers []string, persistent, uncondit + return &ctypes.ResultDialPeers{}, err + } + ++ env := GetEnvironment() + env.Logger.Info("DialPeers", "peers", peers, "persistent", + persistent, "unconditional", unconditional, "private", private) + +@@ -94,6 +97,7 @@ func UnsafeDialPeers(ctx *rpctypes.Context, peers []string, persistent, uncondit + // Genesis returns genesis file. 
+ // More: https://docs.cometbft.com/v0.34/rpc/#/Info/genesis + func Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) { ++ env := GetEnvironment() + if len(env.genChunks) > 1 { + return nil, errors.New("genesis response is large, please use the genesis_chunked API instead") + } +@@ -102,6 +106,7 @@ func Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) { + } + + func GenesisChunked(ctx *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error) { ++ env := GetEnvironment() + if env.genChunks == nil { + return nil, fmt.Errorf("service configuration error, genesis chunks are not initialized") + } +@@ -110,6 +115,7 @@ func GenesisChunked(ctx *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChu + return nil, fmt.Errorf("service configuration error, there are no chunks") + } + ++ //nolint:gosec + id := int(chunk) + + if id > len(env.genChunks)-1 { diff --git a/patches/rpc/core/net_test.go.patch b/patches/rpc/core/net_test.go.patch new file mode 100644 index 00000000000..07e9d5fa4e8 --- /dev/null +++ b/patches/rpc/core/net_test.go.patch @@ -0,0 +1,20 @@ +diff --git a/rpc/core/net_test.go b/rpc/core/net_test.go +index c971776f3..e44d5cf5b 100644 +--- a/rpc/core/net_test.go ++++ b/rpc/core/net_test.go +@@ -22,6 +22,7 @@ func TestUnsafeDialSeeds(t *testing.T) { + t.Error(err) + } + }) ++ env := GetEnvironment() + + env.Logger = log.TestingLogger() + env.P2PPeers = sw +@@ -62,6 +63,7 @@ func TestUnsafeDialPeers(t *testing.T) { + } + }) + ++ env := GetEnvironment() + env.Logger = log.TestingLogger() + env.P2PPeers = sw + diff --git a/patches/rpc/core/routes.go.patch b/patches/rpc/core/routes.go.patch new file mode 100644 index 00000000000..e158273e136 --- /dev/null +++ b/patches/rpc/core/routes.go.patch @@ -0,0 +1,59 @@ +diff --git a/rpc/core/routes.go b/rpc/core/routes.go +index 65fe7365a..b2c8d69ee 100644 +--- a/rpc/core/routes.go ++++ b/rpc/core/routes.go +@@ -14,26 +14,34 @@ var Routes = map[string]*rpc.RPCFunc{ + "unsubscribe_all": 
rpc.NewWSRPCFunc(UnsubscribeAll, ""), + + // info API +- "health": rpc.NewRPCFunc(Health, ""), +- "status": rpc.NewRPCFunc(Status, ""), +- "net_info": rpc.NewRPCFunc(NetInfo, ""), +- "blockchain": rpc.NewRPCFunc(BlockchainInfo, "minHeight,maxHeight", rpc.Cacheable()), +- "genesis": rpc.NewRPCFunc(Genesis, "", rpc.Cacheable()), +- "genesis_chunked": rpc.NewRPCFunc(GenesisChunked, "chunk", rpc.Cacheable()), +- "block": rpc.NewRPCFunc(Block, "height", rpc.Cacheable("height")), +- "block_by_hash": rpc.NewRPCFunc(BlockByHash, "hash", rpc.Cacheable()), +- "block_results": rpc.NewRPCFunc(BlockResults, "height", rpc.Cacheable("height")), +- "commit": rpc.NewRPCFunc(Commit, "height", rpc.Cacheable("height")), +- "check_tx": rpc.NewRPCFunc(CheckTx, "tx"), +- "tx": rpc.NewRPCFunc(Tx, "hash,prove", rpc.Cacheable()), +- "tx_search": rpc.NewRPCFunc(TxSearchMatchEvents, "query,prove,page,per_page,order_by,match_events"), +- "block_search": rpc.NewRPCFunc(BlockSearchMatchEvents, "query,page,per_page,order_by,match_events"), +- "validators": rpc.NewRPCFunc(Validators, "height,page,per_page", rpc.Cacheable("height")), +- "dump_consensus_state": rpc.NewRPCFunc(DumpConsensusState, ""), +- "consensus_state": rpc.NewRPCFunc(ConsensusState, ""), +- "consensus_params": rpc.NewRPCFunc(ConsensusParams, "height", rpc.Cacheable("height")), +- "unconfirmed_txs": rpc.NewRPCFunc(UnconfirmedTxs, "limit"), +- "num_unconfirmed_txs": rpc.NewRPCFunc(NumUnconfirmedTxs, ""), ++ "health": rpc.NewRPCFunc(Health, ""), ++ "status": rpc.NewRPCFunc(Status, ""), ++ "net_info": rpc.NewRPCFunc(NetInfo, ""), ++ "blockchain": rpc.NewRPCFunc(BlockchainInfo, "minHeight,maxHeight", rpc.Cacheable()), ++ "genesis": rpc.NewRPCFunc(Genesis, "", rpc.Cacheable()), ++ "genesis_chunked": rpc.NewRPCFunc(GenesisChunked, "chunk", rpc.Cacheable()), ++ "block": rpc.NewRPCFunc(Block, "height", rpc.Cacheable("height")), ++ "signed_block": rpc.NewRPCFunc(SignedBlock, "height", rpc.Cacheable("height")), ++ "block_by_hash": 
rpc.NewRPCFunc(BlockByHash, "hash", rpc.Cacheable()), ++ "block_results": rpc.NewRPCFunc(BlockResults, "height", rpc.Cacheable("height")), ++ "commit": rpc.NewRPCFunc(Commit, "height", rpc.Cacheable("height")), ++ "header": rpc.NewRPCFunc(Header, "height", rpc.Cacheable("height")), ++ "header_by_hash": rpc.NewRPCFunc(HeaderByHash, "hash"), ++ "data_commitment": rpc.NewRPCFunc(DataCommitment, "start,end"), ++ "check_tx": rpc.NewRPCFunc(CheckTx, "tx"), ++ "tx": rpc.NewRPCFunc(Tx, "hash,prove", rpc.Cacheable()), ++ "prove_shares": rpc.NewRPCFunc(ProveShares, "height,startShare,endShare"), ++ "prove_shares_v2": rpc.NewRPCFunc(ProveSharesV2, "height,startShare,endShare"), ++ "data_root_inclusion_proof": rpc.NewRPCFunc(DataRootInclusionProof, "height,start,end"), ++ "tx_search": rpc.NewRPCFunc(TxSearchMatchEvents, "query,prove,page,per_page,order_by,match_events"), ++ "block_search": rpc.NewRPCFunc(BlockSearchMatchEvents, "query,page,per_page,order_by,match_events"), ++ "validators": rpc.NewRPCFunc(Validators, "height,page,per_page", rpc.Cacheable("height")), ++ "dump_consensus_state": rpc.NewRPCFunc(DumpConsensusState, ""), ++ "consensus_state": rpc.NewRPCFunc(ConsensusState, ""), ++ "consensus_params": rpc.NewRPCFunc(ConsensusParams, "height", rpc.Cacheable("height")), ++ "unconfirmed_txs": rpc.NewRPCFunc(UnconfirmedTxs, "limit"), ++ "num_unconfirmed_txs": rpc.NewRPCFunc(NumUnconfirmedTxs, ""), ++ "tx_status": rpc.NewRPCFunc(TxStatus, "hash"), + + // tx broadcast API + "broadcast_tx_commit": rpc.NewRPCFunc(BroadcastTxCommit, "tx"), diff --git a/patches/rpc/core/status.go.patch b/patches/rpc/core/status.go.patch new file mode 100644 index 00000000000..a0a88a0e72e --- /dev/null +++ b/patches/rpc/core/status.go.patch @@ -0,0 +1,66 @@ +diff --git a/rpc/core/status.go b/rpc/core/status.go +index 605d3107d..1261d6d6e 100644 +--- a/rpc/core/status.go ++++ b/rpc/core/status.go +@@ -13,7 +13,7 @@ import ( + // Status returns CometBFT status including node info, pubkey, latest 
block + // hash, app hash, block height and time. + // More: https://docs.cometbft.com/v0.34/rpc/#/Info/status +-func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { ++func Status(_ *rpctypes.Context) (*ctypes.ResultStatus, error) { + var ( + earliestBlockHeight int64 + earliestBlockHash cmtbytes.HexBytes +@@ -21,6 +21,7 @@ func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { + earliestBlockTimeNano int64 + ) + ++ env := GetEnvironment() + if earliestBlockMeta := env.BlockStore.LoadBaseMeta(); earliestBlockMeta != nil { + earliestBlockHeight = earliestBlockMeta.Header.Height + earliestAppHash = earliestBlockMeta.Header.AppHash +@@ -52,7 +53,7 @@ func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { + } + + result := &ctypes.ResultStatus{ +- NodeInfo: env.P2PTransport.NodeInfo().(p2p.DefaultNodeInfo), ++ NodeInfo: GetNodeInfo(env, latestHeight), + SyncInfo: ctypes.SyncInfo{ + LatestBlockHash: latestBlockHash, + LatestAppHash: latestAppHash, +@@ -75,6 +76,7 @@ func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { + } + + func validatorAtHeight(h int64) *types.Validator { ++ env := GetEnvironment() + vals, err := env.StateStore.LoadValidators(h) + if err != nil { + return nil +@@ -83,3 +85,27 @@ func validatorAtHeight(h int64) *types.Validator { + _, val := vals.GetByAddress(privValAddress) + return val + } ++ ++// GetNodeInfo returns the node info with the app version set to the latest app ++// version from the state store. ++// ++// This function is necessary because upstream CometBFT does not support ++// upgrading app versions for a running binary. Therefore the ++// env.P2PTransport.NodeInfo.ProtocolVersion.App is expected to be set on node ++// start-up and never updated. Celestia supports upgrading the app version for a ++// running binary so the env.P2PTransport.NodeInfo.ProtocolVersion.App will be ++// incorrect if a node upgraded app versions without restarting. 
This function ++// corrects that issue by fetching the latest app version from the state store. ++func GetNodeInfo(env *Environment, latestHeight int64) p2p.DefaultNodeInfo { ++ nodeInfo := env.P2PTransport.NodeInfo().(p2p.DefaultNodeInfo) ++ ++ consensusParams, err := env.StateStore.LoadConsensusParams(latestHeight) ++ if err != nil { ++ // use the default app version if we can't load the consensus params (i.e. height 0) ++ return nodeInfo ++ } ++ ++ // override the default app version with the latest app version ++ nodeInfo.ProtocolVersion.App = consensusParams.Version.AppVersion ++ return nodeInfo ++} diff --git a/patches/rpc/core/status_test.go.patch b/patches/rpc/core/status_test.go.patch new file mode 100644 index 00000000000..d883d3cec74 --- /dev/null +++ b/patches/rpc/core/status_test.go.patch @@ -0,0 +1,82 @@ +diff --git a/rpc/core/status_test.go b/rpc/core/status_test.go +new file mode 100644 +index 000000000..2fed1a2d9 +--- /dev/null ++++ b/rpc/core/status_test.go +@@ -0,0 +1,76 @@ ++package core_test ++ ++import ( ++ "testing" ++ ++ "github.com/stretchr/testify/assert" ++ "github.com/tendermint/tendermint/p2p" ++ "github.com/tendermint/tendermint/proto/tendermint/types" ++ "github.com/tendermint/tendermint/rpc/core" ++ "github.com/tendermint/tendermint/state/mocks" ++) ++ ++func TestGetNodeInfo(t *testing.T) { ++ p2pTransport := mockTransport{} ++ stateStore := &mocks.Store{} ++ stateStore.On("LoadConsensusParams", int64(1)).Return(types.ConsensusParams{Version: types.VersionParams{AppVersion: 1}}, nil) ++ stateStore.On("LoadConsensusParams", int64(2)).Return(types.ConsensusParams{Version: types.VersionParams{AppVersion: 2}}, nil) ++ ++ type testCase struct { ++ name string ++ env *core.Environment ++ latestHeight int64 ++ want uint64 ++ } ++ testCases := []testCase{ ++ { ++ name: "want 1 when consensus params app version is 1", ++ env: &core.Environment{P2PTransport: p2pTransport, StateStore: stateStore}, ++ latestHeight: 1, ++ want: 1, ++ }, ++ { ++ 
name: "want 2 if consensus params app version is 2", ++ env: &core.Environment{P2PTransport: p2pTransport, StateStore: stateStore}, ++ latestHeight: 2, ++ want: 2, ++ }, ++ } ++ ++ for _, tc := range testCases { ++ t.Run(tc.name, func(t *testing.T) { ++ nodeInfo := core.GetNodeInfo(tc.env, tc.latestHeight) ++ assert.Equal(t, tc.want, nodeInfo.ProtocolVersion.App) ++ }) ++ } ++} ++ ++// transport is copy + pasted from the core package because it isn't exported. ++// https://github.com/celestiaorg/celestia-core/blob/640d115aec834609022c842b2497fc568df53692/rpc/core/env.go#L69-L73 ++type transport interface { ++ Listeners() []string ++ IsListening() bool ++ NodeInfo() p2p.NodeInfo ++} ++ ++// mockTransport implements the transport interface. ++var _ transport = (*mockTransport)(nil) ++ ++type mockTransport struct{} ++ ++func (m mockTransport) Listeners() []string { ++ return []string{} ++} ++func (m mockTransport) IsListening() bool { ++ return false ++} ++ ++func (m mockTransport) NodeInfo() p2p.NodeInfo { ++ return p2p.DefaultNodeInfo{ ++ ProtocolVersion: p2p.ProtocolVersion{ ++ P2P: 0, ++ Block: 0, ++ App: 0, ++ }, ++ } ++} diff --git a/patches/rpc/core/tx.go.patch b/patches/rpc/core/tx.go.patch new file mode 100644 index 00000000000..311443a6c07 --- /dev/null +++ b/patches/rpc/core/tx.go.patch @@ -0,0 +1,237 @@ +diff --git a/rpc/core/tx.go b/rpc/core/tx.go +index fe24f5c17..96e994a25 100644 +--- a/rpc/core/tx.go ++++ b/rpc/core/tx.go +@@ -5,19 +5,31 @@ import ( + "fmt" + "sort" + ++ abcitypes "github.com/tendermint/tendermint/abci/types" + cmtmath "github.com/tendermint/tendermint/libs/math" + cmtquery "github.com/tendermint/tendermint/libs/pubsub/query" ++ "github.com/tendermint/tendermint/pkg/consts" ++ cmtproto "github.com/tendermint/tendermint/proto/tendermint/types" + ctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ++ "github.com/tendermint/tendermint/state" + 
"github.com/tendermint/tendermint/state/txindex/null" + "github.com/tendermint/tendermint/types" + ) + ++const ( ++ TxStatusUnknown string = "UNKNOWN" ++ TxStatusPending string = "PENDING" ++ TxStatusEvicted string = "EVICTED" ++ TxStatusCommitted string = "COMMITTED" ++) ++ + // Tx allows you to query the transaction results. `nil` could mean the + // transaction is in the mempool, invalidated, or was not sent in the first + // place. + // More: https://docs.cometbft.com/v0.34/rpc/#/Info/tx + func Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { ++ env := GetEnvironment() + // if index is disabled, return error + if _, ok := env.TxIndexer.(*null.TxIndex); ok { + return nil, fmt.Errorf("transaction indexing is disabled") +@@ -35,10 +47,12 @@ func Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error + height := r.Height + index := r.Index + +- var proof types.TxProof ++ var shareProof types.ShareProof + if prove { +- block := env.BlockStore.LoadBlock(height) +- proof = block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines ++ shareProof, err = proveTx(height, index) ++ if err != nil { ++ return nil, err ++ } + } + + return &ctypes.ResultTx{ +@@ -47,7 +61,7 @@ func Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error + Index: index, + TxResult: r.Result, + Tx: r.Tx, +- Proof: proof, ++ Proof: shareProof, + }, nil + } + +@@ -62,6 +76,7 @@ func TxSearch( + orderBy string, + ) (*ctypes.ResultTxSearch, error) { + ++ env := GetEnvironment() + // if index is disabled, return error + if _, ok := env.TxIndexer.(*null.TxIndex); ok { + return nil, errors.New("transaction indexing is disabled") +@@ -115,10 +130,12 @@ func TxSearch( + for i := skipCount; i < skipCount+pageSize; i++ { + r := results[i] + +- var proof types.TxProof ++ var shareProof types.ShareProof + if prove { +- block := env.BlockStore.LoadBlock(r.Height) +- proof = block.Data.Txs.Proof(int(r.Index)) // XXX: overflow on 
32-bit machines ++ shareProof, err = proveTx(r.Height, r.Index) ++ if err != nil { ++ return nil, err ++ } + } + + apiResults = append(apiResults, &ctypes.ResultTx{ +@@ -127,13 +144,150 @@ func TxSearch( + Index: r.Index, + TxResult: r.Result, + Tx: r.Tx, +- Proof: proof, ++ Proof: shareProof, + }) + } + + return &ctypes.ResultTxSearch{Txs: apiResults, TotalCount: totalCount}, nil + } + ++func proveTx(height int64, index uint32) (types.ShareProof, error) { ++ var ( ++ pShareProof cmtproto.ShareProof ++ shareProof types.ShareProof ++ ) ++ env := GetEnvironment() ++ rawBlock, err := loadRawBlock(env.BlockStore, height) ++ if err != nil { ++ return shareProof, err ++ } ++ res, err := env.ProxyAppQuery.QuerySync(abcitypes.RequestQuery{ ++ Data: rawBlock, ++ Path: fmt.Sprintf(consts.TxInclusionProofQueryPath, index), ++ }) ++ if err != nil { ++ return shareProof, err ++ } ++ err = pShareProof.Unmarshal(res.Value) ++ if err != nil { ++ return shareProof, err ++ } ++ shareProof, err = types.ShareProofFromProto(pShareProof) ++ if err != nil { ++ return shareProof, err ++ } ++ return shareProof, nil ++} ++ ++// ProveShares creates an NMT proof for a set of shares to a set of rows. It is ++// end exclusive. ++// Deprecated: Use ProveSharesV2 instead. 
++func ProveShares( ++ _ *rpctypes.Context, ++ height int64, ++ startShare uint64, ++ endShare uint64, ++) (types.ShareProof, error) { ++ var ( ++ pShareProof cmtproto.ShareProof ++ shareProof types.ShareProof ++ ) ++ env := GetEnvironment() ++ rawBlock, err := loadRawBlock(env.BlockStore, height) ++ if err != nil { ++ return shareProof, err ++ } ++ res, err := env.ProxyAppQuery.QuerySync(abcitypes.RequestQuery{ ++ Data: rawBlock, ++ Path: fmt.Sprintf(consts.ShareInclusionProofQueryPath, startShare, endShare), ++ }) ++ if err != nil { ++ return shareProof, err ++ } ++ if res.Value == nil && res.Log != "" { ++ // we can make the assumption that for custom queries, if the value is nil ++ // and some logs have been emitted, then an error happened. ++ return types.ShareProof{}, errors.New(res.Log) ++ } ++ err = pShareProof.Unmarshal(res.Value) ++ if err != nil { ++ return shareProof, err ++ } ++ shareProof, err = types.ShareProofFromProto(pShareProof) ++ if err != nil { ++ return shareProof, err ++ } ++ return shareProof, nil ++} ++ ++// TxStatus retrieves the status of a transaction by its hash. It returns a ResultTxStatus ++// with the transaction's height and index if committed, or its pending, evicted, or unknown status. ++// It also includes the execution code and log for failed txs. 
++func TxStatus(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultTxStatus, error) { ++ env := GetEnvironment() ++ ++ // Check if the tx has been committed ++ txInfo := env.BlockStore.LoadTxInfo(hash) ++ if txInfo != nil { ++ return &ctypes.ResultTxStatus{Height: txInfo.Height, Index: txInfo.Index, ExecutionCode: txInfo.Code, Error: txInfo.Error, Status: TxStatusCommitted}, nil ++ } ++ ++ // Get the tx key from the hash ++ txKey, err := types.TxKeyFromBytes(hash) ++ if err != nil { ++ return nil, fmt.Errorf("failed to get tx key from hash: %v", err) ++ } ++ ++ // Check if the tx is in the mempool ++ txInMempool, ok := env.Mempool.GetTxByKey(txKey) ++ if txInMempool != nil && ok { ++ return &ctypes.ResultTxStatus{Status: TxStatusPending}, nil ++ } ++ ++ // Check if the tx is evicted ++ isEvicted := env.Mempool.WasRecentlyEvicted(txKey) ++ if isEvicted { ++ return &ctypes.ResultTxStatus{Status: TxStatusEvicted}, nil ++ } ++ ++ // If the tx is not in the mempool, evicted, or committed, return unknown ++ return &ctypes.ResultTxStatus{Status: TxStatusUnknown}, nil ++} ++ ++// ProveSharesV2 creates a proof for a set of shares to the data root. ++// The range is end exclusive. ++func ProveSharesV2( ++ ctx *rpctypes.Context, ++ height int64, ++ startShare uint64, ++ endShare uint64, ++) (*ctypes.ResultShareProof, error) { ++ shareProof, err := ProveShares(ctx, height, startShare, endShare) ++ if err != nil { ++ return nil, err ++ } ++ return &ctypes.ResultShareProof{ShareProof: shareProof}, nil ++} ++ ++func loadRawBlock(bs state.BlockStore, height int64) ([]byte, error) { ++ var blockMeta = bs.LoadBlockMeta(height) ++ if blockMeta == nil { ++ return nil, fmt.Errorf("no block found for height %d", height) ++ } ++ ++ buf := []byte{} ++ for i := 0; i < int(blockMeta.BlockID.PartSetHeader.Total); i++ { ++ part := bs.LoadBlockPart(height, i) ++ // If the part is missing (e.g. 
since it has been deleted after we ++ // loaded the block meta) we consider the whole block to be missing. ++ if part == nil { ++ return nil, fmt.Errorf("missing block part at height %d part %d", height, i) ++ } ++ buf = append(buf, part.Bytes...) ++ } ++ return buf, nil ++} ++ + // TxSearchMatchEvents allows you to query for multiple transactions results and match the + // query attributes to a common event. It returns a + // list of transactions (maximum ?per_page entries) and the total count. diff --git a/patches/rpc/core/tx_status_test.go.patch b/patches/rpc/core/tx_status_test.go.patch new file mode 100644 index 00000000000..7fc64105185 --- /dev/null +++ b/patches/rpc/core/tx_status_test.go.patch @@ -0,0 +1,141 @@ +diff --git a/rpc/core/tx_status_test.go b/rpc/core/tx_status_test.go +new file mode 100644 +index 000000000..729dde821 +--- /dev/null ++++ b/rpc/core/tx_status_test.go +@@ -0,0 +1,135 @@ ++package core ++ ++import ( ++ "testing" ++ ++ "github.com/golang/mock/gomock" ++ "github.com/stretchr/testify/assert" ++ mock "github.com/tendermint/tendermint/rpc/core/mocks" ++ rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ++ types "github.com/tendermint/tendermint/types" ++) ++ ++// TestTxStatus tests the TxStatus function in the RPC core ++// making sure it fetches the correct status for each transaction. 
++func TestTxStatus(t *testing.T) { ++ // Create a controller ++ ctrl := gomock.NewController(t) ++ defer ctrl.Finish() ++ ++ // Create a new environment ++ env := &Environment{} ++ ++ // Create a new mempool and block store ++ mempool := mock.NewMockMempool(ctrl) ++ env.Mempool = mempool ++ blockStore := mockBlockStore{ ++ height: 0, ++ blocks: nil, ++ } ++ env.BlockStore = blockStore ++ SetEnvironment(env) ++ ++ tests := []struct { ++ name string ++ setup func(*Environment, []types.Tx) ++ expectedStatus string ++ }{ ++ { ++ name: "Committed", ++ setup: func(env *Environment, txs []types.Tx) { ++ height := int64(5) ++ blocks := randomBlocks(height) ++ blockStore = mockBlockStore{ ++ height: height, ++ blocks: blocks, ++ } ++ env.BlockStore = blockStore ++ }, ++ expectedStatus: "COMMITTED", ++ }, ++ { ++ name: "Unknown", ++ setup: func(env *Environment, txs []types.Tx) { ++ env.BlockStore = mockBlockStore{ ++ height: 0, ++ blocks: nil, ++ } ++ for _, tx := range txs { ++ // Set GetTxByKey to return nil and false for all transactions ++ mempool.EXPECT().GetTxByKey(tx.Key()).Return(nil, false).AnyTimes() ++ // Set WasRecentlyEvicted to return false for all transactions ++ mempool.EXPECT().WasRecentlyEvicted(tx.Key()).Return(false).AnyTimes() ++ } ++ }, ++ ++ expectedStatus: "UNKNOWN", ++ }, ++ { ++ name: "Pending", ++ setup: func(env *Environment, txs []types.Tx) { ++ env.BlockStore = mockBlockStore{ ++ height: 0, ++ blocks: nil, ++ } ++ // Reset the mempool ++ mempool = mock.NewMockMempool(ctrl) ++ env.Mempool = mempool ++ ++ for _, tx := range txs { ++ // Set GetTxByKey to return the transaction and true for all transactions ++ mempool.EXPECT().GetTxByKey(tx.Key()).Return(tx, true).AnyTimes() ++ } ++ }, ++ expectedStatus: "PENDING", ++ }, ++ { ++ name: "Evicted", ++ setup: func(env *Environment, txs []types.Tx) { ++ env.BlockStore = mockBlockStore{ ++ height: 0, ++ blocks: nil, ++ } ++ // Reset the mempool ++ mempool = mock.NewMockMempool(ctrl) ++ env.Mempool = 
mempool ++ ++ for _, tx := range txs { ++ // Set GetTxByKey to return nil and false for all transactions ++ mempool.EXPECT().GetTxByKey(tx.Key()).Return(nil, false).AnyTimes() ++ // Set WasRecentlyEvicted to return true for all transactions ++ mempool.EXPECT().WasRecentlyEvicted(tx.Key()).Return(true).AnyTimes() ++ } ++ }, ++ expectedStatus: "EVICTED", ++ }, ++ } ++ ++ for _, tt := range tests { ++ t.Run(tt.name, func(t *testing.T) { ++ height := int64(2) ++ // Create a set of transactions on the specified height ++ txs := makeTxs(height) ++ ++ tt.setup(env, txs) ++ ++ // Check the status of each transaction ++ for i, tx := range txs { ++ txStatus, _ := TxStatus(&rpctypes.Context{}, tx.Hash()) ++ assert.Equal(t, tt.expectedStatus, txStatus.Status) ++ ++ // Check the height and index of transactions that are committed ++ if blockStore.height > 0 && tt.expectedStatus == "COMMITTED" { ++ txStatus, _ := TxStatus(&rpctypes.Context{}, tx.Hash()) ++ ++ assert.Equal(t, txStatus.Status, tt.expectedStatus) ++ assert.Equal(t, height, txStatus.Height) ++ assert.Equal(t, uint32(i), txStatus.Index) ++ assert.Equal(t, uint32(0), txStatus.ExecutionCode) ++ assert.Equal(t, "", txStatus.Error) ++ } ++ } ++ ++ }) ++ } ++} diff --git a/patches/rpc/core/types/responses.go.patch b/patches/rpc/core/types/responses.go.patch new file mode 100644 index 00000000000..d53a9bcd662 --- /dev/null +++ b/patches/rpc/core/types/responses.go.patch @@ -0,0 +1,82 @@ +diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go +index c6ed6eb19..07ab65fd5 100644 +--- a/rpc/core/types/responses.go ++++ b/rpc/core/types/responses.go +@@ -4,6 +4,8 @@ import ( + "encoding/json" + "time" + ++ "github.com/tendermint/tendermint/crypto/merkle" ++ + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/libs/bytes" +@@ -39,12 +41,35 @@ type ResultBlock struct { + Block *types.Block `json:"block"` + } + ++// Single block with 
all data for validation ++type ResultSignedBlock struct { ++ Header types.Header `json:"header"` ++ Commit types.Commit `json:"commit"` ++ Data types.Data `json:"data"` ++ ValidatorSet types.ValidatorSet `json:"validator_set"` ++} ++ ++// ResultHeader represents the response for a Header RPC Client query ++type ResultHeader struct { ++ Header *types.Header `json:"header"` ++} ++ + // Commit and Header + type ResultCommit struct { + types.SignedHeader `json:"signed_header"` + CanonicalCommit bool `json:"canonical"` + } + ++// ResultTxStatus represents the status of a transaction during its life cycle. ++// It contains info to locate a tx in a committed block as well as its execution code, log if it fails and status. ++type ResultTxStatus struct { ++ Height int64 `json:"height"` ++ Index uint32 `json:"index"` ++ ExecutionCode uint32 `json:"execution_code"` ++ Error string `json:"error"` ++ Status string `json:"status"` ++} ++ + // ABCI results from a block + type ResultBlockResults struct { + Height int64 `json:"height"` +@@ -55,6 +80,14 @@ type ResultBlockResults struct { + ConsensusParamUpdates *abci.ConsensusParams `json:"consensus_param_updates"` + } + ++type ResultDataCommitment struct { ++ DataCommitment bytes.HexBytes `json:"data_commitment"` ++} ++ ++type ResultDataRootInclusionProof struct { ++ Proof merkle.Proof `json:"proof"` ++} ++ + // NewResultCommit is a helper to initialize the ResultCommit with + // the embedded struct + func NewResultCommit(header *types.Header, commit *types.Commit, +@@ -196,7 +229,7 @@ type ResultTx struct { + Index uint32 `json:"index"` + TxResult abci.ResponseDeliverTx `json:"tx_result"` + Tx types.Tx `json:"tx"` +- Proof types.TxProof `json:"proof,omitempty"` ++ Proof types.ShareProof `json:"proof,omitempty"` + } + + // Result of searching for txs +@@ -249,3 +282,8 @@ type ResultEvent struct { + Data types.TMEventData `json:"data"` + Events map[string][]string `json:"events"` + } ++ ++// ResultShareProof is an API response that 
contains a ShareProof. ++type ResultShareProof struct { ++ ShareProof types.ShareProof `json:"share_proof"` ++} diff --git a/patches/rpc/grpc/api.go.patch b/patches/rpc/grpc/api.go.patch new file mode 100644 index 00000000000..1da31fc0554 --- /dev/null +++ b/patches/rpc/grpc/api.go.patch @@ -0,0 +1,399 @@ +diff --git a/rpc/grpc/api.go b/rpc/grpc/api.go +index 62c6b66c1..b985ef498 100644 +--- a/rpc/grpc/api.go ++++ b/rpc/grpc/api.go +@@ -2,10 +2,20 @@ package coregrpc + + import ( + "context" ++ "errors" ++ "fmt" ++ "sync" ++ "time" + + abci "github.com/tendermint/tendermint/abci/types" +- core "github.com/tendermint/tendermint/rpc/core" ++ "github.com/tendermint/tendermint/crypto/encoding" ++ "github.com/tendermint/tendermint/libs/pubsub" ++ "github.com/tendermint/tendermint/libs/rand" ++ "github.com/tendermint/tendermint/proto/tendermint/crypto" ++ "github.com/tendermint/tendermint/proto/tendermint/types" ++ "github.com/tendermint/tendermint/rpc/core" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ++ eventstypes "github.com/tendermint/tendermint/types" + ) + + type broadcastAPI struct { +@@ -37,3 +47,372 @@ func (bapi *broadcastAPI) BroadcastTx(ctx context.Context, req *RequestBroadcast + }, + }, nil + } ++ ++type BlockAPI struct { ++ sync.Mutex ++ heightListeners map[chan NewHeightEvent]struct{} ++ newBlockSubscription eventstypes.Subscription ++ subscriptionID string ++ subscriptionQuery pubsub.Query ++} ++ ++func NewBlockAPI() *BlockAPI { ++ return &BlockAPI{ ++ heightListeners: make(map[chan NewHeightEvent]struct{}, 1000), ++ subscriptionID: fmt.Sprintf("block-api-subscription-%s", rand.Str(6)), ++ subscriptionQuery: eventstypes.EventQueryNewBlock, ++ } ++} ++ ++func (blockAPI *BlockAPI) StartNewBlockEventListener(ctx context.Context) error { ++ env := core.GetEnvironment() ++ if blockAPI.newBlockSubscription == nil { ++ var err error ++ blockAPI.newBlockSubscription, err = env.EventBus.Subscribe( ++ ctx, ++ blockAPI.subscriptionID, ++ 
blockAPI.subscriptionQuery, ++ 500, ++ ) ++ if err != nil { ++ env.Logger.Error("Failed to subscribe to new blocks", "err", err) ++ return err ++ } ++ } ++ for { ++ select { ++ case <-ctx.Done(): ++ return nil ++ case <-blockAPI.newBlockSubscription.Cancelled(): ++ env.Logger.Error("cancelled grpc subscription. retrying") ++ ok, err := blockAPI.retryNewBlocksSubscription(ctx) ++ if err != nil { ++ return err ++ } ++ if !ok { ++ // this will happen when the context is done. we can stop here ++ return nil ++ } ++ case event, ok := <-blockAPI.newBlockSubscription.Out(): ++ if !ok { ++ env.Logger.Error("new blocks subscription closed. re-subscribing") ++ ok, err := blockAPI.retryNewBlocksSubscription(ctx) ++ if err != nil { ++ return err ++ } ++ if !ok { ++ // this will happen when the context is done. we can stop here ++ return nil ++ } ++ continue ++ } ++ newBlockEvent, ok := event.Events()[eventstypes.EventTypeKey] ++ if !ok || len(newBlockEvent) == 0 || newBlockEvent[0] != eventstypes.EventNewBlock { ++ continue ++ } ++ data, ok := event.Data().(eventstypes.EventDataNewBlock) ++ if !ok { ++ env.Logger.Error("couldn't cast event data to new block") ++ return fmt.Errorf("couldn't cast event data to new block. Events: %s", event.Events()) ++ } ++ blockAPI.broadcastToListeners(ctx, data.Block.Height, data.Block.Hash()) ++ } ++ } ++} ++ ++// RetryAttempts the number of retry times when the subscription is closed. ++const RetryAttempts = 6 ++ ++// SubscriptionCapacity the maximum number of pending blocks in the subscription. 
++const SubscriptionCapacity = 500 ++ ++func (blockAPI *BlockAPI) retryNewBlocksSubscription(ctx context.Context) (bool, error) { ++ env := core.GetEnvironment() ++ ticker := time.NewTicker(time.Second) ++ defer ticker.Stop() ++ blockAPI.Lock() ++ defer blockAPI.Unlock() ++ for i := 1; i < RetryAttempts; i++ { ++ select { ++ case <-ctx.Done(): ++ return false, nil ++ case <-ticker.C: ++ var err error ++ blockAPI.newBlockSubscription, err = env.EventBus.Subscribe( ++ ctx, ++ fmt.Sprintf("block-api-subscription-%s", rand.Str(6)), ++ blockAPI.subscriptionQuery, ++ SubscriptionCapacity, ++ ) ++ if err != nil { ++ env.Logger.Error("Failed to subscribe to new blocks. retrying", "err", err, "retry_number", i) ++ } else { ++ return true, nil ++ } ++ } ++ } ++ return false, errors.New("couldn't recover from failed blocks subscription. stopping listeners") ++} ++ ++func (blockAPI *BlockAPI) broadcastToListeners(ctx context.Context, height int64, hash []byte) { ++ blockAPI.Lock() ++ defer blockAPI.Unlock() ++ for ch := range blockAPI.heightListeners { ++ func() { ++ defer func() { ++ if r := recover(); r != nil { ++ // logging the error then removing the heights listener ++ core.GetEnvironment().Logger.Debug("failed to write to heights listener", "err", r) ++ blockAPI.removeHeightListener(ch) ++ } ++ }() ++ select { ++ case <-ctx.Done(): ++ return ++ case ch <- NewHeightEvent{Height: height, Hash: hash}: ++ } ++ }() ++ } ++} ++ ++func (blockAPI *BlockAPI) addHeightListener() chan NewHeightEvent { ++ blockAPI.Lock() ++ defer blockAPI.Unlock() ++ ch := make(chan NewHeightEvent, 50) ++ blockAPI.heightListeners[ch] = struct{}{} ++ return ch ++} ++ ++func (blockAPI *BlockAPI) removeHeightListener(ch chan NewHeightEvent) { ++ blockAPI.Lock() ++ defer blockAPI.Unlock() ++ delete(blockAPI.heightListeners, ch) ++} ++ ++func (blockAPI *BlockAPI) closeAllListeners() { ++ blockAPI.Lock() ++ defer blockAPI.Unlock() ++ if blockAPI.heightListeners == nil { ++ // if this is nil, then there 
is no need to close anything ++ return ++ } ++ for channel := range blockAPI.heightListeners { ++ delete(blockAPI.heightListeners, channel) ++ } ++} ++ ++// Stop cleans up the BlockAPI instance by closing all listeners ++// and ensuring no further events are processed. ++func (blockAPI *BlockAPI) Stop(ctx context.Context) error { ++ blockAPI.Lock() ++ defer blockAPI.Unlock() ++ ++ // close all height listeners ++ blockAPI.closeAllListeners() ++ ++ var err error ++ // stop the events subscription ++ if blockAPI.newBlockSubscription != nil { ++ err = core.GetEnvironment().EventBus.Unsubscribe(ctx, blockAPI.subscriptionID, blockAPI.subscriptionQuery) ++ blockAPI.newBlockSubscription = nil ++ } ++ ++ core.GetEnvironment().Logger.Info("gRPC streaming API has been stopped") ++ return err ++} ++ ++func (blockAPI *BlockAPI) BlockByHash(req *BlockByHashRequest, stream BlockAPI_BlockByHashServer) error { ++ blockStore := core.GetEnvironment().BlockStore ++ blockMeta := blockStore.LoadBlockMetaByHash(req.Hash) ++ if blockMeta == nil { ++ return fmt.Errorf("nil block meta for block hash %d", req.Hash) ++ } ++ commit := blockStore.LoadBlockCommit(blockMeta.Header.Height) ++ if commit == nil { ++ return fmt.Errorf("nil commit for block hash %d", req.Hash) ++ } ++ protoCommit := commit.ToProto() ++ ++ validatorSet, err := core.GetEnvironment().StateStore.LoadValidators(blockMeta.Header.Height) ++ if err != nil { ++ return err ++ } ++ protoValidatorSet, err := validatorSet.ToProto() ++ if err != nil { ++ return err ++ } ++ ++ for i := 0; i < int(blockMeta.BlockID.PartSetHeader.Total); i++ { ++ part, err := blockStore.LoadBlockPart(blockMeta.Header.Height, i).ToProto() ++ if err != nil { ++ return err ++ } ++ if part == nil { ++ return fmt.Errorf("nil block part %d for block hash %d", i, req.Hash) ++ } ++ if !req.Prove { ++ part.Proof = crypto.Proof{} ++ } ++ isLastPart := i == int(blockMeta.BlockID.PartSetHeader.Total)-1 ++ resp := StreamedBlockByHashResponse{ ++ BlockPart: part, 
++ IsLast: isLastPart, ++ } ++ if i == 0 { ++ resp.ValidatorSet = protoValidatorSet ++ resp.Commit = protoCommit ++ } ++ err = stream.Send(&resp) ++ if err != nil { ++ return err ++ } ++ } ++ return nil ++} ++ ++func (blockAPI *BlockAPI) BlockByHeight(req *BlockByHeightRequest, stream BlockAPI_BlockByHeightServer) error { ++ blockStore := core.GetEnvironment().BlockStore ++ height := req.Height ++ if height == 0 { ++ height = blockStore.Height() ++ } ++ ++ blockMeta := blockStore.LoadBlockMeta(height) ++ if blockMeta == nil { ++ return fmt.Errorf("nil block meta for height %d", height) ++ } ++ ++ commit := blockStore.LoadSeenCommit(height) ++ if commit == nil { ++ return fmt.Errorf("nil block commit for height %d", height) ++ } ++ protoCommit := commit.ToProto() ++ ++ validatorSet, err := core.GetEnvironment().StateStore.LoadValidators(height) ++ if err != nil { ++ return err ++ } ++ protoValidatorSet, err := validatorSet.ToProto() ++ if err != nil { ++ return err ++ } ++ ++ for i := 0; i < int(blockMeta.BlockID.PartSetHeader.Total); i++ { ++ part, err := blockStore.LoadBlockPart(height, i).ToProto() ++ if err != nil { ++ return err ++ } ++ if part == nil { ++ return fmt.Errorf("nil block part %d for height %d", i, height) ++ } ++ if !req.Prove { ++ part.Proof = crypto.Proof{} ++ } ++ isLastPart := i == int(blockMeta.BlockID.PartSetHeader.Total)-1 ++ resp := StreamedBlockByHeightResponse{ ++ BlockPart: part, ++ IsLast: isLastPart, ++ } ++ if i == 0 { ++ resp.ValidatorSet = protoValidatorSet ++ resp.Commit = protoCommit ++ } ++ err = stream.Send(&resp) ++ if err != nil { ++ return err ++ } ++ } ++ return nil ++} ++ ++func (blockAPI *BlockAPI) Status(_ context.Context, _ *StatusRequest) (*StatusResponse, error) { ++ status, err := core.Status(nil) ++ if err != nil { ++ return nil, err ++ } ++ ++ protoPubKey, err := encoding.PubKeyToProto(status.ValidatorInfo.PubKey) ++ if err != nil { ++ return nil, err ++ } ++ return &StatusResponse{ ++ NodeInfo: 
status.NodeInfo.ToProto(), ++ SyncInfo: &SyncInfo{ ++ LatestBlockHash: status.SyncInfo.LatestBlockHash, ++ LatestAppHash: status.SyncInfo.LatestAppHash, ++ LatestBlockHeight: status.SyncInfo.LatestBlockHeight, ++ LatestBlockTime: status.SyncInfo.LatestBlockTime, ++ EarliestBlockHash: status.SyncInfo.EarliestBlockHash, ++ EarliestAppHash: status.SyncInfo.EarliestAppHash, ++ EarliestBlockHeight: status.SyncInfo.EarliestBlockHeight, ++ EarliestBlockTime: status.SyncInfo.EarliestBlockTime, ++ CatchingUp: status.SyncInfo.CatchingUp, ++ }, ++ ValidatorInfo: &ValidatorInfo{ ++ Address: status.ValidatorInfo.Address, ++ PubKey: &protoPubKey, ++ VotingPower: status.ValidatorInfo.VotingPower, ++ }, ++ }, nil ++} ++ ++func (blockAPI *BlockAPI) Commit(_ context.Context, req *CommitRequest) (*CommitResponse, error) { ++ blockStore := core.GetEnvironment().BlockStore ++ height := req.Height ++ if height == 0 { ++ height = blockStore.Height() ++ } ++ commit := blockStore.LoadSeenCommit(height) ++ if commit == nil { ++ return nil, fmt.Errorf("nil block commit for height %d", height) ++ } ++ protoCommit := commit.ToProto() ++ ++ return &CommitResponse{ ++ Commit: &types.Commit{ ++ Height: protoCommit.Height, ++ Round: protoCommit.Round, ++ BlockID: protoCommit.BlockID, ++ Signatures: protoCommit.Signatures, ++ }, ++ }, nil ++} ++ ++func (blockAPI *BlockAPI) ValidatorSet(_ context.Context, req *ValidatorSetRequest) (*ValidatorSetResponse, error) { ++ blockStore := core.GetEnvironment().BlockStore ++ height := req.Height ++ if height == 0 { ++ height = blockStore.Height() ++ } ++ validatorSet, err := core.GetEnvironment().StateStore.LoadValidators(height) ++ if err != nil { ++ return nil, err ++ } ++ protoValidatorSet, err := validatorSet.ToProto() ++ if err != nil { ++ return nil, err ++ } ++ return &ValidatorSetResponse{ ++ ValidatorSet: protoValidatorSet, ++ Height: height, ++ }, nil ++} ++ ++func (blockAPI *BlockAPI) SubscribeNewHeights(_ *SubscribeNewHeightsRequest, stream 
BlockAPI_SubscribeNewHeightsServer) error { ++ heightListener := blockAPI.addHeightListener() ++ defer blockAPI.removeHeightListener(heightListener) ++ ++ for { ++ select { ++ case event, ok := <-heightListener: ++ if !ok { ++ return errors.New("blocks subscription closed from the service side") ++ } ++ if err := stream.Send(&event); err != nil { ++ return err ++ } ++ case <-stream.Context().Done(): ++ return nil ++ } ++ } ++} diff --git a/patches/rpc/grpc/client_server.go.patch b/patches/rpc/grpc/client_server.go.patch new file mode 100644 index 00000000000..bf810c92e26 --- /dev/null +++ b/patches/rpc/grpc/client_server.go.patch @@ -0,0 +1,75 @@ +diff --git a/rpc/grpc/client_server.go b/rpc/grpc/client_server.go +index 39c6859c1..f63537fb7 100644 +--- a/rpc/grpc/client_server.go ++++ b/rpc/grpc/client_server.go +@@ -3,8 +3,11 @@ package coregrpc + import ( + "net" + ++ "github.com/tendermint/tendermint/rpc/core" ++ + "golang.org/x/net/context" + "google.golang.org/grpc" ++ "google.golang.org/grpc/credentials/insecure" + + cmtnet "github.com/tendermint/tendermint/libs/net" + ) +@@ -20,20 +23,55 @@ type Config struct { + func StartGRPCServer(ln net.Listener) error { + grpcServer := grpc.NewServer() + RegisterBroadcastAPIServer(grpcServer, &broadcastAPI{}) +- return grpcServer.Serve(ln) ++ api := NewBlockAPI() ++ RegisterBlockAPIServer(grpcServer, api) ++ errCh := make(chan error, 2) ++ ctx, cancel := context.WithCancel(context.Background()) ++ defer cancel() ++ go func() { ++ errCh <- api.StartNewBlockEventListener(ctx) ++ }() ++ go func() { ++ errCh <- grpcServer.Serve(ln) ++ }() ++ defer grpcServer.GracefulStop() ++ defer func(api *BlockAPI, ctx context.Context) { ++ err := api.Stop(ctx) ++ if err != nil { ++ core.GetEnvironment().Logger.Error("error stopping block api", "err", err) ++ } ++ }(api, ctx) ++ // blocks until one errors or returns nil ++ return <-errCh + } + + // StartGRPCClient dials the gRPC server using protoAddr and returns a new + // 
BroadcastAPIClient. + func StartGRPCClient(protoAddr string) BroadcastAPIClient { +- //nolint:staticcheck // SA1019 Existing use of deprecated but supported dial option. +- conn, err := grpc.Dial(protoAddr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc)) ++ conn, err := grpc.Dial(protoAddr, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(dialerFunc)) //nolint:staticcheck + if err != nil { + panic(err) + } + return NewBroadcastAPIClient(conn) + } + ++// StartBlockAPIGRPCClient dials the gRPC server using protoAddr and returns a new ++// BlockAPIClient. ++func StartBlockAPIGRPCClient(protoAddr string, opts ...grpc.DialOption) (BlockAPIClient, error) { ++ if len(opts) == 0 { ++ opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) ++ } ++ opts = append(opts, grpc.WithContextDialer(dialerFunc)) ++ conn, err := grpc.Dial( //nolint:staticcheck ++ protoAddr, ++ opts..., ++ ) ++ if err != nil { ++ return nil, err ++ } ++ return NewBlockAPIClient(conn), nil ++} ++ + func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { + return cmtnet.Connect(addr) + } diff --git a/patches/rpc/grpc/grpc_test.go.patch b/patches/rpc/grpc/grpc_test.go.patch new file mode 100644 index 00000000000..bd92f92a6e8 --- /dev/null +++ b/patches/rpc/grpc/grpc_test.go.patch @@ -0,0 +1,384 @@ +diff --git a/rpc/grpc/grpc_test.go b/rpc/grpc/grpc_test.go +index 073ff51c9..b76d935fb 100644 +--- a/rpc/grpc/grpc_test.go ++++ b/rpc/grpc/grpc_test.go +@@ -1,9 +1,19 @@ ++//nolint:dupl + package coregrpc_test + + import ( + "context" + "os" + "testing" ++ "time" ++ ++ "github.com/stretchr/testify/assert" ++ "github.com/tendermint/tendermint/libs/rand" ++ "github.com/tendermint/tendermint/proto/tendermint/crypto" ++ rpcclient "github.com/tendermint/tendermint/rpc/client" ++ rpchttp "github.com/tendermint/tendermint/rpc/client/http" ++ "github.com/tendermint/tendermint/rpc/core" ++ "github.com/tendermint/tendermint/types" + + 
"github.com/stretchr/testify/require" + +@@ -33,3 +43,359 @@ func TestBroadcastTx(t *testing.T) { + require.EqualValues(t, 0, res.CheckTx.Code) + require.EqualValues(t, 0, res.DeliverTx.Code) + } ++ ++func setupClient(t *testing.T) core_grpc.BlockAPIClient { ++ client, err := rpctest.GetBlockAPIClient() ++ require.NoError(t, err) ++ return client ++} ++ ++func TestBlockByHash(t *testing.T) { ++ client := setupClient(t) ++ waitForHeight(t, 2) ++ expectedBlockMeta := core.GetEnvironment().BlockStore.LoadBlockMeta(1) ++ require.NotNil(t, expectedBlockMeta) ++ ++ // query the block along with the part proofs ++ res, err := client.BlockByHash(context.Background(), &core_grpc.BlockByHashRequest{ ++ Hash: expectedBlockMeta.BlockID.Hash, ++ Prove: true, ++ }) ++ require.NoError(t, err) ++ ++ part, err := res.Recv() ++ require.NoError(t, err) ++ ++ require.NotNil(t, part.BlockPart) ++ require.NotNil(t, part.ValidatorSet) ++ require.NotNil(t, part.Commit) ++ ++ assert.NotEqual(t, part.BlockPart.Proof, crypto.Proof{}) ++ assert.Equal(t, part.Commit.Height, expectedBlockMeta.Header.Height) ++ ++ // query the block along without the part proofs ++ res, err = client.BlockByHash(context.Background(), &core_grpc.BlockByHashRequest{ ++ Hash: expectedBlockMeta.BlockID.Hash, ++ Prove: false, ++ }) ++ require.NoError(t, err) ++ ++ part, err = res.Recv() ++ require.NoError(t, err) ++ ++ require.NotNil(t, part.BlockPart) ++ require.NotNil(t, part.ValidatorSet) ++ require.NotNil(t, part.Commit) ++ ++ assert.Equal(t, part.BlockPart.Proof, crypto.Proof{}) ++ assert.Equal(t, part.Commit.Height, expectedBlockMeta.Header.Height) ++} ++ ++func TestCommit(t *testing.T) { ++ client := setupClient(t) ++ waitForHeight(t, 2) ++ expectedBlockCommit := core.GetEnvironment().BlockStore.LoadSeenCommit(1) ++ ++ res, err := client.Commit(context.Background(), &core_grpc.CommitRequest{ ++ Height: 1, ++ }) ++ require.NoError(t, err) ++ ++ assert.Equal(t, expectedBlockCommit.BlockID.Hash.Bytes(), 
res.Commit.BlockID.Hash) ++} ++ ++func TestLatestCommit(t *testing.T) { ++ client := setupClient(t) ++ waitForHeight(t, 3) ++ ++ res, err := client.Commit(context.Background(), &core_grpc.CommitRequest{ ++ Height: 0, ++ }) ++ require.NoError(t, err) ++ ++ assert.Greater(t, res.Commit.Height, int64(2)) ++} ++ ++func TestValidatorSet(t *testing.T) { ++ client := setupClient(t) ++ waitForHeight(t, 2) ++ expectedValidatorSet, err := core.GetEnvironment().StateStore.LoadValidators(1) ++ require.NoError(t, err) ++ ++ res, err := client.ValidatorSet(context.Background(), &core_grpc.ValidatorSetRequest{ ++ Height: 1, ++ }) ++ require.NoError(t, err) ++ ++ assert.Equal(t, len(expectedValidatorSet.Validators), len(res.ValidatorSet.Validators)) ++} ++ ++func TestLatestValidatorSet(t *testing.T) { ++ client := setupClient(t) ++ waitForHeight(t, 3) ++ ++ res, err := client.ValidatorSet(context.Background(), &core_grpc.ValidatorSetRequest{ ++ Height: 0, ++ }) ++ require.NoError(t, err) ++ ++ assert.Greater(t, res.Height, int64(2)) ++} ++ ++func TestStatus(t *testing.T) { ++ client := setupClient(t) ++ expectedStatus, err := core.Status(nil) ++ require.NoError(t, err) ++ ++ res, err := client.Status(context.Background(), &core_grpc.StatusRequest{}) ++ require.NoError(t, err) ++ assert.Equal(t, string(expectedStatus.NodeInfo.DefaultNodeID), res.NodeInfo.DefaultNodeID) ++} ++ ++func TestSubscribeNewHeights(t *testing.T) { ++ client := setupClient(t) ++ ctx, cancel := context.WithCancel(context.Background()) ++ defer cancel() ++ stream, err := client.SubscribeNewHeights(ctx, &core_grpc.SubscribeNewHeightsRequest{}) ++ require.NoError(t, err) ++ store := core.GetEnvironment().BlockStore ++ ++ go func() { ++ listenedHeightsCount := 0 ++ defer func() { ++ assert.Greater(t, listenedHeightsCount, 0) ++ }() ++ for { ++ res, err := stream.Recv() ++ if ctx.Err() != nil { ++ return ++ } ++ require.NoError(t, err) ++ require.Greater(t, res.Height, int64(0)) ++ assert.Equal(t, 
store.LoadBlockMeta(res.Height).BlockID.Hash.Bytes(), res.Hash) ++ listenedHeightsCount++ ++ } ++ }() ++ ++ time.Sleep(5 * time.Second) ++} ++ ++func TestBlockByHash_Streaming(t *testing.T) { ++ client := setupClient(t) ++ ++ // send a big transaction that would result in a block ++ // containing multiple block parts ++ txRes, err := rpctest.GetGRPCClient().BroadcastTx( ++ context.Background(), ++ &core_grpc.RequestBroadcastTx{Tx: rand.NewRand().Bytes(1000000)}, ++ ) ++ require.NoError(t, err) ++ require.EqualValues(t, 0, txRes.CheckTx.Code) ++ require.EqualValues(t, 0, txRes.DeliverTx.Code) ++ ++ var expectedBlockMeta types.BlockMeta ++ for i := int64(1); i < 500; i++ { ++ waitForHeight(t, i+1) ++ blockMeta := core.GetEnvironment().BlockStore.LoadBlockMeta(i) ++ if blockMeta.BlockID.PartSetHeader.Total > 1 { ++ expectedBlockMeta = *blockMeta ++ break ++ } ++ } ++ ++ // query the block without the part proofs ++ res, err := client.BlockByHash(context.Background(), &core_grpc.BlockByHashRequest{ ++ Hash: expectedBlockMeta.BlockID.Hash, ++ Prove: false, ++ }) ++ require.NoError(t, err) ++ ++ part1, err := res.Recv() ++ require.NoError(t, err) ++ ++ require.NotNil(t, part1.BlockPart) ++ require.NotNil(t, part1.ValidatorSet) ++ require.NotNil(t, part1.Commit) ++ ++ assert.Equal(t, part1.BlockPart.Proof, crypto.Proof{}) ++ assert.Equal(t, part1.Commit.Height, expectedBlockMeta.Header.Height) ++ ++ part2, err := res.Recv() ++ require.NoError(t, err) ++ ++ require.NotNil(t, part2.BlockPart) ++ require.Nil(t, part2.ValidatorSet) ++ require.Nil(t, part2.Commit) ++ ++ assert.Equal(t, part2.BlockPart.Proof, crypto.Proof{}) ++ ++ // query the block along with the part proofs ++ res, err = client.BlockByHash(context.Background(), &core_grpc.BlockByHashRequest{ ++ Hash: expectedBlockMeta.BlockID.Hash, ++ Prove: true, ++ }) ++ require.NoError(t, err) ++ ++ part1, err = res.Recv() ++ require.NoError(t, err) ++ ++ require.NotNil(t, part1.BlockPart) ++ require.NotNil(t, 
part1.ValidatorSet) ++ require.NotNil(t, part1.Commit) ++ ++ assert.NotEqual(t, part1.BlockPart.Proof, crypto.Proof{}) ++ assert.Equal(t, part1.Commit.Height, expectedBlockMeta.Header.Height) ++ ++ part2, err = res.Recv() ++ require.NoError(t, err) ++ ++ require.NotNil(t, part2.BlockPart) ++ require.Nil(t, part2.ValidatorSet) ++ require.Nil(t, part2.Commit) ++ ++ assert.NotEqual(t, part2.BlockPart.Proof, crypto.Proof{}) ++} ++ ++func TestBlockByHeight(t *testing.T) { ++ client := setupClient(t) ++ waitForHeight(t, 2) ++ expectedBlockMeta := core.GetEnvironment().BlockStore.LoadBlockMeta(1) ++ ++ // query the block along with the part proofs ++ res, err := client.BlockByHeight(context.Background(), &core_grpc.BlockByHeightRequest{ ++ Height: expectedBlockMeta.Header.Height, ++ Prove: true, ++ }) ++ require.NoError(t, err) ++ ++ part, err := res.Recv() ++ require.NoError(t, err) ++ ++ require.NotNil(t, part.BlockPart) ++ require.NotNil(t, part.ValidatorSet) ++ require.NotNil(t, part.Commit) ++ ++ assert.NotEqual(t, part.BlockPart.Proof, crypto.Proof{}) ++ assert.Equal(t, part.Commit.Height, expectedBlockMeta.Header.Height) ++ ++ // query the block along without the part proofs ++ res, err = client.BlockByHeight(context.Background(), &core_grpc.BlockByHeightRequest{ ++ Height: expectedBlockMeta.Header.Height, ++ Prove: false, ++ }) ++ require.NoError(t, err) ++ ++ part, err = res.Recv() ++ require.NoError(t, err) ++ ++ require.NotNil(t, part.BlockPart) ++ require.NotNil(t, part.ValidatorSet) ++ require.NotNil(t, part.Commit) ++ ++ assert.Equal(t, part.BlockPart.Proof, crypto.Proof{}) ++ assert.Equal(t, part.Commit.Height, expectedBlockMeta.Header.Height) ++} ++ ++func TestLatestBlockByHeight(t *testing.T) { ++ client := setupClient(t) ++ waitForHeight(t, 2) ++ ++ // query the block along with the part proofs ++ res, err := client.BlockByHeight(context.Background(), &core_grpc.BlockByHeightRequest{ ++ Height: 0, ++ }) ++ require.NoError(t, err) ++ ++ part, err := 
res.Recv() ++ require.NoError(t, err) ++ ++ require.NotNil(t, part.BlockPart) ++ require.NotNil(t, part.ValidatorSet) ++ require.NotNil(t, part.Commit) ++ ++ assert.Greater(t, part.Commit.Height, int64(2)) ++} ++ ++func TestBlockQuery_Streaming(t *testing.T) { ++ client := setupClient(t) ++ ++ // send a big transaction that would result in a block ++ // containing multiple block parts ++ txRes, err := rpctest.GetGRPCClient().BroadcastTx( ++ context.Background(), ++ &core_grpc.RequestBroadcastTx{Tx: rand.NewRand().Bytes(1000000)}, ++ ) ++ require.NoError(t, err) ++ require.EqualValues(t, 0, txRes.CheckTx.Code) ++ require.EqualValues(t, 0, txRes.DeliverTx.Code) ++ ++ var expectedBlockMeta types.BlockMeta ++ for i := int64(1); i < 500; i++ { ++ waitForHeight(t, i+1) ++ blockMeta := core.GetEnvironment().BlockStore.LoadBlockMeta(i) ++ if blockMeta.BlockID.PartSetHeader.Total > 1 { ++ expectedBlockMeta = *blockMeta ++ break ++ } ++ } ++ ++ // query the block without the part proofs ++ res, err := client.BlockByHeight(context.Background(), &core_grpc.BlockByHeightRequest{ ++ Height: expectedBlockMeta.Header.Height, ++ Prove: false, ++ }) ++ require.NoError(t, err) ++ ++ part1, err := res.Recv() ++ require.NoError(t, err) ++ ++ require.NotNil(t, part1.BlockPart) ++ require.NotNil(t, part1.ValidatorSet) ++ require.NotNil(t, part1.Commit) ++ ++ assert.Equal(t, part1.BlockPart.Proof, crypto.Proof{}) ++ assert.Equal(t, part1.Commit.Height, expectedBlockMeta.Header.Height) ++ ++ part2, err := res.Recv() ++ require.NoError(t, err) ++ ++ require.NotNil(t, part2.BlockPart) ++ require.Nil(t, part2.ValidatorSet) ++ require.Nil(t, part2.Commit) ++ ++ assert.Equal(t, part2.BlockPart.Proof, crypto.Proof{}) ++ ++ // query the block along with the part proofs ++ res, err = client.BlockByHeight(context.Background(), &core_grpc.BlockByHeightRequest{ ++ Height: expectedBlockMeta.Header.Height, ++ Prove: true, ++ }) ++ require.NoError(t, err) ++ ++ part1, err = res.Recv() ++ 
require.NoError(t, err) ++ ++ require.NotNil(t, part1.BlockPart) ++ require.NotNil(t, part1.ValidatorSet) ++ require.NotNil(t, part1.Commit) ++ ++ assert.NotEqual(t, part1.BlockPart.Proof, crypto.Proof{}) ++ assert.Equal(t, part1.Commit.Height, expectedBlockMeta.Header.Height) ++ ++ part2, err = res.Recv() ++ require.NoError(t, err) ++ ++ require.NotNil(t, part2.BlockPart) ++ require.Nil(t, part2.ValidatorSet) ++ require.Nil(t, part2.Commit) ++ ++ assert.NotEqual(t, part2.BlockPart.Proof, crypto.Proof{}) ++} ++ ++func waitForHeight(t *testing.T, height int64) { ++ rpcAddr := rpctest.GetConfig().RPC.ListenAddress ++ c, err := rpchttp.New(rpcAddr, "/websocket") ++ require.NoError(t, err) ++ err = rpcclient.WaitForHeight(c, height, nil) ++ require.NoError(t, err) ++} diff --git a/patches/rpc/grpc/types.pb.go.patch b/patches/rpc/grpc/types.pb.go.patch new file mode 100644 index 00000000000..eeb0ddc29d7 --- /dev/null +++ b/patches/rpc/grpc/types.pb.go.patch @@ -0,0 +1,4744 @@ +diff --git a/rpc/grpc/types.pb.go b/rpc/grpc/types.pb.go +index b9cbee03f..9f50d2cd1 100644 +--- a/rpc/grpc/types.pb.go ++++ b/rpc/grpc/types.pb.go +@@ -6,20 +6,28 @@ package coregrpc + import ( + context "context" + fmt "fmt" ++ _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" ++ _ "github.com/gogo/protobuf/types" ++ github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + types "github.com/tendermint/tendermint/abci/types" ++ crypto "github.com/tendermint/tendermint/proto/tendermint/crypto" ++ p2p "github.com/tendermint/tendermint/proto/tendermint/p2p" ++ types1 "github.com/tendermint/tendermint/proto/tendermint/types" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" ++ time "time" + ) + + // Reference imports to suppress errors if they are not otherwise used. 
+ var _ = proto.Marshal + var _ = fmt.Errorf + var _ = math.Inf ++var _ = time.Kitchen + + // This is a compile-time assertion to ensure that this generated file + // is compatible with the proto package it is being compiled against. +@@ -107,6 +115,280 @@ func (m *RequestBroadcastTx) GetTx() []byte { + return nil + } + ++type BlockByHashRequest struct { ++ Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` ++ Prove bool `protobuf:"varint,2,opt,name=prove,proto3" json:"prove,omitempty"` ++} ++ ++func (m *BlockByHashRequest) Reset() { *m = BlockByHashRequest{} } ++func (m *BlockByHashRequest) String() string { return proto.CompactTextString(m) } ++func (*BlockByHashRequest) ProtoMessage() {} ++func (*BlockByHashRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{2} ++} ++func (m *BlockByHashRequest) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *BlockByHashRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_BlockByHashRequest.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *BlockByHashRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_BlockByHashRequest.Merge(m, src) ++} ++func (m *BlockByHashRequest) XXX_Size() int { ++ return m.Size() ++} ++func (m *BlockByHashRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_BlockByHashRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_BlockByHashRequest proto.InternalMessageInfo ++ ++func (m *BlockByHashRequest) GetHash() []byte { ++ if m != nil { ++ return m.Hash ++ } ++ return nil ++} ++ ++func (m *BlockByHashRequest) GetProve() bool { ++ if m != nil { ++ return m.Prove ++ } ++ return false ++} ++ ++type BlockByHeightRequest struct { ++ // Height the requested block height. 
++ // If height is equal to 0, the latest height stored in the block store ++ // will be used. ++ Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` ++ // Prove set to true to return the parts proofs. ++ Prove bool `protobuf:"varint,2,opt,name=prove,proto3" json:"prove,omitempty"` ++} ++ ++func (m *BlockByHeightRequest) Reset() { *m = BlockByHeightRequest{} } ++func (m *BlockByHeightRequest) String() string { return proto.CompactTextString(m) } ++func (*BlockByHeightRequest) ProtoMessage() {} ++func (*BlockByHeightRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{3} ++} ++func (m *BlockByHeightRequest) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *BlockByHeightRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_BlockByHeightRequest.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *BlockByHeightRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_BlockByHeightRequest.Merge(m, src) ++} ++func (m *BlockByHeightRequest) XXX_Size() int { ++ return m.Size() ++} ++func (m *BlockByHeightRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_BlockByHeightRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_BlockByHeightRequest proto.InternalMessageInfo ++ ++func (m *BlockByHeightRequest) GetHeight() int64 { ++ if m != nil { ++ return m.Height ++ } ++ return 0 ++} ++ ++func (m *BlockByHeightRequest) GetProve() bool { ++ if m != nil { ++ return m.Prove ++ } ++ return false ++} ++ ++type CommitRequest struct { ++ // Height the requested block commit height. ++ // If height is equal to 0, the latest height stored in the block store ++ // will be used. 
++ Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` ++} ++ ++func (m *CommitRequest) Reset() { *m = CommitRequest{} } ++func (m *CommitRequest) String() string { return proto.CompactTextString(m) } ++func (*CommitRequest) ProtoMessage() {} ++func (*CommitRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{4} ++} ++func (m *CommitRequest) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *CommitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_CommitRequest.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *CommitRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_CommitRequest.Merge(m, src) ++} ++func (m *CommitRequest) XXX_Size() int { ++ return m.Size() ++} ++func (m *CommitRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_CommitRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_CommitRequest proto.InternalMessageInfo ++ ++func (m *CommitRequest) GetHeight() int64 { ++ if m != nil { ++ return m.Height ++ } ++ return 0 ++} ++ ++type ValidatorSetRequest struct { ++ // Height the requested validator set height. ++ // If height is equal to 0, the latest height stored in the block store ++ // will be used. 
++ Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` ++} ++ ++func (m *ValidatorSetRequest) Reset() { *m = ValidatorSetRequest{} } ++func (m *ValidatorSetRequest) String() string { return proto.CompactTextString(m) } ++func (*ValidatorSetRequest) ProtoMessage() {} ++func (*ValidatorSetRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{5} ++} ++func (m *ValidatorSetRequest) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *ValidatorSetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_ValidatorSetRequest.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *ValidatorSetRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ValidatorSetRequest.Merge(m, src) ++} ++func (m *ValidatorSetRequest) XXX_Size() int { ++ return m.Size() ++} ++func (m *ValidatorSetRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_ValidatorSetRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_ValidatorSetRequest proto.InternalMessageInfo ++ ++func (m *ValidatorSetRequest) GetHeight() int64 { ++ if m != nil { ++ return m.Height ++ } ++ return 0 ++} ++ ++type SubscribeNewHeightsRequest struct { ++} ++ ++func (m *SubscribeNewHeightsRequest) Reset() { *m = SubscribeNewHeightsRequest{} } ++func (m *SubscribeNewHeightsRequest) String() string { return proto.CompactTextString(m) } ++func (*SubscribeNewHeightsRequest) ProtoMessage() {} ++func (*SubscribeNewHeightsRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{6} ++} ++func (m *SubscribeNewHeightsRequest) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *SubscribeNewHeightsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return 
xxx_messageInfo_SubscribeNewHeightsRequest.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *SubscribeNewHeightsRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_SubscribeNewHeightsRequest.Merge(m, src) ++} ++func (m *SubscribeNewHeightsRequest) XXX_Size() int { ++ return m.Size() ++} ++func (m *SubscribeNewHeightsRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_SubscribeNewHeightsRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_SubscribeNewHeightsRequest proto.InternalMessageInfo ++ ++type StatusRequest struct { ++} ++ ++func (m *StatusRequest) Reset() { *m = StatusRequest{} } ++func (m *StatusRequest) String() string { return proto.CompactTextString(m) } ++func (*StatusRequest) ProtoMessage() {} ++func (*StatusRequest) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{7} ++} ++func (m *StatusRequest) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *StatusRequest) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_StatusRequest.Merge(m, src) ++} ++func (m *StatusRequest) XXX_Size() int { ++ return m.Size() ++} ++func (m *StatusRequest) XXX_DiscardUnknown() { ++ xxx_messageInfo_StatusRequest.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_StatusRequest proto.InternalMessageInfo ++ + type ResponsePing struct { + } + +@@ -114,7 +396,7 @@ func (m *ResponsePing) Reset() { *m = ResponsePing{} } + func (m *ResponsePing) String() string { return proto.CompactTextString(m) } + func (*ResponsePing) ProtoMessage() {} + func (*ResponsePing) Descriptor() ([]byte, 
[]int) { +- return fileDescriptor_0ffff5682c662b95, []int{2} ++ return fileDescriptor_0ffff5682c662b95, []int{8} + } + func (m *ResponsePing) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +@@ -152,7 +434,7 @@ func (m *ResponseBroadcastTx) Reset() { *m = ResponseBroadcastTx{} } + func (m *ResponseBroadcastTx) String() string { return proto.CompactTextString(m) } + func (*ResponseBroadcastTx) ProtoMessage() {} + func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { +- return fileDescriptor_0ffff5682c662b95, []int{3} ++ return fileDescriptor_0ffff5682c662b95, []int{9} + } + func (m *ResponseBroadcastTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +@@ -195,344 +477,3675 @@ func (m *ResponseBroadcastTx) GetDeliverTx() *types.ResponseDeliverTx { + return nil + } + +-func init() { +- proto.RegisterType((*RequestPing)(nil), "tendermint.rpc.grpc.RequestPing") +- proto.RegisterType((*RequestBroadcastTx)(nil), "tendermint.rpc.grpc.RequestBroadcastTx") +- proto.RegisterType((*ResponsePing)(nil), "tendermint.rpc.grpc.ResponsePing") +- proto.RegisterType((*ResponseBroadcastTx)(nil), "tendermint.rpc.grpc.ResponseBroadcastTx") ++type StreamedBlockByHashResponse struct { ++ BlockPart *types1.Part `protobuf:"bytes,1,opt,name=block_part,json=blockPart,proto3" json:"block_part,omitempty"` ++ // Commit is only set in the first part, and ++ // it stays nil in the remaining ones. ++ Commit *types1.Commit `protobuf:"bytes,2,opt,name=commit,proto3" json:"commit,omitempty"` ++ // ValidatorSet is only set in the first part, and ++ // it stays nil in the remaining ones. 
++ ValidatorSet *types1.ValidatorSet `protobuf:"bytes,3,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` ++ IsLast bool `protobuf:"varint,4,opt,name=is_last,json=isLast,proto3" json:"is_last,omitempty"` + } + +-func init() { proto.RegisterFile("tendermint/rpc/grpc/types.proto", fileDescriptor_0ffff5682c662b95) } +- +-var fileDescriptor_0ffff5682c662b95 = []byte{ +- // 316 bytes of a gzipped FileDescriptorProto +- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2f, 0x49, 0xcd, 0x4b, +- 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x2a, 0x48, 0xd6, 0x4f, 0x07, 0x11, 0x25, 0x95, +- 0x05, 0xa9, 0xc5, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0xc2, 0x08, 0x05, 0x7a, 0x45, 0x05, +- 0xc9, 0x7a, 0x20, 0x05, 0x52, 0xd2, 0x48, 0xba, 0x12, 0x93, 0x92, 0x33, 0x91, 0x75, 0x28, 0xf1, +- 0x72, 0x71, 0x07, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x04, 0x64, 0xe6, 0xa5, 0x2b, 0xa9, 0x70, +- 0x09, 0x41, 0xb9, 0x4e, 0x45, 0xf9, 0x89, 0x29, 0xc9, 0x89, 0xc5, 0x25, 0x21, 0x15, 0x42, 0x7c, +- 0x5c, 0x4c, 0x25, 0x15, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x4c, 0x25, 0x15, 0x4a, 0x7c, +- 0x5c, 0x3c, 0x41, 0xa9, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x60, 0x5d, 0x53, 0x19, 0xb9, 0x84, +- 0x61, 0x02, 0xc8, 0xfa, 0xac, 0xb9, 0x38, 0x92, 0x33, 0x52, 0x93, 0xb3, 0xe3, 0xa1, 0xba, 0xb9, +- 0x8d, 0x14, 0xf4, 0x90, 0x5c, 0x08, 0x72, 0x8c, 0x1e, 0x4c, 0x9f, 0x33, 0x48, 0x61, 0x48, 0x45, +- 0x10, 0x7b, 0x32, 0x84, 0x21, 0xe4, 0xc8, 0xc5, 0x95, 0x92, 0x9a, 0x93, 0x59, 0x96, 0x5a, 0x04, +- 0xd2, 0xce, 0x04, 0xd6, 0xae, 0x84, 0x53, 0xbb, 0x0b, 0x44, 0x69, 0x48, 0x45, 0x10, 0x67, 0x0a, +- 0x8c, 0x69, 0xb4, 0x97, 0x91, 0x8b, 0x07, 0xee, 0x1e, 0xc7, 0x00, 0x4f, 0x21, 0x6f, 0x2e, 0x16, +- 0x90, 0x83, 0x85, 0x50, 0x9c, 0x01, 0x0b, 0x28, 0x3d, 0xa4, 0x80, 0x90, 0x52, 0xc4, 0xa1, 0x02, +- 0xe1, 0x6b, 0xa1, 0x04, 0x2e, 0x6e, 0x64, 0xcf, 0xaa, 0xe3, 0x33, 0x13, 0x49, 0xa1, 0x94, 0x06, +- 0x5e, 0xa3, 0x91, 0x54, 0x3a, 0xf9, 0x9c, 0x78, 0x24, 0xc7, 0x78, 
0xe1, 0x91, 0x1c, 0xe3, 0x83, +- 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, +- 0x94, 0x51, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x3e, 0x52, 0xf4, 0x62, +- 0x49, 0x1f, 0xd6, 0xc9, 0xf9, 0x45, 0xa9, 0x20, 0x46, 0x12, 0x1b, 0x38, 0xc6, 0x8d, 0x01, 0x01, +- 0x00, 0x00, 0xff, 0xff, 0xf6, 0x4b, 0x02, 0xd8, 0x46, 0x02, 0x00, 0x00, ++func (m *StreamedBlockByHashResponse) Reset() { *m = StreamedBlockByHashResponse{} } ++func (m *StreamedBlockByHashResponse) String() string { return proto.CompactTextString(m) } ++func (*StreamedBlockByHashResponse) ProtoMessage() {} ++func (*StreamedBlockByHashResponse) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{10} + } +- +-// Reference imports to suppress errors if they are not otherwise used. +-var _ context.Context +-var _ grpc.ClientConn +- +-// This is a compile-time assertion to ensure that this generated file +-// is compatible with the grpc package it is being compiled against. +-const _ = grpc.SupportPackageIsVersion4 +- +-// BroadcastAPIClient is the client API for BroadcastAPI service. +-// +-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+-type BroadcastAPIClient interface { +- Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) +- BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) ++func (m *StreamedBlockByHashResponse) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) + } +- +-type broadcastAPIClient struct { +- cc *grpc.ClientConn ++func (m *StreamedBlockByHashResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_StreamedBlockByHashResponse.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *StreamedBlockByHashResponse) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_StreamedBlockByHashResponse.Merge(m, src) ++} ++func (m *StreamedBlockByHashResponse) XXX_Size() int { ++ return m.Size() ++} ++func (m *StreamedBlockByHashResponse) XXX_DiscardUnknown() { ++ xxx_messageInfo_StreamedBlockByHashResponse.DiscardUnknown(m) + } + +-func NewBroadcastAPIClient(cc *grpc.ClientConn) BroadcastAPIClient { +- return &broadcastAPIClient{cc} ++var xxx_messageInfo_StreamedBlockByHashResponse proto.InternalMessageInfo ++ ++func (m *StreamedBlockByHashResponse) GetBlockPart() *types1.Part { ++ if m != nil { ++ return m.BlockPart ++ } ++ return nil + } + +-func (c *broadcastAPIClient) Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) { +- out := new(ResponsePing) +- err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/Ping", in, out, opts...) 
+- if err != nil { +- return nil, err ++func (m *StreamedBlockByHashResponse) GetCommit() *types1.Commit { ++ if m != nil { ++ return m.Commit + } +- return out, nil ++ return nil + } + +-func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) { +- out := new(ResponseBroadcastTx) +- err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx", in, out, opts...) +- if err != nil { +- return nil, err ++func (m *StreamedBlockByHashResponse) GetValidatorSet() *types1.ValidatorSet { ++ if m != nil { ++ return m.ValidatorSet + } +- return out, nil ++ return nil + } + +-// BroadcastAPIServer is the server API for BroadcastAPI service. +-type BroadcastAPIServer interface { +- Ping(context.Context, *RequestPing) (*ResponsePing, error) +- BroadcastTx(context.Context, *RequestBroadcastTx) (*ResponseBroadcastTx, error) ++func (m *StreamedBlockByHashResponse) GetIsLast() bool { ++ if m != nil { ++ return m.IsLast ++ } ++ return false + } + +-// UnimplementedBroadcastAPIServer can be embedded to have forward compatible implementations. +-type UnimplementedBroadcastAPIServer struct { ++type StreamedBlockByHeightResponse struct { ++ BlockPart *types1.Part `protobuf:"bytes,1,opt,name=block_part,json=blockPart,proto3" json:"block_part,omitempty"` ++ // Commit is only set in the first part, and ++ // it stays nil in the remaining ones. ++ Commit *types1.Commit `protobuf:"bytes,2,opt,name=commit,proto3" json:"commit,omitempty"` ++ // ValidatorSet is only set in the first part, and ++ // it stays nil in the remaining ones. 
++ ValidatorSet *types1.ValidatorSet `protobuf:"bytes,3,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` ++ IsLast bool `protobuf:"varint,4,opt,name=is_last,json=isLast,proto3" json:"is_last,omitempty"` + } + +-func (*UnimplementedBroadcastAPIServer) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) { +- return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") ++func (m *StreamedBlockByHeightResponse) Reset() { *m = StreamedBlockByHeightResponse{} } ++func (m *StreamedBlockByHeightResponse) String() string { return proto.CompactTextString(m) } ++func (*StreamedBlockByHeightResponse) ProtoMessage() {} ++func (*StreamedBlockByHeightResponse) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{11} + } +-func (*UnimplementedBroadcastAPIServer) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { +- return nil, status.Errorf(codes.Unimplemented, "method BroadcastTx not implemented") ++func (m *StreamedBlockByHeightResponse) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *StreamedBlockByHeightResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_StreamedBlockByHeightResponse.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *StreamedBlockByHeightResponse) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_StreamedBlockByHeightResponse.Merge(m, src) ++} ++func (m *StreamedBlockByHeightResponse) XXX_Size() int { ++ return m.Size() ++} ++func (m *StreamedBlockByHeightResponse) XXX_DiscardUnknown() { ++ xxx_messageInfo_StreamedBlockByHeightResponse.DiscardUnknown(m) + } + +-func RegisterBroadcastAPIServer(s *grpc.Server, srv BroadcastAPIServer) { +- s.RegisterService(&_BroadcastAPI_serviceDesc, srv) ++var 
xxx_messageInfo_StreamedBlockByHeightResponse proto.InternalMessageInfo ++ ++func (m *StreamedBlockByHeightResponse) GetBlockPart() *types1.Part { ++ if m != nil { ++ return m.BlockPart ++ } ++ return nil + } + +-func _BroadcastAPI_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +- in := new(RequestPing) +- if err := dec(in); err != nil { +- return nil, err ++func (m *StreamedBlockByHeightResponse) GetCommit() *types1.Commit { ++ if m != nil { ++ return m.Commit + } +- if interceptor == nil { +- return srv.(BroadcastAPIServer).Ping(ctx, in) +- } +- info := &grpc.UnaryServerInfo{ +- Server: srv, +- FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/Ping", +- } +- handler := func(ctx context.Context, req interface{}) (interface{}, error) { +- return srv.(BroadcastAPIServer).Ping(ctx, req.(*RequestPing)) +- } +- return interceptor(ctx, in, info, handler) ++ return nil + } + +-func _BroadcastAPI_BroadcastTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +- in := new(RequestBroadcastTx) +- if err := dec(in); err != nil { +- return nil, err +- } +- if interceptor == nil { +- return srv.(BroadcastAPIServer).BroadcastTx(ctx, in) +- } +- info := &grpc.UnaryServerInfo{ +- Server: srv, +- FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx", ++func (m *StreamedBlockByHeightResponse) GetValidatorSet() *types1.ValidatorSet { ++ if m != nil { ++ return m.ValidatorSet + } +- handler := func(ctx context.Context, req interface{}) (interface{}, error) { +- return srv.(BroadcastAPIServer).BroadcastTx(ctx, req.(*RequestBroadcastTx)) ++ return nil ++} ++ ++func (m *StreamedBlockByHeightResponse) GetIsLast() bool { ++ if m != nil { ++ return m.IsLast + } +- return interceptor(ctx, in, info, handler) ++ return false + } + +-var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{ +- ServiceName: 
"tendermint.rpc.grpc.BroadcastAPI", +- HandlerType: (*BroadcastAPIServer)(nil), +- Methods: []grpc.MethodDesc{ +- { +- MethodName: "Ping", +- Handler: _BroadcastAPI_Ping_Handler, +- }, +- { +- MethodName: "BroadcastTx", +- Handler: _BroadcastAPI_BroadcastTx_Handler, +- }, +- }, +- Streams: []grpc.StreamDesc{}, +- Metadata: "tendermint/rpc/grpc/types.proto", ++type CommitResponse struct { ++ Commit *types1.Commit `protobuf:"bytes,1,opt,name=commit,proto3" json:"commit,omitempty"` + } + +-func (m *RequestPing) Marshal() (dAtA []byte, err error) { +- size := m.Size() +- dAtA = make([]byte, size) +- n, err := m.MarshalToSizedBuffer(dAtA[:size]) +- if err != nil { +- return nil, err ++func (m *CommitResponse) Reset() { *m = CommitResponse{} } ++func (m *CommitResponse) String() string { return proto.CompactTextString(m) } ++func (*CommitResponse) ProtoMessage() {} ++func (*CommitResponse) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{12} ++} ++func (m *CommitResponse) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil + } +- return dAtA[:n], nil + } +- +-func (m *RequestPing) MarshalTo(dAtA []byte) (int, error) { +- size := m.Size() +- return m.MarshalToSizedBuffer(dAtA[:size]) ++func (m *CommitResponse) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_CommitResponse.Merge(m, src) + } +- +-func (m *RequestPing) MarshalToSizedBuffer(dAtA []byte) (int, error) { +- i := len(dAtA) +- _ = i +- var l int +- _ = l +- return len(dAtA) - i, nil ++func (m *CommitResponse) XXX_Size() int { ++ return m.Size() ++} ++func (m *CommitResponse) XXX_DiscardUnknown() { ++ xxx_messageInfo_CommitResponse.DiscardUnknown(m) + } + +-func (m 
*RequestBroadcastTx) Marshal() (dAtA []byte, err error) { +- size := m.Size() +- dAtA = make([]byte, size) +- n, err := m.MarshalToSizedBuffer(dAtA[:size]) +- if err != nil { +- return nil, err ++var xxx_messageInfo_CommitResponse proto.InternalMessageInfo ++ ++func (m *CommitResponse) GetCommit() *types1.Commit { ++ if m != nil { ++ return m.Commit + } +- return dAtA[:n], nil ++ return nil + } + +-func (m *RequestBroadcastTx) MarshalTo(dAtA []byte) (int, error) { +- size := m.Size() +- return m.MarshalToSizedBuffer(dAtA[:size]) ++type ValidatorSetResponse struct { ++ // ValidatorSet the requested validator set. ++ ValidatorSet *types1.ValidatorSet `protobuf:"bytes,1,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` ++ // Height the height corresponding to the returned ++ // validator set. ++ Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` + } + +-func (m *RequestBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { +- i := len(dAtA) +- _ = i +- var l int +- _ = l +- if len(m.Tx) > 0 { +- i -= len(m.Tx) +- copy(dAtA[i:], m.Tx) +- i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) +- i-- +- dAtA[i] = 0xa ++func (m *ValidatorSetResponse) Reset() { *m = ValidatorSetResponse{} } ++func (m *ValidatorSetResponse) String() string { return proto.CompactTextString(m) } ++func (*ValidatorSetResponse) ProtoMessage() {} ++func (*ValidatorSetResponse) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{13} ++} ++func (m *ValidatorSetResponse) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *ValidatorSetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_ValidatorSetResponse.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil + } +- return len(dAtA) - i, nil ++} ++func (m 
*ValidatorSetResponse) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ValidatorSetResponse.Merge(m, src) ++} ++func (m *ValidatorSetResponse) XXX_Size() int { ++ return m.Size() ++} ++func (m *ValidatorSetResponse) XXX_DiscardUnknown() { ++ xxx_messageInfo_ValidatorSetResponse.DiscardUnknown(m) + } + +-func (m *ResponsePing) Marshal() (dAtA []byte, err error) { +- size := m.Size() +- dAtA = make([]byte, size) +- n, err := m.MarshalToSizedBuffer(dAtA[:size]) +- if err != nil { +- return nil, err ++var xxx_messageInfo_ValidatorSetResponse proto.InternalMessageInfo ++ ++func (m *ValidatorSetResponse) GetValidatorSet() *types1.ValidatorSet { ++ if m != nil { ++ return m.ValidatorSet + } +- return dAtA[:n], nil ++ return nil + } + +-func (m *ResponsePing) MarshalTo(dAtA []byte) (int, error) { +- size := m.Size() +- return m.MarshalToSizedBuffer(dAtA[:size]) ++func (m *ValidatorSetResponse) GetHeight() int64 { ++ if m != nil { ++ return m.Height ++ } ++ return 0 + } + +-func (m *ResponsePing) MarshalToSizedBuffer(dAtA []byte) (int, error) { +- i := len(dAtA) +- _ = i +- var l int +- _ = l +- return len(dAtA) - i, nil ++type NewHeightEvent struct { ++ Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` ++ Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` + } + +-func (m *ResponseBroadcastTx) Marshal() (dAtA []byte, err error) { +- size := m.Size() +- dAtA = make([]byte, size) +- n, err := m.MarshalToSizedBuffer(dAtA[:size]) +- if err != nil { +- return nil, err ++func (m *NewHeightEvent) Reset() { *m = NewHeightEvent{} } ++func (m *NewHeightEvent) String() string { return proto.CompactTextString(m) } ++func (*NewHeightEvent) ProtoMessage() {} ++func (*NewHeightEvent) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{14} ++} ++func (m *NewHeightEvent) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *NewHeightEvent) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_NewHeightEvent.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil + } +- return dAtA[:n], nil ++} ++func (m *NewHeightEvent) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_NewHeightEvent.Merge(m, src) ++} ++func (m *NewHeightEvent) XXX_Size() int { ++ return m.Size() ++} ++func (m *NewHeightEvent) XXX_DiscardUnknown() { ++ xxx_messageInfo_NewHeightEvent.DiscardUnknown(m) + } + +-func (m *ResponseBroadcastTx) MarshalTo(dAtA []byte) (int, error) { +- size := m.Size() +- return m.MarshalToSizedBuffer(dAtA[:size]) ++var xxx_messageInfo_NewHeightEvent proto.InternalMessageInfo ++ ++func (m *NewHeightEvent) GetHeight() int64 { ++ if m != nil { ++ return m.Height ++ } ++ return 0 + } + +-func (m *ResponseBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { +- i := len(dAtA) +- _ = i +- var l int +- _ = l +- if m.DeliverTx != nil { +- { +- size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) +- if err != nil { +- return 0, err +- } +- i -= size +- i = encodeVarintTypes(dAtA, i, uint64(size)) +- } +- i-- +- dAtA[i] = 0x12 ++func (m *NewHeightEvent) GetHash() []byte { ++ if m != nil { ++ return m.Hash + } +- if m.CheckTx != nil { +- { +- size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) +- if err != nil { +- return 0, err +- } +- i -= size +- i = encodeVarintTypes(dAtA, i, uint64(size)) ++ return nil ++} ++ ++type StatusResponse struct { ++ NodeInfo *p2p.DefaultNodeInfo `protobuf:"bytes,1,opt,name=node_info,json=nodeInfo,proto3" json:"node_info,omitempty"` ++ SyncInfo *SyncInfo `protobuf:"bytes,2,opt,name=sync_info,json=syncInfo,proto3" json:"sync_info,omitempty"` ++ ValidatorInfo *ValidatorInfo `protobuf:"bytes,3,opt,name=validator_info,json=validatorInfo,proto3" json:"validator_info,omitempty"` ++} ++ ++func (m *StatusResponse) Reset() { *m = StatusResponse{} } ++func (m 
*StatusResponse) String() string { return proto.CompactTextString(m) } ++func (*StatusResponse) ProtoMessage() {} ++func (*StatusResponse) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{15} ++} ++func (m *StatusResponse) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err + } +- i-- +- dAtA[i] = 0xa ++ return b[:n], nil + } +- return len(dAtA) - i, nil ++} ++func (m *StatusResponse) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_StatusResponse.Merge(m, src) ++} ++func (m *StatusResponse) XXX_Size() int { ++ return m.Size() ++} ++func (m *StatusResponse) XXX_DiscardUnknown() { ++ xxx_messageInfo_StatusResponse.DiscardUnknown(m) + } + +-func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { +- offset -= sovTypes(v) +- base := offset +- for v >= 1<<7 { +- dAtA[offset] = uint8(v&0x7f | 0x80) +- v >>= 7 +- offset++ ++var xxx_messageInfo_StatusResponse proto.InternalMessageInfo ++ ++func (m *StatusResponse) GetNodeInfo() *p2p.DefaultNodeInfo { ++ if m != nil { ++ return m.NodeInfo + } +- dAtA[offset] = uint8(v) +- return base ++ return nil + } +-func (m *RequestPing) Size() (n int) { +- if m == nil { +- return 0 ++ ++func (m *StatusResponse) GetSyncInfo() *SyncInfo { ++ if m != nil { ++ return m.SyncInfo + } +- var l int +- _ = l +- return n ++ return nil + } + +-func (m *RequestBroadcastTx) Size() (n int) { +- if m == nil { +- return 0 +- } +- var l int +- _ = l +- l = len(m.Tx) +- if l > 0 { +- n += 1 + l + sovTypes(uint64(l)) ++func (m *StatusResponse) GetValidatorInfo() *ValidatorInfo { ++ if m != nil { ++ return m.ValidatorInfo + } +- return n ++ return nil + } + +-func (m *ResponsePing) Size() (n int) { +- if m == nil { +- return 0 
+- } +- var l int +- _ = l +- return n ++type SyncInfo struct { ++ LatestBlockHash []byte `protobuf:"bytes,1,opt,name=latest_block_hash,json=latestBlockHash,proto3" json:"latest_block_hash,omitempty"` ++ LatestAppHash []byte `protobuf:"bytes,2,opt,name=latest_app_hash,json=latestAppHash,proto3" json:"latest_app_hash,omitempty"` ++ LatestBlockHeight int64 `protobuf:"varint,3,opt,name=latest_block_height,json=latestBlockHeight,proto3" json:"latest_block_height,omitempty"` ++ LatestBlockTime time.Time `protobuf:"bytes,4,opt,name=latest_block_time,json=latestBlockTime,proto3,stdtime" json:"latest_block_time"` ++ EarliestBlockHash []byte `protobuf:"bytes,5,opt,name=earliest_block_hash,json=earliestBlockHash,proto3" json:"earliest_block_hash,omitempty"` ++ EarliestAppHash []byte `protobuf:"bytes,6,opt,name=earliest_app_hash,json=earliestAppHash,proto3" json:"earliest_app_hash,omitempty"` ++ EarliestBlockHeight int64 `protobuf:"varint,7,opt,name=earliest_block_height,json=earliestBlockHeight,proto3" json:"earliest_block_height,omitempty"` ++ EarliestBlockTime time.Time `protobuf:"bytes,8,opt,name=earliest_block_time,json=earliestBlockTime,proto3,stdtime" json:"earliest_block_time"` ++ CatchingUp bool `protobuf:"varint,9,opt,name=catching_up,json=catchingUp,proto3" json:"catching_up,omitempty"` + } + +-func (m *ResponseBroadcastTx) Size() (n int) { +- if m == nil { +- return 0 +- } ++func (m *SyncInfo) Reset() { *m = SyncInfo{} } ++func (m *SyncInfo) String() string { return proto.CompactTextString(m) } ++func (*SyncInfo) ProtoMessage() {} ++func (*SyncInfo) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{16} ++} ++func (m *SyncInfo) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *SyncInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_SyncInfo.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil 
{ ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *SyncInfo) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_SyncInfo.Merge(m, src) ++} ++func (m *SyncInfo) XXX_Size() int { ++ return m.Size() ++} ++func (m *SyncInfo) XXX_DiscardUnknown() { ++ xxx_messageInfo_SyncInfo.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_SyncInfo proto.InternalMessageInfo ++ ++func (m *SyncInfo) GetLatestBlockHash() []byte { ++ if m != nil { ++ return m.LatestBlockHash ++ } ++ return nil ++} ++ ++func (m *SyncInfo) GetLatestAppHash() []byte { ++ if m != nil { ++ return m.LatestAppHash ++ } ++ return nil ++} ++ ++func (m *SyncInfo) GetLatestBlockHeight() int64 { ++ if m != nil { ++ return m.LatestBlockHeight ++ } ++ return 0 ++} ++ ++func (m *SyncInfo) GetLatestBlockTime() time.Time { ++ if m != nil { ++ return m.LatestBlockTime ++ } ++ return time.Time{} ++} ++ ++func (m *SyncInfo) GetEarliestBlockHash() []byte { ++ if m != nil { ++ return m.EarliestBlockHash ++ } ++ return nil ++} ++ ++func (m *SyncInfo) GetEarliestAppHash() []byte { ++ if m != nil { ++ return m.EarliestAppHash ++ } ++ return nil ++} ++ ++func (m *SyncInfo) GetEarliestBlockHeight() int64 { ++ if m != nil { ++ return m.EarliestBlockHeight ++ } ++ return 0 ++} ++ ++func (m *SyncInfo) GetEarliestBlockTime() time.Time { ++ if m != nil { ++ return m.EarliestBlockTime ++ } ++ return time.Time{} ++} ++ ++func (m *SyncInfo) GetCatchingUp() bool { ++ if m != nil { ++ return m.CatchingUp ++ } ++ return false ++} ++ ++type ValidatorInfo struct { ++ Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` ++ PubKey *crypto.PublicKey `protobuf:"bytes,2,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` ++ VotingPower int64 `protobuf:"varint,3,opt,name=voting_power,json=votingPower,proto3" json:"voting_power,omitempty"` ++} ++ ++func (m *ValidatorInfo) Reset() { *m = ValidatorInfo{} } ++func (m *ValidatorInfo) String() string { return proto.CompactTextString(m) } ++func 
(*ValidatorInfo) ProtoMessage() {} ++func (*ValidatorInfo) Descriptor() ([]byte, []int) { ++ return fileDescriptor_0ffff5682c662b95, []int{17} ++} ++func (m *ValidatorInfo) XXX_Unmarshal(b []byte) error { ++ return m.Unmarshal(b) ++} ++func (m *ValidatorInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { ++ if deterministic { ++ return xxx_messageInfo_ValidatorInfo.Marshal(b, m, deterministic) ++ } else { ++ b = b[:cap(b)] ++ n, err := m.MarshalToSizedBuffer(b) ++ if err != nil { ++ return nil, err ++ } ++ return b[:n], nil ++ } ++} ++func (m *ValidatorInfo) XXX_Merge(src proto.Message) { ++ xxx_messageInfo_ValidatorInfo.Merge(m, src) ++} ++func (m *ValidatorInfo) XXX_Size() int { ++ return m.Size() ++} ++func (m *ValidatorInfo) XXX_DiscardUnknown() { ++ xxx_messageInfo_ValidatorInfo.DiscardUnknown(m) ++} ++ ++var xxx_messageInfo_ValidatorInfo proto.InternalMessageInfo ++ ++func (m *ValidatorInfo) GetAddress() []byte { ++ if m != nil { ++ return m.Address ++ } ++ return nil ++} ++ ++func (m *ValidatorInfo) GetPubKey() *crypto.PublicKey { ++ if m != nil { ++ return m.PubKey ++ } ++ return nil ++} ++ ++func (m *ValidatorInfo) GetVotingPower() int64 { ++ if m != nil { ++ return m.VotingPower ++ } ++ return 0 ++} ++ ++func init() { ++ proto.RegisterType((*RequestPing)(nil), "tendermint.rpc.grpc.RequestPing") ++ proto.RegisterType((*RequestBroadcastTx)(nil), "tendermint.rpc.grpc.RequestBroadcastTx") ++ proto.RegisterType((*BlockByHashRequest)(nil), "tendermint.rpc.grpc.BlockByHashRequest") ++ proto.RegisterType((*BlockByHeightRequest)(nil), "tendermint.rpc.grpc.BlockByHeightRequest") ++ proto.RegisterType((*CommitRequest)(nil), "tendermint.rpc.grpc.CommitRequest") ++ proto.RegisterType((*ValidatorSetRequest)(nil), "tendermint.rpc.grpc.ValidatorSetRequest") ++ proto.RegisterType((*SubscribeNewHeightsRequest)(nil), "tendermint.rpc.grpc.SubscribeNewHeightsRequest") ++ proto.RegisterType((*StatusRequest)(nil), "tendermint.rpc.grpc.StatusRequest") ++ 
proto.RegisterType((*ResponsePing)(nil), "tendermint.rpc.grpc.ResponsePing") ++ proto.RegisterType((*ResponseBroadcastTx)(nil), "tendermint.rpc.grpc.ResponseBroadcastTx") ++ proto.RegisterType((*StreamedBlockByHashResponse)(nil), "tendermint.rpc.grpc.StreamedBlockByHashResponse") ++ proto.RegisterType((*StreamedBlockByHeightResponse)(nil), "tendermint.rpc.grpc.StreamedBlockByHeightResponse") ++ proto.RegisterType((*CommitResponse)(nil), "tendermint.rpc.grpc.CommitResponse") ++ proto.RegisterType((*ValidatorSetResponse)(nil), "tendermint.rpc.grpc.ValidatorSetResponse") ++ proto.RegisterType((*NewHeightEvent)(nil), "tendermint.rpc.grpc.NewHeightEvent") ++ proto.RegisterType((*StatusResponse)(nil), "tendermint.rpc.grpc.StatusResponse") ++ proto.RegisterType((*SyncInfo)(nil), "tendermint.rpc.grpc.SyncInfo") ++ proto.RegisterType((*ValidatorInfo)(nil), "tendermint.rpc.grpc.ValidatorInfo") ++} ++ ++func init() { proto.RegisterFile("tendermint/rpc/grpc/types.proto", fileDescriptor_0ffff5682c662b95) } ++ ++var fileDescriptor_0ffff5682c662b95 = []byte{ ++ // 1102 bytes of a gzipped FileDescriptorProto ++ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x57, 0x4f, 0x6f, 0x1b, 0x45, ++ 0x14, 0xcf, 0x3a, 0xa9, 0xe3, 0x3c, 0xff, 0xa9, 0x32, 0x0e, 0xc5, 0xda, 0xa6, 0x76, 0xba, 0x20, ++ 0x9a, 0x56, 0x62, 0x1d, 0x19, 0xf5, 0x42, 0x2b, 0xa4, 0x38, 0x41, 0x22, 0x4a, 0x55, 0x19, 0x27, ++ 0x70, 0xe0, 0x62, 0xc6, 0xeb, 0x89, 0xbd, 0xc4, 0xde, 0x19, 0x76, 0xc6, 0x6e, 0x7c, 0xe6, 0x0b, ++ 0xf4, 0xc2, 0xc7, 0xe1, 0xde, 0x63, 0x2f, 0x48, 0x9c, 0x00, 0x25, 0x08, 0xbe, 0x06, 0xda, 0x99, ++ 0x59, 0x7b, 0x36, 0xf6, 0xa6, 0x81, 0x63, 0x2f, 0xd6, 0x9b, 0x37, 0xef, 0xbd, 0xfd, 0xbd, 0x37, ++ 0xef, 0x9f, 0xa1, 0x26, 0x48, 0xd0, 0x23, 0xe1, 0xc8, 0x0f, 0x44, 0x3d, 0x64, 0x5e, 0xbd, 0x1f, ++ 0xfd, 0x88, 0x29, 0x23, 0xdc, 0x65, 0x21, 0x15, 0x14, 0x95, 0xe7, 0x02, 0x6e, 0xc8, 0x3c, 0x37, ++ 0x12, 0xb0, 0xef, 0x1b, 0x5a, 0xb8, 0xeb, 0xf9, 0xa6, 0x86, 0xbd, 0x6d, 0x5c, 0x4a, 0x7e, 
0xe2, ++ 0xd6, 0x36, 0x6e, 0x59, 0x83, 0xa5, 0x6a, 0x7a, 0xe1, 0x94, 0x09, 0x5a, 0x3f, 0x27, 0xd3, 0xf8, ++ 0x76, 0x67, 0xc1, 0xee, 0x04, 0x0f, 0xfd, 0x1e, 0x16, 0x34, 0xd4, 0x12, 0xb5, 0x3e, 0xa5, 0xfd, ++ 0x21, 0xa9, 0xcb, 0x53, 0x77, 0x7c, 0x56, 0x17, 0xfe, 0x88, 0x70, 0x81, 0x47, 0x4c, 0x0b, 0x6c, ++ 0xf5, 0x69, 0x9f, 0x4a, 0xb2, 0x1e, 0x51, 0x8a, 0xeb, 0x14, 0x21, 0xdf, 0x26, 0x3f, 0x8e, 0x09, ++ 0x17, 0x2d, 0x3f, 0xe8, 0x3b, 0x1f, 0x03, 0xd2, 0xc7, 0x66, 0x48, 0x71, 0xcf, 0xc3, 0x5c, 0x9c, ++ 0x5e, 0xa0, 0x12, 0x64, 0xc4, 0x45, 0xc5, 0xda, 0xb1, 0x76, 0x0b, 0xed, 0x8c, 0xb8, 0x70, 0xbe, ++ 0x00, 0xd4, 0x1c, 0x52, 0xef, 0xbc, 0x39, 0xfd, 0x0a, 0xf3, 0x81, 0x56, 0x40, 0x08, 0xd6, 0x06, ++ 0x98, 0x0f, 0xb4, 0x9c, 0xa4, 0xd1, 0x16, 0xdc, 0x61, 0x21, 0x9d, 0x90, 0x4a, 0x66, 0xc7, 0xda, ++ 0xcd, 0xb5, 0xd5, 0xc1, 0x39, 0x84, 0xad, 0x58, 0x9f, 0xf8, 0xfd, 0x81, 0x88, 0x2d, 0xdc, 0x83, ++ 0xec, 0x40, 0x32, 0xa4, 0x8d, 0xd5, 0xb6, 0x3e, 0xa5, 0x58, 0x79, 0x04, 0xc5, 0x03, 0x3a, 0x1a, ++ 0xf9, 0xef, 0x52, 0x77, 0x3e, 0x85, 0xf2, 0xb7, 0x71, 0xb4, 0x4e, 0xc8, 0x3b, 0xc5, 0xb7, 0xc1, ++ 0x3e, 0x19, 0x77, 0xb9, 0x17, 0xfa, 0x5d, 0xf2, 0x92, 0xbc, 0x52, 0x10, 0xb9, 0xd6, 0x72, 0xee, ++ 0x42, 0xf1, 0x44, 0x60, 0x31, 0x9e, 0x31, 0x4a, 0x50, 0x68, 0x13, 0xce, 0x68, 0xc0, 0x89, 0x0c, ++ 0xe1, 0xcf, 0x16, 0x94, 0x63, 0x86, 0x19, 0xc4, 0x67, 0x90, 0xf3, 0x06, 0xc4, 0x3b, 0xef, 0xe8, ++ 0x50, 0xe6, 0x1b, 0x3b, 0xae, 0x91, 0x5f, 0x51, 0x2a, 0xb9, 0xb1, 0xde, 0x41, 0x24, 0x78, 0x7a, ++ 0xd1, 0x5e, 0xf7, 0x14, 0x81, 0xf6, 0x01, 0x7a, 0x64, 0xe8, 0x4f, 0x48, 0x18, 0xa9, 0x67, 0xa4, ++ 0xba, 0x93, 0xaa, 0x7e, 0xa8, 0x44, 0x4f, 0x2f, 0xda, 0x1b, 0xbd, 0x98, 0x74, 0xfe, 0xb2, 0xe0, ++ 0xfe, 0x89, 0x08, 0x09, 0x1e, 0x91, 0x5e, 0xe2, 0xf5, 0x94, 0x0e, 0x7a, 0x0a, 0xd0, 0x8d, 0xd8, ++ 0x1d, 0x86, 0x43, 0xa1, 0x11, 0xde, 0x33, 0x3f, 0xa1, 0xb2, 0xb5, 0x85, 0x43, 0xd1, 0xde, 0x90, ++ 0x92, 0x11, 0x89, 0xf6, 0x20, 0xeb, 0xc9, 0x57, 0xd0, 0xa8, 0x2a, 0x8b, 0x2a, 0xfa, 0x95, 0xb4, ++ 0x1c, 
0x3a, 0x80, 0xe2, 0x2c, 0x79, 0x3b, 0x9c, 0x88, 0xca, 0xaa, 0x54, 0xac, 0x2e, 0x2a, 0x26, ++ 0x5e, 0xad, 0x30, 0x31, 0x4e, 0xe8, 0x43, 0x58, 0xf7, 0x79, 0x67, 0x88, 0xb9, 0xa8, 0xac, 0xc9, ++ 0xa4, 0xc8, 0xfa, 0xfc, 0x05, 0xe6, 0xc2, 0xf9, 0xdb, 0x82, 0x07, 0xd7, 0xdd, 0xd4, 0x49, 0xf6, ++ 0x7e, 0x39, 0xda, 0x84, 0x52, 0x9c, 0xfe, 0xda, 0xb1, 0x39, 0x42, 0xeb, 0x76, 0x08, 0x1d, 0x0e, ++ 0x5b, 0xc9, 0xca, 0xd0, 0x96, 0x16, 0x90, 0x5b, 0xff, 0x03, 0xf9, 0xbc, 0xbe, 0x32, 0x89, 0xfa, ++ 0x7a, 0x0e, 0xa5, 0x59, 0x59, 0x7d, 0x39, 0x21, 0x41, 0x7a, 0xdd, 0xc7, 0x1d, 0x25, 0x33, 0xef, ++ 0x28, 0xce, 0xaf, 0x16, 0x94, 0xe2, 0x02, 0xd4, 0x68, 0x9f, 0xc3, 0x46, 0x40, 0x7b, 0xa4, 0xe3, ++ 0x07, 0x67, 0x54, 0x23, 0xad, 0x99, 0x48, 0x59, 0x83, 0xb9, 0x87, 0xe4, 0x0c, 0x8f, 0x87, 0xe2, ++ 0x25, 0xed, 0x91, 0xa3, 0xe0, 0x8c, 0xb6, 0x73, 0x81, 0xa6, 0xd0, 0xe7, 0xb0, 0xc1, 0xa7, 0x81, ++ 0xa7, 0xb4, 0xd5, 0xd3, 0x3e, 0x70, 0x97, 0x34, 0x7e, 0xf7, 0x64, 0x1a, 0x78, 0x4a, 0x97, 0x6b, ++ 0x0a, 0x1d, 0x41, 0x69, 0x1e, 0x27, 0x69, 0x60, 0x75, 0xb1, 0x34, 0x67, 0x06, 0x66, 0xb1, 0x92, ++ 0x56, 0xe6, 0x11, 0x8e, 0x8e, 0xce, 0x3f, 0xab, 0x90, 0x8b, 0xbf, 0x80, 0x9e, 0xc0, 0xe6, 0x10, ++ 0x0b, 0xc2, 0x45, 0x47, 0x65, 0xaa, 0xd1, 0x57, 0xef, 0xaa, 0x0b, 0x99, 0xda, 0x51, 0xfd, 0xa2, ++ 0x4f, 0x40, 0xb3, 0x3a, 0x98, 0xb1, 0x8e, 0x11, 0xaf, 0xa2, 0x62, 0xef, 0x33, 0x26, 0xe5, 0x5c, ++ 0x28, 0x27, 0x6d, 0xaa, 0x88, 0xaf, 0xca, 0x88, 0x6f, 0x9a, 0x56, 0x55, 0xf0, 0x5b, 0xd7, 0x30, ++ 0x44, 0xf3, 0x44, 0xa6, 0x60, 0xbe, 0x61, 0xbb, 0x6a, 0xd8, 0xb8, 0xf1, 0xb0, 0x71, 0x4f, 0xe3, ++ 0x61, 0xd3, 0xcc, 0xbd, 0xf9, 0xbd, 0xb6, 0xf2, 0xfa, 0x8f, 0x9a, 0x95, 0x40, 0x1a, 0xdd, 0x47, ++ 0x08, 0x08, 0x0e, 0x87, 0xfe, 0x35, 0xbf, 0xee, 0x48, 0xb4, 0x9b, 0xf1, 0xd5, 0xdc, 0xb3, 0x27, ++ 0x30, 0x63, 0xce, 0x7d, 0xcb, 0xaa, 0x28, 0xc4, 0x17, 0xb1, 0x77, 0x0d, 0xf8, 0xe0, 0xba, 0x6d, ++ 0xe5, 0xdf, 0xba, 0xf4, 0xaf, 0x9c, 0xb4, 0xae, 0x3c, 0x3c, 0x5d, 0xc0, 0x23, 0x7d, 0xcc, 0xfd, ++ 0x07, 0x1f, 0x93, 0xa8, 
0xa5, 0x97, 0x35, 0xc8, 0x7b, 0x58, 0x78, 0x03, 0x3f, 0xe8, 0x77, 0xc6, ++ 0xac, 0xb2, 0x21, 0x8b, 0x16, 0x62, 0xd6, 0x37, 0xcc, 0xf9, 0xc9, 0x82, 0x62, 0x22, 0x15, 0x50, ++ 0x05, 0xd6, 0x71, 0xaf, 0x17, 0x12, 0xce, 0xf5, 0x23, 0xc7, 0x47, 0xf4, 0x14, 0xd6, 0xd9, 0xb8, ++ 0xdb, 0x39, 0x27, 0x53, 0x9d, 0x9a, 0xdb, 0x66, 0x66, 0xa9, 0x3d, 0xc1, 0x6d, 0x8d, 0xbb, 0x43, ++ 0xdf, 0x3b, 0x26, 0xd3, 0x76, 0x96, 0x8d, 0xbb, 0xc7, 0x64, 0x8a, 0x1e, 0x42, 0x61, 0x42, 0x45, ++ 0x84, 0x80, 0xd1, 0x57, 0x24, 0xd4, 0x8f, 0x9c, 0x57, 0xbc, 0x56, 0xc4, 0x6a, 0xfc, 0x62, 0x41, ++ 0x61, 0x36, 0x9e, 0xf6, 0x5b, 0x47, 0xe8, 0x18, 0xd6, 0xa2, 0xf9, 0x85, 0x76, 0x96, 0xe6, 0xae, ++ 0xb1, 0x24, 0xd8, 0x0f, 0x53, 0x24, 0xe6, 0x43, 0x10, 0x7d, 0x0f, 0x79, 0x73, 0xf6, 0x3d, 0xba, ++ 0xc9, 0xa6, 0x21, 0x68, 0xef, 0xde, 0x68, 0xda, 0x90, 0x6c, 0x5c, 0xae, 0x41, 0x4e, 0x06, 0x3d, ++ 0xc2, 0xfe, 0x03, 0xe4, 0x8d, 0x91, 0x96, 0xf2, 0xb9, 0xc5, 0x95, 0xc5, 0xde, 0x5b, 0x5e, 0xe8, ++ 0xe9, 0x53, 0x72, 0xcf, 0x42, 0x0c, 0x8a, 0x89, 0xb9, 0x82, 0x1e, 0xdf, 0xf8, 0x35, 0x73, 0xc1, ++ 0xb1, 0x1b, 0xb7, 0xfa, 0x5e, 0x62, 0x5c, 0xed, 0x59, 0xe8, 0x6b, 0xc8, 0xaa, 0xbe, 0x8d, 0x96, ++ 0xf7, 0x95, 0xc4, 0x16, 0x64, 0x7f, 0x74, 0xa3, 0x8c, 0x6e, 0x99, 0x1e, 0x14, 0xcc, 0xce, 0x8d, ++ 0x76, 0x6f, 0x6e, 0x58, 0xf3, 0xad, 0xc9, 0x7e, 0x7c, 0x0b, 0x49, 0xfd, 0x91, 0x11, 0x94, 0x97, ++ 0x2c, 0x52, 0xa8, 0xbe, 0x3c, 0x08, 0xa9, 0x2b, 0x57, 0x8a, 0x47, 0xc9, 0x19, 0xa2, 0xc2, 0xa4, ++ 0x06, 0x43, 0x4a, 0x98, 0x12, 0x6b, 0x5b, 0x8a, 0xd1, 0xe4, 0x64, 0x69, 0xbe, 0x78, 0x73, 0x59, ++ 0xb5, 0xde, 0x5e, 0x56, 0xad, 0x3f, 0x2f, 0xab, 0xd6, 0xeb, 0xab, 0xea, 0xca, 0xdb, 0xab, 0xea, ++ 0xca, 0x6f, 0x57, 0xd5, 0x95, 0xef, 0x1a, 0x7d, 0x5f, 0x0c, 0xc6, 0x5d, 0xd7, 0xa3, 0xa3, 0xba, ++ 0xb9, 0x9b, 0x2f, 0xfe, 0xa3, 0x78, 0xe6, 0xd1, 0x90, 0x44, 0x44, 0x37, 0x2b, 0x5b, 0xc9, 0x67, ++ 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xe7, 0x4a, 0xdc, 0x8a, 0x78, 0x0c, 0x00, 0x00, ++} ++ ++// Reference imports to suppress errors if they 
are not otherwise used. ++var _ context.Context ++var _ grpc.ClientConn ++ ++// This is a compile-time assertion to ensure that this generated file ++// is compatible with the grpc package it is being compiled against. ++const _ = grpc.SupportPackageIsVersion4 ++ ++// BroadcastAPIClient is the client API for BroadcastAPI service. ++// ++// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. ++type BroadcastAPIClient interface { ++ Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) ++ BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) ++} ++ ++type broadcastAPIClient struct { ++ cc *grpc.ClientConn ++} ++ ++func NewBroadcastAPIClient(cc *grpc.ClientConn) BroadcastAPIClient { ++ return &broadcastAPIClient{cc} ++} ++ ++func (c *broadcastAPIClient) Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) { ++ out := new(ResponsePing) ++ err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/Ping", in, out, opts...) ++ if err != nil { ++ return nil, err ++ } ++ return out, nil ++} ++ ++func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) { ++ out := new(ResponseBroadcastTx) ++ err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx", in, out, opts...) ++ if err != nil { ++ return nil, err ++ } ++ return out, nil ++} ++ ++// BroadcastAPIServer is the server API for BroadcastAPI service. ++type BroadcastAPIServer interface { ++ Ping(context.Context, *RequestPing) (*ResponsePing, error) ++ BroadcastTx(context.Context, *RequestBroadcastTx) (*ResponseBroadcastTx, error) ++} ++ ++// UnimplementedBroadcastAPIServer can be embedded to have forward compatible implementations. 
++type UnimplementedBroadcastAPIServer struct { ++} ++ ++func (*UnimplementedBroadcastAPIServer) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) { ++ return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") ++} ++func (*UnimplementedBroadcastAPIServer) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { ++ return nil, status.Errorf(codes.Unimplemented, "method BroadcastTx not implemented") ++} ++ ++func RegisterBroadcastAPIServer(s *grpc.Server, srv BroadcastAPIServer) { ++ s.RegisterService(&_BroadcastAPI_serviceDesc, srv) ++} ++ ++func _BroadcastAPI_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { ++ in := new(RequestPing) ++ if err := dec(in); err != nil { ++ return nil, err ++ } ++ if interceptor == nil { ++ return srv.(BroadcastAPIServer).Ping(ctx, in) ++ } ++ info := &grpc.UnaryServerInfo{ ++ Server: srv, ++ FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/Ping", ++ } ++ handler := func(ctx context.Context, req interface{}) (interface{}, error) { ++ return srv.(BroadcastAPIServer).Ping(ctx, req.(*RequestPing)) ++ } ++ return interceptor(ctx, in, info, handler) ++} ++ ++func _BroadcastAPI_BroadcastTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { ++ in := new(RequestBroadcastTx) ++ if err := dec(in); err != nil { ++ return nil, err ++ } ++ if interceptor == nil { ++ return srv.(BroadcastAPIServer).BroadcastTx(ctx, in) ++ } ++ info := &grpc.UnaryServerInfo{ ++ Server: srv, ++ FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx", ++ } ++ handler := func(ctx context.Context, req interface{}) (interface{}, error) { ++ return srv.(BroadcastAPIServer).BroadcastTx(ctx, req.(*RequestBroadcastTx)) ++ } ++ return interceptor(ctx, in, info, handler) ++} ++ ++var _BroadcastAPI_serviceDesc = 
grpc.ServiceDesc{ ++ ServiceName: "tendermint.rpc.grpc.BroadcastAPI", ++ HandlerType: (*BroadcastAPIServer)(nil), ++ Methods: []grpc.MethodDesc{ ++ { ++ MethodName: "Ping", ++ Handler: _BroadcastAPI_Ping_Handler, ++ }, ++ { ++ MethodName: "BroadcastTx", ++ Handler: _BroadcastAPI_BroadcastTx_Handler, ++ }, ++ }, ++ Streams: []grpc.StreamDesc{}, ++ Metadata: "tendermint/rpc/grpc/types.proto", ++} ++ ++// BlockAPIClient is the client API for BlockAPI service. ++// ++// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. ++type BlockAPIClient interface { ++ BlockByHash(ctx context.Context, in *BlockByHashRequest, opts ...grpc.CallOption) (BlockAPI_BlockByHashClient, error) ++ BlockByHeight(ctx context.Context, in *BlockByHeightRequest, opts ...grpc.CallOption) (BlockAPI_BlockByHeightClient, error) ++ Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error) ++ ValidatorSet(ctx context.Context, in *ValidatorSetRequest, opts ...grpc.CallOption) (*ValidatorSetResponse, error) ++ SubscribeNewHeights(ctx context.Context, in *SubscribeNewHeightsRequest, opts ...grpc.CallOption) (BlockAPI_SubscribeNewHeightsClient, error) ++ Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) ++} ++ ++type blockAPIClient struct { ++ cc *grpc.ClientConn ++} ++ ++func NewBlockAPIClient(cc *grpc.ClientConn) BlockAPIClient { ++ return &blockAPIClient{cc} ++} ++ ++func (c *blockAPIClient) BlockByHash(ctx context.Context, in *BlockByHashRequest, opts ...grpc.CallOption) (BlockAPI_BlockByHashClient, error) { ++ stream, err := c.cc.NewStream(ctx, &_BlockAPI_serviceDesc.Streams[0], "/tendermint.rpc.grpc.BlockAPI/BlockByHash", opts...) 
++ if err != nil { ++ return nil, err ++ } ++ x := &blockAPIBlockByHashClient{stream} ++ if err := x.ClientStream.SendMsg(in); err != nil { ++ return nil, err ++ } ++ if err := x.ClientStream.CloseSend(); err != nil { ++ return nil, err ++ } ++ return x, nil ++} ++ ++type BlockAPI_BlockByHashClient interface { ++ Recv() (*StreamedBlockByHashResponse, error) ++ grpc.ClientStream ++} ++ ++type blockAPIBlockByHashClient struct { ++ grpc.ClientStream ++} ++ ++func (x *blockAPIBlockByHashClient) Recv() (*StreamedBlockByHashResponse, error) { ++ m := new(StreamedBlockByHashResponse) ++ if err := x.ClientStream.RecvMsg(m); err != nil { ++ return nil, err ++ } ++ return m, nil ++} ++ ++func (c *blockAPIClient) BlockByHeight(ctx context.Context, in *BlockByHeightRequest, opts ...grpc.CallOption) (BlockAPI_BlockByHeightClient, error) { ++ stream, err := c.cc.NewStream(ctx, &_BlockAPI_serviceDesc.Streams[1], "/tendermint.rpc.grpc.BlockAPI/BlockByHeight", opts...) ++ if err != nil { ++ return nil, err ++ } ++ x := &blockAPIBlockByHeightClient{stream} ++ if err := x.ClientStream.SendMsg(in); err != nil { ++ return nil, err ++ } ++ if err := x.ClientStream.CloseSend(); err != nil { ++ return nil, err ++ } ++ return x, nil ++} ++ ++type BlockAPI_BlockByHeightClient interface { ++ Recv() (*StreamedBlockByHeightResponse, error) ++ grpc.ClientStream ++} ++ ++type blockAPIBlockByHeightClient struct { ++ grpc.ClientStream ++} ++ ++func (x *blockAPIBlockByHeightClient) Recv() (*StreamedBlockByHeightResponse, error) { ++ m := new(StreamedBlockByHeightResponse) ++ if err := x.ClientStream.RecvMsg(m); err != nil { ++ return nil, err ++ } ++ return m, nil ++} ++ ++func (c *blockAPIClient) Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error) { ++ out := new(CommitResponse) ++ err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BlockAPI/Commit", in, out, opts...) 
++ if err != nil { ++ return nil, err ++ } ++ return out, nil ++} ++ ++func (c *blockAPIClient) ValidatorSet(ctx context.Context, in *ValidatorSetRequest, opts ...grpc.CallOption) (*ValidatorSetResponse, error) { ++ out := new(ValidatorSetResponse) ++ err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BlockAPI/ValidatorSet", in, out, opts...) ++ if err != nil { ++ return nil, err ++ } ++ return out, nil ++} ++ ++func (c *blockAPIClient) SubscribeNewHeights(ctx context.Context, in *SubscribeNewHeightsRequest, opts ...grpc.CallOption) (BlockAPI_SubscribeNewHeightsClient, error) { ++ stream, err := c.cc.NewStream(ctx, &_BlockAPI_serviceDesc.Streams[2], "/tendermint.rpc.grpc.BlockAPI/SubscribeNewHeights", opts...) ++ if err != nil { ++ return nil, err ++ } ++ x := &blockAPISubscribeNewHeightsClient{stream} ++ if err := x.ClientStream.SendMsg(in); err != nil { ++ return nil, err ++ } ++ if err := x.ClientStream.CloseSend(); err != nil { ++ return nil, err ++ } ++ return x, nil ++} ++ ++type BlockAPI_SubscribeNewHeightsClient interface { ++ Recv() (*NewHeightEvent, error) ++ grpc.ClientStream ++} ++ ++type blockAPISubscribeNewHeightsClient struct { ++ grpc.ClientStream ++} ++ ++func (x *blockAPISubscribeNewHeightsClient) Recv() (*NewHeightEvent, error) { ++ m := new(NewHeightEvent) ++ if err := x.ClientStream.RecvMsg(m); err != nil { ++ return nil, err ++ } ++ return m, nil ++} ++ ++func (c *blockAPIClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { ++ out := new(StatusResponse) ++ err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BlockAPI/Status", in, out, opts...) ++ if err != nil { ++ return nil, err ++ } ++ return out, nil ++} ++ ++// BlockAPIServer is the server API for BlockAPI service. 
++type BlockAPIServer interface { ++ BlockByHash(*BlockByHashRequest, BlockAPI_BlockByHashServer) error ++ BlockByHeight(*BlockByHeightRequest, BlockAPI_BlockByHeightServer) error ++ Commit(context.Context, *CommitRequest) (*CommitResponse, error) ++ ValidatorSet(context.Context, *ValidatorSetRequest) (*ValidatorSetResponse, error) ++ SubscribeNewHeights(*SubscribeNewHeightsRequest, BlockAPI_SubscribeNewHeightsServer) error ++ Status(context.Context, *StatusRequest) (*StatusResponse, error) ++} ++ ++// UnimplementedBlockAPIServer can be embedded to have forward compatible implementations. ++type UnimplementedBlockAPIServer struct { ++} ++ ++func (*UnimplementedBlockAPIServer) BlockByHash(req *BlockByHashRequest, srv BlockAPI_BlockByHashServer) error { ++ return status.Errorf(codes.Unimplemented, "method BlockByHash not implemented") ++} ++func (*UnimplementedBlockAPIServer) BlockByHeight(req *BlockByHeightRequest, srv BlockAPI_BlockByHeightServer) error { ++ return status.Errorf(codes.Unimplemented, "method BlockByHeight not implemented") ++} ++func (*UnimplementedBlockAPIServer) Commit(ctx context.Context, req *CommitRequest) (*CommitResponse, error) { ++ return nil, status.Errorf(codes.Unimplemented, "method Commit not implemented") ++} ++func (*UnimplementedBlockAPIServer) ValidatorSet(ctx context.Context, req *ValidatorSetRequest) (*ValidatorSetResponse, error) { ++ return nil, status.Errorf(codes.Unimplemented, "method ValidatorSet not implemented") ++} ++func (*UnimplementedBlockAPIServer) SubscribeNewHeights(req *SubscribeNewHeightsRequest, srv BlockAPI_SubscribeNewHeightsServer) error { ++ return status.Errorf(codes.Unimplemented, "method SubscribeNewHeights not implemented") ++} ++func (*UnimplementedBlockAPIServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) { ++ return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") ++} ++ ++func RegisterBlockAPIServer(s *grpc.Server, srv BlockAPIServer) { ++ 
s.RegisterService(&_BlockAPI_serviceDesc, srv) ++} ++ ++func _BlockAPI_BlockByHash_Handler(srv interface{}, stream grpc.ServerStream) error { ++ m := new(BlockByHashRequest) ++ if err := stream.RecvMsg(m); err != nil { ++ return err ++ } ++ return srv.(BlockAPIServer).BlockByHash(m, &blockAPIBlockByHashServer{stream}) ++} ++ ++type BlockAPI_BlockByHashServer interface { ++ Send(*StreamedBlockByHashResponse) error ++ grpc.ServerStream ++} ++ ++type blockAPIBlockByHashServer struct { ++ grpc.ServerStream ++} ++ ++func (x *blockAPIBlockByHashServer) Send(m *StreamedBlockByHashResponse) error { ++ return x.ServerStream.SendMsg(m) ++} ++ ++func _BlockAPI_BlockByHeight_Handler(srv interface{}, stream grpc.ServerStream) error { ++ m := new(BlockByHeightRequest) ++ if err := stream.RecvMsg(m); err != nil { ++ return err ++ } ++ return srv.(BlockAPIServer).BlockByHeight(m, &blockAPIBlockByHeightServer{stream}) ++} ++ ++type BlockAPI_BlockByHeightServer interface { ++ Send(*StreamedBlockByHeightResponse) error ++ grpc.ServerStream ++} ++ ++type blockAPIBlockByHeightServer struct { ++ grpc.ServerStream ++} ++ ++func (x *blockAPIBlockByHeightServer) Send(m *StreamedBlockByHeightResponse) error { ++ return x.ServerStream.SendMsg(m) ++} ++ ++func _BlockAPI_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { ++ in := new(CommitRequest) ++ if err := dec(in); err != nil { ++ return nil, err ++ } ++ if interceptor == nil { ++ return srv.(BlockAPIServer).Commit(ctx, in) ++ } ++ info := &grpc.UnaryServerInfo{ ++ Server: srv, ++ FullMethod: "/tendermint.rpc.grpc.BlockAPI/Commit", ++ } ++ handler := func(ctx context.Context, req interface{}) (interface{}, error) { ++ return srv.(BlockAPIServer).Commit(ctx, req.(*CommitRequest)) ++ } ++ return interceptor(ctx, in, info, handler) ++} ++ ++func _BlockAPI_ValidatorSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { ++ in := new(ValidatorSetRequest) ++ if err := dec(in); err != nil { ++ return nil, err ++ } ++ if interceptor == nil { ++ return srv.(BlockAPIServer).ValidatorSet(ctx, in) ++ } ++ info := &grpc.UnaryServerInfo{ ++ Server: srv, ++ FullMethod: "/tendermint.rpc.grpc.BlockAPI/ValidatorSet", ++ } ++ handler := func(ctx context.Context, req interface{}) (interface{}, error) { ++ return srv.(BlockAPIServer).ValidatorSet(ctx, req.(*ValidatorSetRequest)) ++ } ++ return interceptor(ctx, in, info, handler) ++} ++ ++func _BlockAPI_SubscribeNewHeights_Handler(srv interface{}, stream grpc.ServerStream) error { ++ m := new(SubscribeNewHeightsRequest) ++ if err := stream.RecvMsg(m); err != nil { ++ return err ++ } ++ return srv.(BlockAPIServer).SubscribeNewHeights(m, &blockAPISubscribeNewHeightsServer{stream}) ++} ++ ++type BlockAPI_SubscribeNewHeightsServer interface { ++ Send(*NewHeightEvent) error ++ grpc.ServerStream ++} ++ ++type blockAPISubscribeNewHeightsServer struct { ++ grpc.ServerStream ++} ++ ++func (x *blockAPISubscribeNewHeightsServer) Send(m *NewHeightEvent) error { ++ return x.ServerStream.SendMsg(m) ++} ++ ++func _BlockAPI_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { ++ in := new(StatusRequest) ++ if err := dec(in); err != nil { ++ return nil, err ++ } ++ if interceptor == nil { ++ return srv.(BlockAPIServer).Status(ctx, in) ++ } ++ info := &grpc.UnaryServerInfo{ ++ Server: srv, ++ FullMethod: "/tendermint.rpc.grpc.BlockAPI/Status", ++ } ++ handler := func(ctx context.Context, req interface{}) (interface{}, error) { ++ return srv.(BlockAPIServer).Status(ctx, req.(*StatusRequest)) ++ } ++ return interceptor(ctx, in, info, handler) ++} ++ ++var _BlockAPI_serviceDesc = grpc.ServiceDesc{ ++ ServiceName: "tendermint.rpc.grpc.BlockAPI", ++ HandlerType: (*BlockAPIServer)(nil), ++ Methods: []grpc.MethodDesc{ 
++ { ++ MethodName: "Commit", ++ Handler: _BlockAPI_Commit_Handler, ++ }, ++ { ++ MethodName: "ValidatorSet", ++ Handler: _BlockAPI_ValidatorSet_Handler, ++ }, ++ { ++ MethodName: "Status", ++ Handler: _BlockAPI_Status_Handler, ++ }, ++ }, ++ Streams: []grpc.StreamDesc{ ++ { ++ StreamName: "BlockByHash", ++ Handler: _BlockAPI_BlockByHash_Handler, ++ ServerStreams: true, ++ }, ++ { ++ StreamName: "BlockByHeight", ++ Handler: _BlockAPI_BlockByHeight_Handler, ++ ServerStreams: true, ++ }, ++ { ++ StreamName: "SubscribeNewHeights", ++ Handler: _BlockAPI_SubscribeNewHeights_Handler, ++ ServerStreams: true, ++ }, ++ }, ++ Metadata: "tendermint/rpc/grpc/types.proto", ++} ++ ++func (m *RequestPing) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *RequestPing) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *RequestPing) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ return len(dAtA) - i, nil ++} ++ ++func (m *RequestBroadcastTx) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *RequestBroadcastTx) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *RequestBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if len(m.Tx) > 0 { ++ i -= len(m.Tx) ++ copy(dAtA[i:], m.Tx) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *BlockByHashRequest) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = 
make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *BlockByHashRequest) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *BlockByHashRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if m.Prove { ++ i-- ++ if m.Prove { ++ dAtA[i] = 1 ++ } else { ++ dAtA[i] = 0 ++ } ++ i-- ++ dAtA[i] = 0x10 ++ } ++ if len(m.Hash) > 0 { ++ i -= len(m.Hash) ++ copy(dAtA[i:], m.Hash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *BlockByHeightRequest) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *BlockByHeightRequest) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *BlockByHeightRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if m.Prove { ++ i-- ++ if m.Prove { ++ dAtA[i] = 1 ++ } else { ++ dAtA[i] = 0 ++ } ++ i-- ++ dAtA[i] = 0x10 ++ } ++ if m.Height != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Height)) ++ i-- ++ dAtA[i] = 0x8 ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *CommitRequest) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *CommitRequest) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *CommitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if 
m.Height != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Height)) ++ i-- ++ dAtA[i] = 0x8 ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *ValidatorSetRequest) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *ValidatorSetRequest) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *ValidatorSetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if m.Height != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Height)) ++ i-- ++ dAtA[i] = 0x8 ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *SubscribeNewHeightsRequest) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *SubscribeNewHeightsRequest) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *SubscribeNewHeightsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ return len(dAtA) - i, nil ++} ++ ++func (m *StatusRequest) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ return len(dAtA) - i, nil ++} ++ ++func (m *ResponsePing) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) 
++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *ResponsePing) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *ResponsePing) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ return len(dAtA) - i, nil ++} ++ ++func (m *ResponseBroadcastTx) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *ResponseBroadcastTx) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *ResponseBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if m.DeliverTx != nil { ++ { ++ size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x12 ++ } ++ if m.CheckTx != nil { ++ { ++ size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *StreamedBlockByHashResponse) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *StreamedBlockByHashResponse) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *StreamedBlockByHashResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if m.IsLast { ++ i-- ++ if m.IsLast { 
++ dAtA[i] = 1 ++ } else { ++ dAtA[i] = 0 ++ } ++ i-- ++ dAtA[i] = 0x20 ++ } ++ if m.ValidatorSet != nil { ++ { ++ size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x1a ++ } ++ if m.Commit != nil { ++ { ++ size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x12 ++ } ++ if m.BlockPart != nil { ++ { ++ size, err := m.BlockPart.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *StreamedBlockByHeightResponse) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *StreamedBlockByHeightResponse) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *StreamedBlockByHeightResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if m.IsLast { ++ i-- ++ if m.IsLast { ++ dAtA[i] = 1 ++ } else { ++ dAtA[i] = 0 ++ } ++ i-- ++ dAtA[i] = 0x20 ++ } ++ if m.ValidatorSet != nil { ++ { ++ size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x1a ++ } ++ if m.Commit != nil { ++ { ++ size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x12 ++ } ++ if m.BlockPart != nil { ++ { ++ size, err := 
m.BlockPart.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *CommitResponse) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *CommitResponse) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *CommitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if m.Commit != nil { ++ { ++ size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *ValidatorSetResponse) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *ValidatorSetResponse) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *ValidatorSetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if m.Height != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Height)) ++ i-- ++ dAtA[i] = 0x10 ++ } ++ if m.ValidatorSet != nil { ++ { ++ size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *NewHeightEvent) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *NewHeightEvent) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *NewHeightEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if len(m.Hash) > 0 { ++ i -= len(m.Hash) ++ copy(dAtA[i:], m.Hash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) ++ i-- ++ dAtA[i] = 0x12 ++ } ++ if m.Height != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.Height)) ++ i-- ++ dAtA[i] = 0x8 ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *StatusResponse) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if m.ValidatorInfo != nil { ++ { ++ size, err := m.ValidatorInfo.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x1a ++ } ++ if m.SyncInfo != nil { ++ { ++ size, err := m.SyncInfo.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x12 ++ } ++ if m.NodeInfo != nil { ++ { ++ size, err := m.NodeInfo.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *SyncInfo) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ 
n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *SyncInfo) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *SyncInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if m.CatchingUp { ++ i-- ++ if m.CatchingUp { ++ dAtA[i] = 1 ++ } else { ++ dAtA[i] = 0 ++ } ++ i-- ++ dAtA[i] = 0x48 ++ } ++ n14, err14 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EarliestBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EarliestBlockTime):]) ++ if err14 != nil { ++ return 0, err14 ++ } ++ i -= n14 ++ i = encodeVarintTypes(dAtA, i, uint64(n14)) ++ i-- ++ dAtA[i] = 0x42 ++ if m.EarliestBlockHeight != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.EarliestBlockHeight)) ++ i-- ++ dAtA[i] = 0x38 ++ } ++ if len(m.EarliestAppHash) > 0 { ++ i -= len(m.EarliestAppHash) ++ copy(dAtA[i:], m.EarliestAppHash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.EarliestAppHash))) ++ i-- ++ dAtA[i] = 0x32 ++ } ++ if len(m.EarliestBlockHash) > 0 { ++ i -= len(m.EarliestBlockHash) ++ copy(dAtA[i:], m.EarliestBlockHash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.EarliestBlockHash))) ++ i-- ++ dAtA[i] = 0x2a ++ } ++ n15, err15 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LatestBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LatestBlockTime):]) ++ if err15 != nil { ++ return 0, err15 ++ } ++ i -= n15 ++ i = encodeVarintTypes(dAtA, i, uint64(n15)) ++ i-- ++ dAtA[i] = 0x22 ++ if m.LatestBlockHeight != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.LatestBlockHeight)) ++ i-- ++ dAtA[i] = 0x18 ++ } ++ if len(m.LatestAppHash) > 0 { ++ i -= len(m.LatestAppHash) ++ copy(dAtA[i:], m.LatestAppHash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.LatestAppHash))) ++ i-- ++ dAtA[i] = 0x12 ++ } ++ if len(m.LatestBlockHash) > 0 { ++ i -= len(m.LatestBlockHash) ++ 
copy(dAtA[i:], m.LatestBlockHash) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.LatestBlockHash))) ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil ++} ++ ++func (m *ValidatorInfo) Marshal() (dAtA []byte, err error) { ++ size := m.Size() ++ dAtA = make([]byte, size) ++ n, err := m.MarshalToSizedBuffer(dAtA[:size]) ++ if err != nil { ++ return nil, err ++ } ++ return dAtA[:n], nil ++} ++ ++func (m *ValidatorInfo) MarshalTo(dAtA []byte) (int, error) { ++ size := m.Size() ++ return m.MarshalToSizedBuffer(dAtA[:size]) ++} ++ ++func (m *ValidatorInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { ++ i := len(dAtA) ++ _ = i ++ var l int ++ _ = l ++ if m.VotingPower != 0 { ++ i = encodeVarintTypes(dAtA, i, uint64(m.VotingPower)) ++ i-- ++ dAtA[i] = 0x18 ++ } ++ if m.PubKey != nil { ++ { ++ size, err := m.PubKey.MarshalToSizedBuffer(dAtA[:i]) ++ if err != nil { ++ return 0, err ++ } ++ i -= size ++ i = encodeVarintTypes(dAtA, i, uint64(size)) ++ } ++ i-- ++ dAtA[i] = 0x12 ++ } ++ if len(m.Address) > 0 { ++ i -= len(m.Address) ++ copy(dAtA[i:], m.Address) ++ i = encodeVarintTypes(dAtA, i, uint64(len(m.Address))) ++ i-- ++ dAtA[i] = 0xa ++ } ++ return len(dAtA) - i, nil ++} ++ ++func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { ++ offset -= sovTypes(v) ++ base := offset ++ for v >= 1<<7 { ++ dAtA[offset] = uint8(v&0x7f | 0x80) ++ v >>= 7 ++ offset++ ++ } ++ dAtA[offset] = uint8(v) ++ return base ++} ++func (m *RequestPing) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ return n ++} ++ ++func (m *RequestBroadcastTx) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ l = len(m.Tx) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ ++func (m *BlockByHashRequest) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ l = len(m.Hash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.Prove { ++ n += 2 ++ } ++ return n ++} ++ ++func (m 
*BlockByHeightRequest) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.Height != 0 { ++ n += 1 + sovTypes(uint64(m.Height)) ++ } ++ if m.Prove { ++ n += 2 ++ } ++ return n ++} ++ ++func (m *CommitRequest) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.Height != 0 { ++ n += 1 + sovTypes(uint64(m.Height)) ++ } ++ return n ++} ++ ++func (m *ValidatorSetRequest) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.Height != 0 { ++ n += 1 + sovTypes(uint64(m.Height)) ++ } ++ return n ++} ++ ++func (m *SubscribeNewHeightsRequest) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ return n ++} ++ ++func (m *StatusRequest) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ return n ++} ++ ++func (m *ResponsePing) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ return n ++} ++ ++func (m *ResponseBroadcastTx) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.CheckTx != nil { ++ l = m.CheckTx.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.DeliverTx != nil { ++ l = m.DeliverTx.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ ++func (m *StreamedBlockByHashResponse) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.BlockPart != nil { ++ l = m.BlockPart.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.Commit != nil { ++ l = m.Commit.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.ValidatorSet != nil { ++ l = m.ValidatorSet.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.IsLast { ++ n += 2 ++ } ++ return n ++} ++ ++func (m *StreamedBlockByHeightResponse) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.BlockPart != nil { ++ l = m.BlockPart.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.Commit != nil { ++ l = m.Commit.Size() ++ n += 1 + l + sovTypes(uint64(l)) 
++ } ++ if m.ValidatorSet != nil { ++ l = m.ValidatorSet.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.IsLast { ++ n += 2 ++ } ++ return n ++} ++ ++func (m *CommitResponse) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.Commit != nil { ++ l = m.Commit.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ ++func (m *ValidatorSetResponse) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.ValidatorSet != nil { ++ l = m.ValidatorSet.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.Height != 0 { ++ n += 1 + sovTypes(uint64(m.Height)) ++ } ++ return n ++} ++ ++func (m *NewHeightEvent) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.Height != 0 { ++ n += 1 + sovTypes(uint64(m.Height)) ++ } ++ l = len(m.Hash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ ++func (m *StatusResponse) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ if m.NodeInfo != nil { ++ l = m.NodeInfo.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.SyncInfo != nil { ++ l = m.SyncInfo.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.ValidatorInfo != nil { ++ l = m.ValidatorInfo.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ return n ++} ++ ++func (m *SyncInfo) Size() (n int) { ++ if m == nil { ++ return 0 ++ } ++ var l int ++ _ = l ++ l = len(m.LatestBlockHash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ l = len(m.LatestAppHash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.LatestBlockHeight != 0 { ++ n += 1 + sovTypes(uint64(m.LatestBlockHeight)) ++ } ++ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.LatestBlockTime) ++ n += 1 + l + sovTypes(uint64(l)) ++ l = len(m.EarliestBlockHash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ l = len(m.EarliestAppHash) ++ if l > 0 { ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.EarliestBlockHeight != 0 { ++ n += 1 + 
sovTypes(uint64(m.EarliestBlockHeight)) ++ } ++ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.EarliestBlockTime) ++ n += 1 + l + sovTypes(uint64(l)) ++ if m.CatchingUp { ++ n += 2 ++ } ++ return n ++} ++ ++func (m *ValidatorInfo) Size() (n int) { ++ if m == nil { ++ return 0 ++ } + var l int + _ = l +- if m.CheckTx != nil { +- l = m.CheckTx.Size() ++ l = len(m.Address) ++ if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } +- if m.DeliverTx != nil { +- l = m.DeliverTx.Size() +- n += 1 + l + sovTypes(uint64(l)) ++ if m.PubKey != nil { ++ l = m.PubKey.Size() ++ n += 1 + l + sovTypes(uint64(l)) ++ } ++ if m.VotingPower != 0 { ++ n += 1 + sovTypes(uint64(m.VotingPower)) ++ } ++ return n ++} ++ ++func sovTypes(x uint64) (n int) { ++ return (math_bits.Len64(x|1) + 6) / 7 ++} ++func sozTypes(x uint64) (n int) { ++ return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) ++} ++func (m *RequestPing) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: RequestPing: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: RequestPing: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *RequestBroadcastTx) Unmarshal(dAtA 
[]byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: RequestBroadcastTx: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: RequestBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) 
++ if m.Tx == nil { ++ m.Tx = []byte{} ++ } ++ iNdEx = postIndex ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *BlockByHashRequest) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: BlockByHashRequest: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: BlockByHashRequest: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
++ if m.Hash == nil { ++ m.Hash = []byte{} ++ } ++ iNdEx = postIndex ++ case 2: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) ++ } ++ var v int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ v |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ m.Prove = bool(v != 0) ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *BlockByHeightRequest) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: BlockByHeightRequest: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: BlockByHeightRequest: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) ++ } ++ m.Height = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.Height |= int64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ case 2: ++ if wireType != 0 { ++ return 
fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) ++ } ++ var v int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ v |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ m.Prove = bool(v != 0) ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *CommitRequest) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: CommitRequest: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: CommitRequest: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) ++ } ++ m.Height = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.Height |= int64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ 
} ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *ValidatorSetRequest) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: ValidatorSetRequest: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: ValidatorSetRequest: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) ++ } ++ m.Height = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.Height |= int64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *SubscribeNewHeightsRequest) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= 
uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: SubscribeNewHeightsRequest: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: SubscribeNewHeightsRequest: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *StatusRequest) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *ResponsePing) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; 
shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: ResponsePing: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: ResponsePing: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: ResponseBroadcastTx: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: ResponseBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ 
} ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.CheckTx == nil { ++ m.CheckTx = &types.ResponseCheckTx{} ++ } ++ if err := m.CheckTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 2: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.DeliverTx == nil { ++ m.DeliverTx = &types.ResponseDeliverTx{} ++ } ++ if err := m.DeliverTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *StreamedBlockByHashResponse) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) 
++ if wireType == 4 { ++ return fmt.Errorf("proto: StreamedBlockByHashResponse: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: StreamedBlockByHashResponse: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field BlockPart", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.BlockPart == nil { ++ m.BlockPart = &types1.Part{} ++ } ++ if err := m.BlockPart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 2: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.Commit == nil { ++ m.Commit = &types1.Commit{} ++ } ++ if err := m.Commit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 3: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ 
return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.ValidatorSet == nil { ++ m.ValidatorSet = &types1.ValidatorSet{} ++ } ++ if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 4: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field IsLast", wireType) ++ } ++ var v int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ v |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ m.IsLast = bool(v != 0) ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *StreamedBlockByHeightResponse) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: StreamedBlockByHeightResponse: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: 
StreamedBlockByHeightResponse: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field BlockPart", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.BlockPart == nil { ++ m.BlockPart = &types1.Part{} ++ } ++ if err := m.BlockPart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 2: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.Commit == nil { ++ m.Commit = &types1.Commit{} ++ } ++ if err := m.Commit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 3: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { 
++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.ValidatorSet == nil { ++ m.ValidatorSet = &types1.ValidatorSet{} ++ } ++ if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 4: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field IsLast", wireType) ++ } ++ var v int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ v |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ m.IsLast = bool(v != 0) ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *CommitResponse) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: CommitResponse: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: CommitResponse: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) ++ 
} ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.Commit == nil { ++ m.Commit = &types1.Commit{} ++ } ++ if err := m.Commit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *ValidatorSetResponse) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: ValidatorSetResponse: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: ValidatorSetResponse: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return 
io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.ValidatorSet == nil { ++ m.ValidatorSet = &types1.ValidatorSet{} ++ } ++ if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 2: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) ++ } ++ m.Height = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.Height |= int64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil ++} ++func (m *NewHeightEvent) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: NewHeightEvent: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: NewHeightEvent: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if 
wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) ++ } ++ m.Height = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.Height |= int64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ case 2: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
++ if m.Hash == nil { ++ m.Hash = []byte{} ++ } ++ iNdEx = postIndex ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } ++ ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF + } +- return n ++ return nil + } ++func (m *StatusResponse) Unmarshal(dAtA []byte) error { ++ l := len(dAtA) ++ iNdEx := 0 ++ for iNdEx < l { ++ preIndex := iNdEx ++ var wire uint64 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field NodeInfo", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.NodeInfo == nil { ++ m.NodeInfo = &p2p.DefaultNodeInfo{} ++ } ++ if err := m.NodeInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 2: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = 
%d for field SyncInfo", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.SyncInfo == nil { ++ m.SyncInfo = &SyncInfo{} ++ } ++ if err := m.SyncInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ case 3: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field ValidatorInfo", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ if m.ValidatorInfo == nil { ++ m.ValidatorInfo = &ValidatorInfo{} ++ } ++ if err := m.ValidatorInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ return err ++ } ++ iNdEx = postIndex ++ default: ++ iNdEx = preIndex ++ skippy, err := skipTypes(dAtA[iNdEx:]) ++ if err != nil { ++ return err ++ } ++ if (skippy < 0) || (iNdEx+skippy) < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if (iNdEx + skippy) > l { ++ return io.ErrUnexpectedEOF ++ } ++ iNdEx += skippy ++ } ++ } + +-func sovTypes(x uint64) (n int) { +- return (math_bits.Len64(x|1) + 6) / 7 +-} +-func sozTypes(x uint64) (n int) { +- return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) ++ if iNdEx > l { ++ return io.ErrUnexpectedEOF ++ } ++ return nil 
+ } +-func (m *RequestPing) Unmarshal(dAtA []byte) error { ++func (m *SyncInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { +@@ -542,78 +4155,182 @@ func (m *RequestPing) Unmarshal(dAtA []byte) error { + if shift >= 64 { + return ErrIntOverflowTypes + } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ wire |= uint64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ fieldNum := int32(wire >> 3) ++ wireType := int(wire & 0x7) ++ if wireType == 4 { ++ return fmt.Errorf("proto: SyncInfo: wiretype end group for non-group") ++ } ++ if fieldNum <= 0 { ++ return fmt.Errorf("proto: SyncInfo: illegal tag %d (wire type %d)", fieldNum, wire) ++ } ++ switch fieldNum { ++ case 1: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field LatestBlockHash", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.LatestBlockHash = append(m.LatestBlockHash[:0], dAtA[iNdEx:postIndex]...) 
++ if m.LatestBlockHash == nil { ++ m.LatestBlockHash = []byte{} ++ } ++ iNdEx = postIndex ++ case 2: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field LatestAppHash", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { ++ return io.ErrUnexpectedEOF ++ } ++ m.LatestAppHash = append(m.LatestAppHash[:0], dAtA[iNdEx:postIndex]...) ++ if m.LatestAppHash == nil { ++ m.LatestAppHash = []byte{} ++ } ++ iNdEx = postIndex ++ case 3: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field LatestBlockHeight", wireType) ++ } ++ m.LatestBlockHeight = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.LatestBlockHeight |= int64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } + } +- b := dAtA[iNdEx] +- iNdEx++ +- wire |= uint64(b&0x7F) << shift +- if b < 0x80 { +- break ++ case 4: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field LatestBlockTime", wireType) + } +- } +- fieldNum := int32(wire >> 3) +- wireType := int(wire & 0x7) +- if wireType == 4 { +- return fmt.Errorf("proto: RequestPing: wiretype end group for non-group") +- } +- if fieldNum <= 0 { +- return fmt.Errorf("proto: RequestPing: illegal tag %d (wire type %d)", fieldNum, wire) +- } +- switch fieldNum { +- default: +- iNdEx = preIndex +- skippy, err := skipTypes(dAtA[iNdEx:]) +- if err != nil { +- return err ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return 
ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } + } +- if (skippy < 0) || (iNdEx+skippy) < 0 { ++ if msglen < 0 { + return ErrInvalidLengthTypes + } +- if (iNdEx + skippy) > l { ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { + return io.ErrUnexpectedEOF + } +- iNdEx += skippy +- } +- } +- +- if iNdEx > l { +- return io.ErrUnexpectedEOF +- } +- return nil +-} +-func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) error { +- l := len(dAtA) +- iNdEx := 0 +- for iNdEx < l { +- preIndex := iNdEx +- var wire uint64 +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowTypes ++ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.LatestBlockTime, dAtA[iNdEx:postIndex]); err != nil { ++ return err + } +- if iNdEx >= l { ++ iNdEx = postIndex ++ case 5: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field EarliestBlockHash", wireType) ++ } ++ var byteLen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ byteLen |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if byteLen < 0 { ++ return ErrInvalidLengthTypes ++ } ++ postIndex := iNdEx + byteLen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { + return io.ErrUnexpectedEOF + } +- b := dAtA[iNdEx] +- iNdEx++ +- wire |= uint64(b&0x7F) << shift +- if b < 0x80 { +- break ++ m.EarliestBlockHash = append(m.EarliestBlockHash[:0], dAtA[iNdEx:postIndex]...) 
++ if m.EarliestBlockHash == nil { ++ m.EarliestBlockHash = []byte{} + } +- } +- fieldNum := int32(wire >> 3) +- wireType := int(wire & 0x7) +- if wireType == 4 { +- return fmt.Errorf("proto: RequestBroadcastTx: wiretype end group for non-group") +- } +- if fieldNum <= 0 { +- return fmt.Errorf("proto: RequestBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) +- } +- switch fieldNum { +- case 1: ++ iNdEx = postIndex ++ case 6: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field EarliestAppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { +@@ -640,61 +4357,83 @@ func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) error { + if postIndex > l { + return io.ErrUnexpectedEOF + } +- m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) +- if m.Tx == nil { +- m.Tx = []byte{} ++ m.EarliestAppHash = append(m.EarliestAppHash[:0], dAtA[iNdEx:postIndex]...) ++ if m.EarliestAppHash == nil { ++ m.EarliestAppHash = []byte{} + } + iNdEx = postIndex +- default: +- iNdEx = preIndex +- skippy, err := skipTypes(dAtA[iNdEx:]) +- if err != nil { +- return err ++ case 7: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field EarliestBlockHeight", wireType) + } +- if (skippy < 0) || (iNdEx+skippy) < 0 { ++ m.EarliestBlockHeight = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.EarliestBlockHeight |= int64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ case 8: ++ if wireType != 2 { ++ return fmt.Errorf("proto: wrong wireType = %d for field EarliestBlockTime", wireType) ++ } ++ var msglen int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ msglen |= 
int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } ++ if msglen < 0 { + return ErrInvalidLengthTypes + } +- if (iNdEx + skippy) > l { ++ postIndex := iNdEx + msglen ++ if postIndex < 0 { ++ return ErrInvalidLengthTypes ++ } ++ if postIndex > l { + return io.ErrUnexpectedEOF + } +- iNdEx += skippy +- } +- } +- +- if iNdEx > l { +- return io.ErrUnexpectedEOF +- } +- return nil +-} +-func (m *ResponsePing) Unmarshal(dAtA []byte) error { +- l := len(dAtA) +- iNdEx := 0 +- for iNdEx < l { +- preIndex := iNdEx +- var wire uint64 +- for shift := uint(0); ; shift += 7 { +- if shift >= 64 { +- return ErrIntOverflowTypes ++ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.EarliestBlockTime, dAtA[iNdEx:postIndex]); err != nil { ++ return err + } +- if iNdEx >= l { +- return io.ErrUnexpectedEOF ++ iNdEx = postIndex ++ case 9: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field CatchingUp", wireType) + } +- b := dAtA[iNdEx] +- iNdEx++ +- wire |= uint64(b&0x7F) << shift +- if b < 0x80 { +- break ++ var v int ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ v |= int(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } + } +- } +- fieldNum := int32(wire >> 3) +- wireType := int(wire & 0x7) +- if wireType == 4 { +- return fmt.Errorf("proto: ResponsePing: wiretype end group for non-group") +- } +- if fieldNum <= 0 { +- return fmt.Errorf("proto: ResponsePing: illegal tag %d (wire type %d)", fieldNum, wire) +- } +- switch fieldNum { ++ m.CatchingUp = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) +@@ -716,7 +4455,7 @@ func (m *ResponsePing) Unmarshal(dAtA []byte) error { + } + return nil + } +-func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { ++func (m *ValidatorInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { +@@ -739,17 
+4478,17 @@ func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { +- return fmt.Errorf("proto: ResponseBroadcastTx: wiretype end group for non-group") ++ return fmt.Errorf("proto: ValidatorInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { +- return fmt.Errorf("proto: ResponseBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) ++ return fmt.Errorf("proto: ValidatorInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } +- var msglen int ++ var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes +@@ -759,31 +4498,29 @@ func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { + } + b := dAtA[iNdEx] + iNdEx++ +- msglen |= int(b&0x7F) << shift ++ byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } +- if msglen < 0 { ++ if byteLen < 0 { + return ErrInvalidLengthTypes + } +- postIndex := iNdEx + msglen ++ postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } +- if m.CheckTx == nil { +- m.CheckTx = &types.ResponseCheckTx{} +- } +- if err := m.CheckTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { +- return err ++ m.Address = append(m.Address[:0], dAtA[iNdEx:postIndex]...) 
++ if m.Address == nil { ++ m.Address = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { +- return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) ++ return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { +@@ -810,13 +4547,32 @@ func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { + if postIndex > l { + return io.ErrUnexpectedEOF + } +- if m.DeliverTx == nil { +- m.DeliverTx = &types.ResponseDeliverTx{} ++ if m.PubKey == nil { ++ m.PubKey = &crypto.PublicKey{} + } +- if err := m.DeliverTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { ++ if err := m.PubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex ++ case 3: ++ if wireType != 0 { ++ return fmt.Errorf("proto: wrong wireType = %d for field VotingPower", wireType) ++ } ++ m.VotingPower = 0 ++ for shift := uint(0); ; shift += 7 { ++ if shift >= 64 { ++ return ErrIntOverflowTypes ++ } ++ if iNdEx >= l { ++ return io.ErrUnexpectedEOF ++ } ++ b := dAtA[iNdEx] ++ iNdEx++ ++ m.VotingPower |= int64(b&0x7F) << shift ++ if b < 0x80 { ++ break ++ } ++ } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/patches/rpc/jsonrpc/client/ws_client.go.patch b/patches/rpc/jsonrpc/client/ws_client.go.patch new file mode 100644 index 00000000000..b62cbcd2068 --- /dev/null +++ b/patches/rpc/jsonrpc/client/ws_client.go.patch @@ -0,0 +1,12 @@ +diff --git a/rpc/jsonrpc/client/ws_client.go b/rpc/jsonrpc/client/ws_client.go +index 69a1489e0..bdcfced62 100644 +--- a/rpc/jsonrpc/client/ws_client.go ++++ b/rpc/jsonrpc/client/ws_client.go +@@ -291,6 +291,7 @@ func (c *WSClient) reconnect() error { + + for { + jitter := time.Duration(cmtrand.Float64() * float64(time.Second)) // 1s == (1e9 ns) ++ //nolint:gosec + backoffDuration := jitter + ((1 << uint(attempt)) * time.Second) + + c.Logger.Info("reconnecting", "attempt", attempt+1, "backoff_duration", 
backoffDuration) diff --git a/patches/rpc/jsonrpc/server/http_server.go.patch b/patches/rpc/jsonrpc/server/http_server.go.patch new file mode 100644 index 00000000000..3ac91855e9c --- /dev/null +++ b/patches/rpc/jsonrpc/server/http_server.go.patch @@ -0,0 +1,22 @@ +diff --git a/rpc/jsonrpc/server/http_server.go b/rpc/jsonrpc/server/http_server.go +index 29eae9fc3..bdd757e7e 100644 +--- a/rpc/jsonrpc/server/http_server.go ++++ b/rpc/jsonrpc/server/http_server.go +@@ -19,7 +19,7 @@ import ( + types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + ) + +-// Config is a RPC server configuration. ++// Config is an RPC server configuration. + type Config struct { + // see netutil.LimitListener + MaxOpenConnections int +@@ -64,7 +64,7 @@ func Serve(listener net.Listener, handler http.Handler, logger log.Logger, confi + return err + } + +-// Serve creates a http.Server and calls ServeTLS with the given listener, ++// ServeTLS creates a http.Server and calls ServeTLS with the given listener, + // certFile and keyFile. It wraps handler with RecoverAndLogHandler and a + // handler, which limits the max body size to config.MaxBodyBytes. 
+ // diff --git a/patches/rpc/test/helpers.go.patch b/patches/rpc/test/helpers.go.patch new file mode 100644 index 00000000000..fde2621d727 --- /dev/null +++ b/patches/rpc/test/helpers.go.patch @@ -0,0 +1,41 @@ +diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go +index 34dc19a2d..8cc9a1d34 100644 +--- a/rpc/test/helpers.go ++++ b/rpc/test/helpers.go +@@ -27,6 +27,8 @@ import ( + type Options struct { + suppressStdout bool + recreateConfig bool ++ // SpecificConfig will replace the global config if not nil ++ SpecificConfig *cfg.Config + } + + var globalConfig *cfg.Config +@@ -115,6 +117,15 @@ func GetGRPCClient() core_grpc.BroadcastAPIClient { + return core_grpc.StartGRPCClient(grpcAddr) + } + ++func GetBlockAPIClient() (core_grpc.BlockAPIClient, error) { ++ grpcAddr := globalConfig.RPC.GRPCListenAddress ++ client, err := core_grpc.StartBlockAPIGRPCClient(grpcAddr) ++ if err != nil { ++ return nil, err ++ } ++ return client, nil ++} ++ + // StartTendermint starts a test CometBFT server in a go routine and returns when it is initialized + func StartTendermint(app abci.Application, opts ...func(*Options)) *nm.Node { + nodeOpts := defaultOptions +@@ -151,7 +162,11 @@ func StopTendermint(node *nm.Node) { + // NewTendermint creates a new CometBFT server and sleeps forever + func NewTendermint(app abci.Application, opts *Options) *nm.Node { + // Create & start node ++ if opts.SpecificConfig != nil { ++ globalConfig = opts.SpecificConfig ++ } + config := GetConfig(opts.recreateConfig) ++ + var logger log.Logger + if opts.suppressStdout { + logger = log.NewNopLogger() diff --git a/patches/state/execution.go.patch b/patches/state/execution.go.patch new file mode 100644 index 00000000000..dbafc4129b8 --- /dev/null +++ b/patches/state/execution.go.patch @@ -0,0 +1,291 @@ +diff --git a/state/execution.go b/state/execution.go +index fe49e6542..21aff937a 100644 +--- a/state/execution.go ++++ b/state/execution.go +@@ -26,6 +26,9 @@ type BlockExecutor struct { + // save state, 
validators, consensus params, abci responses here + store Store + ++ // blockStore is optional and used to store txInfo ++ blockStore BlockStore ++ + // execute the app against this + proxyApp proxy.AppConnConsensus + +@@ -50,6 +53,13 @@ func BlockExecutorWithMetrics(metrics *Metrics) BlockExecutorOption { + } + } + ++// WithBlockStore optionally stores txInfo ++func WithBlockStore(blockStore BlockStore) BlockExecutorOption { ++ return func(blockExec *BlockExecutor) { ++ blockExec.blockStore = blockStore ++ } ++} ++ + // NewBlockExecutor returns a new BlockExecutor with a NopEventBus. + // Call SetEventBus to provide one. + func NewBlockExecutor( +@@ -91,6 +101,8 @@ func (blockExec *BlockExecutor) SetEventBus(eventBus types.BlockEventPublisher) + // and txs from the mempool. The max bytes must be big enough to fit the commit. + // Up to 1/10th of the block space is allcoated for maximum sized evidence. + // The rest is given to txs, up to the max gas. ++// ++// Contract: application will not return more bytes than are sent over the wire. 
+ func (blockExec *BlockExecutor) CreateProposalBlock( + height int64, + state State, commit *types.Commit, +@@ -102,18 +114,93 @@ func (blockExec *BlockExecutor) CreateProposalBlock( + + evidence, evSize := blockExec.evpool.PendingEvidence(state.ConsensusParams.Evidence.MaxBytes) + +- // Fetch a limited amount of valid txs + maxDataBytes := types.MaxDataBytes(maxBytes, evSize, state.Validators.Size()) + + txs := blockExec.mempool.ReapMaxBytesMaxGas(maxDataBytes, maxGas) + +- return state.MakeBlock(height, txs, commit, evidence, proposerAddr) ++ var timestamp time.Time ++ if height == state.InitialHeight { ++ timestamp = state.LastBlockTime // genesis time ++ } else { ++ timestamp = MedianTime(commit, state.LastValidators) ++ } ++ ++ preparedProposal, err := blockExec.proxyApp.PrepareProposalSync( ++ abci.RequestPrepareProposal{ ++ BlockData: &cmtproto.Data{Txs: txs.ToSliceOfBytes()}, ++ BlockDataSize: maxDataBytes, ++ ChainId: state.ChainID, ++ Height: height, ++ Time: timestamp, ++ }, ++ ) ++ if err != nil { ++ // The App MUST ensure that only valid (and hence 'processable') transactions ++ // enter the mempool. Hence, at this point, we can't have any non-processable ++ // transaction causing an error. ++ // ++ // Also, the App can simply skip any transaction that could cause any kind of trouble. ++ // Either way, we can not recover in a meaningful way, unless we skip proposing ++ // this block, repair what caused the error and try again. Hence, we panic on ++ // purpose for now. 
++ panic(err) ++ } ++ rawNewData := preparedProposal.GetBlockData() ++ ++ rejectedTxs := len(rawNewData.Txs) - len(txs) ++ if rejectedTxs > 0 { ++ blockExec.metrics.RejectedTransactions.Add(float64(rejectedTxs)) ++ } ++ ++ var blockDataSize int ++ for _, tx := range rawNewData.GetTxs() { ++ blockDataSize += len(tx) ++ ++ if maxDataBytes < int64(blockDataSize) { ++ panic("block data exceeds max amount of allowed bytes") ++ } ++ } ++ ++ newData, err := types.DataFromProto(rawNewData) ++ if err != nil { ++ // todo(evan): see if we can get rid of this panic ++ panic(err) ++ } ++ ++ return state.MakeBlock( ++ height, ++ newData, ++ commit, ++ evidence, ++ proposerAddr, ++ ) ++} ++ ++func (blockExec *BlockExecutor) ProcessProposal( ++ block *types.Block, ++) (bool, error) { ++ pData := block.Data.ToProto() ++ req := abci.RequestProcessProposal{ ++ BlockData: &pData, ++ Header: *block.Header.ToProto(), ++ } ++ ++ resp, err := blockExec.proxyApp.ProcessProposalSync(req) ++ if err != nil { ++ return false, ErrInvalidBlock(err) ++ } ++ ++ if resp.IsRejected() { ++ blockExec.metrics.ProcessProposalRejected.Add(1) ++ } ++ ++ return resp.IsOK(), nil + } + + // ValidateBlock validates the given block against the given state. + // If the block is invalid, it returns an error. + // Validation does not mutate state, but does require historical information from the stateDB, +-// ie. to verify evidence from a validator at an old height. ++// i.e., to verify evidence from a validator at an old height. + func (blockExec *BlockExecutor) ValidateBlock(state State, block *types.Block) error { + err := validateBlock(state, block) + if err != nil { +@@ -129,7 +216,10 @@ func (blockExec *BlockExecutor) ValidateBlock(state State, block *types.Block) e + // from outside this package to process and commit an entire block. + // It takes a blockID to avoid recomputing the parts hash. 
+ func (blockExec *BlockExecutor) ApplyBlock( +- state State, blockID types.BlockID, block *types.Block, ++ state State, ++ blockID types.BlockID, ++ block *types.Block, ++ commit *types.Commit, + ) (State, int64, error) { + + if err := validateBlock(state, block); err != nil { +@@ -153,6 +243,17 @@ func (blockExec *BlockExecutor) ApplyBlock( + return state, 0, err + } + ++ // Save indexing info of the transaction. ++ // This needs to be done prior to saving state ++ // for correct crash recovery ++ if blockExec.blockStore != nil { ++ respCodes := getResponseCodes(abciResponses.DeliverTxs) ++ logs := getLogs(abciResponses.DeliverTxs) ++ if err := blockExec.blockStore.SaveTxInfo(block, respCodes, logs); err != nil { ++ return state, 0, err ++ } ++ } ++ + fail.Fail() // XXX + + // validate the validator updates and convert to CometBFT types +@@ -197,7 +298,7 @@ func (blockExec *BlockExecutor) ApplyBlock( + + // Events are fired after everything else. + // NOTE: if we crash between Commit and Save, events wont be fired during replay +- fireEvents(blockExec.logger, blockExec.eventBus, block, abciResponses, validatorUpdates) ++ fireEvents(blockExec.logger, blockExec.eventBus, block, abciResponses, validatorUpdates, state.LastValidators, commit) + + return state, retainHeight, nil + } +@@ -317,6 +418,11 @@ func execBlockOnProxyApp( + + // run txs of block + for _, tx := range block.Txs { ++ // Unwrap the blob tx if necessary. ++ blobTx, isBlobTx := types.UnmarshalBlobTx(tx) ++ if isBlobTx { ++ tx = blobTx.Tx ++ } + proxyAppConn.DeliverTxAsync(abci.RequestDeliverTx{Tx: tx}) + if err := proxyAppConn.Error(); err != nil { + return nil, err +@@ -408,7 +514,7 @@ func updateState( + validatorUpdates []*types.Validator, + ) (State, error) { + +- // Copy the valset so we can apply changes from EndBlock ++ // Copy the valset so that we can apply changes from EndBlock + // and update s.LastValidators and s.Validators. 
+ nValSet := state.NextValidators.Copy() + +@@ -419,7 +525,7 @@ func updateState( + if err != nil { + return state, fmt.Errorf("error changing validator set: %v", err) + } +- // Change results from this height but only applies to the next next height. ++ // Change results from this height but only applies to the next height. + lastHeightValsChanged = header.Height + 1 + 1 + } + +@@ -447,7 +553,7 @@ func updateState( + + // NOTE: the AppHash has not been populated. + // It will be filled on state.Save. +- return State{ ++ s := State{ + Version: nextVersion, + ChainID: state.ChainID, + InitialHeight: state.InitialHeight, +@@ -462,7 +568,11 @@ func updateState( + LastHeightConsensusParamsChanged: lastHeightParamsChanged, + LastResultsHash: ABCIResponsesResultsHash(abciResponses), + AppHash: nil, +- }, nil ++ TimeoutCommit: abciResponses.EndBlock.Timeouts.TimeoutCommit, ++ TimeoutPropose: abciResponses.EndBlock.Timeouts.TimeoutPropose, ++ } ++ ++ return s, nil + } + + // Fire NewBlock, NewBlockHeader. 
+@@ -474,6 +584,8 @@ func fireEvents( + block *types.Block, + abciResponses *cmtstate.ABCIResponses, + validatorUpdates []*types.Validator, ++ currentValidatorSet *types.ValidatorSet, ++ seenCommit *types.Commit, + ) { + if err := eventBus.PublishEventNewBlock(types.EventDataNewBlock{ + Block: block, +@@ -483,6 +595,18 @@ func fireEvents( + logger.Error("failed publishing new block", "err", err) + } + ++ if seenCommit != nil { ++ err := eventBus.PublishEventNewSignedBlock(types.EventDataSignedBlock{ ++ Header: block.Header, ++ Commit: *seenCommit, ++ ValidatorSet: *currentValidatorSet, ++ Data: block.Data, ++ }) ++ if err != nil { ++ logger.Error("failed publishing new signed block", "err", err) ++ } ++ } ++ + if err := eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{ + Header: block.Header, + NumTxs: int64(len(block.Txs)), +@@ -504,8 +628,15 @@ func fireEvents( + } + + for i, tx := range block.Data.Txs { ++ // Unwrap the blob tx and just publish the PFB without the blobs. We want ++ // the tx indexer to only be concerned with PFBs. ++ blobTx, isBlobTx := types.UnmarshalBlobTx(tx) ++ if isBlobTx { ++ tx = blobTx.Tx ++ } + if err := eventBus.PublishEventTx(types.EventDataTx{TxResult: abci.TxResult{ + Height: block.Height, ++ //nolint:gosec + Index: uint32(i), + Tx: tx, + Result: *(abciResponses.DeliverTxs[i]), +@@ -550,3 +681,21 @@ func ExecCommitBlock( + // ResponseCommit has no error or log, just data + return res.Data, nil + } ++ ++// getResponseCodes gets response codes from a list of ResponseDeliverTx. ++func getResponseCodes(responses []*abci.ResponseDeliverTx) []uint32 { ++ responseCodes := make([]uint32, len(responses)) ++ for i, response := range responses { ++ responseCodes[i] = response.Code ++ } ++ return responseCodes ++} ++ ++// getLogs gets logs from a list of ResponseDeliverTx. 
++func getLogs(responses []*abci.ResponseDeliverTx) []string { ++ logs := make([]string, len(responses)) ++ for i, response := range responses { ++ logs[i] = response.Log ++ } ++ return logs ++} diff --git a/patches/state/execution_test.go.patch b/patches/state/execution_test.go.patch new file mode 100644 index 00000000000..da3c80f6d6c --- /dev/null +++ b/patches/state/execution_test.go.patch @@ -0,0 +1,346 @@ +diff --git a/state/execution_test.go b/state/execution_test.go +index 474f6f830..6c8415f3f 100644 +--- a/state/execution_test.go ++++ b/state/execution_test.go +@@ -1,14 +1,23 @@ + package state_test + + import ( ++ "bytes" + "context" ++ "io" ++ ++ "net/http" ++ "net/http/httptest" ++ "strconv" ++ "strings" + "testing" + "time" + ++ "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + ++ db "github.com/cometbft/cometbft-db" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/ed25519" +@@ -21,6 +30,8 @@ import ( + "github.com/tendermint/tendermint/proxy" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/state/mocks" ++ sf "github.com/tendermint/tendermint/state/test/factory" ++ "github.com/tendermint/tendermint/test/factory" + "github.com/tendermint/tendermint/types" + cmttime "github.com/tendermint/tendermint/types/time" + "github.com/tendermint/tendermint/version" +@@ -30,6 +41,8 @@ var ( + chainID = "execution_chain" + testPartSize uint32 = types.BlockPartSizeBytes + nTxsPerBlock = 10 ++ namespace = "namespace" ++ height = 1 + ) + + func TestApplyBlock(t *testing.T) { +@@ -51,7 +64,7 @@ func TestApplyBlock(t *testing.T) { + block := makeBlock(state, 1) + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + +- state, retainHeight, err := blockExec.ApplyBlock(state, 
blockID, block) ++ state, retainHeight, err := blockExec.ApplyBlock(state, blockID, block, nil) + require.Nil(t, err) + assert.EqualValues(t, retainHeight, 1) + +@@ -59,6 +72,33 @@ func TestApplyBlock(t *testing.T) { + assert.EqualValues(t, 1, state.Version.Consensus.App, "App version wasn't updated") + } + ++func TestApplyBlockWithBlockStore(t *testing.T) { ++ app := &testApp{} ++ cc := proxy.NewLocalClientCreator(app) ++ proxyApp := proxy.NewAppConns(cc) ++ err := proxyApp.Start() ++ require.Nil(t, err) ++ defer proxyApp.Stop() //nolint:errcheck // ignore for tests ++ blockStore := mocks.NewBlockStore(t) ++ ++ state, stateDB, _ := makeState(1, 1) ++ stateStore := sm.NewStore(stateDB, sm.StoreOptions{ ++ DiscardABCIResponses: false, ++ }) ++ ++ blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), ++ mmock.Mempool{}, sm.EmptyEvidencePool{}, sm.WithBlockStore(blockStore)) ++ ++ block := makeBlock(state, 1) ++ blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} ++ ++ // Check that SaveTxInfo is called with correct arguments ++ blockStore.On("SaveTxInfo", block, mock.AnythingOfType("[]uint32"), mock.AnythingOfType("[]string")).Return(nil) ++ ++ _, _, err = blockExec.ApplyBlock(state, blockID, block, nil) ++ require.Nil(t, err) ++} ++ + // TestBeginBlockValidators ensures we send absent validators list. 
+ func TestBeginBlockValidators(t *testing.T) { + app := &testApp{} +@@ -104,7 +144,13 @@ func TestBeginBlockValidators(t *testing.T) { + lastCommit := types.NewCommit(1, 0, prevBlockID, tc.lastCommitSigs) + + // block for height 2 +- block, _ := state.MakeBlock(2, makeTxs(2), lastCommit, nil, state.Validators.GetProposer().Address) ++ block, _ := state.MakeBlock( ++ 2, ++ factory.MakeData(factory.MakeTenTxs(2)), ++ lastCommit, ++ nil, ++ state.Validators.GetProposer().Address, ++ ) + + _, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), stateStore, 1) + require.Nil(t, err, tc.desc) +@@ -212,7 +258,7 @@ func TestBeginBlockByzantineValidators(t *testing.T) { + block.Header.EvidenceHash = block.Evidence.Hash() + blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + +- state, retainHeight, err := blockExec.ApplyBlock(state, blockID, block) ++ state, retainHeight, err := blockExec.ApplyBlock(state, blockID, block, nil) + require.Nil(t, err) + assert.EqualValues(t, retainHeight, 1) + +@@ -220,6 +266,126 @@ func TestBeginBlockByzantineValidators(t *testing.T) { + assert.Equal(t, abciEv, app.ByzantineValidators) + } + ++func TestProcessProposal(t *testing.T) { ++ height := 1 ++ runTest := func(txs types.Txs, expectAccept bool) { ++ app := &testApp{} ++ cc := proxy.NewLocalClientCreator(app) ++ proxyApp := proxy.NewAppConns(cc) ++ err := proxyApp.Start() ++ require.Nil(t, err) ++ defer proxyApp.Stop() //nolint:errcheck // ignore for tests ++ ++ state, stateDB, _ := makeState(1, height) ++ stateStore := sm.NewStore(stateDB, sm.StoreOptions{ ++ DiscardABCIResponses: false, ++ }) ++ ++ blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), ++ mmock.Mempool{}, sm.EmptyEvidencePool{}) ++ ++ block := sf.MakeBlock(state, int64(height), new(types.Commit)) ++ block.Txs = txs ++ acceptBlock, err := blockExec.ProcessProposal(block) ++ require.Nil(t, err) ++ require.Equal(t, 
expectAccept, acceptBlock) ++ } ++ goodTxs := factory.MakeTenTxs(int64(height)) ++ runTest(goodTxs, true) ++ // testApp has process proposal fail if any tx is 0-len ++ badTxs := factory.MakeTenTxs(int64(height)) ++ badTxs[0] = types.Tx{} ++ runTest(badTxs, false) ++} ++ ++func TestProcessProposalRejectedMetric(t *testing.T) { ++ server := httptest.NewServer(promhttp.Handler()) ++ defer server.Close() ++ ++ getPrometheusOutput := func() string { ++ resp, err := http.Get(server.URL) ++ require.NoError(t, err) ++ defer resp.Body.Close() ++ ++ buf, _ := io.ReadAll(resp.Body) ++ return string(buf) ++ } ++ metrics := sm.PrometheusMetrics(namespace) ++ state, stateDB, _ := makeState(1, height) ++ ++ accptedBlock := makeAcceptedBlock(state, height) ++ rejectedBlock := makeRejectedBlock(state, height) ++ ++ type testCase struct { ++ name string ++ block *types.Block ++ wantProcessProposalRejectedCount int ++ } ++ tests := []testCase{ ++ // HACKHACK since Prometheus metrics are registered globally, these ++ // tests cases are ordering dependent. In other words, since the counter ++ // metric type is monotonically increasing, the expected metric count of ++ // a test case is the cumulative sum of the metric count in previous ++ // test cases. 
++ {"accepted block has a process proposal rejected count of 0", accptedBlock, 0}, ++ {"rejected block has a process proposal rejected count of 1", rejectedBlock, 1}, ++ } ++ ++ for _, test := range tests { ++ blockExec := makeBlockExec(t, test.name, test.block, stateDB, metrics) ++ ++ _, err := blockExec.ProcessProposal(test.block) ++ require.Nil(t, err, test.name) ++ ++ prometheusOutput := getPrometheusOutput() ++ got := getProcessProposalRejectedCount(t, prometheusOutput) ++ ++ require.Equal(t, got, test.wantProcessProposalRejectedCount, test.name) ++ } ++} ++ ++func makeBlockExec(t *testing.T, testName string, block *types.Block, stateDB db.DB, ++ metrics *sm.Metrics) (blockExec *sm.BlockExecutor) { ++ app := &testApp{} ++ clientCreator := proxy.NewLocalClientCreator(app) ++ proxyApp := proxy.NewAppConns(clientCreator) ++ ++ err := proxyApp.Start() ++ require.Nil(t, err, testName) ++ ++ defer func() { ++ err := proxyApp.Stop() ++ require.Nil(t, err, testName) ++ }() ++ ++ return sm.NewBlockExecutor( ++ sm.NewStore(stateDB, sm.StoreOptions{ ++ DiscardABCIResponses: false, ++ }), ++ log.TestingLogger(), ++ proxyApp.Consensus(), ++ mmock.Mempool{}, ++ sm.EmptyEvidencePool{}, ++ sm.BlockExecutorWithMetrics(metrics), ++ ) ++} ++ ++func getProcessProposalRejectedCount(t *testing.T, prometheusOutput string) (count int) { ++ metricName := strings.Join([]string{namespace, sm.MetricsSubsystem, "process_proposal_rejected"}, "_") ++ lines := strings.Split(prometheusOutput, "\n") ++ ++ for _, line := range lines { ++ if strings.HasPrefix(line, metricName) { ++ parts := strings.Split(line, " ") ++ count, err := strconv.Atoi(parts[1]) ++ require.Nil(t, err) ++ return count ++ } ++ } ++ ++ return 0 ++} ++ + func TestValidateValidatorUpdates(t *testing.T) { + pubkey1 := ed25519.GenPrivKey().PubKey() + pubkey2 := ed25519.GenPrivKey().PubKey() +@@ -396,7 +562,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) { + {PubKey: pk, Power: 10}, + } + +- state, _, err = 
blockExec.ApplyBlock(state, blockID, block) ++ state, _, err = blockExec.ApplyBlock(state, blockID, block, nil) + require.Nil(t, err) + // test new validator was added to NextValidators + if assert.Equal(t, state.Validators.Size()+1, state.NextValidators.Size()) { +@@ -454,11 +620,78 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { + {PubKey: vp, Power: 0}, + } + +- assert.NotPanics(t, func() { state, _, err = blockExec.ApplyBlock(state, blockID, block) }) ++ assert.NotPanics(t, func() { state, _, err = blockExec.ApplyBlock(state, blockID, block, nil) }) + assert.NotNil(t, err) + assert.NotEmpty(t, state.NextValidators.Validators) + } + ++func TestFireEventSignedBlockEvent(t *testing.T) { ++ app := &testApp{} ++ cc := proxy.NewLocalClientCreator(app) ++ proxyApp := proxy.NewAppConns(cc) ++ err := proxyApp.Start() ++ require.NoError(t, err) ++ defer proxyApp.Stop() //nolint:errcheck // ignore for tests ++ ++ state, stateDB, _ := makeState(2, 1) ++ // modify the current validators so it's different to the last validators ++ state.Validators.Validators[0].VotingPower = 10 ++ stateStore := sm.NewStore(stateDB, sm.StoreOptions{ ++ DiscardABCIResponses: false, ++ }) ++ blockExec := sm.NewBlockExecutor( ++ stateStore, ++ log.TestingLogger(), ++ proxyApp.Consensus(), ++ mmock.Mempool{}, ++ sm.EmptyEvidencePool{}, ++ ) ++ eventBus := types.NewEventBus() ++ err = eventBus.Start() ++ require.NoError(t, err) ++ defer eventBus.Stop() //nolint:errcheck ++ ++ ctx, cancel := context.WithCancel(context.Background()) ++ defer cancel() ++ sub, err := eventBus.Subscribe(ctx, "test-client", types.EventQueryNewSignedBlock) ++ require.NoError(t, err) ++ blockExec.SetEventBus(eventBus) ++ ++ block := makeBlock(state, 1) ++ blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} ++ ++ commit := &types.Commit{ ++ Height: block.Height, ++ Round: 0, ++ BlockID: blockID, ++ Signatures: []types.CommitSig{ ++ 
types.NewCommitSigAbsent(), ++ }, ++ } ++ ++ state, _, err = blockExec.ApplyBlock(state, blockID, block, commit) ++ require.NoError(t, err) ++ ++ select { ++ case msg := <-sub.Out(): ++ signedBlock, ok := msg.Data().(types.EventDataSignedBlock) ++ require.True(t, ok) ++ ++ // check that the published data are all from the same height ++ if signedBlock.Header.Height != signedBlock.Commit.Height { ++ t.Fatalf("expected commit height and header height to match") ++ } ++ ++ if valHash := signedBlock.ValidatorSet.Hash(); !bytes.Equal(signedBlock.Header.ValidatorsHash, valHash) { ++ t.Fatalf("expected validator hashes to match") ++ } ++ case <-sub.Cancelled(): ++ t.Fatalf("subscription was unexpectedly cancelled") ++ case <-time.After(5 * time.Second): ++ t.Fatalf("test timed out waiting for signed block") ++ } ++} ++ + func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.BlockID { + var ( + h = make([]byte, tmhash.Size) +@@ -474,3 +707,16 @@ func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.Bloc + }, + } + } ++ ++func makeAcceptedBlock(state sm.State, height int) (block *types.Block) { ++ block = sf.MakeBlock(state, int64(height), new(types.Commit)) ++ goodTxs := factory.MakeTenTxs(int64(height)) ++ block.Txs = goodTxs ++ return block ++} ++ ++func makeRejectedBlock(state sm.State, height int) (block *types.Block) { ++ block = makeAcceptedBlock(state, height) ++ block.Txs[0] = types.Tx{} ++ return block ++} diff --git a/patches/state/helpers_test.go.patch b/patches/state/helpers_test.go.patch new file mode 100644 index 00000000000..45f5c973f80 --- /dev/null +++ b/patches/state/helpers_test.go.patch @@ -0,0 +1,56 @@ +diff --git a/state/helpers_test.go b/state/helpers_test.go +index cdaffe8b6..bc534e6dc 100644 +--- a/state/helpers_test.go ++++ b/state/helpers_test.go +@@ -15,6 +15,7 @@ import ( + cmtproto "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/proxy" + sm 
"github.com/tendermint/tendermint/state" ++ "github.com/tendermint/tendermint/test/factory" + "github.com/tendermint/tendermint/types" + cmttime "github.com/tendermint/tendermint/types/time" + ) +@@ -54,13 +55,19 @@ func makeAndCommitGoodBlock( + + func makeAndApplyGoodBlock(state sm.State, height int64, lastCommit *types.Commit, proposerAddr []byte, + blockExec *sm.BlockExecutor, evidence []types.Evidence) (sm.State, types.BlockID, error) { +- block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, evidence, proposerAddr) ++ block, _ := state.MakeBlock( ++ height, ++ factory.MakeData(factory.MakeTenTxs(height)), ++ lastCommit, ++ evidence, ++ proposerAddr, ++ ) + if err := blockExec.ValidateBlock(state, block); err != nil { + return state, types.BlockID{}, err + } + blockID := types.BlockID{Hash: block.Hash(), + PartSetHeader: types.PartSetHeader{Total: 3, Hash: cmtrand.Bytes(32)}} +- state, _, err := blockExec.ApplyBlock(state, blockID, block) ++ state, _, err := blockExec.ApplyBlock(state, blockID, block, lastCommit) + if err != nil { + return state, types.BlockID{}, err + } +@@ -136,7 +143,7 @@ func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValida + func makeBlock(state sm.State, height int64) *types.Block { + block, _ := state.MakeBlock( + height, +- makeTxs(state.LastBlockHeight), ++ factory.MakeData(makeTxs(state.LastBlockHeight)), + new(types.Commit), + nil, + state.Validators.GetProposer().Address, +@@ -275,3 +282,12 @@ func (app *testApp) Commit() abci.ResponseCommit { + func (app *testApp) Query(reqQuery abci.RequestQuery) (resQuery abci.ResponseQuery) { + return + } ++ ++func (app *testApp) ProcessProposal(req abci.RequestProcessProposal) abci.ResponseProcessProposal { ++ for _, tx := range req.BlockData.Txs { ++ if len(tx) == 0 { ++ return abci.ResponseProcessProposal{Result: abci.ResponseProcessProposal_REJECT} ++ } ++ } ++ return abci.ResponseProcessProposal{Result: abci.ResponseProcessProposal_ACCEPT} ++} diff 
--git a/patches/state/indexer/mocks/block_indexer.go.patch b/patches/state/indexer/mocks/block_indexer.go.patch new file mode 100644 index 00000000000..7f3258b9527 --- /dev/null +++ b/patches/state/indexer/mocks/block_indexer.go.patch @@ -0,0 +1,80 @@ +diff --git a/state/indexer/mocks/block_indexer.go b/state/indexer/mocks/block_indexer.go +index 2c0f0ecb0..4ad0603e0 100644 +--- a/state/indexer/mocks/block_indexer.go ++++ b/state/indexer/mocks/block_indexer.go +@@ -21,14 +21,21 @@ type BlockIndexer struct { + func (_m *BlockIndexer) Has(height int64) (bool, error) { + ret := _m.Called(height) + ++ if len(ret) == 0 { ++ panic("no return value specified for Has") ++ } ++ + var r0 bool ++ var r1 error ++ if rf, ok := ret.Get(0).(func(int64) (bool, error)); ok { ++ return rf(height) ++ } + if rf, ok := ret.Get(0).(func(int64) bool); ok { + r0 = rf(height) + } else { + r0 = ret.Get(0).(bool) + } + +- var r1 error + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(height) + } else { +@@ -42,6 +49,10 @@ func (_m *BlockIndexer) Has(height int64) (bool, error) { + func (_m *BlockIndexer) Index(_a0 types.EventDataNewBlockHeader) error { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for Index") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func(types.EventDataNewBlockHeader) error); ok { + r0 = rf(_a0) +@@ -56,7 +67,15 @@ func (_m *BlockIndexer) Index(_a0 types.EventDataNewBlockHeader) error { + func (_m *BlockIndexer) Search(ctx context.Context, q *query.Query) ([]int64, error) { + ret := _m.Called(ctx, q) + ++ if len(ret) == 0 { ++ panic("no return value specified for Search") ++ } ++ + var r0 []int64 ++ var r1 error ++ if rf, ok := ret.Get(0).(func(context.Context, *query.Query) ([]int64, error)); ok { ++ return rf(ctx, q) ++ } + if rf, ok := ret.Get(0).(func(context.Context, *query.Query) []int64); ok { + r0 = rf(ctx, q) + } else { +@@ -65,7 +84,6 @@ func (_m *BlockIndexer) Search(ctx context.Context, q *query.Query) 
([]int64, er + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *query.Query) error); ok { + r1 = rf(ctx, q) + } else { +@@ -75,13 +93,12 @@ func (_m *BlockIndexer) Search(ctx context.Context, q *query.Query) ([]int64, er + return r0, r1 + } + +-type mockConstructorTestingTNewBlockIndexer interface { ++// NewBlockIndexer creates a new instance of BlockIndexer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. ++// The first argument is typically a *testing.T value. ++func NewBlockIndexer(t interface { + mock.TestingT + Cleanup(func()) +-} +- +-// NewBlockIndexer creates a new instance of BlockIndexer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +-func NewBlockIndexer(t mockConstructorTestingTNewBlockIndexer) *BlockIndexer { ++}) *BlockIndexer { + mock := &BlockIndexer{} + mock.Mock.Test(t) + diff --git a/patches/state/metrics.go.patch b/patches/state/metrics.go.patch new file mode 100644 index 00000000000..fb4f592c944 --- /dev/null +++ b/patches/state/metrics.go.patch @@ -0,0 +1,43 @@ +diff --git a/state/metrics.go b/state/metrics.go +index bcd713f5f..31c674e60 100644 +--- a/state/metrics.go ++++ b/state/metrics.go +@@ -17,6 +17,10 @@ const ( + type Metrics struct { + // Time between BeginBlock and EndBlock. + BlockProcessingTime metrics.Histogram ++ // Count of times a block was rejected via ProcessProposal ++ ProcessProposalRejected metrics.Counter ++ // Count of transactions rejected by application. ++ RejectedTransactions metrics.Counter + } + + // PrometheusMetrics returns Metrics build using Prometheus client library. 
+@@ -35,12 +39,26 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + Help: "Time between BeginBlock and EndBlock in ms.", + Buckets: stdprometheus.LinearBuckets(1, 10, 10), + }, labels).With(labelsAndValues...), ++ ProcessProposalRejected: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ ++ Namespace: namespace, ++ Subsystem: MetricsSubsystem, ++ Name: "process_proposal_rejected", ++ Help: "Count of times a block was rejected via ProcessProposal", ++ }, labels).With(labelsAndValues...), ++ RejectedTransactions: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ ++ Namespace: namespace, ++ Subsystem: MetricsSubsystem, ++ Name: "rejected_transactions", ++ Help: "Count of transactions rejected by application", ++ }, labels).With(labelsAndValues...), + } + } + + // NopMetrics returns no-op Metrics. + func NopMetrics() *Metrics { + return &Metrics{ +- BlockProcessingTime: discard.NewHistogram(), ++ BlockProcessingTime: discard.NewHistogram(), ++ ProcessProposalRejected: discard.NewCounter(), ++ RejectedTransactions: discard.NewCounter(), + } + } diff --git a/patches/state/mocks/block_store.go.patch b/patches/state/mocks/block_store.go.patch new file mode 100644 index 00000000000..1304db20f6f --- /dev/null +++ b/patches/state/mocks/block_store.go.patch @@ -0,0 +1,224 @@ +diff --git a/state/mocks/block_store.go b/state/mocks/block_store.go +index 4493a6e3f..eccf9c27d 100644 +--- a/state/mocks/block_store.go ++++ b/state/mocks/block_store.go +@@ -5,6 +5,7 @@ package mocks + import ( + mock "github.com/stretchr/testify/mock" + ++ cmtstore "github.com/tendermint/tendermint/proto/tendermint/store" + types "github.com/tendermint/tendermint/types" + ) + +@@ -17,6 +18,10 @@ type BlockStore struct { + func (_m *BlockStore) Base() int64 { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for Base") ++ } ++ + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() +@@ -31,6 +36,10 @@ func (_m 
*BlockStore) Base() int64 { + func (_m *BlockStore) Height() int64 { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for Height") ++ } ++ + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() +@@ -45,6 +54,10 @@ func (_m *BlockStore) Height() int64 { + func (_m *BlockStore) LoadBaseMeta() *types.BlockMeta { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for LoadBaseMeta") ++ } ++ + var r0 *types.BlockMeta + if rf, ok := ret.Get(0).(func() *types.BlockMeta); ok { + r0 = rf() +@@ -61,6 +74,10 @@ func (_m *BlockStore) LoadBaseMeta() *types.BlockMeta { + func (_m *BlockStore) LoadBlock(height int64) *types.Block { + ret := _m.Called(height) + ++ if len(ret) == 0 { ++ panic("no return value specified for LoadBlock") ++ } ++ + var r0 *types.Block + if rf, ok := ret.Get(0).(func(int64) *types.Block); ok { + r0 = rf(height) +@@ -73,10 +90,30 @@ func (_m *BlockStore) LoadBlock(height int64) *types.Block { + return r0 + } + ++// LoadTxInfo provides a mock function with given fields: hash ++func (_m *BlockStore) LoadTxInfo(hash []byte) *cmtstore.TxInfo { ++ ret := _m.Called(hash) ++ ++ var r0 *cmtstore.TxInfo ++ if rf, ok := ret.Get(0).(func([]byte) *cmtstore.TxInfo); ok { ++ r0 = rf(hash) ++ } else { ++ if ret.Get(0) != nil { ++ r0 = ret.Get(0).(*cmtstore.TxInfo) ++ } ++ } ++ ++ return r0 ++} ++ + // LoadBlockByHash provides a mock function with given fields: hash + func (_m *BlockStore) LoadBlockByHash(hash []byte) *types.Block { + ret := _m.Called(hash) + ++ if len(ret) == 0 { ++ panic("no return value specified for LoadBlockByHash") ++ } ++ + var r0 *types.Block + if rf, ok := ret.Get(0).(func([]byte) *types.Block); ok { + r0 = rf(hash) +@@ -93,6 +130,10 @@ func (_m *BlockStore) LoadBlockByHash(hash []byte) *types.Block { + func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit { + ret := _m.Called(height) + ++ if len(ret) == 0 { ++ panic("no return value specified for 
LoadBlockCommit") ++ } ++ + var r0 *types.Commit + if rf, ok := ret.Get(0).(func(int64) *types.Commit); ok { + r0 = rf(height) +@@ -109,6 +150,10 @@ func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit { + func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { + ret := _m.Called(height) + ++ if len(ret) == 0 { ++ panic("no return value specified for LoadBlockMeta") ++ } ++ + var r0 *types.BlockMeta + if rf, ok := ret.Get(0).(func(int64) *types.BlockMeta); ok { + r0 = rf(height) +@@ -121,10 +166,30 @@ func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { + return r0 + } + ++// LoadBlockMetaByHash provides a mock function with given fields: hash ++func (_m *BlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { ++ ret := _m.Called(hash) ++ ++ var r0 *types.BlockMeta ++ if rf, ok := ret.Get(0).(func([]byte) *types.BlockMeta); ok { ++ r0 = rf(hash) ++ } else { ++ if ret.Get(0) != nil { ++ r0 = ret.Get(0).(*types.BlockMeta) ++ } ++ } ++ ++ return r0 ++} ++ + // LoadBlockPart provides a mock function with given fields: height, index + func (_m *BlockStore) LoadBlockPart(height int64, index int) *types.Part { + ret := _m.Called(height, index) + ++ if len(ret) == 0 { ++ panic("no return value specified for LoadBlockPart") ++ } ++ + var r0 *types.Part + if rf, ok := ret.Get(0).(func(int64, int) *types.Part); ok { + r0 = rf(height, index) +@@ -141,6 +206,10 @@ func (_m *BlockStore) LoadBlockPart(height int64, index int) *types.Part { + func (_m *BlockStore) LoadSeenCommit(height int64) *types.Commit { + ret := _m.Called(height) + ++ if len(ret) == 0 { ++ panic("no return value specified for LoadSeenCommit") ++ } ++ + var r0 *types.Commit + if rf, ok := ret.Get(0).(func(int64) *types.Commit); ok { + r0 = rf(height) +@@ -157,14 +226,21 @@ func (_m *BlockStore) LoadSeenCommit(height int64) *types.Commit { + func (_m *BlockStore) PruneBlocks(height int64) (uint64, error) { + ret := _m.Called(height) + ++ if len(ret) == 0 { ++ 
panic("no return value specified for PruneBlocks") ++ } ++ + var r0 uint64 ++ var r1 error ++ if rf, ok := ret.Get(0).(func(int64) (uint64, error)); ok { ++ return rf(height) ++ } + if rf, ok := ret.Get(0).(func(int64) uint64); ok { + r0 = rf(height) + } else { + r0 = ret.Get(0).(uint64) + } + +- var r1 error + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(height) + } else { +@@ -174,6 +250,20 @@ func (_m *BlockStore) PruneBlocks(height int64) (uint64, error) { + return r0, r1 + } + ++// SaveTxInfo provides a mock function with given fields: block, txResponseCode ++func (_m *BlockStore) SaveTxInfo(block *types.Block, txResponseCodes []uint32, logs []string) error { ++ ret := _m.Called(block, txResponseCodes, logs) ++ ++ var r0 error ++ if rf, ok := ret.Get(0).(func(*types.Block, []uint32, []string) error); ok { ++ r0 = rf(block, txResponseCodes, logs) ++ } else { ++ r0 = ret.Error(0) ++ } ++ ++ return r0 ++} ++ + // SaveBlock provides a mock function with given fields: block, blockParts, seenCommit + func (_m *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { + _m.Called(block, blockParts, seenCommit) +@@ -183,6 +273,10 @@ func (_m *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s + func (_m *BlockStore) Size() int64 { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for Size") ++ } ++ + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() +@@ -193,13 +287,12 @@ func (_m *BlockStore) Size() int64 { + return r0 + } + +-type mockConstructorTestingTNewBlockStore interface { ++// NewBlockStore creates a new instance of BlockStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. ++// The first argument is typically a *testing.T value. ++func NewBlockStore(t interface { + mock.TestingT + Cleanup(func()) +-} +- +-// NewBlockStore creates a new instance of BlockStore. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +-func NewBlockStore(t mockConstructorTestingTNewBlockStore) *BlockStore { ++}) *BlockStore { + mock := &BlockStore{} + mock.Mock.Test(t) + diff --git a/patches/state/mocks/evidence_pool.go.patch b/patches/state/mocks/evidence_pool.go.patch new file mode 100644 index 00000000000..84fa453a1bb --- /dev/null +++ b/patches/state/mocks/evidence_pool.go.patch @@ -0,0 +1,68 @@ +diff --git a/state/mocks/evidence_pool.go b/state/mocks/evidence_pool.go +index 7279d36f7..bead6065a 100644 +--- a/state/mocks/evidence_pool.go ++++ b/state/mocks/evidence_pool.go +@@ -18,6 +18,10 @@ type EvidencePool struct { + func (_m *EvidencePool) AddEvidence(_a0 types.Evidence) error { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for AddEvidence") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func(types.Evidence) error); ok { + r0 = rf(_a0) +@@ -32,6 +36,10 @@ func (_m *EvidencePool) AddEvidence(_a0 types.Evidence) error { + func (_m *EvidencePool) CheckEvidence(_a0 types.EvidenceList) error { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for CheckEvidence") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func(types.EvidenceList) error); ok { + r0 = rf(_a0) +@@ -46,7 +54,15 @@ func (_m *EvidencePool) CheckEvidence(_a0 types.EvidenceList) error { + func (_m *EvidencePool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64) { + ret := _m.Called(maxBytes) + ++ if len(ret) == 0 { ++ panic("no return value specified for PendingEvidence") ++ } ++ + var r0 []types.Evidence ++ var r1 int64 ++ if rf, ok := ret.Get(0).(func(int64) ([]types.Evidence, int64)); ok { ++ return rf(maxBytes) ++ } + if rf, ok := ret.Get(0).(func(int64) []types.Evidence); ok { + r0 = rf(maxBytes) + } else { +@@ -55,7 +71,6 @@ func (_m *EvidencePool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64 + } + } + +- var r1 int64 + 
if rf, ok := ret.Get(1).(func(int64) int64); ok { + r1 = rf(maxBytes) + } else { +@@ -70,13 +85,12 @@ func (_m *EvidencePool) Update(_a0 state.State, _a1 types.EvidenceList) { + _m.Called(_a0, _a1) + } + +-type mockConstructorTestingTNewEvidencePool interface { ++// NewEvidencePool creates a new instance of EvidencePool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. ++// The first argument is typically a *testing.T value. ++func NewEvidencePool(t interface { + mock.TestingT + Cleanup(func()) +-} +- +-// NewEvidencePool creates a new instance of EvidencePool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +-func NewEvidencePool(t mockConstructorTestingTNewEvidencePool) *EvidencePool { ++}) *EvidencePool { + mock := &EvidencePool{} + mock.Mock.Test(t) + diff --git a/patches/state/mocks/store.go.patch b/patches/state/mocks/store.go.patch new file mode 100644 index 00000000000..8f1b25423f7 --- /dev/null +++ b/patches/state/mocks/store.go.patch @@ -0,0 +1,241 @@ +diff --git a/state/mocks/store.go b/state/mocks/store.go +index 8cbe49080..b232f0df9 100644 +--- a/state/mocks/store.go ++++ b/state/mocks/store.go +@@ -22,6 +22,10 @@ type Store struct { + func (_m *Store) Bootstrap(_a0 state.State) error { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for Bootstrap") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func(state.State) error); ok { + r0 = rf(_a0) +@@ -36,6 +40,10 @@ func (_m *Store) Bootstrap(_a0 state.State) error { + func (_m *Store) Close() error { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified for Close") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() +@@ -50,14 +58,21 @@ func (_m *Store) Close() error { + func (_m *Store) Load() (state.State, error) { + ret := _m.Called() + ++ if len(ret) == 0 { ++ panic("no return value specified 
for Load") ++ } ++ + var r0 state.State ++ var r1 error ++ if rf, ok := ret.Get(0).(func() (state.State, error)); ok { ++ return rf() ++ } + if rf, ok := ret.Get(0).(func() state.State); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(state.State) + } + +- var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { +@@ -71,7 +86,15 @@ func (_m *Store) Load() (state.State, error) { + func (_m *Store) LoadABCIResponses(_a0 int64) (*tendermintstate.ABCIResponses, error) { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for LoadABCIResponses") ++ } ++ + var r0 *tendermintstate.ABCIResponses ++ var r1 error ++ if rf, ok := ret.Get(0).(func(int64) (*tendermintstate.ABCIResponses, error)); ok { ++ return rf(_a0) ++ } + if rf, ok := ret.Get(0).(func(int64) *tendermintstate.ABCIResponses); ok { + r0 = rf(_a0) + } else { +@@ -80,7 +103,6 @@ func (_m *Store) LoadABCIResponses(_a0 int64) (*tendermintstate.ABCIResponses, e + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(_a0) + } else { +@@ -94,14 +116,21 @@ func (_m *Store) LoadABCIResponses(_a0 int64) (*tendermintstate.ABCIResponses, e + func (_m *Store) LoadConsensusParams(_a0 int64) (types.ConsensusParams, error) { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for LoadConsensusParams") ++ } ++ + var r0 types.ConsensusParams ++ var r1 error ++ if rf, ok := ret.Get(0).(func(int64) (types.ConsensusParams, error)); ok { ++ return rf(_a0) ++ } + if rf, ok := ret.Get(0).(func(int64) types.ConsensusParams); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(types.ConsensusParams) + } + +- var r1 error + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(_a0) + } else { +@@ -115,14 +144,21 @@ func (_m *Store) LoadConsensusParams(_a0 int64) (types.ConsensusParams, error) { + func (_m *Store) LoadFromDBOrGenesisDoc(_a0 *tenderminttypes.GenesisDoc) (state.State, error) { + ret := _m.Called(_a0) 
+ ++ if len(ret) == 0 { ++ panic("no return value specified for LoadFromDBOrGenesisDoc") ++ } ++ + var r0 state.State ++ var r1 error ++ if rf, ok := ret.Get(0).(func(*tenderminttypes.GenesisDoc) (state.State, error)); ok { ++ return rf(_a0) ++ } + if rf, ok := ret.Get(0).(func(*tenderminttypes.GenesisDoc) state.State); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(state.State) + } + +- var r1 error + if rf, ok := ret.Get(1).(func(*tenderminttypes.GenesisDoc) error); ok { + r1 = rf(_a0) + } else { +@@ -136,14 +172,21 @@ func (_m *Store) LoadFromDBOrGenesisDoc(_a0 *tenderminttypes.GenesisDoc) (state. + func (_m *Store) LoadFromDBOrGenesisFile(_a0 string) (state.State, error) { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for LoadFromDBOrGenesisFile") ++ } ++ + var r0 state.State ++ var r1 error ++ if rf, ok := ret.Get(0).(func(string) (state.State, error)); ok { ++ return rf(_a0) ++ } + if rf, ok := ret.Get(0).(func(string) state.State); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(state.State) + } + +- var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { +@@ -157,7 +200,15 @@ func (_m *Store) LoadFromDBOrGenesisFile(_a0 string) (state.State, error) { + func (_m *Store) LoadLastABCIResponse(_a0 int64) (*tendermintstate.ABCIResponses, error) { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for LoadLastABCIResponse") ++ } ++ + var r0 *tendermintstate.ABCIResponses ++ var r1 error ++ if rf, ok := ret.Get(0).(func(int64) (*tendermintstate.ABCIResponses, error)); ok { ++ return rf(_a0) ++ } + if rf, ok := ret.Get(0).(func(int64) *tendermintstate.ABCIResponses); ok { + r0 = rf(_a0) + } else { +@@ -166,7 +217,6 @@ func (_m *Store) LoadLastABCIResponse(_a0 int64) (*tendermintstate.ABCIResponses + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(_a0) + } else { +@@ -180,7 +230,15 @@ func (_m *Store) 
LoadLastABCIResponse(_a0 int64) (*tendermintstate.ABCIResponses + func (_m *Store) LoadValidators(_a0 int64) (*tenderminttypes.ValidatorSet, error) { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for LoadValidators") ++ } ++ + var r0 *tenderminttypes.ValidatorSet ++ var r1 error ++ if rf, ok := ret.Get(0).(func(int64) (*tenderminttypes.ValidatorSet, error)); ok { ++ return rf(_a0) ++ } + if rf, ok := ret.Get(0).(func(int64) *tenderminttypes.ValidatorSet); ok { + r0 = rf(_a0) + } else { +@@ -189,7 +247,6 @@ func (_m *Store) LoadValidators(_a0 int64) (*tenderminttypes.ValidatorSet, error + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(_a0) + } else { +@@ -203,6 +260,10 @@ func (_m *Store) LoadValidators(_a0 int64) (*tenderminttypes.ValidatorSet, error + func (_m *Store) PruneStates(_a0 int64, _a1 int64) error { + ret := _m.Called(_a0, _a1) + ++ if len(ret) == 0 { ++ panic("no return value specified for PruneStates") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func(int64, int64) error); ok { + r0 = rf(_a0, _a1) +@@ -217,6 +278,10 @@ func (_m *Store) PruneStates(_a0 int64, _a1 int64) error { + func (_m *Store) Save(_a0 state.State) error { + ret := _m.Called(_a0) + ++ if len(ret) == 0 { ++ panic("no return value specified for Save") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func(state.State) error); ok { + r0 = rf(_a0) +@@ -231,6 +296,10 @@ func (_m *Store) Save(_a0 state.State) error { + func (_m *Store) SaveABCIResponses(_a0 int64, _a1 *tendermintstate.ABCIResponses) error { + ret := _m.Called(_a0, _a1) + ++ if len(ret) == 0 { ++ panic("no return value specified for SaveABCIResponses") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func(int64, *tendermintstate.ABCIResponses) error); ok { + r0 = rf(_a0, _a1) +@@ -241,13 +310,12 @@ func (_m *Store) SaveABCIResponses(_a0 int64, _a1 *tendermintstate.ABCIResponses + return r0 + } + +-type mockConstructorTestingTNewStore 
interface { ++// NewStore creates a new instance of Store. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. ++// The first argument is typically a *testing.T value. ++func NewStore(t interface { + mock.TestingT + Cleanup(func()) +-} +- +-// NewStore creates a new instance of Store. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +-func NewStore(t mockConstructorTestingTNewStore) *Store { ++}) *Store { + mock := &Store{} + mock.Mock.Test(t) + diff --git a/patches/state/services.go.patch b/patches/state/services.go.patch new file mode 100644 index 00000000000..d945e8f8562 --- /dev/null +++ b/patches/state/services.go.patch @@ -0,0 +1,31 @@ +diff --git a/state/services.go b/state/services.go +index 2b6c16fed..22fd741de 100644 +--- a/state/services.go ++++ b/state/services.go +@@ -1,6 +1,7 @@ + package state + + import ( ++ cmtstore "github.com/tendermint/tendermint/proto/tendermint/store" + "github.com/tendermint/tendermint/types" + ) + +@@ -25,14 +26,18 @@ type BlockStore interface { + LoadBlock(height int64) *types.Block + + SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) ++ SaveTxInfo(block *types.Block, txResponseCodes []uint32, logs []string) error + + PruneBlocks(height int64) (uint64, error) + + LoadBlockByHash(hash []byte) *types.Block ++ LoadBlockMetaByHash(hash []byte) *types.BlockMeta + LoadBlockPart(height int64, index int) *types.Part + + LoadBlockCommit(height int64) *types.Commit + LoadSeenCommit(height int64) *types.Commit ++ ++ LoadTxInfo(hash []byte) *cmtstore.TxInfo + } + + //----------------------------------------------------------------------------- diff --git a/patches/state/state.go.patch b/patches/state/state.go.patch new file mode 100644 index 00000000000..e8a21ff9200 --- /dev/null +++ b/patches/state/state.go.patch @@ -0,0 +1,112 @@ +diff --git a/state/state.go b/state/state.go +index 
5fea7d224..3354f3eb4 100644 +--- a/state/state.go ++++ b/state/state.go +@@ -24,16 +24,15 @@ var ( + + //----------------------------------------------------------------------------- + +-// InitStateVersion sets the Consensus.Block and Software versions, +-// but leaves the Consensus.App version blank. +-// The Consensus.App version will be set during the Handshake, once +-// we hear from the app what protocol version it is running. +-var InitStateVersion = cmtstate.Version{ +- Consensus: cmtversion.Consensus{ +- Block: version.BlockProtocol, +- App: 0, +- }, +- Software: version.TMCoreSemVer, ++// InitStateVersion sets the Consensus.Block, Consensus.App and Software versions ++func InitStateVersion(appVersion uint64) cmtstate.Version { ++ return cmtstate.Version{ ++ Consensus: cmtversion.Consensus{ ++ Block: version.BlockProtocol, ++ App: appVersion, ++ }, ++ Software: version.TMCoreSemVer, ++ } + } + + //----------------------------------------------------------------------------- +@@ -78,6 +77,10 @@ type State struct { + + // the latest AppHash we've received from calling abci.Commit() + AppHash []byte ++ ++ // timeouts received from app, after ABCI EndBlock call, to be used in the next height ++ TimeoutPropose time.Duration ++ TimeoutCommit time.Duration + } + + // Copy makes a copy of the State for mutating. 
+@@ -102,6 +105,9 @@ func (state State) Copy() State { + AppHash: state.AppHash, + + LastResultsHash: state.LastResultsHash, ++ ++ TimeoutCommit: state.TimeoutCommit, ++ TimeoutPropose: state.TimeoutPropose, + } + } + +@@ -171,6 +177,9 @@ func (state *State) ToProto() (*cmtstate.State, error) { + sm.LastResultsHash = state.LastResultsHash + sm.AppHash = state.AppHash + ++ sm.Timeouts.TimeoutPropose = state.TimeoutPropose ++ sm.Timeouts.TimeoutCommit = state.TimeoutCommit ++ + return sm, nil + } + +@@ -221,6 +230,8 @@ func FromProto(pb *cmtstate.State) (*State, error) { //nolint:golint + state.LastHeightConsensusParamsChanged = pb.LastHeightConsensusParamsChanged + state.LastResultsHash = pb.LastResultsHash + state.AppHash = pb.AppHash ++ state.TimeoutCommit = pb.Timeouts.TimeoutCommit ++ state.TimeoutPropose = pb.Timeouts.TimeoutPropose + + return state, nil + } +@@ -233,13 +244,13 @@ func FromProto(pb *cmtstate.State) (*State, error) { //nolint:golint + // track rounds, and hence does not know the correct proposer. TODO: fix this! + func (state State) MakeBlock( + height int64, +- txs []types.Tx, ++ data types.Data, + commit *types.Commit, + evidence []types.Evidence, + proposerAddress []byte, + ) (*types.Block, *types.PartSet) { + // Build base block with block data. +- block := types.MakeBlock(height, txs, commit, evidence) ++ block := types.MakeBlock(height, data, commit, evidence) + + // Set time. 
+ var timestamp time.Time +@@ -332,8 +343,10 @@ func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { + nextValidatorSet = types.NewValidatorSet(validators).CopyIncrementProposerPriority(1) + } + ++ appVersion := getAppVersion(genDoc) ++ + return State{ +- Version: InitStateVersion, ++ Version: InitStateVersion(appVersion), + ChainID: genDoc.ChainID, + InitialHeight: genDoc.InitialHeight, + +@@ -352,3 +365,13 @@ func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { + AppHash: genDoc.AppHash, + }, nil + } ++ ++func getAppVersion(genDoc *types.GenesisDoc) uint64 { ++ if genDoc.ConsensusParams != nil && ++ genDoc.ConsensusParams.Version.AppVersion != 0 { ++ return genDoc.ConsensusParams.Version.AppVersion ++ } ++ // Default to app version 1 because some chains (e.g. mocha-4) did not set ++ // an explicit app version in genesis.json. ++ return uint64(1) ++} diff --git a/patches/state/state_test.go.patch b/patches/state/state_test.go.patch new file mode 100644 index 00000000000..1962089aba4 --- /dev/null +++ b/patches/state/state_test.go.patch @@ -0,0 +1,185 @@ +diff --git a/state/state_test.go b/state/state_test.go +index 4ce87ddce..67c22cf29 100644 +--- a/state/state_test.go ++++ b/state/state_test.go +@@ -7,6 +7,7 @@ import ( + "math/big" + "os" + "testing" ++ "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +@@ -22,6 +23,7 @@ import ( + cmtproto "github.com/tendermint/tendermint/proto/tendermint/types" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" ++ "github.com/tendermint/tendermint/version" + ) + + // setupTestCase does setup common to all test cases. 
+@@ -48,17 +50,54 @@ func TestStateCopy(t *testing.T) { + tearDown, _, state := setupTestCase(t) + defer tearDown(t) + assert := assert.New(t) ++ // the timeouts coming from the setupTestCase are 0, ++ // we change it here just to ensure that they have non-zero values in the ++ // tests below ++ state.TimeoutPropose = 10 * time.Second ++ state.TimeoutCommit = 20 * time.Second ++ ++ tests := []struct { ++ name string ++ modifyState func(sm.State) sm.State ++ expected bool ++ }{ ++ { ++ name: "no modification", ++ modifyState: func(s sm.State) sm.State { ++ stateCopy := s.Copy() ++ return stateCopy ++ }, ++ expected: true, ++ }, ++ { ++ name: "modify block height and validators", ++ modifyState: func(s sm.State) sm.State { ++ stateCopy := s.Copy() ++ stateCopy.LastBlockHeight++ ++ stateCopy.LastValidators = s.Validators ++ return stateCopy ++ }, ++ expected: false, ++ }, ++ { ++ name: "modify timeouts", ++ modifyState: func(s sm.State) sm.State { ++ stateCopy := s.Copy() ++ stateCopy.TimeoutPropose = 1 * time.Second ++ stateCopy.TimeoutCommit = 2 * time.Second ++ return stateCopy ++ }, ++ expected: false, ++ }, ++ } ++ for _, tt := range tests { ++ t.Run(tt.name, func(t *testing.T) { ++ newState := tt.modifyState(state) ++ assert.Equal(tt.expected, state.Equals(newState), ++ fmt.Sprintf("expected state: %v\n got: %v\n", state, newState)) ++ }) + +- stateCopy := state.Copy() +- +- assert.True(state.Equals(stateCopy), +- fmt.Sprintf("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n", +- stateCopy, state)) +- +- stateCopy.LastBlockHeight++ +- stateCopy.LastValidators = state.Validators +- assert.False(state.Equals(stateCopy), fmt.Sprintf(`expected states to be different. got same +- %v`, state)) ++ } + } + + // TestMakeGenesisStateNilValidators tests state's consistency when genesis file's validators field is nil. 
+@@ -74,6 +113,33 @@ func TestMakeGenesisStateNilValidators(t *testing.T) { + require.Equal(t, 0, len(state.NextValidators.Validators)) + } + ++func TestMakeGenesisStateSetsAppVersion(t *testing.T) { ++ cp := types.DefaultConsensusParams() ++ appVersion := uint64(5) ++ cp.Version.AppVersion = appVersion ++ doc := types.GenesisDoc{ ++ ChainID: "dummy", ++ ConsensusParams: cp, ++ } ++ require.Nil(t, doc.ValidateAndComplete()) ++ state, err := sm.MakeGenesisState(&doc) ++ require.Nil(t, err) ++ require.Equal(t, appVersion, state.Version.Consensus.App) ++ require.Equal(t, version.BlockProtocol, state.Version.Consensus.Block) ++ t.Run("MakeGenesisState defaults to 1 if app version is not set", func(t *testing.T) { ++ cp := types.DefaultConsensusParams() ++ cp.Version = cmtproto.VersionParams{} // zero value ++ doc := types.GenesisDoc{ ++ ChainID: "chain-id", ++ ConsensusParams: cp, ++ } ++ require.NoError(t, doc.ValidateAndComplete()) ++ state, err := sm.MakeGenesisState(&doc) ++ require.NoError(t, err) ++ require.Equal(t, uint64(1), state.Version.Consensus.App) ++ }) ++} ++ + // TestStateSaveLoad tests saving and loading State from a db. 
+ func TestStateSaveLoad(t *testing.T) { + tearDown, stateDB, state := setupTestCase(t) +@@ -83,6 +149,11 @@ func TestStateSaveLoad(t *testing.T) { + }) + assert := assert.New(t) + ++ // the timeouts coming from the setupTestCase are 0, ++ // we change it here just to ensure that they have non-zero values in the ++ // tests below ++ state.TimeoutCommit = 10 * time.Second ++ state.TimeoutPropose = 5 * time.Second + state.LastBlockHeight++ + state.LastValidators = state.Validators + err := stateStore.Save(state) +@@ -93,6 +164,11 @@ func TestStateSaveLoad(t *testing.T) { + assert.True(state.Equals(loadedState), + fmt.Sprintf("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n", + loadedState, state)) ++ ++ // the following assertions are just for additional assurance ++ assert.Equal(state.TimeoutCommit, loadedState.TimeoutCommit) ++ assert.Equal(state.TimeoutPropose, loadedState.TimeoutPropose, fmt.Sprintf("expected TimeoutPropose to be equal."+ ++ "\ngot: %v\nexpected: %v\n", loadedState.TimeoutPropose, state.TimeoutPropose)) + } + + // TestABCIResponsesSaveLoad tests saving and loading ABCIResponses. 
+@@ -115,9 +191,9 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { + + abciResponses.DeliverTxs[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Events: nil} + abciResponses.DeliverTxs[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Events: nil} +- abciResponses.EndBlock = &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{ +- types.TM2PB.NewValidatorUpdate(ed25519.GenPrivKey().PubKey(), 10), +- }} ++ abciResponses.EndBlock = &abci.ResponseEndBlock{ ++ ValidatorUpdates: []abci.ValidatorUpdate{types.TM2PB.NewValidatorUpdate(ed25519.GenPrivKey().PubKey(), 10)}, ++ Timeouts: abci.TimeoutsInfo{TimeoutPropose: 1 * time.Second, TimeoutCommit: 2 * time.Second}} + + err := stateStore.SaveABCIResponses(block.Height, abciResponses) + require.NoError(t, err) +@@ -126,6 +202,7 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { + assert.Equal(abciResponses, loadedABCIResponses, + fmt.Sprintf("ABCIResponses don't match:\ngot: %v\nexpected: %v\n", + loadedABCIResponses, abciResponses)) ++ + } + + // TestResultsSaveLoad tests saving and loading ABCI results. 
+@@ -1073,6 +1150,12 @@ func TestStateProto(t *testing.T) { + tearDown, _, state := setupTestCase(t) + defer tearDown(t) + ++ // for assurance, ++ // we make another state with non-zero timeouts to see if conversion works ++ stateCopyWithTimeouts := state.Copy() ++ stateCopyWithTimeouts.TimeoutCommit = 10 * time.Second ++ stateCopyWithTimeouts.TimeoutPropose = 11 * time.Second ++ + tc := []struct { + testName string + state *sm.State +@@ -1082,6 +1165,7 @@ func TestStateProto(t *testing.T) { + {"empty state", &sm.State{}, true, false}, + {"nil failure state", nil, false, false}, + {"success state", &state, true, true}, ++ {"success state with timeouts", &stateCopyWithTimeouts, true, true}, + } + + for _, tt := range tc { diff --git a/patches/state/store.go.patch b/patches/state/store.go.patch new file mode 100644 index 00000000000..0629300a6ce --- /dev/null +++ b/patches/state/store.go.patch @@ -0,0 +1,75 @@ +diff --git a/state/store.go b/state/store.go +index 3fbadccdc..7b5ec202a 100644 +--- a/state/store.go ++++ b/state/store.go +@@ -87,7 +87,7 @@ type dbStore struct { + + type StoreOptions struct { + +- // DiscardABCIResponses determines whether or not the store ++ // DiscardABCIResponses determines whether the store + // retains all ABCIResponses. If DiscardABCiResponses is enabled, + // the store will maintain only the response object from the latest + // height. +@@ -101,7 +101,7 @@ func NewStore(db dbm.DB, options StoreOptions) Store { + return dbStore{db, options} + } + +-// LoadStateFromDBOrGenesisFile loads the most recent state from the database, ++// LoadFromDBOrGenesisFile loads the most recent state from the database, + // or creates a new one from the given genesisFilePath. 
+ func (store dbStore) LoadFromDBOrGenesisFile(genesisFilePath string) (State, error) { + state, err := store.Load() +@@ -119,7 +119,7 @@ func (store dbStore) LoadFromDBOrGenesisFile(genesisFilePath string) (State, err + return state, nil + } + +-// LoadStateFromDBOrGenesisDoc loads the most recent state from the database, ++// LoadFromDBOrGenesisDoc loads the most recent state from the database, + // or creates a new one from the given genesisDoc. + func (store dbStore) LoadFromDBOrGenesisDoc(genesisDoc *types.GenesisDoc) (State, error) { + state, err := store.Load() +@@ -138,7 +138,7 @@ func (store dbStore) LoadFromDBOrGenesisDoc(genesisDoc *types.GenesisDoc) (State + return state, nil + } + +-// LoadState loads the State from the database. ++// Load loads the State from the database. + func (store dbStore) Load() (State, error) { + return store.loadState(stateKey) + } +@@ -196,6 +196,7 @@ func (store dbStore) save(state State, key []byte) error { + state.LastHeightConsensusParamsChanged, state.ConsensusParams); err != nil { + return err + } ++ + err := store.db.SetSync(key, state.Bytes()) + if err != nil { + return err +@@ -203,7 +204,7 @@ func (store dbStore) save(state State, key []byte) error { + return nil + } + +-// BootstrapState saves a new state, used e.g. by state sync when starting from non-zero height. ++// Bootstrap saves a new state, used e.g. by state sync when starting from non-zero height. + func (store dbStore) Bootstrap(state State) error { + height := state.LastBlockHeight + 1 + if height == 1 { +@@ -236,7 +237,7 @@ func (store dbStore) Bootstrap(state State) error { + // guaranteed to delete all states, since the last checkpointed state and states being pointed to by + // e.g. `LastHeightChanged` must remain. The state at to must also exist. 
+ // +-// The from parameter is necessary since we can't do a key scan in a performant way due to the key ++// The `from` parameter is necessary since we can't do a key scan in a performant way due to the key + // encoding not preserving ordering: https://github.com/tendermint/tendermint/issues/4567 + // This will cause some old states to be left behind when doing incremental partial prunes, + // specifically older checkpoints and LastHeightChanged targets. +@@ -403,7 +404,7 @@ func (store dbStore) LoadABCIResponses(height int64) (*cmtstate.ABCIResponses, e + return abciResponses, nil + } + +-// LoadLastABCIResponses loads the ABCIResponses from the most recent height. ++// LoadLastABCIResponse loads the ABCIResponses from the most recent height. + // The height parameter is used to ensure that the response corresponds to the latest height. + // If not, an error is returned. + // diff --git a/patches/state/test/factory/block.go.patch b/patches/state/test/factory/block.go.patch new file mode 100644 index 00000000000..90c139af14d --- /dev/null +++ b/patches/state/test/factory/block.go.patch @@ -0,0 +1,113 @@ +diff --git a/state/test/factory/block.go b/state/test/factory/block.go +new file mode 100644 +index 000000000..0ad807e86 +--- /dev/null ++++ b/state/test/factory/block.go +@@ -0,0 +1,107 @@ ++package factory ++ ++import ( ++ "time" ++ ++ sm "github.com/tendermint/tendermint/state" ++ "github.com/tendermint/tendermint/test/factory" ++ ++ tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ++ "github.com/tendermint/tendermint/types" ++) ++ ++func MakeBlocks(n int, state *sm.State, privVal types.PrivValidator) []*types.Block { ++ blocks := make([]*types.Block, 0) ++ ++ var ( ++ prevBlock *types.Block ++ prevBlockMeta *types.BlockMeta ++ ) ++ ++ appHeight := byte(0x01) ++ for i := 0; i < n; i++ { ++ height := int64(i + 1) ++ ++ block, parts := makeBlockAndPartSet(*state, prevBlock, prevBlockMeta, privVal, height) ++ blocks = append(blocks, block) ++ ++ 
prevBlock = block ++ prevBlockMeta = types.NewBlockMeta(block, parts) ++ ++ // update state ++ state.AppHash = []byte{appHeight} ++ appHeight++ ++ state.LastBlockHeight = height ++ } ++ ++ return blocks ++} ++ ++func MakeBlock(state sm.State, height int64, c *types.Commit) *types.Block { ++ block, _ := state.MakeBlock( ++ height, ++ MakeData(factory.MakeTenTxs(state.LastBlockHeight)), ++ c, ++ nil, ++ state.Validators.GetProposer().Address, ++ ) ++ return block ++} ++ ++func MakeData(txs []types.Tx) types.Data { ++ return types.Data{ ++ Txs: txs, ++ } ++} ++ ++func makeBlockAndPartSet(state sm.State, lastBlock *types.Block, lastBlockMeta *types.BlockMeta, ++ privVal types.PrivValidator, height int64) (*types.Block, *types.PartSet) { ++ ++ lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, nil) ++ if height > 1 { ++ vote, _ := MakeVote( ++ privVal, ++ lastBlock.Header.ChainID, ++ 1, lastBlock.Header.Height, 0, 2, ++ lastBlockMeta.BlockID, ++ time.Now()) ++ lastCommit = types.NewCommit(vote.Height, vote.Round, ++ lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()}) ++ } ++ ++ return state.MakeBlock(height, MakeData([]types.Tx{}), lastCommit, nil, state.Validators.GetProposer().Address) ++} ++ ++func MakeVote( ++ val types.PrivValidator, ++ chainID string, ++ valIndex int32, ++ height int64, ++ round int32, ++ step int, ++ blockID types.BlockID, ++ time time.Time, ++) (*types.Vote, error) { ++ pubKey, err := val.GetPubKey() ++ if err != nil { ++ return nil, err ++ } ++ v := &types.Vote{ ++ ValidatorAddress: pubKey.Address(), ++ ValidatorIndex: valIndex, ++ Height: height, ++ Round: round, ++ //nolint:gosec ++ Type: tmproto.SignedMsgType(step), ++ BlockID: blockID, ++ Timestamp: time, ++ } ++ ++ vpb := v.ToProto() ++ err = val.SignVote(chainID, vpb) ++ if err != nil { ++ panic(err) ++ } ++ v.Signature = vpb.Signature ++ return v, nil ++} diff --git a/patches/state/txindex/kv/kv.go.patch b/patches/state/txindex/kv/kv.go.patch new file mode 100644 index 
00000000000..721207add8c --- /dev/null +++ b/patches/state/txindex/kv/kv.go.patch @@ -0,0 +1,93 @@ +diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go +index 8ed57934d..e6d5d164e 100644 +--- a/state/txindex/kv/kv.go ++++ b/state/txindex/kv/kv.go +@@ -73,26 +73,7 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { + defer storeBatch.Close() + + for _, result := range b.Ops { +- hash := types.Tx(result.Tx).Hash() +- +- // index tx by events +- err := txi.indexEvents(result, hash, storeBatch) +- if err != nil { +- return err +- } +- +- // index by height (always) +- err = storeBatch.Set(keyForHeight(result), hash) +- if err != nil { +- return err +- } +- +- rawBytes, err := proto.Marshal(result) +- if err != nil { +- return err +- } +- // index by hash (always) +- err = storeBatch.Set(hash, rawBytes) ++ err := txi.indexResult(storeBatch, result) + if err != nil { + return err + } +@@ -181,6 +162,47 @@ func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store dbm.Ba + return nil + } + ++func (txi *TxIndex) indexResult(batch dbm.Batch, result *abci.TxResult) error { ++ hash := types.Tx(result.Tx).Hash() ++ ++ rawBytes, err := proto.Marshal(result) ++ if err != nil { ++ return err ++ } ++ ++ if !result.Result.IsOK() { ++ oldResult, err := txi.Get(hash) ++ if err != nil { ++ return err ++ } ++ ++ // if the new transaction failed and it's already indexed in an older block and was successful ++ // we skip it as we want users to get the older successful transaction when they query. 
++ if oldResult != nil && oldResult.Result.Code == abci.CodeTypeOK { ++ return nil ++ } ++ } ++ ++ // index tx by events ++ err = txi.indexEvents(result, hash, batch) ++ if err != nil { ++ return err ++ } ++ ++ // index by height (always) ++ err = batch.Set(keyForHeight(result), hash) ++ if err != nil { ++ return err ++ } ++ ++ // index by hash (always) ++ err = batch.Set(hash, rawBytes) ++ if err != nil { ++ return err ++ } ++ return nil ++} ++ + // Search performs a search using the given query. + // + // It breaks the query into conditions (like "tx.height > 5"). For each +@@ -360,7 +382,11 @@ func (txi *TxIndex) setTmpHashes(tmpHeights map[string][]byte, it dbm.Iterator, + eventSeq := extractEventSeqFromKey(it.Key()) + tmpHeights[string(it.Value())+eventSeq] = it.Value() + } else { +- tmpHeights[string(it.Value())] = it.Value() ++ // Copy it.Value() to ensure tmpHeights stores independent values, as iterators reuse ++ // the same memory for it.Value(), causing overwrites on each iteration. 
++ valueCopy := make([]byte, len(it.Value())) ++ copy(valueCopy, it.Value()) ++ tmpHeights[string(it.Value())] = valueCopy + } + } + diff --git a/patches/state/txindex/kv/kv_test.go.patch b/patches/state/txindex/kv/kv_test.go.patch new file mode 100644 index 00000000000..ca719111e18 --- /dev/null +++ b/patches/state/txindex/kv/kv_test.go.patch @@ -0,0 +1,60 @@ +diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go +index 60b4047fc..d805cc3c8 100644 +--- a/state/txindex/kv/kv_test.go ++++ b/state/txindex/kv/kv_test.go +@@ -136,6 +136,35 @@ func TestTxIndex(t *testing.T) { + assert.True(t, proto.Equal(txResult2, loadedTxResult2)) + } + ++func TestWrappedTxIndex(t *testing.T) { ++ indexer := NewTxIndex(db.NewMemDB()) ++ ++ tx := types.Tx("HELLO WORLD") ++ wrappedTx, err := types.MarshalIndexWrapper(tx, 11) ++ require.NoError(t, err) ++ txResult := &abci.TxResult{ ++ Height: 1, ++ Index: 0, ++ Tx: wrappedTx, ++ Result: abci.ResponseDeliverTx{ ++ Data: []byte{0}, ++ Code: abci.CodeTypeOK, Log: "", Events: nil, ++ }, ++ } ++ hash := tx.Hash() ++ ++ batch := txindex.NewBatch(1) ++ if err := batch.Add(txResult); err != nil { ++ t.Error(err) ++ } ++ err = indexer.AddBatch(batch) ++ require.NoError(t, err) ++ ++ loadedTxResult, err := indexer.Get(hash) ++ require.NoError(t, err) ++ assert.True(t, proto.Equal(txResult, loadedTxResult)) ++} ++ + func TestTxSearch(t *testing.T) { + indexer := NewTxIndex(db.NewMemDB()) + +@@ -663,8 +692,18 @@ func TestTxSearchMultipleTxs(t *testing.T) { + + results, err := indexer.Search(ctx, query.MustParse("account.number >= 1")) + assert.NoError(t, err) +- + require.Len(t, results, 3) ++ ++ // since two txs were added at height 1 and 2, we should have two unique transactions ++ // for both heights ++ results, err = indexer.Search(ctx, query.MustParse("tx.height=1")) ++ assert.NoError(t, err) ++ require.Len(t, results, 2) ++ ++ results, err = indexer.Search(ctx, query.MustParse("tx.height=2")) ++ assert.NoError(t, err) ++ 
require.Len(t, results, 2) ++ + } + + func txResultWithEvents(events []abci.Event) *abci.TxResult { diff --git a/patches/state/txindex/mocks/tx_indexer.go.patch b/patches/state/txindex/mocks/tx_indexer.go.patch new file mode 100644 index 00000000000..031b0f60ea8 --- /dev/null +++ b/patches/state/txindex/mocks/tx_indexer.go.patch @@ -0,0 +1,92 @@ +diff --git a/state/txindex/mocks/tx_indexer.go b/state/txindex/mocks/tx_indexer.go +index 93d0eb9c2..4cf73d82a 100644 +--- a/state/txindex/mocks/tx_indexer.go ++++ b/state/txindex/mocks/tx_indexer.go +@@ -22,6 +22,10 @@ type TxIndexer struct { + func (_m *TxIndexer) AddBatch(b *txindex.Batch) error { + ret := _m.Called(b) + ++ if len(ret) == 0 { ++ panic("no return value specified for AddBatch") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func(*txindex.Batch) error); ok { + r0 = rf(b) +@@ -36,7 +40,15 @@ func (_m *TxIndexer) AddBatch(b *txindex.Batch) error { + func (_m *TxIndexer) Get(hash []byte) (*types.TxResult, error) { + ret := _m.Called(hash) + ++ if len(ret) == 0 { ++ panic("no return value specified for Get") ++ } ++ + var r0 *types.TxResult ++ var r1 error ++ if rf, ok := ret.Get(0).(func([]byte) (*types.TxResult, error)); ok { ++ return rf(hash) ++ } + if rf, ok := ret.Get(0).(func([]byte) *types.TxResult); ok { + r0 = rf(hash) + } else { +@@ -45,7 +57,6 @@ func (_m *TxIndexer) Get(hash []byte) (*types.TxResult, error) { + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(hash) + } else { +@@ -59,6 +70,10 @@ func (_m *TxIndexer) Get(hash []byte) (*types.TxResult, error) { + func (_m *TxIndexer) Index(result *types.TxResult) error { + ret := _m.Called(result) + ++ if len(ret) == 0 { ++ panic("no return value specified for Index") ++ } ++ + var r0 error + if rf, ok := ret.Get(0).(func(*types.TxResult) error); ok { + r0 = rf(result) +@@ -73,7 +88,15 @@ func (_m *TxIndexer) Index(result *types.TxResult) error { + func (_m *TxIndexer) Search(ctx context.Context, q 
*query.Query) ([]*types.TxResult, error) { + ret := _m.Called(ctx, q) + ++ if len(ret) == 0 { ++ panic("no return value specified for Search") ++ } ++ + var r0 []*types.TxResult ++ var r1 error ++ if rf, ok := ret.Get(0).(func(context.Context, *query.Query) ([]*types.TxResult, error)); ok { ++ return rf(ctx, q) ++ } + if rf, ok := ret.Get(0).(func(context.Context, *query.Query) []*types.TxResult); ok { + r0 = rf(ctx, q) + } else { +@@ -82,7 +105,6 @@ func (_m *TxIndexer) Search(ctx context.Context, q *query.Query) ([]*types.TxRes + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *query.Query) error); ok { + r1 = rf(ctx, q) + } else { +@@ -92,13 +114,12 @@ func (_m *TxIndexer) Search(ctx context.Context, q *query.Query) ([]*types.TxRes + return r0, r1 + } + +-type mockConstructorTestingTNewTxIndexer interface { ++// NewTxIndexer creates a new instance of TxIndexer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. ++// The first argument is typically a *testing.T value. ++func NewTxIndexer(t interface { + mock.TestingT + Cleanup(func()) +-} +- +-// NewTxIndexer creates a new instance of TxIndexer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+-func NewTxIndexer(t mockConstructorTestingTNewTxIndexer) *TxIndexer { ++}) *TxIndexer { + mock := &TxIndexer{} + mock.Mock.Test(t) + diff --git a/patches/state/validation_test.go.patch b/patches/state/validation_test.go.patch new file mode 100644 index 00000000000..93afbef8e6f --- /dev/null +++ b/patches/state/validation_test.go.patch @@ -0,0 +1,102 @@ +diff --git a/state/validation_test.go b/state/validation_test.go +index 209b88800..0014f45da 100644 +--- a/state/validation_test.go ++++ b/state/validation_test.go +@@ -16,6 +16,7 @@ import ( + cmtproto "github.com/tendermint/tendermint/proto/tendermint/types" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/state/mocks" ++ "github.com/tendermint/tendermint/test/factory" + "github.com/tendermint/tendermint/types" + cmttime "github.com/tendermint/tendermint/types/time" + ) +@@ -60,7 +61,6 @@ func TestValidateBlockHeader(t *testing.T) { + + {"LastBlockID wrong", func(block *types.Block) { block.LastBlockID.PartSetHeader.Total += 10 }}, + {"LastCommitHash wrong", func(block *types.Block) { block.LastCommitHash = wrongHash }}, +- {"DataHash wrong", func(block *types.Block) { block.DataHash = wrongHash }}, + + {"ValidatorsHash wrong", func(block *types.Block) { block.ValidatorsHash = wrongHash }}, + {"NextValidatorsHash wrong", func(block *types.Block) { block.NextValidatorsHash = wrongHash }}, +@@ -80,7 +80,7 @@ func TestValidateBlockHeader(t *testing.T) { + Invalid blocks don't pass + */ + for _, tc := range testCases { +- block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, proposerAddr) ++ block, _ := state.MakeBlock(height, factory.MakeData(makeTxs(height)), lastCommit, nil, proposerAddr) + tc.malleateBlock(block) + err := blockExec.ValidateBlock(state, block) + require.Error(t, err, tc.name) +@@ -93,6 +93,19 @@ func TestValidateBlockHeader(t *testing.T) { + state, _, lastCommit, err = makeAndCommitGoodBlock(state, height, lastCommit, proposerAddr, blockExec, 
privVals, nil) + require.NoError(t, err, "height %d", height) + } ++ ++ nextHeight := validationTestsStopHeight ++ block, _ := state.MakeBlock( ++ nextHeight, ++ factory.MakeData(factory.MakeTenTxs(nextHeight)), ++ lastCommit, ++ nil, ++ state.Validators.GetProposer().Address, ++ ) ++ state.InitialHeight = nextHeight + 1 ++ err := blockExec.ValidateBlock(state, block) ++ require.Error(t, err, "expected an error when state is ahead of block") ++ assert.Contains(t, err.Error(), "lower than initial height") + } + + func TestValidateBlockCommit(t *testing.T) { +@@ -137,7 +150,13 @@ func TestValidateBlockCommit(t *testing.T) { + state.LastBlockID, + []types.CommitSig{wrongHeightVote.CommitSig()}, + ) +- block, _ := state.MakeBlock(height, makeTxs(height), wrongHeightCommit, nil, proposerAddr) ++ block, _ := state.MakeBlock( ++ height, ++ factory.MakeData(factory.MakeTenTxs(height)), ++ wrongHeightCommit, ++ nil, ++ proposerAddr, ++ ) + err = blockExec.ValidateBlock(state, block) + _, isErrInvalidCommitHeight := err.(types.ErrInvalidCommitHeight) + require.True(t, isErrInvalidCommitHeight, "expected ErrInvalidCommitHeight at height %d but got: %v", height, err) +@@ -145,7 +164,13 @@ func TestValidateBlockCommit(t *testing.T) { + /* + #2589: test len(block.LastCommit.Signatures) == state.LastValidators.Size() + */ +- block, _ = state.MakeBlock(height, makeTxs(height), wrongSigsCommit, nil, proposerAddr) ++ block, _ = state.MakeBlock( ++ height, ++ factory.MakeData(factory.MakeTenTxs(height)), ++ wrongSigsCommit, ++ nil, ++ proposerAddr, ++ ) + err = blockExec.ValidateBlock(state, block) + _, isErrInvalidCommitSignatures := err.(types.ErrInvalidCommitSignatures) + require.True(t, isErrInvalidCommitSignatures, +@@ -211,6 +236,7 @@ func TestValidateBlockCommit(t *testing.T) { + } + } + ++// TODO potentially delete + func TestValidateBlockEvidence(t *testing.T) { + proxyApp := newTestApp() + require.NoError(t, proxyApp.Start()) +@@ -254,7 +280,13 @@ func 
TestValidateBlockEvidence(t *testing.T) { + evidence = append(evidence, newEv) + currentBytes += int64(len(newEv.Bytes())) + } +- block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, evidence, proposerAddr) ++ block, _ := state.MakeBlock( ++ height, ++ factory.MakeData(factory.MakeTenTxs(height)), ++ lastCommit, ++ evidence, ++ proposerAddr, ++ ) + err := blockExec.ValidateBlock(state, block) + if assert.Error(t, err) { + _, ok := err.(*types.ErrEvidenceOverflow) diff --git a/patches/statesync/chunks.go.patch b/patches/statesync/chunks.go.patch new file mode 100644 index 00000000000..8073992639c --- /dev/null +++ b/patches/statesync/chunks.go.patch @@ -0,0 +1,12 @@ +diff --git a/statesync/chunks.go b/statesync/chunks.go +index 4948f10b8..095a55770 100644 +--- a/statesync/chunks.go ++++ b/statesync/chunks.go +@@ -108,6 +108,7 @@ func (q *chunkQueue) Allocate() (uint32, error) { + if q.snapshot == nil { + return 0, errDone + } ++ //nolint:gosec + if uint32(len(q.chunkAllocated)) >= q.snapshot.Chunks { + return 0, errDone + } diff --git a/patches/statesync/mocks/state_provider.go.patch b/patches/statesync/mocks/state_provider.go.patch new file mode 100644 index 00000000000..35fd096a959 --- /dev/null +++ b/patches/statesync/mocks/state_provider.go.patch @@ -0,0 +1,93 @@ +diff --git a/statesync/mocks/state_provider.go b/statesync/mocks/state_provider.go +index f52b9e33d..f0313c65d 100644 +--- a/statesync/mocks/state_provider.go ++++ b/statesync/mocks/state_provider.go +@@ -20,7 +20,15 @@ type StateProvider struct { + func (_m *StateProvider) AppHash(ctx context.Context, height uint64) ([]byte, error) { + ret := _m.Called(ctx, height) + ++ if len(ret) == 0 { ++ panic("no return value specified for AppHash") ++ } ++ + var r0 []byte ++ var r1 error ++ if rf, ok := ret.Get(0).(func(context.Context, uint64) ([]byte, error)); ok { ++ return rf(ctx, height) ++ } + if rf, ok := ret.Get(0).(func(context.Context, uint64) []byte); ok { + r0 = rf(ctx, height) + } else { 
+@@ -29,7 +37,6 @@ func (_m *StateProvider) AppHash(ctx context.Context, height uint64) ([]byte, er + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, height) + } else { +@@ -43,7 +50,15 @@ func (_m *StateProvider) AppHash(ctx context.Context, height uint64) ([]byte, er + func (_m *StateProvider) Commit(ctx context.Context, height uint64) (*types.Commit, error) { + ret := _m.Called(ctx, height) + ++ if len(ret) == 0 { ++ panic("no return value specified for Commit") ++ } ++ + var r0 *types.Commit ++ var r1 error ++ if rf, ok := ret.Get(0).(func(context.Context, uint64) (*types.Commit, error)); ok { ++ return rf(ctx, height) ++ } + if rf, ok := ret.Get(0).(func(context.Context, uint64) *types.Commit); ok { + r0 = rf(ctx, height) + } else { +@@ -52,7 +67,6 @@ func (_m *StateProvider) Commit(ctx context.Context, height uint64) (*types.Comm + } + } + +- var r1 error + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, height) + } else { +@@ -66,14 +80,21 @@ func (_m *StateProvider) Commit(ctx context.Context, height uint64) (*types.Comm + func (_m *StateProvider) State(ctx context.Context, height uint64) (state.State, error) { + ret := _m.Called(ctx, height) + ++ if len(ret) == 0 { ++ panic("no return value specified for State") ++ } ++ + var r0 state.State ++ var r1 error ++ if rf, ok := ret.Get(0).(func(context.Context, uint64) (state.State, error)); ok { ++ return rf(ctx, height) ++ } + if rf, ok := ret.Get(0).(func(context.Context, uint64) state.State); ok { + r0 = rf(ctx, height) + } else { + r0 = ret.Get(0).(state.State) + } + +- var r1 error + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, height) + } else { +@@ -83,13 +104,12 @@ func (_m *StateProvider) State(ctx context.Context, height uint64) (state.State, + return r0, r1 + } + +-type mockConstructorTestingTNewStateProvider interface { ++// NewStateProvider creates a new instance of 
StateProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. ++// The first argument is typically a *testing.T value. ++func NewStateProvider(t interface { + mock.TestingT + Cleanup(func()) +-} +- +-// NewStateProvider creates a new instance of StateProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +-func NewStateProvider(t mockConstructorTestingTNewStateProvider) *StateProvider { ++}) *StateProvider { + mock := &StateProvider{} + mock.Mock.Test(t) + diff --git a/patches/statesync/reactor.go.patch b/patches/statesync/reactor.go.patch new file mode 100644 index 00000000000..8d774f9b7ea --- /dev/null +++ b/patches/statesync/reactor.go.patch @@ -0,0 +1,13 @@ +diff --git a/statesync/reactor.go b/statesync/reactor.go +index d8aa5b2a7..59f85f9aa 100644 +--- a/statesync/reactor.go ++++ b/statesync/reactor.go +@@ -103,7 +103,7 @@ func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { + } + } + +-// Receive implements p2p.Reactor. ++// ReceiveEnvelope implements p2p.Reactor. + func (r *Reactor) ReceiveEnvelope(e p2p.Envelope) { + if !r.IsRunning() { + return diff --git a/patches/statesync/snapshots.go.patch b/patches/statesync/snapshots.go.patch new file mode 100644 index 00000000000..57cd8532778 --- /dev/null +++ b/patches/statesync/snapshots.go.patch @@ -0,0 +1,14 @@ +diff --git a/statesync/snapshots.go b/statesync/snapshots.go +index efe705cc2..aac950bb5 100644 +--- a/statesync/snapshots.go ++++ b/statesync/snapshots.go +@@ -21,7 +21,8 @@ type snapshot struct { + Hash []byte + Metadata []byte + +- trustedAppHash []byte // populated by light client ++ trustedAppHash []byte // populated by light client ++ trustedAppVersion uint64 // populated by light client + } + + // Key generates a snapshot key, used for lookups. 
It takes into account not only the height and diff --git a/patches/statesync/stateprovider.go.patch b/patches/statesync/stateprovider.go.patch new file mode 100644 index 00000000000..987e5030d92 --- /dev/null +++ b/patches/statesync/stateprovider.go.patch @@ -0,0 +1,46 @@ +diff --git a/statesync/stateprovider.go b/statesync/stateprovider.go +index 2dbca0893..3b7a9b022 100644 +--- a/statesync/stateprovider.go ++++ b/statesync/stateprovider.go +@@ -91,6 +91,7 @@ func (s *lightClientStateProvider) AppHash(ctx context.Context, height uint64) ( + defer s.Unlock() + + // We have to fetch the next height, which contains the app hash for the previous height. ++ //nolint:gosec + header, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height+1), time.Now()) + if err != nil { + return nil, err +@@ -103,6 +104,7 @@ func (s *lightClientStateProvider) AppHash(ctx context.Context, height uint64) ( + // breaking it. We should instead have a Has(ctx, height) method which checks + // that the state provider has access to the necessary data for the height. + // We piggyback on AppHash() since it's called when adding snapshots to the pool. ++ //nolint:gosec + _, err = s.lc.VerifyLightBlockAtHeight(ctx, int64(height+2), time.Now()) + if err != nil { + return nil, err +@@ -114,6 +116,7 @@ func (s *lightClientStateProvider) AppHash(ctx context.Context, height uint64) ( + func (s *lightClientStateProvider) Commit(ctx context.Context, height uint64) (*types.Commit, error) { + s.Lock() + defer s.Unlock() ++ //nolint:gosec + header, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height), time.Now()) + if err != nil { + return nil, err +@@ -143,14 +146,17 @@ func (s *lightClientStateProvider) State(ctx context.Context, height uint64) (sm + // + // We need to fetch the NextValidators from height+2 because if the application changed + // the validator set at the snapshot height then this only takes effect at height+2. 
++ //nolint:gosec + lastLightBlock, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height), time.Now()) + if err != nil { + return sm.State{}, err + } ++ //nolint:gosec + currentLightBlock, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height+1), time.Now()) + if err != nil { + return sm.State{}, err + } ++ //nolint:gosec + nextLightBlock, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height+2), time.Now()) + if err != nil { + return sm.State{}, err diff --git a/patches/statesync/syncer.go.patch b/patches/statesync/syncer.go.patch new file mode 100644 index 00000000000..7d53497ba4b --- /dev/null +++ b/patches/statesync/syncer.go.patch @@ -0,0 +1,112 @@ +diff --git a/statesync/syncer.go b/statesync/syncer.go +index c057c324e..89f66a939 100644 +--- a/statesync/syncer.go ++++ b/statesync/syncer.go +@@ -265,6 +265,19 @@ func (s *syncer) Sync(snapshot *snapshot, chunks *chunkQueue) (sm.State, *types. + } + snapshot.trustedAppHash = appHash + ++ pctx, pcancel := context.WithTimeout(context.TODO(), 30*time.Second) ++ defer pcancel() ++ // Optimistically build new state, so we don't discover any light client failures at the end. ++ state, err := s.stateProvider.State(pctx, snapshot.Height) ++ if err != nil { ++ s.logger.Info("failed to fetch and verify CometBFT state", "err", err) ++ if err == light.ErrNoWitnesses { ++ return sm.State{}, nil, err ++ } ++ return sm.State{}, nil, errRejectSnapshot ++ } ++ snapshot.trustedAppVersion = state.ConsensusParams.Version.AppVersion ++ + // Offer snapshot to ABCI app. + err = s.offerSnapshot(snapshot) + if err != nil { +@@ -278,18 +291,6 @@ func (s *syncer) Sync(snapshot *snapshot, chunks *chunkQueue) (sm.State, *types. + go s.fetchChunks(fetchCtx, snapshot, chunks) + } + +- pctx, pcancel := context.WithTimeout(context.TODO(), 30*time.Second) +- defer pcancel() +- +- // Optimistically build new state, so we don't discover any light client failures at the end. 
+- state, err := s.stateProvider.State(pctx, snapshot.Height) +- if err != nil { +- s.logger.Info("failed to fetch and verify CometBFT state", "err", err) +- if err == light.ErrNoWitnesses { +- return sm.State{}, nil, err +- } +- return sm.State{}, nil, errRejectSnapshot +- } + commit, err := s.stateProvider.Commit(pctx, snapshot.Height) + if err != nil { + s.logger.Info("failed to fetch and verify commit", "err", err) +@@ -306,10 +307,14 @@ func (s *syncer) Sync(snapshot *snapshot, chunks *chunkQueue) (sm.State, *types. + } + + // Verify app and app version +- if err := s.verifyApp(snapshot, state.Version.Consensus.App); err != nil { ++ timeouts, err := s.verifyApp(snapshot, state.Version.Consensus.App) ++ if err != nil { + return sm.State{}, nil, err + } + ++ state.TimeoutCommit = timeouts.TimeoutCommit ++ state.TimeoutPropose = timeouts.TimeoutPropose ++ + // Done! 🎉 + s.logger.Info("Snapshot restored", "height", snapshot.Height, "format", snapshot.Format, + "hash", snapshot.Hash) +@@ -330,7 +335,8 @@ func (s *syncer) offerSnapshot(snapshot *snapshot) error { + Hash: snapshot.Hash, + Metadata: snapshot.Metadata, + }, +- AppHash: snapshot.trustedAppHash, ++ AppHash: snapshot.trustedAppHash, ++ AppVersion: snapshot.trustedAppVersion, + }) + if err != nil { + return fmt.Errorf("failed to offer snapshot: %w", err) +@@ -482,10 +488,10 @@ func (s *syncer) requestChunk(snapshot *snapshot, chunk uint32) { + } + + // verifyApp verifies the sync, checking the app hash, last block height and app version +-func (s *syncer) verifyApp(snapshot *snapshot, appVersion uint64) error { ++func (s *syncer) verifyApp(snapshot *snapshot, appVersion uint64) (abci.TimeoutsInfo, error) { + resp, err := s.connQuery.InfoSync(proxy.RequestInfo) + if err != nil { +- return fmt.Errorf("failed to query ABCI app for appHash: %w", err) ++ return abci.TimeoutsInfo{}, fmt.Errorf("failed to query ABCI app for appHash: %w", err) + } + + // sanity check that the app version in the block matches the 
application's own record +@@ -493,24 +499,25 @@ func (s *syncer) verifyApp(snapshot *snapshot, appVersion uint64) error { + if resp.AppVersion != appVersion { + // An error here most likely means that the app hasn't inplemented state sync + // or the Info call correctly +- return fmt.Errorf("app version mismatch. Expected: %d, got: %d", ++ return abci.TimeoutsInfo{}, fmt.Errorf("app version mismatch. Expected: %d, got: %d", + appVersion, resp.AppVersion) + } + if !bytes.Equal(snapshot.trustedAppHash, resp.LastBlockAppHash) { + s.logger.Error("appHash verification failed", + "expected", snapshot.trustedAppHash, + "actual", resp.LastBlockAppHash) +- return errVerifyFailed ++ return abci.TimeoutsInfo{}, errVerifyFailed + } ++ //nolint:gosec + if uint64(resp.LastBlockHeight) != snapshot.Height { + s.logger.Error( + "ABCI app reported unexpected last block height", + "expected", snapshot.Height, + "actual", resp.LastBlockHeight, + ) +- return errVerifyFailed ++ return abci.TimeoutsInfo{}, errVerifyFailed + } + + s.logger.Info("Verified ABCI app", "height", snapshot.Height, "appHash", snapshot.trustedAppHash) +- return nil ++ return resp.Timeouts, nil + } diff --git a/patches/statesync/syncer_test.go.patch b/patches/statesync/syncer_test.go.patch new file mode 100644 index 00000000000..7272b81a1c7 --- /dev/null +++ b/patches/statesync/syncer_test.go.patch @@ -0,0 +1,35 @@ +diff --git a/statesync/syncer_test.go b/statesync/syncer_test.go +index 769107d7e..9da872dce 100644 +--- a/statesync/syncer_test.go ++++ b/statesync/syncer_test.go +@@ -34,6 +34,10 @@ func setupOfferSyncer(t *testing.T) (*syncer, *proxymocks.AppConnSnapshot) { + connSnapshot := &proxymocks.AppConnSnapshot{} + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) ++ stateProvider.On("State", mock.AnythingOfType("*context.timerCtx"), uint64(1)).Return(sm.State{}, nil) ++ stateProvider.On("State", 
mock.AnythingOfType("*context.timerCtx"), uint64(2)).Return(sm.State{}, nil) ++ stateProvider.On("State", mock.AnythingOfType("*context.timerCtx"), uint64(4)).Return(sm.State{}, nil) ++ + cfg := config.DefaultStateSyncConfig() + syncer := newSyncer(*cfg, log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "") + +@@ -84,7 +88,9 @@ func TestSyncer_SyncAny(t *testing.T) { + stateProvider.On("AppHash", mock.Anything, uint64(1)).Return(state.AppHash, nil) + stateProvider.On("AppHash", mock.Anything, uint64(2)).Return([]byte("app_hash_2"), nil) + stateProvider.On("Commit", mock.Anything, uint64(1)).Return(commit, nil) +- stateProvider.On("State", mock.Anything, uint64(1)).Return(state, nil) ++ stateProvider.On("State", mock.AnythingOfType("*context.timerCtx"), uint64(1)).Return(state, nil) ++ stateProvider.On("State", mock.AnythingOfType("*context.timerCtx"), uint64(2)).Return(state, nil) ++ + connSnapshot := &proxymocks.AppConnSnapshot{} + connQuery := &proxymocks.AppConnQuery{} + +@@ -675,7 +681,7 @@ func TestSyncer_verifyApp(t *testing.T) { + syncer := newSyncer(*cfg, log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "") + + connQuery.On("InfoSync", proxy.RequestInfo).Return(tc.response, tc.err) +- err := syncer.verifyApp(s, appVersion) ++ _, err := syncer.verifyApp(s, appVersion) + unwrapped := errors.Unwrap(err) + if unwrapped != nil { + err = unwrapped diff --git a/patches/store/store.go.patch b/patches/store/store.go.patch new file mode 100644 index 00000000000..a4a9cd3a132 --- /dev/null +++ b/patches/store/store.go.patch @@ -0,0 +1,159 @@ +diff --git a/store/store.go b/store/store.go +index 3a2fd1aa7..5ddea3bbf 100644 +--- a/store/store.go ++++ b/store/store.go +@@ -7,6 +7,7 @@ import ( + dbm "github.com/cometbft/cometbft-db" + "github.com/gogo/protobuf/proto" + ++ abci "github.com/tendermint/tendermint/abci/types" + cmtsync "github.com/tendermint/tendermint/libs/sync" + cmtstore "github.com/tendermint/tendermint/proto/tendermint/store" + 
cmtproto "github.com/tendermint/tendermint/proto/tendermint/types" +@@ -37,7 +38,7 @@ type BlockStore struct { + // fine-grained concurrency control for its data, and thus this mutex does not apply to + // database contents. The only reason for keeping these fields in the struct is that the data + // can't efficiently be queried from the database since the key encoding we use is not +- // lexicographically ordered (see https://github.com/tendermint/tendermint/issues/4567). ++ // lexicographically ordered. + mtx cmtsync.RWMutex + base int64 + height int64 +@@ -78,7 +79,7 @@ func (bs *BlockStore) Size() int64 { + return bs.height - bs.base + 1 + } + +-// LoadBase atomically loads the base block meta, or returns nil if no base is found. ++// LoadBaseMeta atomically loads the base block meta, or returns nil if no base is found. + func (bs *BlockStore) LoadBaseMeta() *types.BlockMeta { + bs.mtx.RLock() + defer bs.mtx.RUnlock() +@@ -196,6 +197,25 @@ func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { + return blockMeta + } + ++// LoadBlockMetaByHash returns the blockmeta whose header corresponds to the given hash. If none is found, returns nil. ++func (bs *BlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { ++ bz, err := bs.db.Get(calcBlockHashKey(hash)) ++ if err != nil { ++ panic(err) ++ } ++ if len(bz) == 0 { ++ return nil ++ } ++ ++ s := string(bz) ++ height, err := strconv.ParseInt(s, 10, 64) ++ ++ if err != nil { ++ panic(fmt.Sprintf("failed to extract height from %s: %v", s, err)) ++ } ++ return bs.LoadBlockMeta(height) ++} ++ + // LoadBlockCommit returns the Commit for the given height. + // This commit consists of the +2/3 and other Precommit-votes for block at `height`, + // and it comes from the block.LastCommit for `height+1`. 
+@@ -272,8 +292,7 @@ func (bs *BlockStore) PruneBlocks(height int64) (uint64, error) { + bs.mtx.Unlock() + bs.saveState() + +- err := batch.WriteSync() +- if err != nil { ++ if err := batch.WriteSync(); err != nil { + return fmt.Errorf("failed to prune up to height %v: %w", base, err) + } + batch.Close() +@@ -285,6 +304,12 @@ func (bs *BlockStore) PruneBlocks(height int64) (uint64, error) { + if meta == nil { // assume already deleted + continue + } ++ block := bs.LoadBlock(h) ++ for _, tx := range block.Txs { ++ if err := batch.Delete(calcTxHashKey(tx.Hash())); err != nil { ++ return 0, err ++ } ++ } + if err := batch.Delete(calcBlockMetaKey(h)); err != nil { + return 0, err + } +@@ -425,6 +450,44 @@ func (bs *BlockStore) SaveSeenCommit(height int64, seenCommit *types.Commit) err + return bs.db.Set(calcSeenCommitKey(height), seenCommitBytes) + } + ++// SaveTxInfo indexes the txs from the block with the given response codes and logs from execution. ++// Only the error logs are saved for failed transactions. 
++func (bs *BlockStore) SaveTxInfo(block *types.Block, txResponseCodes []uint32, logs []string) error { ++ if len(txResponseCodes) != len(block.Txs) { ++ return fmt.Errorf("txResponseCodes length mismatch with block txs length") ++ } ++ if len(logs) != len(block.Txs) { ++ return fmt.Errorf("logs length mismatch with block txs length") ++ } ++ ++ // Create a new batch ++ batch := bs.db.NewBatch() ++ ++ // Batch and save txs from the block ++ for i, tx := range block.Txs { ++ txInfo := cmtstore.TxInfo{ ++ Height: block.Height, ++ //nolint:gosec ++ Index: uint32(i), ++ Code: txResponseCodes[i], ++ } ++ // Set error log for failed txs ++ if txResponseCodes[i] != abci.CodeTypeOK { ++ txInfo.Error = logs[i] ++ } ++ txInfoBytes, err := proto.Marshal(&txInfo) ++ if err != nil { ++ return fmt.Errorf("unable to marshal tx: %w", err) ++ } ++ if err := batch.Set(calcTxHashKey(tx.Hash()), txInfoBytes); err != nil { ++ return err ++ } ++ } ++ ++ // Write the batch to the db ++ return batch.WriteSync() ++} ++ + func (bs *BlockStore) Close() error { + return bs.db.Close() + } +@@ -451,6 +514,10 @@ func calcBlockHashKey(hash []byte) []byte { + return []byte(fmt.Sprintf("BH:%x", hash)) + } + ++func calcTxHashKey(hash []byte) []byte { ++ return []byte(fmt.Sprintf("TH:%x", hash)) ++} ++ + //----------------------------------------------------------------------------- + + var blockStoreKey = []byte("blockStore") +@@ -493,6 +560,23 @@ func LoadBlockStoreState(db dbm.DB) cmtstore.BlockStoreState { + return bsj + } + ++// LoadTxInfo loads the TxInfo from disk given its hash. 
++func (bs *BlockStore) LoadTxInfo(txHash []byte) *cmtstore.TxInfo { ++ bz, err := bs.db.Get(calcTxHashKey(txHash)) ++ if err != nil { ++ panic(err) ++ } ++ if len(bz) == 0 { ++ return nil ++ } ++ ++ var txi cmtstore.TxInfo ++ if err = proto.Unmarshal(bz, &txi); err != nil { ++ panic(fmt.Errorf("unmarshal to TxInfo failed: %w", err)) ++ } ++ return &txi ++} ++ + // mustEncode proto encodes a proto.message and panics if fails + func mustEncode(pb proto.Message) []byte { + bz, err := proto.Marshal(pb) diff --git a/patches/store/store_test.go.patch b/patches/store/store_test.go.patch new file mode 100644 index 00000000000..37d3957d212 --- /dev/null +++ b/patches/store/store_test.go.patch @@ -0,0 +1,250 @@ +diff --git a/store/store_test.go b/store/store_test.go +index 0e0e80832..be119dc3f 100644 +--- a/store/store_test.go ++++ b/store/store_test.go +@@ -15,6 +15,7 @@ import ( + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + ++ abci "github.com/tendermint/tendermint/abci/types" + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/libs/log" +@@ -22,6 +23,7 @@ import ( + cmtstore "github.com/tendermint/tendermint/proto/tendermint/store" + cmtversion "github.com/tendermint/tendermint/proto/tendermint/version" + sm "github.com/tendermint/tendermint/state" ++ "github.com/tendermint/tendermint/test/factory" + "github.com/tendermint/tendermint/types" + cmttime "github.com/tendermint/tendermint/types/time" + "github.com/tendermint/tendermint/version" +@@ -45,7 +47,10 @@ func makeTestCommit(height int64, timestamp time.Time) *types.Commit { + + func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block { + txs := []types.Tx{make([]byte, types.BlockPartSizeBytes)} // TX taking one block part alone +- block, _ := state.MakeBlock(height, txs, lastCommit, nil, state.Validators.GetProposer().Address) ++ data := types.Data{ ++ Txs: txs, ++ } ++ block, _ 
:= state.MakeBlock(height, data, lastCommit, nil, state.Validators.GetProposer().Address) + return block + } + +@@ -153,7 +158,6 @@ func TestMain(m *testing.M) { + } + + // TODO: This test should be simplified ... +- + func TestBlockStoreSaveLoadBlock(t *testing.T) { + state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer))) + defer cleanup() +@@ -170,13 +174,14 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { + + // save a block big enough to have two block parts + txs := []types.Tx{make([]byte, types.BlockPartSizeBytes)} // TX taking one block part alone +- block, _ := state.MakeBlock(bs.Height()+1, txs, new(types.Commit), nil, state.Validators.GetProposer().Address) ++ data := factory.MakeData(txs) ++ block, _ := state.MakeBlock(bs.Height()+1, data, new(types.Commit), nil, state.Validators.GetProposer().Address) + validPartSet := block.MakePartSet(types.BlockPartSizeBytes) + require.GreaterOrEqual(t, validPartSet.Total(), uint32(2)) + part2 = validPartSet.GetPart(1) +- + seenCommit := makeTestCommit(block.Header.Height, cmttime.Now()) + bs.SaveBlock(block, validPartSet, seenCommit) ++ + require.EqualValues(t, 1, bs.Base(), "expecting the new height to be changed") + require.EqualValues(t, block.Header.Height, bs.Height(), "expecting the new height to be changed") + +@@ -194,7 +199,6 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { + } + + // End of setup, test data +- + commitAtH10 := makeTestCommit(10, cmttime.Now()) + tuples := []struct { + block *types.Block +@@ -367,6 +371,78 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { + } + } + ++func makeUniqueBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block { ++ data := types.Data{ ++ Txs: []types.Tx{types.Tx([]byte{byte(height)})}, ++ } ++ block, _ := state.MakeBlock(height, data, lastCommit, nil, state.Validators.GetProposer().Address) ++ return block ++} ++ ++func TestSaveTxInfo(t *testing.T) { ++ // Create a state and a block store ++ state, blockStore, 
cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer))) ++ defer cleanup() ++ ++ var allTxResponseCodes []uint32 ++ var allTxLogs []string ++ ++ // Create 10 blocks each with 1 tx ++ for h := int64(1); h <= 10; h++ { ++ block := makeUniqueBlock(h, state, new(types.Commit)) ++ partSet := block.MakePartSet(types.BlockPartSizeBytes) ++ seenCommit := makeTestCommit(h, cmttime.Now()) ++ blockStore.SaveBlock(block, partSet, seenCommit) ++ ++ var txResponseCode uint32 ++ var txLog string ++ ++ if h%2 == 0 { ++ txResponseCode = 0 ++ txLog = "success" ++ } else { ++ txResponseCode = 1 ++ txLog = "failure" ++ } ++ ++ // Save the tx info ++ err := blockStore.SaveTxInfo(block, []uint32{txResponseCode}, []string{txLog}) ++ require.NoError(t, err) ++ allTxResponseCodes = append(allTxResponseCodes, txResponseCode) ++ allTxLogs = append(allTxLogs, txLog) ++ } ++ ++ txIndex := 0 ++ // Get the blocks from blockstore up to the height ++ for h := int64(1); h <= 10; h++ { ++ block := blockStore.LoadBlock(h) ++ // Check that transactions exist in the block ++ for i, tx := range block.Txs { ++ txInfo := blockStore.LoadTxInfo(tx.Hash()) ++ require.Equal(t, block.Height, txInfo.Height) ++ require.Equal(t, uint32(i), txInfo.Index) ++ require.Equal(t, allTxResponseCodes[txIndex], txInfo.Code) ++ // We don't save the logs for successful transactions ++ if allTxResponseCodes[txIndex] == abci.CodeTypeOK { ++ require.Equal(t, "", txInfo.Error) ++ } else { ++ require.Equal(t, allTxLogs[txIndex], txInfo.Error) ++ } ++ txIndex++ ++ } ++ } ++ ++ // Get a random transaction and make sure it's indexed properly ++ block := blockStore.LoadBlock(7) ++ tx := block.Txs[0] ++ txInfo := blockStore.LoadTxInfo(tx.Hash()) ++ require.Equal(t, block.Height, txInfo.Height) ++ require.Equal(t, block.Height, int64(7)) ++ require.Equal(t, txInfo.Height, int64(7)) ++ require.Equal(t, uint32(1), txInfo.Code) ++ require.Equal(t, "failure", txInfo.Error) ++} ++ + func TestLoadBaseMeta(t *testing.T) { + 
config := cfg.ResetTestRoot("blockchain_reactor_test") + defer os.RemoveAll(config.RootDir) +@@ -483,6 +559,7 @@ func TestPruneBlocks(t *testing.T) { + require.Nil(t, bs.LoadBlockByHash(prunedBlock.Hash())) + require.Nil(t, bs.LoadBlockCommit(1199)) + require.Nil(t, bs.LoadBlockMeta(1199)) ++ require.Nil(t, bs.LoadBlockMetaByHash(prunedBlock.Hash())) + require.Nil(t, bs.LoadBlockPart(1199, 1)) + + for i := int64(1); i < 1200; i++ { +@@ -520,6 +597,70 @@ func TestPruneBlocks(t *testing.T) { + assert.Nil(t, bs.LoadBlock(1501)) + } + ++func TestPruneBlocksPrunesTxs(t *testing.T) { ++ config := cfg.ResetTestRoot("blockchain_reactor_test") ++ defer os.RemoveAll(config.RootDir) ++ ++ stateStore := sm.NewStore(dbm.NewMemDB(), sm.StoreOptions{DiscardABCIResponses: false}) ++ state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) ++ require.NoError(t, err) ++ ++ db := dbm.NewMemDB() ++ blockStore := NewBlockStore(db) ++ maxHeight := int64(15) ++ ++ var indexedTxHashes [][]byte ++ for height := int64(1); height <= maxHeight; height++ { ++ block := makeUniqueBlock(height, state, new(types.Commit)) ++ partSet := block.MakePartSet(types.BlockPartSizeBytes) ++ seenCommit := makeTestCommit(height, cmttime.Now()) ++ blockStore.SaveBlock(block, partSet, seenCommit) ++ err := blockStore.SaveTxInfo(block, make([]uint32, len(block.Txs)), make([]string, len(block.Txs))) ++ require.NoError(t, err) ++ for _, tx := range block.Txs { ++ indexedTxHashes = append(indexedTxHashes, tx.Hash()) ++ } ++ } ++ require.Len(t, indexedTxHashes, 15) ++ ++ // Check that the saved txs exist in the block store. ++ for _, hash := range indexedTxHashes { ++ txInfo := blockStore.LoadTxInfo(hash) ++ require.NoError(t, err) ++ require.NotNil(t, txInfo, "transaction was not saved in the database") ++ } ++ ++ pruned, err := blockStore.PruneBlocks(12) // prune blocks 1 to 11. 
++ require.NoError(t, err) ++ assert.EqualValues(t, 11, pruned) ++ ++ // Check that the transactions in the pruned blocks have been removed. We ++ // removed 11 blocks, each block has 1 tx so 11 txs should no longer ++ // exist in the db. ++ for i, hash := range indexedTxHashes { ++ txInfo := blockStore.LoadTxInfo(hash) ++ if int64(i) < 11 { ++ require.Nil(t, txInfo) ++ } else { ++ require.NotNil(t, txInfo) ++ } ++ } ++ ++ // Check that transactions in remaining blocks are still there ++ for height := int64(pruned + 1); height <= maxHeight; height++ { ++ block := blockStore.LoadBlock(height) ++ for i, tx := range block.Txs { ++ hash := tx.Hash() ++ txInfo := blockStore.LoadTxInfo(hash) ++ require.NoError(t, err) ++ require.NotNil(t, txInfo) ++ require.Equal(t, height, txInfo.Height) ++ require.Equal(t, uint32(i), txInfo.Index) ++ require.Equal(t, uint32(0), txInfo.Code) ++ } ++ } ++} ++ + func TestLoadBlockMeta(t *testing.T) { + bs, db := freshBlockStore() + height := int64(10) +@@ -559,6 +700,26 @@ func TestLoadBlockMeta(t *testing.T) { + } + } + ++func TestLoadBlockMetaByHash(t *testing.T) { ++ config := cfg.ResetTestRoot("blockchain_reactor_test") ++ defer os.RemoveAll(config.RootDir) ++ stateStore := sm.NewStore(dbm.NewMemDB(), sm.StoreOptions{ ++ DiscardABCIResponses: false, ++ }) ++ state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) ++ require.NoError(t, err) ++ bs := NewBlockStore(dbm.NewMemDB()) ++ ++ b1, partSet := state.MakeBlock(state.LastBlockHeight+1, types.Data{Txs: factory.MakeTxs(state.LastBlockHeight+1, 10)}, new(types.Commit), nil, state.Validators.GetProposer().Address) ++ seenCommit := makeTestCommit(1, cmttime.Now()) ++ bs.SaveBlock(b1, partSet, seenCommit) ++ ++ baseBlock := bs.LoadBlockMetaByHash(b1.Hash()) ++ assert.EqualValues(t, b1.Header.Height, baseBlock.Header.Height) ++ assert.EqualValues(t, b1.Header.LastBlockID, baseBlock.Header.LastBlockID) ++ assert.EqualValues(t, b1.Header.ChainID, baseBlock.Header.ChainID) ++} 
++ + func TestBlockFetchAtHeight(t *testing.T) { + state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer))) + defer cleanup() diff --git a/patches/test/e2e/app/app.go.patch b/patches/test/e2e/app/app.go.patch new file mode 100644 index 00000000000..46cd27c24b4 --- /dev/null +++ b/patches/test/e2e/app/app.go.patch @@ -0,0 +1,65 @@ +diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go +index 60fe89dea..724294fc3 100644 +--- a/test/e2e/app/app.go ++++ b/test/e2e/app/app.go +@@ -101,8 +101,9 @@ func NewApplication(cfg *Config) (*Application, error) { + // Info implements ABCI. + func (app *Application) Info(req abci.RequestInfo) abci.ResponseInfo { + return abci.ResponseInfo{ +- Version: version.ABCIVersion, +- AppVersion: appVersion, ++ Version: version.ABCIVersion, ++ AppVersion: appVersion, ++ //nolint:gosec + LastBlockHeight: int64(app.state.Height), + LastBlockAppHash: app.state.Hash, + } +@@ -111,6 +112,7 @@ func (app *Application) Info(req abci.RequestInfo) abci.ResponseInfo { + // Info implements ABCI. + func (app *Application) InitChain(req abci.RequestInitChain) abci.ResponseInitChain { + var err error ++ //nolint:gosec + app.state.initialHeight = uint64(req.InitialHeight) + if len(req.AppStateBytes) > 0 { + err = app.state.Import(0, req.AppStateBytes) +@@ -164,6 +166,7 @@ func (app *Application) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBegi + + // EndBlock implements ABCI. + func (app *Application) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { ++ //nolint:gosec + valUpdates, err := app.validatorUpdates(uint64(req.Height)) + if err != nil { + panic(err) +@@ -204,6 +207,7 @@ func (app *Application) Commit() abci.ResponseCommit { + } + retainHeight := int64(0) + if app.cfg.RetainBlocks > 0 { ++ //nolint:gosec + retainHeight = int64(height - app.cfg.RetainBlocks + 1) + } + return abci.ResponseCommit{ +@@ -215,6 +219,7 @@ func (app *Application) Commit() abci.ResponseCommit { + // Query implements ABCI. 
+ func (app *Application) Query(req abci.RequestQuery) abci.ResponseQuery { + return abci.ResponseQuery{ ++ //nolint:gosec + Height: int64(app.state.Height), + Key: req.Data, + Value: []byte(app.state.Get(string(req.Data))), +@@ -270,6 +275,16 @@ func (app *Application) ApplySnapshotChunk(req abci.RequestApplySnapshotChunk) a + return abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT} + } + ++func (app *Application) PrepareProposal( ++ req abci.RequestPrepareProposal) abci.ResponsePrepareProposal { ++ return abci.ResponsePrepareProposal{BlockData: req.BlockData} ++} ++ ++func (app *Application) ProcessProposal( ++ req abci.RequestProcessProposal) abci.ResponseProcessProposal { ++ return abci.ResponseProcessProposal{Result: abci.ResponseProcessProposal_ACCEPT} ++} ++ + func (app *Application) Rollback() error { + return app.state.Rollback() + } diff --git a/patches/test/e2e/generator/generate.go.patch b/patches/test/e2e/generator/generate.go.patch new file mode 100644 index 00000000000..faafe80c05d --- /dev/null +++ b/patches/test/e2e/generator/generate.go.patch @@ -0,0 +1,88 @@ +diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go +index a78608328..037076d43 100644 +--- a/test/e2e/generator/generate.go ++++ b/test/e2e/generator/generate.go +@@ -11,6 +11,7 @@ import ( + "github.com/Masterminds/semver/v3" + "github.com/go-git/go-git/v5" + "github.com/go-git/go-git/v5/plumbing/object" ++ "github.com/tendermint/tendermint/libs/math" + e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/version" + ) +@@ -19,7 +20,7 @@ var ( + // testnetCombinations defines global testnet options, where we generate a + // separate testnet for each combination (Cartesian product) of options. 
+ testnetCombinations = map[string][]interface{}{ +- "topology": {"single", "quad", "large"}, ++ "topology": {"single", "quad", "large_connected", "large_partially_connected"}, + "initialHeight": {0, 1000}, + "initialState": { + map[string]string{}, +@@ -40,7 +41,7 @@ var ( + // FIXME: v2 disabled due to flake + nodeFastSyncs = uniformChoice{"v0"} // "v2" + nodeStateSyncs = uniformChoice{false, true} +- nodeMempools = uniformChoice{"v0", "v1"} ++ nodeMempools = uniformChoice{"v0", "v1", "v2"} + nodePersistIntervals = uniformChoice{0, 1, 5} + nodeSnapshotIntervals = uniformChoice{0, 3} + nodeRetainBlocks = uniformChoice{0, 1, 5} +@@ -138,7 +139,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion st + numValidators = 1 + case "quad": + numValidators = 4 +- case "large": ++ case "large_connected", "large_partially_connected": + // FIXME Networks are kept small since large ones use too much CPU. + numSeeds = r.Intn(2) + numLightClients = r.Intn(3) +@@ -148,6 +149,15 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion st + return manifest, fmt.Errorf("unknown topology %q", opt["topology"]) + } + ++ if opt["topology"].(string) == "large_partially_connected" { ++ // currently this is at max 11 and minimum 4 ++ totalPossibleConnections := numSeeds + numValidators + numFulls - 1 ++ // this value should be between 3 and 2 ++ manifest.MaxOutboundConnections = math.MaxInt(totalPossibleConnections/3, 2) ++ // this value should be between 5 and 2 ++ manifest.MaxInboundConnections = math.MaxInt(totalPossibleConnections/2, 2) ++ } ++ + // First we generate seed nodes, starting at the initial height. 
+ for i := 1; i <= numSeeds; i++ { + manifest.Nodes[fmt.Sprintf("seed%02d", i)] = generateNode( +@@ -262,18 +272,21 @@ func generateNode( + r *rand.Rand, mode e2e.Mode, startAt int64, initialHeight int64, forceArchive bool, + ) *e2e.ManifestNode { + node := e2e.ManifestNode{ +- Version: nodeVersions.Choose(r).(string), +- Mode: string(mode), +- StartAt: startAt, +- Database: nodeDatabases.Choose(r).(string), +- PrivvalProtocol: nodePrivvalProtocols.Choose(r).(string), +- FastSync: nodeFastSyncs.Choose(r).(string), +- Mempool: nodeMempools.Choose(r).(string), +- StateSync: nodeStateSyncs.Choose(r).(bool) && startAt > 0, +- PersistInterval: ptrUint64(uint64(nodePersistIntervals.Choose(r).(int))), ++ Version: nodeVersions.Choose(r).(string), ++ Mode: string(mode), ++ StartAt: startAt, ++ Database: nodeDatabases.Choose(r).(string), ++ PrivvalProtocol: nodePrivvalProtocols.Choose(r).(string), ++ FastSync: nodeFastSyncs.Choose(r).(string), ++ Mempool: nodeMempools.Choose(r).(string), ++ StateSync: nodeStateSyncs.Choose(r).(bool) && startAt > 0, ++ //nolint:gosec ++ PersistInterval: ptrUint64(uint64(nodePersistIntervals.Choose(r).(int))), ++ //nolint:gosec + SnapshotInterval: uint64(nodeSnapshotIntervals.Choose(r).(int)), +- RetainBlocks: uint64(nodeRetainBlocks.Choose(r).(int)), +- Perturb: nodePerturbations.Choose(r), ++ //nolint:gosec ++ RetainBlocks: uint64(nodeRetainBlocks.Choose(r).(int)), ++ Perturb: nodePerturbations.Choose(r), + } + + // If this node is forced to be an archive node, retain all blocks and diff --git a/patches/test/e2e/generator/random.go.patch b/patches/test/e2e/generator/random.go.patch new file mode 100644 index 00000000000..091c5a90dc1 --- /dev/null +++ b/patches/test/e2e/generator/random.go.patch @@ -0,0 +1,19 @@ +diff --git a/test/e2e/generator/random.go b/test/e2e/generator/random.go +index ec59a01b2..75e5fc5a2 100644 +--- a/test/e2e/generator/random.go ++++ b/test/e2e/generator/random.go +@@ -63,12 +63,14 @@ func (wc weightedChoice) 
Choose(r *rand.Rand) interface{} { + total := 0 + choices := make([]interface{}, 0, len(wc)) + for choice, weight := range wc { ++ //nolint:gosec + total += int(weight) + choices = append(choices, choice) + } + + rem := r.Intn(total) + for _, choice := range choices { ++ //nolint:gosec + rem -= int(wc[choice]) + if rem <= 0 { + return choice diff --git a/patches/test/e2e/node/main.go.patch b/patches/test/e2e/node/main.go.patch new file mode 100644 index 00000000000..c5fcd15e06b --- /dev/null +++ b/patches/test/e2e/node/main.go.patch @@ -0,0 +1,13 @@ +diff --git a/test/e2e/node/main.go b/test/e2e/node/main.go +index 1105cb795..8a02ed733 100644 +--- a/test/e2e/node/main.go ++++ b/test/e2e/node/main.go +@@ -130,6 +130,8 @@ func startNode(cfg *Config) error { + return fmt.Errorf("failed to setup config: %w", err) + } + ++ cmtcfg.Instrumentation.TraceType = "local" ++ + n, err := node.NewNode(cmtcfg, + privval.LoadOrGenFilePV(cmtcfg.PrivValidatorKeyFile(), cmtcfg.PrivValidatorStateFile()), + nodeKey, diff --git a/patches/test/e2e/pkg/infrastructure.go.patch b/patches/test/e2e/pkg/infrastructure.go.patch new file mode 100644 index 00000000000..5993782a41a --- /dev/null +++ b/patches/test/e2e/pkg/infrastructure.go.patch @@ -0,0 +1,27 @@ +diff --git a/test/e2e/pkg/infrastructure.go b/test/e2e/pkg/infrastructure.go +index 2fc0e4bac..d829c2d96 100644 +--- a/test/e2e/pkg/infrastructure.go ++++ b/test/e2e/pkg/infrastructure.go +@@ -32,6 +32,22 @@ type InfrastructureData struct { + // Network is the CIDR notation range of IP addresses that all of the instances' + // IP addresses are expected to be within. + Network string `json:"network"` ++ ++ // TracePushConfig is the URL of the server to push trace data to. ++ TracePushConfig string `json:"trace_push_config,omitempty"` ++ ++ // TracePullAddress is the address to listen on for pulling trace data. 
++ TracePullAddress string `json:"trace_pull_address,omitempty"` ++ ++ // PyroscopeURL is the URL of the pyroscope instance to use for continuous ++ // profiling. If not specified, data will not be collected. ++ PyroscopeURL string `json:"pyroscope_url,omitempty"` ++ ++ // PyroscopeTrace enables adding trace data to pyroscope profiling. ++ PyroscopeTrace bool `json:"pyroscope_trace,omitempty"` ++ ++ // PyroscopeProfileTypes is the list of profile types to collect. ++ PyroscopeProfileTypes []string `json:"pyroscope_profile_types,omitempty"` + } + + // InstanceData contains the relevant information for a machine instance backing diff --git a/patches/test/e2e/pkg/manifest.go.patch b/patches/test/e2e/pkg/manifest.go.patch new file mode 100644 index 00000000000..f5cbd8265e1 --- /dev/null +++ b/patches/test/e2e/pkg/manifest.go.patch @@ -0,0 +1,29 @@ +diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go +index b64d5c239..37345bce9 100644 +--- a/test/e2e/pkg/manifest.go ++++ b/test/e2e/pkg/manifest.go +@@ -57,6 +57,13 @@ type Manifest struct { + // launch it instead of launching a separate CometBFT process. + ABCIProtocol string `toml:"abci_protocol"` + ++ // MaxInboundConnections and MaxOutboundConnection are the maximum number ++ // of connections a node has. This can be used to throttle the degree of ++ // connectivity of the network. If not specified, the default is taken ++ // from config/config.go ++ MaxInboundConnections int `toml:"max_inbound_connections"` ++ MaxOutboundConnections int `toml:"max_outbound_connections"` ++ + // UpgradeVersion specifies to which version this nodes need to upgrade. + // Currently only uncoordinated upgrade is supported + UpgradeVersion string `toml:"upgrade_version"` +@@ -116,8 +123,8 @@ type ManifestNode struct { + // Defaults to disabled. + FastSync string `toml:"fast_sync"` + +- // Mempool specifies which version of mempool to use. Either "v0" or "v1" +- // This defaults to v0. 
++ // Mempool specifies which version of mempool to use. Either "v0" or "v1", or "v2" ++ // (cat). This defaults to v2. + Mempool string `toml:"mempool_version"` + + // StateSync enables state sync. The runner automatically configures trusted diff --git a/patches/test/e2e/pkg/testnet.go.patch b/patches/test/e2e/pkg/testnet.go.patch new file mode 100644 index 00000000000..2e6d98b168e --- /dev/null +++ b/patches/test/e2e/pkg/testnet.go.patch @@ -0,0 +1,202 @@ +diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go +index 2e8035554..c6483a645 100644 +--- a/test/e2e/pkg/testnet.go ++++ b/test/e2e/pkg/testnet.go +@@ -11,6 +11,7 @@ import ( + "strconv" + "strings" + ++ "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/secp256k1" +@@ -66,6 +67,8 @@ type Testnet struct { + Validators map[*Node]int64 + ValidatorUpdates map[int64]map[*Node]int64 + Nodes []*Node ++ MaxInboundConnections int ++ MaxOutboundConnections int + KeyType string + Evidence int + LoadTxSizeBytes int +@@ -81,31 +84,36 @@ type Testnet struct { + + // Node represents a CometBFT node in a testnet. 
+ type Node struct { +- Name string +- Version string +- Testnet *Testnet +- Mode Mode +- PrivvalKey crypto.PrivKey +- NodeKey crypto.PrivKey +- IP net.IP +- ProxyPort uint32 +- StartAt int64 +- FastSync string +- StateSync bool +- Mempool string +- Database string +- ABCIProtocol Protocol +- PrivvalProtocol Protocol +- PersistInterval uint64 +- SnapshotInterval uint64 +- RetainBlocks uint64 +- Seeds []*Node +- PersistentPeers []*Node +- Perturbations []Perturbation +- Misbehaviors map[int64]string +- SendNoLoad bool +- Prometheus bool +- PrometheusProxyPort uint32 ++ Name string ++ Version string ++ Testnet *Testnet ++ Mode Mode ++ PrivvalKey crypto.PrivKey ++ NodeKey crypto.PrivKey ++ IP net.IP ++ ProxyPort uint32 ++ StartAt int64 ++ FastSync string ++ StateSync bool ++ Mempool string ++ Database string ++ ABCIProtocol Protocol ++ PrivvalProtocol Protocol ++ PersistInterval uint64 ++ SnapshotInterval uint64 ++ RetainBlocks uint64 ++ Seeds []*Node ++ PersistentPeers []*Node ++ Perturbations []Perturbation ++ Misbehaviors map[int64]string ++ SendNoLoad bool ++ Prometheus bool ++ PrometheusProxyPort uint32 ++ TracePushConfig string ++ TracePullAddress string ++ PyroscopeURL string ++ PyroscopeTrace bool ++ PyroscopeProfileTypes []string + } + + // LoadTestnet loads a testnet from a manifest file, using the filename to +@@ -124,22 +132,24 @@ func LoadTestnet(manifest Manifest, fname string, ifd InfrastructureData) (*Test + } + + testnet := &Testnet{ +- Name: filepath.Base(dir), +- File: fname, +- Dir: dir, +- IP: ipNet, +- InitialHeight: 1, +- InitialState: manifest.InitialState, +- Validators: map[*Node]int64{}, +- ValidatorUpdates: map[int64]map[*Node]int64{}, +- Nodes: []*Node{}, +- LoadTxSizeBytes: manifest.LoadTxSizeBytes, +- LoadTxBatchSize: manifest.LoadTxBatchSize, +- LoadTxConnections: manifest.LoadTxConnections, +- LoadMaxTxs: manifest.LoadMaxTxs, +- ABCIProtocol: manifest.ABCIProtocol, +- UpgradeVersion: manifest.UpgradeVersion, +- Prometheus: 
manifest.Prometheus, ++ Name: filepath.Base(dir), ++ File: fname, ++ Dir: dir, ++ IP: ipNet, ++ InitialHeight: 1, ++ InitialState: manifest.InitialState, ++ Validators: map[*Node]int64{}, ++ ValidatorUpdates: map[int64]map[*Node]int64{}, ++ Nodes: []*Node{}, ++ MaxInboundConnections: manifest.MaxInboundConnections, ++ MaxOutboundConnections: manifest.MaxOutboundConnections, ++ LoadTxSizeBytes: manifest.LoadTxSizeBytes, ++ LoadTxBatchSize: manifest.LoadTxBatchSize, ++ LoadTxConnections: manifest.LoadTxConnections, ++ LoadMaxTxs: manifest.LoadMaxTxs, ++ ABCIProtocol: manifest.ABCIProtocol, ++ UpgradeVersion: manifest.UpgradeVersion, ++ Prometheus: manifest.Prometheus, + ExperimentalMaxGossipConnectionsToPersistentPeers: manifest.ExperimentalMaxGossipConnectionsToPersistentPeers, + ExperimentalMaxGossipConnectionsToNonPersistentPeers: manifest.ExperimentalMaxGossipConnectionsToNonPersistentPeers, + } +@@ -184,28 +194,33 @@ func LoadTestnet(manifest Manifest, fname string, ifd InfrastructureData) (*Test + } + + node := &Node{ +- Name: name, +- Version: v, +- Testnet: testnet, +- PrivvalKey: keyGen.Generate(manifest.KeyType), +- NodeKey: keyGen.Generate("ed25519"), +- IP: ind.IPAddress, +- ProxyPort: proxyPortGen.Next(), +- Mode: ModeValidator, +- Database: "goleveldb", +- ABCIProtocol: Protocol(testnet.ABCIProtocol), +- PrivvalProtocol: ProtocolFile, +- StartAt: nodeManifest.StartAt, +- FastSync: nodeManifest.FastSync, +- Mempool: nodeManifest.Mempool, +- StateSync: nodeManifest.StateSync, +- PersistInterval: 1, +- SnapshotInterval: nodeManifest.SnapshotInterval, +- RetainBlocks: nodeManifest.RetainBlocks, +- Perturbations: []Perturbation{}, +- Misbehaviors: make(map[int64]string), +- SendNoLoad: nodeManifest.SendNoLoad, +- Prometheus: testnet.Prometheus, ++ Name: name, ++ Version: v, ++ Testnet: testnet, ++ PrivvalKey: keyGen.Generate(manifest.KeyType), ++ NodeKey: keyGen.Generate("ed25519"), ++ IP: ind.IPAddress, ++ ProxyPort: proxyPortGen.Next(), ++ Mode: 
ModeValidator, ++ Database: "goleveldb", ++ ABCIProtocol: Protocol(testnet.ABCIProtocol), ++ PrivvalProtocol: ProtocolFile, ++ StartAt: nodeManifest.StartAt, ++ FastSync: nodeManifest.FastSync, ++ Mempool: nodeManifest.Mempool, ++ StateSync: nodeManifest.StateSync, ++ PersistInterval: 1, ++ SnapshotInterval: nodeManifest.SnapshotInterval, ++ RetainBlocks: nodeManifest.RetainBlocks, ++ Perturbations: []Perturbation{}, ++ Misbehaviors: make(map[int64]string), ++ SendNoLoad: nodeManifest.SendNoLoad, ++ TracePushConfig: ifd.TracePushConfig, ++ TracePullAddress: ifd.TracePullAddress, ++ PyroscopeURL: ifd.PyroscopeURL, ++ PyroscopeTrace: ifd.PyroscopeTrace, ++ PyroscopeProfileTypes: ifd.PyroscopeProfileTypes, ++ Prometheus: testnet.Prometheus, + } + if node.StartAt == testnet.InitialHeight { + node.StartAt = 0 // normalize to 0 for initial nodes, since code expects this +@@ -319,6 +334,12 @@ func (t Testnet) Validate() error { + if t.IP == nil { + return errors.New("network has no IP") + } ++ if t.MaxInboundConnections < 0 { ++ return errors.New("MaxInboundConnections must not be negative") ++ } ++ if t.MaxOutboundConnections < 0 { ++ return errors.New("MaxOutboundConnections must not be negative") ++ } + if len(t.Nodes) == 0 { + return errors.New("network has no nodes") + } +@@ -367,7 +388,7 @@ func (n Node) Validate(testnet Testnet) error { + + } + switch n.Mempool { +- case "", "v0", "v1": ++ case "", config.MempoolV0, config.MempoolV1, config.MempoolV2: + default: + return fmt.Errorf("invalid mempool version %q", n.Mempool) + } diff --git a/patches/test/e2e/runner/exec.go.patch b/patches/test/e2e/runner/exec.go.patch new file mode 100644 index 00000000000..c9411937e2c --- /dev/null +++ b/patches/test/e2e/runner/exec.go.patch @@ -0,0 +1,28 @@ +diff --git a/test/e2e/runner/exec.go b/test/e2e/runner/exec.go +index 6cccbf607..2a511182e 100644 +--- a/test/e2e/runner/exec.go ++++ b/test/e2e/runner/exec.go +@@ -39,20 +39,20 @@ func execVerbose(args ...string) error { + // 
execCompose runs a Docker Compose command for a testnet. + func execCompose(dir string, args ...string) error { + return exec(append( +- []string{"docker-compose", "-f", filepath.Join(dir, "docker-compose.yml")}, ++ []string{"docker", "compose", "-f", filepath.Join(dir, "docker-compose.yml")}, + args...)...) + } + + func execComposeOutput(dir string, args ...string) ([]byte, error) { + return execOutput(append( +- []string{"docker-compose", "-f", filepath.Join(dir, "docker-compose.yml")}, ++ []string{"docker", "compose", "-f", filepath.Join(dir, "docker-compose.yml")}, + args...)...) + } + + // execComposeVerbose runs a Docker Compose command for a testnet and displays its output. + func execComposeVerbose(dir string, args ...string) error { + return execVerbose(append( +- []string{"docker-compose", "-f", filepath.Join(dir, "docker-compose.yml")}, ++ []string{"docker", "compose", "-f", filepath.Join(dir, "docker-compose.yml")}, + args...)...) + } + diff --git a/patches/test/e2e/runner/load.go.patch b/patches/test/e2e/runner/load.go.patch new file mode 100644 index 00000000000..bdbb6955eae --- /dev/null +++ b/patches/test/e2e/runner/load.go.patch @@ -0,0 +1,20 @@ +diff --git a/test/e2e/runner/load.go b/test/e2e/runner/load.go +index 6ad32d41c..1ab20f4cd 100644 +--- a/test/e2e/runner/load.go ++++ b/test/e2e/runner/load.go +@@ -120,9 +120,12 @@ func createTxBatch(ctx context.Context, txCh chan<- types.Tx, testnet *e2e.Testn + defer wg.Done() + for range genCh { + tx, err := payload.NewBytes(&payload.Payload{ +- Id: id, +- Size: uint64(testnet.LoadTxSizeBytes), +- Rate: uint64(testnet.LoadTxBatchSize), ++ Id: id, ++ //nolint:gosec ++ Size: uint64(testnet.LoadTxSizeBytes), ++ //nolint:gosec ++ Rate: uint64(testnet.LoadTxBatchSize), ++ //nolint:gosec + Connections: uint64(testnet.LoadTxConnections), + }) + if err != nil { diff --git a/patches/test/e2e/runner/main.go.patch b/patches/test/e2e/runner/main.go.patch new file mode 100644 index 00000000000..948ea089143 --- 
/dev/null +++ b/patches/test/e2e/runner/main.go.patch @@ -0,0 +1,82 @@ +diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go +index f4c8fd60a..30d0d3232 100644 +--- a/test/e2e/runner/main.go ++++ b/test/e2e/runner/main.go +@@ -9,6 +9,7 @@ import ( + "github.com/spf13/cobra" + + "github.com/tendermint/tendermint/libs/log" ++ "github.com/tendermint/tendermint/pkg/trace" + e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/test/e2e/pkg/infra" + "github.com/tendermint/tendermint/test/e2e/pkg/infra/docker" +@@ -77,6 +78,32 @@ func NewCLI() *CLI { + return fmt.Errorf("unknown infrastructure type '%s'", inft) + } + ++ iurl, err := cmd.Flags().GetString(trace.FlagTracePushConfig) ++ if err != nil { ++ return err ++ } ++ itoken, err := cmd.Flags().GetString(trace.FlagTracePullAddress) ++ if err != nil { ++ return err ++ } ++ if ifd.TracePushConfig == "" { ++ ifd.TracePushConfig = iurl ++ ifd.TracePullAddress = itoken ++ } ++ ++ purl, err := cmd.Flags().GetString(trace.FlagPyroscopeURL) ++ if err != nil { ++ return err ++ } ++ pTrace, err := cmd.Flags().GetBool(trace.FlagPyroscopeTrace) ++ if err != nil { ++ return err ++ } ++ if ifd.PyroscopeURL == "" { ++ ifd.PyroscopeURL = purl ++ ifd.PyroscopeTrace = pTrace ++ } ++ + testnet, err := e2e.LoadTestnet(m, file, ifd) + if err != nil { + return fmt.Errorf("loading testnet: %s", err) +@@ -159,6 +186,14 @@ func NewCLI() *CLI { + + cli.root.PersistentFlags().StringP("infrastructure-data", "", "", "path to the json file containing the infrastructure data. 
Only used if the 'infrastructure-type' is set to a value other than 'docker'") + ++ cli.root.PersistentFlags().String(trace.FlagTracePushConfig, "", trace.FlagTracePushConfigDescription) ++ ++ cli.root.PersistentFlags().String(trace.FlagTracePullAddress, "", trace.FlagTracePullAddressDescription) ++ ++ cli.root.PersistentFlags().String(trace.FlagPyroscopeURL, "", trace.FlagPyroscopeURLDescription) ++ ++ cli.root.PersistentFlags().Bool(trace.FlagPyroscopeTrace, false, trace.FlagPyroscopeTraceDescription) ++ + cli.root.Flags().BoolVarP(&cli.preserve, "preserve", "p", false, + "Preserves the running of the test net after tests are completed") + +@@ -259,7 +294,7 @@ func NewCLI() *CLI { + Min Block Interval + Max Block Interval + over a 100 block sampling period. +- ++ + Does not run any perturbations. + `, + RunE: func(cmd *cobra.Command, args []string) error { +@@ -299,11 +334,7 @@ Does not run any perturbations. + return err + } + +- if err := Cleanup(cli.testnet); err != nil { +- return err +- } +- +- return nil ++ return Cleanup(cli.testnet) + }, + }) + diff --git a/patches/test/e2e/runner/perturb.go.patch b/patches/test/e2e/runner/perturb.go.patch new file mode 100644 index 00000000000..cf09f239ebb --- /dev/null +++ b/patches/test/e2e/runner/perturb.go.patch @@ -0,0 +1,36 @@ +diff --git a/test/e2e/runner/perturb.go b/test/e2e/runner/perturb.go +index 992986700..90c9dd936 100644 +--- a/test/e2e/runner/perturb.go ++++ b/test/e2e/runner/perturb.go +@@ -2,6 +2,7 @@ package main + + import ( + "fmt" ++ "path/filepath" + "time" + + "github.com/tendermint/tendermint/libs/log" +@@ -27,6 +28,9 @@ func Perturb(testnet *e2e.Testnet) error { + // after recovering. 
+ func PerturbNode(node *e2e.Node, perturbation e2e.Perturbation) (*rpctypes.ResultStatus, error) { + testnet := node.Testnet ++ baseDir := filepath.Base(testnet.Dir) ++ testnetName := fmt.Sprintf("%s_%s", baseDir, testnet.Name) ++ + out, err := execComposeOutput(testnet.Dir, "ps", "-q", node.Name) + if err != nil { + return nil, err +@@ -44,11 +48,11 @@ func PerturbNode(node *e2e.Node, perturbation e2e.Perturbation) (*rpctypes.Resul + switch perturbation { + case e2e.PerturbationDisconnect: + logger.Info("perturb node", "msg", log.NewLazySprintf("Disconnecting node %v...", node.Name)) +- if err := execDocker("network", "disconnect", testnet.Name+"_"+testnet.Name, name); err != nil { ++ if err := execDocker("network", "disconnect", testnetName, name); err != nil { + return nil, err + } + time.Sleep(10 * time.Second) +- if err := execDocker("network", "connect", testnet.Name+"_"+testnet.Name, name); err != nil { ++ if err := execDocker("network", "connect", testnetName, name); err != nil { + return nil, err + } + diff --git a/patches/test/e2e/runner/setup.go.patch b/patches/test/e2e/runner/setup.go.patch new file mode 100644 index 00000000000..850cb4bafe2 --- /dev/null +++ b/patches/test/e2e/runner/setup.go.patch @@ -0,0 +1,32 @@ +diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go +index 594d28c49..c90f83f9a 100644 +--- a/test/e2e/runner/setup.go ++++ b/test/e2e/runner/setup.go +@@ -168,6 +168,13 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { + cfg.Mempool.ExperimentalMaxGossipConnectionsToNonPersistentPeers = int(node.Testnet.ExperimentalMaxGossipConnectionsToNonPersistentPeers) + cfg.Mempool.ExperimentalMaxGossipConnectionsToPersistentPeers = int(node.Testnet.ExperimentalMaxGossipConnectionsToPersistentPeers) + ++ cfg.Instrumentation.TraceType = "celestia" ++ cfg.Instrumentation.TracePushConfig = node.TracePushConfig ++ cfg.Instrumentation.TracePullAddress = node.TracePullAddress ++ cfg.Instrumentation.PyroscopeTrace = 
node.PyroscopeTrace ++ cfg.Instrumentation.PyroscopeURL = node.PyroscopeURL ++ cfg.Instrumentation.PyroscopeProfileTypes = node.PyroscopeProfileTypes ++ + switch node.ABCIProtocol { + case e2e.ProtocolUNIX: + cfg.ProxyApp = AppAddressUNIX +@@ -250,7 +257,12 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { + } + cfg.P2P.PersistentPeers += peer.AddressP2P(true) + } +- ++ if node.Testnet.MaxInboundConnections != 0 { ++ cfg.P2P.MaxNumInboundPeers = node.Testnet.MaxInboundConnections ++ } ++ if node.Testnet.MaxOutboundConnections != 0 { ++ cfg.P2P.MaxNumOutboundPeers = node.Testnet.MaxOutboundConnections ++ } + if node.Prometheus { + cfg.Instrumentation.Prometheus = true + } diff --git a/patches/test/e2e/runner/start.go.patch b/patches/test/e2e/runner/start.go.patch new file mode 100644 index 00000000000..b469060d20c --- /dev/null +++ b/patches/test/e2e/runner/start.go.patch @@ -0,0 +1,26 @@ +diff --git a/test/e2e/runner/start.go b/test/e2e/runner/start.go +index b9e1d8ad1..c08afec2f 100644 +--- a/test/e2e/runner/start.go ++++ b/test/e2e/runner/start.go +@@ -6,6 +6,7 @@ import ( + "time" + + "github.com/tendermint/tendermint/libs/log" ++ "github.com/tendermint/tendermint/p2p" + e2e "github.com/tendermint/tendermint/test/e2e/pkg" + ) + +@@ -49,10 +50,10 @@ func Start(testnet *e2e.Testnet) error { + if _, err := waitForNode(node, 0, 15*time.Second); err != nil { + return err + } ++ nid := p2p.NodeKey{PrivKey: node.NodeKey} ++ logger.Info("start", "msg", log.NewLazySprintf("Node %v up on http://127.0.0.1:%v chain-id %s node-id %s", node.Name, node.ProxyPort, testnet.Name, nid.ID())) + if node.PrometheusProxyPort > 0 { +- logger.Info("start", "msg", log.NewLazySprintf("Node %v up on http://127.0.0.1:%v; with Prometheus on http://127.0.0.1:%v/metrics", node.Name, node.ProxyPort, node.PrometheusProxyPort)) +- } else { +- logger.Info("start", "msg", log.NewLazySprintf("Node %v up on http://127.0.0.1:%v", node.Name, node.ProxyPort)) ++ logger.Info("start", "msg", 
log.NewLazySprintf("with Prometheus on http://127.0.0.1:%v/metrics", node.Name, node.ProxyPort, node.PrometheusProxyPort)) + } + } + diff --git a/patches/test/e2e/tests/block_test.go.patch b/patches/test/e2e/tests/block_test.go.patch new file mode 100644 index 00000000000..a67738ba8f0 --- /dev/null +++ b/patches/test/e2e/tests/block_test.go.patch @@ -0,0 +1,39 @@ +diff --git a/test/e2e/tests/block_test.go b/test/e2e/tests/block_test.go +index b3f4e9139..b21fd6a4e 100644 +--- a/test/e2e/tests/block_test.go ++++ b/test/e2e/tests/block_test.go +@@ -1,6 +1,8 @@ + package e2e_test + + import ( ++ "bytes" ++ "context" + "testing" + + "github.com/stretchr/testify/assert" +@@ -93,3 +95,25 @@ func TestBlock_Range(t *testing.T) { + } + }) + } ++ ++func TestBlock_SignedData(t *testing.T) { ++ testNode(t, func(t *testing.T, node e2e.Node) { ++ client, err := node.Client() ++ require.NoError(t, err) ++ ++ resp, err := client.SignedBlock(context.Background(), nil) ++ require.NoError(t, err) ++ require.Equal(t, resp.Header.Height, resp.Commit.Height) ++ ++ err = resp.ValidatorSet.VerifyCommit(resp.Header.ChainID, resp.Commit.BlockID, resp.Header.Height, &resp.Commit) ++ require.NoError(t, err) ++ ++ if !bytes.Equal(resp.Commit.BlockID.Hash, resp.Header.Hash()) { ++ t.Fatal("commit is for a different block") ++ } ++ ++ if !bytes.Equal(resp.Header.DataHash, resp.Data.Hash()) { ++ t.Fatal("data does not match header data hash") ++ } ++ }) ++} diff --git a/patches/test/factory/tx.go.patch b/patches/test/factory/tx.go.patch new file mode 100644 index 00000000000..42bc0d6f0c3 --- /dev/null +++ b/patches/test/factory/tx.go.patch @@ -0,0 +1,28 @@ +diff --git a/test/factory/tx.go b/test/factory/tx.go +new file mode 100644 +index 000000000..12b321fd3 +--- /dev/null ++++ b/test/factory/tx.go +@@ -0,0 +1,22 @@ ++package factory ++ ++import "github.com/tendermint/tendermint/types" ++ ++// MakeTxs is a helper function to generate mock transactions by given the block height ++// and the 
transaction numbers. ++func MakeTxs(height int64, num int) (txs []types.Tx) { ++ for i := 0; i < num; i++ { ++ txs = append(txs, types.Tx([]byte{byte(height), byte(i)})) ++ } ++ return txs ++} ++ ++func MakeTenTxs(height int64) (txs []types.Tx) { ++ return MakeTxs(height, 10) ++} ++ ++func MakeData(txs []types.Tx) types.Data { ++ return types.Data{ ++ Txs: txs, ++ } ++} diff --git a/patches/test/fuzz/p2p/pex/reactor_receive.go.patch b/patches/test/fuzz/p2p/pex/reactor_receive.go.patch new file mode 100644 index 00000000000..4a11e037ec7 --- /dev/null +++ b/patches/test/fuzz/p2p/pex/reactor_receive.go.patch @@ -0,0 +1,17 @@ +diff --git a/test/fuzz/p2p/pex/reactor_receive.go b/test/fuzz/p2p/pex/reactor_receive.go +index be9c6bba0..7cf94c71f 100644 +--- a/test/fuzz/p2p/pex/reactor_receive.go ++++ b/test/fuzz/p2p/pex/reactor_receive.go +@@ -74,8 +74,10 @@ func (fp *fuzzPeer) RemoteIP() net.IP { return net.IPv4(0, 0, 0, 0) } + func (fp *fuzzPeer) RemoteAddr() net.Addr { + return &net.TCPAddr{IP: fp.RemoteIP(), Port: 98991, Zone: ""} + } +-func (fp *fuzzPeer) IsOutbound() bool { return false } +-func (fp *fuzzPeer) IsPersistent() bool { return false } ++func (fp *fuzzPeer) IsOutbound() bool { return false } ++func (fp *fuzzPeer) IsPersistent() bool { return false } ++func (fp *fuzzPeer) HasIPChanged() bool { return false } ++ + func (fp *fuzzPeer) CloseConn() error { return nil } + func (fp *fuzzPeer) NodeInfo() p2p.NodeInfo { return defaultNodeInfo } + func (fp *fuzzPeer) Status() p2p.ConnectionStatus { var cs p2p.ConnectionStatus; return cs } diff --git a/patches/test/loadtime/cmd/load/main.go.patch b/patches/test/loadtime/cmd/load/main.go.patch new file mode 100644 index 00000000000..2411ceccb70 --- /dev/null +++ b/patches/test/loadtime/cmd/load/main.go.patch @@ -0,0 +1,31 @@ +diff --git a/test/loadtime/cmd/load/main.go b/test/loadtime/cmd/load/main.go +index a0dba8ece..a90447ba9 100644 +--- a/test/loadtime/cmd/load/main.go ++++ b/test/loadtime/cmd/load/main.go +@@ 
-3,8 +3,8 @@ package main + import ( + "fmt" + ++ "github.com/cometbft/cometbft-load-test/pkg/loadtest" + "github.com/google/uuid" +- "github.com/informalsystems/tm-load-test/pkg/loadtest" + + "github.com/tendermint/tendermint/test/loadtime/payload" + ) +@@ -56,10 +56,13 @@ func (f *ClientFactory) ValidateConfig(cfg loadtest.Config) error { + + func (f *ClientFactory) NewClient(cfg loadtest.Config) (loadtest.Client, error) { + return &TxGenerator{ +- id: f.ID, ++ id: f.ID, ++ //nolint:gosec + conns: uint64(cfg.Connections), +- rate: uint64(cfg.Rate), +- size: uint64(cfg.Size), ++ //nolint:gosec ++ rate: uint64(cfg.Rate), ++ //nolint:gosec ++ size: uint64(cfg.Size), + }, nil + } + diff --git a/patches/test/loadtime/cmd/report/main.go.patch b/patches/test/loadtime/cmd/report/main.go.patch new file mode 100644 index 00000000000..c3ce974c2de --- /dev/null +++ b/patches/test/loadtime/cmd/report/main.go.patch @@ -0,0 +1,16 @@ +diff --git a/test/loadtime/cmd/report/main.go b/test/loadtime/cmd/report/main.go +index cc7198549..50c3db3d7 100644 +--- a/test/loadtime/cmd/report/main.go ++++ b/test/loadtime/cmd/report/main.go +@@ -92,8 +92,11 @@ func toCSVRecords(rs []report.Report) [][]string { + offset := 1 + for _, r := range rs { + idStr := r.ID.String() ++ //nolint:gosec + connStr := strconv.FormatInt(int64(r.Connections), 10) ++ //nolint:gosec + rateStr := strconv.FormatInt(int64(r.Rate), 10) ++ //nolint:gosec + sizeStr := strconv.FormatInt(int64(r.Size), 10) + for i, v := range r.All { + res[offset+i] = []string{idStr, strconv.FormatInt(v.BlockTime.UnixNano(), 10), strconv.FormatInt(int64(v.Duration), 10), fmt.Sprintf("%X", v.Hash), connStr, rateStr, sizeStr} //nolint: lll diff --git a/patches/test/maverick/consensus/misbehavior.go.patch b/patches/test/maverick/consensus/misbehavior.go.patch new file mode 100644 index 00000000000..f50c9080a3f --- /dev/null +++ b/patches/test/maverick/consensus/misbehavior.go.patch @@ -0,0 +1,24 @@ +diff --git 
a/test/maverick/consensus/misbehavior.go b/test/maverick/consensus/misbehavior.go +index 96905a558..86d492e70 100644 +--- a/test/maverick/consensus/misbehavior.go ++++ b/test/maverick/consensus/misbehavior.go +@@ -302,14 +302,14 @@ func defaultReceivePrevote(cs *State, vote *types.Vote) { + + // Update Valid* if we can. + // NOTE: our proposal block may be nil or not what received a polka.. +- if len(blockID.Hash) != 0 && (cs.ValidRound < vote.Round) && (vote.Round == cs.Round) { ++ if len(blockID.Hash) != 0 && (cs.TwoThirdPrevoteRound < vote.Round) && (vote.Round == cs.Round) { + + if cs.ProposalBlock.HashesTo(blockID.Hash) { + cs.Logger.Info( +- "Updating ValidBlock because of POL.", "validRound", cs.ValidRound, "POLRound", vote.Round) +- cs.ValidRound = vote.Round +- cs.ValidBlock = cs.ProposalBlock +- cs.ValidBlockParts = cs.ProposalBlockParts ++ "Updating ValidBlock because of POL.", "validRound", cs.TwoThirdPrevoteRound, "POLRound", vote.Round) ++ cs.TwoThirdPrevoteRound = vote.Round ++ cs.TwoThirdPrevoteBlock = cs.ProposalBlock ++ cs.TwoThirdPrevoteBlockParts = cs.ProposalBlockParts + } else { + cs.Logger.Info( + "valid block we do not know about; set ProposalBlock=nil", diff --git a/patches/test/maverick/consensus/reactor.go.patch b/patches/test/maverick/consensus/reactor.go.patch new file mode 100644 index 00000000000..c242a2a0e2f --- /dev/null +++ b/patches/test/maverick/consensus/reactor.go.patch @@ -0,0 +1,12 @@ +diff --git a/test/maverick/consensus/reactor.go b/test/maverick/consensus/reactor.go +index e59ecaa79..b817c1eb8 100644 +--- a/test/maverick/consensus/reactor.go ++++ b/test/maverick/consensus/reactor.go +@@ -1157,6 +1157,7 @@ func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote + return nil, false // Not something worth sending + } + if index, ok := votes.BitArray().Sub(psVotes).PickRandom(); ok { ++ //nolint:gosec + return votes.GetByIndex(int32(index)), true + } + return nil, false diff --git 
a/patches/test/maverick/consensus/replay.go.patch b/patches/test/maverick/consensus/replay.go.patch new file mode 100644 index 00000000000..e5f7b34282f --- /dev/null +++ b/patches/test/maverick/consensus/replay.go.patch @@ -0,0 +1,23 @@ +diff --git a/test/maverick/consensus/replay.go b/test/maverick/consensus/replay.go +index 339de721c..ca863e0d0 100644 +--- a/test/maverick/consensus/replay.go ++++ b/test/maverick/consensus/replay.go +@@ -493,15 +493,16 @@ func (h *Handshaker) replayBlocks( + // ApplyBlock on the proxyApp with the last block. + func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.AppConnConsensus) (sm.State, error) { + block := h.store.LoadBlock(height) ++ seenCommit := h.store.LoadSeenCommit(height) + meta := h.store.LoadBlockMeta(height) + + // Use stubs for both mempool and evidence pool since no transactions nor + // evidence are needed here - block already exists. +- blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, proxyApp, emptyMempool{}, sm.EmptyEvidencePool{}) ++ blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, proxyApp, emptyMempool{}, sm.EmptyEvidencePool{}, sm.WithBlockStore(h.store)) + blockExec.SetEventBus(h.eventBus) + + var err error +- state, _, err = blockExec.ApplyBlock(state, meta.BlockID, block) ++ state, _, err = blockExec.ApplyBlock(state, meta.BlockID, block, seenCommit) + if err != nil { + return sm.State{}, err + } diff --git a/patches/test/maverick/consensus/replay_stubs.go.patch b/patches/test/maverick/consensus/replay_stubs.go.patch new file mode 100644 index 00000000000..1fdf9de1a64 --- /dev/null +++ b/patches/test/maverick/consensus/replay_stubs.go.patch @@ -0,0 +1,14 @@ +diff --git a/test/maverick/consensus/replay_stubs.go b/test/maverick/consensus/replay_stubs.go +index 033a797f1..ea0d122ba 100644 +--- a/test/maverick/consensus/replay_stubs.go ++++ b/test/maverick/consensus/replay_stubs.go +@@ -40,6 +40,9 @@ func (emptyMempool) TxsAvailable() <-chan struct{} { return 
make(chan struct{}) + func (emptyMempool) EnableTxsAvailable() {} + func (emptyMempool) TxsBytes() int64 { return 0 } + ++func (emptyMempool) GetTxByKey(txKey types.TxKey) (types.Tx, bool) { return nil, false } ++func (emptyMempool) WasRecentlyEvicted(txKey types.TxKey) bool { return false } ++ + func (emptyMempool) TxsFront() *clist.CElement { return nil } + func (emptyMempool) TxsWaitChan() <-chan struct{} { return nil } + diff --git a/patches/test/maverick/consensus/state.go.patch b/patches/test/maverick/consensus/state.go.patch new file mode 100644 index 00000000000..f9b5f2f3d9d --- /dev/null +++ b/patches/test/maverick/consensus/state.go.patch @@ -0,0 +1,95 @@ +diff --git a/test/maverick/consensus/state.go b/test/maverick/consensus/state.go +index 4e9bde081..ae5afad0a 100644 +--- a/test/maverick/consensus/state.go ++++ b/test/maverick/consensus/state.go +@@ -912,9 +912,9 @@ func (cs *State) updateToState(state sm.State) { + cs.LockedRound = -1 + cs.LockedBlock = nil + cs.LockedBlockParts = nil +- cs.ValidRound = -1 +- cs.ValidBlock = nil +- cs.ValidBlockParts = nil ++ cs.TwoThirdPrevoteRound = -1 ++ cs.TwoThirdPrevoteBlock = nil ++ cs.TwoThirdPrevoteBlockParts = nil + cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, validators) + cs.CommitRound = -1 + cs.LastValidators = state.LastValidators +@@ -1196,9 +1196,9 @@ func (cs *State) defaultDecideProposal(height int64, round int32) { + var blockParts *types.PartSet + + // Decide on block +- if cs.ValidBlock != nil { ++ if cs.TwoThirdPrevoteBlock != nil { + // If there is valid block, choose that. +- block, blockParts = cs.ValidBlock, cs.ValidBlockParts ++ block, blockParts = cs.TwoThirdPrevoteBlock, cs.TwoThirdPrevoteBlockParts + } else { + // Create a new proposal block from state/txs from the mempool. 
+ block, blockParts = cs.createProposalBlock() +@@ -1215,7 +1215,7 @@ func (cs *State) defaultDecideProposal(height int64, round int32) { + + // Make proposal + propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} +- proposal := types.NewProposal(height, round, cs.ValidRound, propBlockID) ++ proposal := types.NewProposal(height, round, cs.TwoThirdPrevoteRound, propBlockID) + p := proposal.ToProto() + if err := cs.privValidator.SignProposal(cs.state.ChainID, p); err == nil { + proposal.Signature = p.Signature +@@ -1488,11 +1488,12 @@ func (cs *State) finalizeCommit(height int64) { + fail.Fail() // XXX + + // Save to blockStore. ++ var seenCommit *types.Commit + if cs.blockStore.Height() < block.Height { + // NOTE: the seenCommit is local justification to commit this block, + // but may differ from the LastCommit included in the next block + precommits := cs.Votes.Precommits(cs.CommitRound) +- seenCommit := precommits.MakeCommit() ++ seenCommit = precommits.MakeCommit() + cs.blockStore.SaveBlock(block, blockParts, seenCommit) + } else { + // Happens during replay if we already saved the block but didn't commit +@@ -1532,7 +1533,9 @@ func (cs *State) finalizeCommit(height int64) { + stateCopy, retainHeight, err = cs.blockExec.ApplyBlock( + stateCopy, + types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()}, +- block) ++ block, ++ seenCommit, ++ ) + if err != nil { + cs.Logger.Error("Error on ApplyBlock", "err", err) + return +@@ -1665,9 +1668,10 @@ func (cs *State) recordMetrics(height int64, block *types.Block) { + if height > 1 { + lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1) + if lastBlockMeta != nil { +- cs.metrics.BlockIntervalSeconds.Observe( +- block.Time.Sub(lastBlockMeta.Header.Time).Seconds(), +- ) ++ elapsedTime := block.Time.Sub(lastBlockMeta.Header.Time).Seconds() ++ cs.metrics.BlockIntervalSeconds.Observe(elapsedTime) ++ cs.metrics.BlockTimeSeconds.Set(elapsedTime) ++ + } + } + +@@ -1736,13 
+1740,13 @@ func (cs *State) addProposalBlockPart(msg *cmtcon.BlockPartMessage, peerID p2p.I + // Update Valid* if we can. + prevotes := cs.Votes.Prevotes(cs.Round) + blockID, hasTwoThirds := prevotes.TwoThirdsMajority() +- if hasTwoThirds && !blockID.IsZero() && (cs.ValidRound < cs.Round) { ++ if hasTwoThirds && !blockID.IsZero() && (cs.TwoThirdPrevoteRound < cs.Round) { + if cs.ProposalBlock.HashesTo(blockID.Hash) { + cs.Logger.Info("Updating valid block to new proposal block", + "valid-round", cs.Round, "valid-block-hash", cs.ProposalBlock.Hash()) +- cs.ValidRound = cs.Round +- cs.ValidBlock = cs.ProposalBlock +- cs.ValidBlockParts = cs.ProposalBlockParts ++ cs.TwoThirdPrevoteRound = cs.Round ++ cs.TwoThirdPrevoteBlock = cs.ProposalBlock ++ cs.TwoThirdPrevoteBlockParts = cs.ProposalBlockParts + } + // TODO: In case there is +2/3 majority in Prevotes set for some + // block and cs.ProposalBlock contains different block, either diff --git a/patches/test/maverick/consensus/wal.go.patch b/patches/test/maverick/consensus/wal.go.patch new file mode 100644 index 00000000000..824d0f48acf --- /dev/null +++ b/patches/test/maverick/consensus/wal.go.patch @@ -0,0 +1,21 @@ +diff --git a/test/maverick/consensus/wal.go b/test/maverick/consensus/wal.go +index 2c7ccc2a2..adfc24fc0 100644 +--- a/test/maverick/consensus/wal.go ++++ b/test/maverick/consensus/wal.go +@@ -177,7 +177,7 @@ func (wal *BaseWAL) WriteSync(msg cmtcon.WALMessage) error { + } + + if err := wal.FlushAndSync(); err != nil { +- wal.Logger.Error(`WriteSync failed to flush consensus wal. ++ wal.Logger.Error(`WriteSync failed to flush consensus wal. 
+ WARNING: may result in creating alternative proposals / votes for the current height iff the node restarted`, + "err", err) + return err +@@ -284,6 +284,7 @@ func (enc *WALEncoder) Encode(v *cmtcon.TimedWALMessage) error { + } + + crc := crc32.Checksum(data, crc32c) ++ //nolint:gosec + length := uint32(len(data)) + if length > maxMsgSizeBytes { + return fmt.Errorf("msg is too big: %d bytes, max: %d bytes", length, maxMsgSizeBytes) diff --git a/patches/test/maverick/node/node.go.patch b/patches/test/maverick/node/node.go.patch new file mode 100644 index 00000000000..d29abd994b4 --- /dev/null +++ b/patches/test/maverick/node/node.go.patch @@ -0,0 +1,69 @@ +diff --git a/test/maverick/node/node.go b/test/maverick/node/node.go +index 1d6050686..bcc198c66 100644 +--- a/test/maverick/node/node.go ++++ b/test/maverick/node/node.go +@@ -32,10 +32,12 @@ import ( + "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/light" + mempl "github.com/tendermint/tendermint/mempool" ++ mempoolv2 "github.com/tendermint/tendermint/mempool/cat" + mempoolv0 "github.com/tendermint/tendermint/mempool/v0" + mempoolv1 "github.com/tendermint/tendermint/mempool/v1" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/p2p/pex" ++ "github.com/tendermint/tendermint/pkg/trace" + "github.com/tendermint/tendermint/privval" + "github.com/tendermint/tendermint/proxy" + rpccore "github.com/tendermint/tendermint/rpc/core" +@@ -383,6 +385,34 @@ func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns, + state sm.State, memplMetrics *mempl.Metrics, logger log.Logger, + ) (p2p.Reactor, mempl.Mempool) { + switch config.Mempool.Version { ++ case cfg.MempoolV2: ++ mp := mempoolv2.NewTxPool( ++ logger, ++ config.Mempool, ++ proxyApp.Mempool(), ++ state.LastBlockHeight, ++ mempoolv2.WithMetrics(memplMetrics), ++ mempoolv2.WithPreCheck(sm.TxPreCheck(state)), ++ mempoolv2.WithPostCheck(sm.TxPostCheck(state)), ++ ) ++ ++ reactor, err := 
mempoolv2.NewReactor( ++ mp, ++ &mempoolv2.ReactorOptions{ ++ ListenOnly: !config.Mempool.Broadcast, ++ MaxTxSize: config.Mempool.MaxTxBytes, ++ MaxGossipDelay: config.Mempool.MaxGossipDelay, ++ }, ++ ) ++ if err != nil { ++ // TODO: find a more polite way of handling this error ++ panic(err) ++ } ++ if config.Consensus.WaitForTxs() { ++ mp.EnableTxsAvailable() ++ } ++ ++ return reactor, mp + case cfg.MempoolV1: + mp := mempoolv1.NewTxMempool( + logger, +@@ -397,6 +427,7 @@ func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns, + reactor := mempoolv1.NewReactor( + config.Mempool, + mp, ++ trace.NoOpTracer(), + ) + if config.Consensus.WaitForTxs() { + mp.EnableTxsAvailable() +@@ -520,7 +551,7 @@ func createTransport( + ) { + var ( + mConnConfig = p2p.MConnConfig(config.P2P) +- transport = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig) ++ transport = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig, trace.NoOpTracer()) + connFilters = []p2p.ConnFilterFunc{} + peerFilters = []p2p.PeerFilterFunc{} + ) diff --git a/patches/tools/tools.go.patch b/patches/tools/tools.go.patch new file mode 100644 index 00000000000..64bd83ae08d --- /dev/null +++ b/patches/tools/tools.go.patch @@ -0,0 +1,19 @@ +diff --git a/tools/tools.go b/tools/tools.go +deleted file mode 100644 +index adfaa7f14..000000000 +--- a/tools/tools.go ++++ /dev/null +@@ -1,13 +0,0 @@ +-//go:build tools +- +-// This file uses the recommended method for tracking developer tools in a go module. 
+-// +-// https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module +- +-package tools +- +-import ( +- _ "github.com/bufbuild/buf/cmd/buf" +- _ "github.com/golangci/golangci-lint/cmd/golangci-lint" +- _ "github.com/vektra/mockery/v2" +-) diff --git a/patches/types/block.go.patch b/patches/types/block.go.patch new file mode 100644 index 00000000000..dacf8aa3a87 --- /dev/null +++ b/patches/types/block.go.patch @@ -0,0 +1,194 @@ +diff --git a/types/block.go b/types/block.go +index dff8d4ab5..d9d9c7058 100644 +--- a/types/block.go ++++ b/types/block.go +@@ -49,9 +49,10 @@ type Block struct { + LastCommit *Commit `json:"last_commit"` + } + +-// ValidateBasic performs basic validation that doesn't involve state data. +-// It checks the internal consistency of the block. +-// Further validation is done using state#ValidateBlock. ++// ValidateBasic performs basic validation that doesn't involve state data. It ++// checks the internal consistency of the block. Further validation is done ++// using state#ValidateBlock. celestia-app's ProcessProposal checks that the ++// block's DataHash matches the hash of the data availability header. + func (b *Block) ValidateBasic() error { + if b == nil { + return errors.New("nil block") +@@ -79,15 +80,6 @@ func (b *Block) ValidateBasic() error { + ) + } + +- // NOTE: b.Data.Txs may be nil, but b.Data.Hash() still works fine. +- if !bytes.Equal(b.DataHash, b.Data.Hash()) { +- return fmt.Errorf( +- "wrong Header.DataHash. Expected %v, got %v", +- b.Data.Hash(), +- b.DataHash, +- ) +- } +- + // NOTE: b.Evidence.Evidence may be nil, but we're just looping. 
+ for i, ev := range b.Evidence.Evidence { + if err := ev.ValidateBasic(); err != nil { +@@ -105,7 +97,9 @@ func (b *Block) ValidateBasic() error { + return nil + } + +-// fillHeader fills in any remaining header fields that are a function of the block data ++// fillHeader fills in any remaining header fields that are a function of the ++// block data NOTE: we expect celestia-app to populate the block DataHash but we ++// populate it here (in celestia-core) to not break existing tests in this repo. + func (b *Block) fillHeader() { + if b.LastCommitHash == nil { + b.LastCommitHash = b.LastCommit.Hash() +@@ -315,6 +309,27 @@ func MaxDataBytesNoEvidence(maxBytes int64, valsCount int) int64 { + return maxDataBytes + } + ++// MakeBlock returns a new block with an empty header, except what can be ++// computed from itself. ++// It populates the same set of fields validated by ValidateBasic. ++func MakeBlock( ++ height int64, ++ data Data, ++ lastCommit *Commit, ++ evidence []Evidence) *Block { ++ block := &Block{ ++ Header: Header{ ++ Version: cmtversion.Consensus{Block: version.BlockProtocol, App: 0}, ++ Height: height, ++ }, ++ Data: data, ++ Evidence: EvidenceData{Evidence: evidence}, ++ LastCommit: lastCommit, ++ } ++ block.fillHeader() ++ return block ++} ++ + //----------------------------------------------------------------------------- + + // Header defines the structure of a CometBFT block header. +@@ -770,6 +785,7 @@ func CommitToVoteSet(chainID string, commit *Commit, vals *ValidatorSet) *VoteSe + if commitSig.Absent() { + continue // OK, some precommits can be missing. + } ++ //nolint:gosec + added, err := voteSet.AddVote(commit.GetVote(int32(idx))) + if !added || err != nil { + panic(fmt.Sprintf("Failed to reconstruct LastCommit: %v", err)) +@@ -866,6 +882,10 @@ func (commit *Commit) IsCommit() bool { + // ValidateBasic performs basic validation that doesn't involve state data. + // Does not actually check the cryptographic signatures. 
+ func (commit *Commit) ValidateBasic() error { ++ if commit == nil { ++ return errors.New("nil commit") ++ } ++ + if commit.Height < 0 { + return errors.New("negative Height") + } +@@ -988,14 +1008,21 @@ func CommitFromProto(cp *cmtproto.Commit) (*Commit, error) { + + //----------------------------------------------------------------------------- + +-// Data contains the set of transactions included in the block ++// Data contains all the available Data of the block. ++// Data with reserved namespaces (Txs, IntermediateStateRoots, Evidence) and ++// Celestia application specific Blobs. + type Data struct { +- + // Txs that will be applied by state @ block.Height+1. + // NOTE: not all txs here are valid. We're just agreeing on the order first. + // This means that block.AppHash does not include these txs. + Txs Txs `json:"txs"` + ++ // SquareSize is the size of the square after splitting all the block data ++ // into shares. The erasure data is discarded after generation, and keeping this ++ // value avoids unnecessarily regenerating all of the shares when returning ++ // proofs that some element was included in the block ++ SquareSize uint64 `json:"square_size"` ++ + // Volatile + hash cmtbytes.HexBytes + } +@@ -1008,9 +1035,35 @@ func (data *Data) Hash() cmtbytes.HexBytes { + if data.hash == nil { + data.hash = data.Txs.Hash() // NOTE: leaves of merkle tree are TxIDs + } ++ ++ // this is the expected behavior where `data.hash` was set by celestia-app ++ // in PrepareProposal + return data.hash + } + ++type Blob struct { ++ // NamespaceVersion is the version of the namespace. Used in conjunction ++ // with NamespaceID to determine the namespace of this blob. ++ NamespaceVersion uint8 ++ ++ // NamespaceID defines the namespace ID of this blob. Used in conjunction ++ // with NamespaceVersion to determine the namespace of this blob. ++ NamespaceID []byte ++ ++ // Data is the actual data of the blob. ++ // (e.g. a block of a virtual sidechain). 
++ Data []byte ++ ++ // ShareVersion is the version of the share format that this blob should use ++ // when encoded into shares. ++ ShareVersion uint8 ++} ++ ++// Namespace returns the namespace of this blob encoded as a byte slice. ++func (b Blob) Namespace() []byte { ++ return append([]byte{b.NamespaceVersion}, b.NamespaceID...) ++} ++ + // StringIndented returns an indented string representation of the transactions. + func (data *Data) StringIndented(indent string) string { + if data == nil { +@@ -1026,9 +1079,8 @@ func (data *Data) StringIndented(indent string) string { + } + return fmt.Sprintf(`Data{ + %s %v +-%s}#%v`, +- indent, strings.Join(txStrings, "\n"+indent+" "), +- indent, data.hash) ++}`, ++ indent, strings.Join(txStrings, "\n"+indent+" ")) + } + + // ToProto converts Data to protobuf +@@ -1043,6 +1095,10 @@ func (data *Data) ToProto() cmtproto.Data { + tp.Txs = txBzs + } + ++ tp.SquareSize = data.SquareSize ++ ++ tp.Hash = data.hash ++ + return *tp + } + +@@ -1064,6 +1120,9 @@ func DataFromProto(dp *cmtproto.Data) (Data, error) { + data.Txs = Txs{} + } + ++ data.SquareSize = dp.SquareSize ++ data.hash = dp.Hash ++ + return *data, nil + } + +@@ -1230,7 +1289,7 @@ func (blockID *BlockID) ToProto() cmtproto.BlockID { + } + } + +-// FromProto sets a protobuf BlockID to the given pointer. ++// BlockIDFromProto sets a protobuf BlockID to the given pointer. + // It returns an error if the block id is invalid. 
+ func BlockIDFromProto(bID *cmtproto.BlockID) (*BlockID, error) { + if bID == nil { diff --git a/patches/types/block_test.go.patch b/patches/types/block_test.go.patch new file mode 100644 index 00000000000..8058465e87f --- /dev/null +++ b/patches/types/block_test.go.patch @@ -0,0 +1,237 @@ +diff --git a/types/block_test.go b/types/block_test.go +index 20722cb62..0190e4762 100644 +--- a/types/block_test.go ++++ b/types/block_test.go +@@ -3,6 +3,7 @@ package types + import ( + // it is ok to use math/rand here: we do not need a cryptographically secure random + // number generator here and we can run the tests a bit faster ++ stdbytes "bytes" + "crypto/rand" + "encoding/hex" + "math" +@@ -44,7 +45,7 @@ func TestBlockAddEvidence(t *testing.T) { + ev := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain") + evList := []Evidence{ev} + +- block := MakeBlock(h, txs, commit, evList) ++ block := MakeBlock(h, makeData(txs), commit, evList) + require.NotNil(t, block) + require.Equal(t, 1, len(block.Evidence.Evidence)) + require.NotNil(t, block.EvidenceHash) +@@ -77,13 +78,6 @@ func TestBlockValidateBasic(t *testing.T) { + blk.LastCommit.hash = nil // clear hash or change wont be noticed + }, true}, + {"Remove LastCommitHash", func(blk *Block) { blk.LastCommitHash = []byte("something else") }, true}, +- {"Tampered Data", func(blk *Block) { +- blk.Data.Txs[0] = Tx("something else") +- blk.Data.hash = nil // clear hash or change wont be noticed +- }, true}, +- {"Tampered DataHash", func(blk *Block) { +- blk.DataHash = cmtrand.Bytes(len(blk.DataHash)) +- }, true}, + {"Tampered EvidenceHash", func(blk *Block) { + blk.EvidenceHash = []byte("something else") + }, true}, +@@ -95,7 +89,7 @@ func TestBlockValidateBasic(t *testing.T) { + tc := tc + i := i + t.Run(tc.testName, func(t *testing.T) { +- block := MakeBlock(h, txs, commit, evList) ++ block := MakeBlock(h, makeData(txs), commit, evList) + block.ProposerAddress = valSet.GetProposer().Address + 
tc.malleateBlock(block) + err = block.ValidateBasic() +@@ -106,13 +100,13 @@ func TestBlockValidateBasic(t *testing.T) { + + func TestBlockHash(t *testing.T) { + assert.Nil(t, (*Block)(nil).Hash()) +- assert.Nil(t, MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil, nil).Hash()) ++ assert.Nil(t, MakeBlock(int64(3), makeData([]Tx{Tx("Hello World")}), nil, nil).Hash()) + } + + func TestBlockMakePartSet(t *testing.T) { + assert.Nil(t, (*Block)(nil).MakePartSet(2)) + +- partSet := MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil, nil).MakePartSet(1024) ++ partSet := MakeBlock(int64(3), makeData([]Tx{Tx("Hello World")}), nil, nil).MakePartSet(1024) + assert.NotNil(t, partSet) + assert.EqualValues(t, 1, partSet.Total()) + } +@@ -130,7 +124,7 @@ func TestBlockMakePartSetWithEvidence(t *testing.T) { + ev := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain") + evList := []Evidence{ev} + +- partSet := MakeBlock(h, []Tx{Tx("Hello World")}, commit, evList).MakePartSet(512) ++ partSet := MakeBlock(h, makeData([]Tx{Tx("Hello World")}), commit, evList).MakePartSet(512) + assert.NotNil(t, partSet) + assert.EqualValues(t, 4, partSet.Total()) + } +@@ -147,7 +141,7 @@ func TestBlockHashesTo(t *testing.T) { + ev := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain") + evList := []Evidence{ev} + +- block := MakeBlock(h, []Tx{Tx("Hello World")}, commit, evList) ++ block := MakeBlock(h, makeData([]Tx{Tx("Hello World")}), commit, evList) + block.ValidatorsHash = valSet.Hash() + assert.False(t, block.HashesTo([]byte{})) + assert.False(t, block.HashesTo([]byte("something else"))) +@@ -155,7 +149,7 @@ func TestBlockHashesTo(t *testing.T) { + } + + func TestBlockSize(t *testing.T) { +- size := MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil, nil).Size() ++ size := MakeBlock(int64(3), makeData([]Tx{Tx("Hello World")}), nil, nil).Size() + if size <= 0 { + t.Fatal("Size of the block is zero or negative") + } +@@ -166,7 +160,7 @@ 
func TestBlockString(t *testing.T) { + assert.Equal(t, "nil-Block", (*Block)(nil).StringIndented("")) + assert.Equal(t, "nil-Block", (*Block)(nil).StringShort()) + +- block := MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil, nil) ++ block := MakeBlock(int64(3), makeData([]Tx{Tx("Hello World")}), nil, nil) + assert.NotEqual(t, "nil-Block", block.String()) + assert.NotEqual(t, "nil-Block", block.StringIndented("")) + assert.NotEqual(t, "nil-Block", block.StringShort()) +@@ -201,7 +195,7 @@ func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) BlockID { + var nilBytes []byte + + // This follows RFC-6962, i.e. `echo -n ” | sha256sum` +-var emptyBytes = []byte{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, ++var emptyBytes = []byte{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, //nolint:unused + 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, + 0x78, 0x52, 0xb8, 0x55} + +@@ -210,11 +204,6 @@ func TestNilHeaderHashDoesntCrash(t *testing.T) { + assert.Equal(t, nilBytes, []byte((new(Header)).Hash())) + } + +-func TestNilDataHashDoesntCrash(t *testing.T) { +- assert.Equal(t, emptyBytes, []byte((*Data)(nil).Hash())) +- assert.Equal(t, emptyBytes, []byte(new(Data).Hash())) +-} +- + func TestCommit(t *testing.T) { + lastID := makeBlockIDRandom() + h := int64(3) +@@ -629,17 +618,16 @@ func TestBlockIDValidateBasic(t *testing.T) { + func TestBlockProtoBuf(t *testing.T) { + h := cmtrand.Int63() + c1 := randCommit(time.Now()) +- b1 := MakeBlock(h, []Tx{Tx([]byte{1})}, &Commit{Signatures: []CommitSig{}}, []Evidence{}) ++ b1 := MakeBlock(h, makeData([]Tx{Tx([]byte{1})}), &Commit{Signatures: []CommitSig{}}, []Evidence{}) + b1.ProposerAddress = cmtrand.Bytes(crypto.AddressSize) + +- b2 := MakeBlock(h, []Tx{Tx([]byte{1})}, c1, []Evidence{}) +- b2.ProposerAddress = cmtrand.Bytes(crypto.AddressSize) + evidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) + evi := 
NewMockDuplicateVoteEvidence(h, evidenceTime, "block-test-chain") +- b2.Evidence = EvidenceData{Evidence: EvidenceList{evi}} +- b2.EvidenceHash = b2.Evidence.Hash() ++ b2 := MakeBlock(h, makeData([]Tx{Tx([]byte{1})}), c1, []Evidence{evi}) ++ b2.ProposerAddress = cmtrand.Bytes(crypto.AddressSize) ++ b2.Evidence.ByteSize() + +- b3 := MakeBlock(h, []Tx{}, c1, []Evidence{}) ++ b3 := MakeBlock(h, makeData([]Tx{}), c1, []Evidence{}) + b3.ProposerAddress = cmtrand.Bytes(crypto.AddressSize) + testCases := []struct { + msg string +@@ -664,7 +652,7 @@ func TestBlockProtoBuf(t *testing.T) { + if tc.expPass2 { + require.NoError(t, err, tc.msg) + require.EqualValues(t, tc.b1.Header, block.Header, tc.msg) +- require.EqualValues(t, tc.b1.Data, block.Data, tc.msg) ++ require.EqualValues(t, tc.b1.Data, block.Data, tc.msg) // todo + require.EqualValues(t, tc.b1.Evidence.Evidence, block.Evidence.Evidence, tc.msg) + require.EqualValues(t, *tc.b1.LastCommit, *block.LastCommit, tc.msg) + } else { +@@ -673,26 +661,40 @@ func TestBlockProtoBuf(t *testing.T) { + } + } + +-func TestDataProtoBuf(t *testing.T) { +- data := &Data{Txs: Txs{Tx([]byte{1}), Tx([]byte{2}), Tx([]byte{3})}} +- data2 := &Data{Txs: Txs{}} +- testCases := []struct { +- msg string +- data1 *Data +- expPass bool +- }{ +- {"success", data, true}, +- {"success data2", data2, true}, ++func TestBlockDataProtobuf(t *testing.T) { ++ type test struct { ++ name string ++ txs Txs ++ blobs []Blob + } +- for _, tc := range testCases { +- protoData := tc.data1.ToProto() +- d, err := DataFromProto(&protoData) +- if tc.expPass { +- require.NoError(t, err, tc.msg) +- require.EqualValues(t, tc.data1, &d, tc.msg) +- } else { +- require.Error(t, err, tc.msg) +- } ++ tests := []test{ ++ { ++ name: "only txs", txs: Txs([]Tx{stdbytes.Repeat([]byte{1}, 200)}), ++ }, ++ { ++ name: "everything", ++ txs: Txs([]Tx{stdbytes.Repeat([]byte{1}, 200)}), ++ blobs: []Blob{ ++ { ++ NamespaceID: []byte{8, 7, 6, 5, 4, 3, 2, 1}, ++ Data: 
stdbytes.Repeat([]byte{3, 2, 1, 0}, 100), ++ }, ++ { ++ NamespaceID: []byte{1, 2, 3, 4, 5, 6, 7, 8}, ++ Data: stdbytes.Repeat([]byte{1, 2, 3}, 100), ++ }, ++ }, ++ }, ++ } ++ ++ for _, tt := range tests { ++ d := Data{Txs: tt.txs} ++ firstHash := d.Hash() ++ pd := d.ToProto() ++ d2, err := DataFromProto(&pd) ++ require.NoError(t, err) ++ secondHash := d2.Hash() ++ assert.Equal(t, firstHash, secondHash, tt.name) + } + } + +@@ -854,3 +856,27 @@ func TestBlockIDEquals(t *testing.T) { + assert.True(t, blockIDEmpty.Equals(blockIDEmpty)) + assert.False(t, blockIDEmpty.Equals(blockIDDifferent)) + } ++ ++func TestBlob(t *testing.T) { ++ namespaceVersion := uint8(0) ++ namespaceID := stdbytes.Repeat([]byte{0x01}, 28) ++ data := []byte("data") ++ shareVersion := uint8(0) ++ ++ blob := Blob{ ++ NamespaceVersion: namespaceVersion, ++ NamespaceID: namespaceID, ++ Data: data, ++ ShareVersion: shareVersion, ++ } ++ ++ t.Run("blob.Namespace() returns encoded namespace", func(t *testing.T) { ++ got := blob.Namespace() ++ want := []byte{ ++ 0, // namespace version ++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // namespace ID ++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // namespace ID ++ } ++ assert.Equal(t, want, got) ++ }) ++} diff --git a/patches/types/event_bus_test.go.patch b/patches/types/event_bus_test.go.patch new file mode 100644 index 00000000000..c6b0c1515f1 --- /dev/null +++ b/patches/types/event_bus_test.go.patch @@ -0,0 +1,119 @@ +diff --git a/types/event_bus_test.go b/types/event_bus_test.go +index 3230df75b..5738509aa 100644 +--- a/types/event_bus_test.go ++++ b/types/event_bus_test.go +@@ -65,6 +65,57 @@ func TestEventBusPublishEventTx(t *testing.T) { + } + } + ++func TestEventBusPublishEventIndexWrapper(t *testing.T) { ++ eventBus := NewEventBus() ++ err := eventBus.Start() ++ require.NoError(t, err) ++ t.Cleanup(func() { ++ if err := eventBus.Stop(); err != nil { ++ t.Error(err) ++ } ++ }) ++ ++ tx := Tx("foo") ++ require.NoError(t, err) ++ ++ result := 
abci.ResponseDeliverTx{ ++ Data: []byte("bar"), ++ Events: []abci.Event{ ++ {Type: "testType", Attributes: []abci.EventAttribute{{Key: []byte("baz"), Value: []byte("1")}}}, ++ }, ++ } ++ ++ // PublishEventTx adds 3 composite keys, so the query below should work ++ query := fmt.Sprintf("tm.event='Tx' AND tx.height=1 AND tx.hash='%X' AND testType.baz=1", tx.Hash()) ++ txsSub, err := eventBus.Subscribe(context.Background(), "test", cmtquery.MustParse(query)) ++ require.NoError(t, err) ++ ++ done := make(chan struct{}) ++ go func() { ++ msg := <-txsSub.Out() ++ edt := msg.Data().(EventDataTx) ++ assert.Equal(t, int64(1), edt.Height) ++ assert.Equal(t, uint32(0), edt.Index) ++ assert.EqualValues(t, tx, edt.Tx) ++ assert.Equal(t, result, edt.Result) ++ close(done) ++ }() ++ ++ err = eventBus.PublishEventTx(EventDataTx{abci.TxResult{ ++ Height: 1, ++ Index: 0, ++ Tx: tx, ++ Result: result, ++ }}) ++ assert.NoError(t, err) ++ ++ select { ++ case <-done: ++ case <-time.After(1 * time.Second): ++ t.Fatal("did not receive a transaction after 1 sec.") ++ } ++} ++ + func TestEventBusPublishEventNewBlock(t *testing.T) { + eventBus := NewEventBus() + err := eventBus.Start() +@@ -75,7 +126,8 @@ func TestEventBusPublishEventNewBlock(t *testing.T) { + } + }) + +- block := MakeBlock(0, []Tx{}, nil, []Evidence{}) ++ block := MakeBlock(0, makeData([]Tx{}), nil, []Evidence{}) ++ // blockID := BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(BlockPartSizeBytes).Header()} + resultBeginBlock := abci.ResponseBeginBlock{ + Events: []abci.Event{ + {Type: "testType", Attributes: []abci.EventAttribute{{Key: []byte("baz"), Value: []byte("1")}}}, +@@ -92,28 +144,21 @@ func TestEventBusPublishEventNewBlock(t *testing.T) { + blocksSub, err := eventBus.Subscribe(context.Background(), "test", cmtquery.MustParse(query)) + require.NoError(t, err) + +- done := make(chan struct{}) +- go func() { +- msg := <-blocksSub.Out() +- edt := msg.Data().(EventDataNewBlock) +- assert.Equal(t, block, 
edt.Block) +- assert.Equal(t, resultBeginBlock, edt.ResultBeginBlock) +- assert.Equal(t, resultEndBlock, edt.ResultEndBlock) +- close(done) +- }() +- + err = eventBus.PublishEventNewBlock(EventDataNewBlock{ + Block: block, + ResultBeginBlock: resultBeginBlock, + ResultEndBlock: resultEndBlock, + }) +- assert.NoError(t, err) + +- select { +- case <-done: +- case <-time.After(1 * time.Second): +- t.Fatal("did not receive a block after 1 sec.") +- } ++ done := make(chan struct{}) ++ // go func() { ++ msg := <-blocksSub.Out() ++ edt := msg.Data().(EventDataNewBlock) ++ assert.Equal(t, block, edt.Block) ++ assert.Equal(t, resultBeginBlock, edt.ResultBeginBlock) ++ assert.Equal(t, resultEndBlock, edt.ResultEndBlock) ++ close(done) ++ assert.NoError(t, err) + } + + func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) { +@@ -234,7 +279,7 @@ func TestEventBusPublishEventNewBlockHeader(t *testing.T) { + } + }) + +- block := MakeBlock(0, []Tx{}, nil, []Evidence{}) ++ block := MakeBlock(0, makeData([]Tx{}), nil, []Evidence{}) + resultBeginBlock := abci.ResponseBeginBlock{ + Events: []abci.Event{ + {Type: "testType", Attributes: []abci.EventAttribute{{Key: []byte("baz"), Value: []byte("1")}}}, diff --git a/patches/types/part_set.go.patch b/patches/types/part_set.go.patch new file mode 100644 index 00000000000..dbae7c94a6e --- /dev/null +++ b/patches/types/part_set.go.patch @@ -0,0 +1,57 @@ +diff --git a/types/part_set.go b/types/part_set.go +index 4b1422789..010a7cc5e 100644 +--- a/types/part_set.go ++++ b/types/part_set.go +@@ -171,6 +171,7 @@ type PartSet struct { + // CONTRACT: partSize is greater than zero. + func NewPartSetFromData(data []byte, partSize uint32) *PartSet { + // divide data into 4kb parts. 
++ //nolint:gosec + total := (uint32(len(data)) + partSize - 1) / partSize + parts := make([]*Part, total) + partsBytes := make([][]byte, total) +@@ -270,6 +271,27 @@ func (ps *PartSet) Total() uint32 { + } + + func (ps *PartSet) AddPart(part *Part) (bool, error) { ++ if part == nil { ++ return false, fmt.Errorf("nil part") ++ } ++ ++ // The proof should be compatible with the number of parts. ++ if part.Proof.Total != int64(ps.total) { ++ return false, ErrPartSetInvalidProof ++ } ++ ++ // Check hash proof ++ if part.Proof.Verify(ps.Hash(), part.Bytes) != nil { ++ return false, ErrPartSetInvalidProof ++ } ++ ++ return ps.AddPartWithoutProof(part) ++} ++ ++func (ps *PartSet) AddPartWithoutProof(part *Part) (bool, error) { ++ if part == nil { ++ return false, fmt.Errorf("nil part") ++ } + if ps == nil { + return false, nil + } +@@ -286,16 +308,6 @@ func (ps *PartSet) AddPart(part *Part) (bool, error) { + return false, nil + } + +- // The proof should be compatible with the number of parts. 
+- if part.Proof.Total != int64(ps.total) { +- return false, ErrPartSetInvalidProof +- } +- +- // Check hash proof +- if part.Proof.Verify(ps.Hash(), part.Bytes) != nil { +- return false, ErrPartSetInvalidProof +- } +- + // Add part + ps.parts[part.Index] = part + ps.partsBitArray.SetIndex(int(part.Index), true) diff --git a/patches/types/protobuf.go.patch b/patches/types/protobuf.go.patch new file mode 100644 index 00000000000..021ae5e9565 --- /dev/null +++ b/patches/types/protobuf.go.patch @@ -0,0 +1,12 @@ +diff --git a/types/protobuf.go b/types/protobuf.go +index 489529c54..5362e673a 100644 +--- a/types/protobuf.go ++++ b/types/protobuf.go +@@ -105,6 +105,7 @@ func (tm2pb) ConsensusParams(params *cmtproto.ConsensusParams) *abci.ConsensusPa + }, + Evidence: ¶ms.Evidence, + Validator: ¶ms.Validator, ++ Version: ¶ms.Version, + } + } + diff --git a/patches/types/share_proof.go.patch b/patches/types/share_proof.go.patch new file mode 100644 index 00000000000..2e16bc65b8f --- /dev/null +++ b/patches/types/share_proof.go.patch @@ -0,0 +1,140 @@ +diff --git a/types/share_proof.go b/types/share_proof.go +new file mode 100644 +index 000000000..af28f8150 +--- /dev/null ++++ b/types/share_proof.go +@@ -0,0 +1,134 @@ ++package types ++ ++import ( ++ "errors" ++ "fmt" ++ "math" ++ ++ "github.com/celestiaorg/nmt" ++ "github.com/tendermint/tendermint/pkg/consts" ++ "github.com/tendermint/tendermint/proto/tendermint/crypto" ++ tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ++) ++ ++// ShareProof is an NMT proof that a set of shares exist in a set of rows and a ++// Merkle proof that those rows exist in a Merkle tree with a given data root. ++type ShareProof struct { ++ // Data are the raw shares that are being proven. ++ Data [][]byte `json:"data"` ++ // ShareProofs are NMT proofs that the shares in Data exist in a set of ++ // rows. There will be one ShareProof per row that the shares occupy. 
++ ShareProofs []*tmproto.NMTProof `json:"share_proofs"` ++ // NamespaceID is the namespace id of the shares being proven. This ++ // namespace id is used when verifying the proof. If the namespace id doesn't ++ // match the namespace of the shares, the proof will fail verification. ++ NamespaceID []byte `json:"namespace_id"` ++ RowProof RowProof `json:"row_proof"` ++ NamespaceVersion uint32 `json:"namespace_version"` ++} ++ ++func (sp ShareProof) ToProto() tmproto.ShareProof { ++ // TODO consider extracting a ToProto function for RowProof ++ rowRoots := make([][]byte, len(sp.RowProof.RowRoots)) ++ rowProofs := make([]*crypto.Proof, len(sp.RowProof.Proofs)) ++ for i := range sp.RowProof.RowRoots { ++ rowRoots[i] = sp.RowProof.RowRoots[i].Bytes() ++ rowProofs[i] = sp.RowProof.Proofs[i].ToProto() ++ } ++ pbtp := tmproto.ShareProof{ ++ Data: sp.Data, ++ ShareProofs: sp.ShareProofs, ++ NamespaceId: sp.NamespaceID, ++ RowProof: &tmproto.RowProof{ ++ RowRoots: rowRoots, ++ Proofs: rowProofs, ++ StartRow: sp.RowProof.StartRow, ++ EndRow: sp.RowProof.EndRow, ++ }, ++ NamespaceVersion: sp.NamespaceVersion, ++ } ++ ++ return pbtp ++} ++ ++// ShareProofFromProto creates a ShareProof from a proto message. ++// Expects the proof to be pre-validated. ++func ShareProofFromProto(pb tmproto.ShareProof) (ShareProof, error) { ++ return ShareProof{ ++ RowProof: RowProofFromProto(pb.RowProof), ++ Data: pb.Data, ++ ShareProofs: pb.ShareProofs, ++ NamespaceID: pb.NamespaceId, ++ NamespaceVersion: pb.NamespaceVersion, ++ }, nil ++} ++ ++// Validate runs basic validations on the proof then verifies if it is consistent. ++// It returns nil if the proof is valid. Otherwise, it returns a sensible error. ++// The `root` is the block data root that the shares to be proven belong to. ++// Note: these proofs are tested on the app side. 
++func (sp ShareProof) Validate(root []byte) error { ++ numberOfSharesInProofs := int32(0) ++ for _, proof := range sp.ShareProofs { ++ // the range is not inclusive from the left. ++ numberOfSharesInProofs += proof.End - proof.Start ++ } ++ ++ if len(sp.ShareProofs) != len(sp.RowProof.RowRoots) { ++ return fmt.Errorf("the number of share proofs %d must equal the number of row roots %d", len(sp.ShareProofs), len(sp.RowProof.RowRoots)) ++ ++ } ++ if len(sp.Data) != int(numberOfSharesInProofs) { ++ return fmt.Errorf("the number of shares %d must equal the number of shares in share proofs %d", len(sp.Data), numberOfSharesInProofs) ++ } ++ ++ for _, proof := range sp.ShareProofs { ++ if proof.Start < 0 { ++ return errors.New("proof index cannot be negative") ++ } ++ if (proof.End - proof.Start) <= 0 { ++ return errors.New("proof total must be positive") ++ } ++ } ++ ++ if err := sp.RowProof.Validate(root); err != nil { ++ return err ++ } ++ ++ if ok := sp.VerifyProof(); !ok { ++ return errors.New("share proof failed to verify") ++ } ++ ++ return nil ++} ++ ++func (sp ShareProof) VerifyProof() bool { ++ cursor := int32(0) ++ for i, proof := range sp.ShareProofs { ++ nmtProof := nmt.NewInclusionProof( ++ int(proof.Start), ++ int(proof.End), ++ proof.Nodes, ++ true, ++ ) ++ sharesUsed := proof.End - proof.Start ++ if sp.NamespaceVersion > math.MaxUint8 { ++ return false ++ } ++ // Consider extracting celestia-app's namespace package. We can't use it ++ // here because that would introduce a circulcar import. ++ //nolint:gosec ++ namespace := append([]byte{uint8(sp.NamespaceVersion)}, sp.NamespaceID...) 
++ valid := nmtProof.VerifyInclusion( ++ consts.NewBaseHashFunc(), ++ namespace, ++ sp.Data[cursor:sharesUsed+cursor], ++ sp.RowProof.RowRoots[i], ++ ) ++ if !valid { ++ return false ++ } ++ cursor += sharesUsed ++ } ++ return true ++} diff --git a/patches/types/share_proof_test.go.patch b/patches/types/share_proof_test.go.patch new file mode 100644 index 00000000000..3ed9b07b922 --- /dev/null +++ b/patches/types/share_proof_test.go.patch @@ -0,0 +1,100 @@ +diff --git a/types/share_proof_test.go b/types/share_proof_test.go +new file mode 100644 +index 000000000..79c07b3d2 +--- /dev/null ++++ b/types/share_proof_test.go +@@ -0,0 +1,94 @@ ++package types ++ ++import ( ++ "testing" ++ ++ "github.com/stretchr/testify/assert" ++ "github.com/tendermint/tendermint/pkg/consts" ++ "github.com/tendermint/tendermint/proto/tendermint/types" ++) ++ ++func TestShareProofValidate(t *testing.T) { ++ type testCase struct { ++ name string ++ sp ShareProof ++ root []byte ++ wantErr bool ++ } ++ ++ testCases := []testCase{ ++ { ++ name: "empty share proof returns error", ++ sp: ShareProof{}, ++ root: root, ++ wantErr: true, ++ }, ++ { ++ name: "valid share proof returns no error", ++ sp: validShareProof(), ++ root: root, ++ wantErr: false, ++ }, ++ { ++ name: "share proof with mismatched number of share proofs returns error", ++ sp: mismatchedShareProofs(), ++ root: root, ++ wantErr: true, ++ }, ++ { ++ name: "share proof with mismatched number of shares returns error", ++ sp: mismatchedShares(), ++ root: root, ++ wantErr: true, ++ }, ++ { ++ name: "valid share proof with incorrect root returns error", ++ sp: validShareProof(), ++ root: incorrectRoot, ++ wantErr: true, ++ }, ++ } ++ ++ for _, tc := range testCases { ++ t.Run(tc.name, func(t *testing.T) { ++ got := tc.sp.Validate(tc.root) ++ if tc.wantErr { ++ assert.Error(t, got) ++ return ++ } ++ assert.NoError(t, got) ++ }) ++ } ++} ++ ++func mismatchedShareProofs() ShareProof { ++ sp := validShareProof() ++ sp.ShareProofs = 
[]*types.NMTProof{} ++ return sp ++} ++ ++func mismatchedShares() ShareProof { ++ sp := validShareProof() ++ sp.Data = [][]byte{} ++ return sp ++} ++ ++// validShareProof returns a valid ShareProof for a single share. This test data ++// was copied from celestia-app's pkg/proof/proof_test.go ++// TestNewShareInclusionProof: "1 transaction share" ++func validShareProof() ShareProof { ++ return ShareProof{ ++ Data: [][]uint8{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x0, 0x0, 0x62, 0xc, 0x0, 0x0, 0x0, 0x2a, 0xf4, 0x3, 0xff, 0xe8, 0x78, 0x6c, 0x48, 0x84, 0x9, 0x5, 0x5, 0x79, 0x8f, 0x29, 0x67, 0xa2, 0xe1, 0x8d, 0x2f, 0xdc, 0xf2, 0x60, 0xe4, 0x62, 0x71, 0xf9, 0xae, 0x92, 0x83, 0x3a, 0x7f, 0xf3, 0xc6, 0x14, 0xb4, 0x17, 0xfc, 0x64, 0x4b, 0x89, 0x18, 0x5e, 0x22, 0x4b, 0x0, 0x82, 0xeb, 0x67, 0x5b, 0x51, 0x43, 0x4e, 0xc3, 0x42, 0x48, 0xc1, 0xfd, 0x88, 0x71, 0xcb, 0xee, 0xf3, 0x92, 0x20, 0x9c, 0x15, 0xc0, 0x4f, 0x11, 0xa4, 0x5, 0xd0, 0xdf, 0xb8, 0x25, 0x60, 0x58, 0xae, 0x2, 0x2d, 0x78, 0xf8, 0x1f, 0x67, 0xeb, 0x88, 0x58, 0x5d, 0x5a, 0x4a, 0x74, 0xe7, 0xdf, 0x38, 0x6a, 0xa4, 0x3f, 0x62, 0xd6, 0x3d, 0x17, 0xd2, 0x7e, 0x92, 0x9c, 0x4a, 0xd0, 0x2b, 0x55, 0x49, 0x3b, 0xa7, 0x5a, 0x29, 0xd5, 0x6b, 0x91, 0xde, 0xfe, 0x5b, 0x39, 0x88, 0xc5, 0xbb, 0x91, 0x16, 0xf6, 0x47, 0xec, 0x8, 0x3, 0x2a, 0x1e, 0x6e, 0x4b, 0x27, 0x34, 0x90, 0x38, 0x46, 0x6e, 0xce, 0x35, 0xdf, 0xd6, 0x1e, 0x1a, 0xf2, 0xf0, 0x6e, 0xa0, 0xfe, 0x84, 0x51, 0xf2, 0xc1, 0x32, 0xd, 0x89, 0x17, 0x5f, 0x4c, 0xab, 0x81, 0xd4, 0x44, 0x5a, 0x55, 0xdb, 0xe5, 0xa7, 0x3c, 0x42, 0xb6, 0xb3, 0x20, 0xc4, 0x81, 0x75, 0x8, 0x5e, 0x39, 0x21, 0x51, 0x4c, 0x93, 0x2c, 0x7c, 0xb3, 0xd0, 0x37, 0xf9, 0x6a, 0xab, 0x93, 0xf0, 0x3f, 0xa2, 0x44, 0x1f, 0x63, 0xae, 0x96, 0x4e, 0x26, 0x7a, 0x1f, 0x18, 0x5b, 0x28, 0x4d, 0x24, 0xe8, 0x98, 0x56, 0xbf, 0x98, 0x44, 0x23, 0x17, 0x85, 0x22, 0x38, 0x56, 0xeb, 0xf3, 0x4e, 0x87, 
0x1e, 0xc1, 0x51, 0x6, 0x71, 0xa7, 0xa9, 0x45, 0xef, 0xc7, 0x89, 0x5c, 0xed, 0x68, 0xbd, 0x43, 0x2f, 0xe6, 0xf1, 0x56, 0xef, 0xf, 0x4f, 0x57, 0xaa, 0x8c, 0x5c, 0xbd, 0x21, 0xb4, 0xaa, 0x15, 0x71, 0x6a, 0xdc, 0x12, 0xda, 0xee, 0xd9, 0x19, 0xbc, 0x17, 0xa2, 0x49, 0xd6, 0xbe, 0xd2, 0xc6, 0x6a, 0xbc, 0x53, 0xe4, 0x28, 0xd4, 0xeb, 0xe9, 0x9b, 0xd6, 0x85, 0x89, 0xb9, 0xe8, 0xa2, 0x70, 0x40, 0xad, 0xb1, 0x1a, 0xa0, 0xb1, 0xb5, 0xee, 0xde, 0x6d, 0xa9, 0x2a, 0x4b, 0x6, 0xd1, 0xfa, 0x67, 0x13, 0xac, 0x7d, 0x9a, 0x81, 0xc6, 0xef, 0x78, 0x42, 0x18, 0xf, 0x7b, 0xaf, 0x50, 0xa7, 0xdb, 0xb6, 0xde, 0xab, 0x3, 0xdc, 0x5, 0x14, 0x5f, 0x9, 0xdb, 0x81, 0xe3, 0x72, 0x2, 0x61, 0x23, 0x77, 0x12, 0x82, 0xfc, 0x9, 0x43, 0xfb, 0xd6, 0x38, 0x53, 0xfd, 0x77, 0xe, 0x17, 0xcc, 0x93, 0x5e, 0x4e, 0x60, 0x87, 0xda, 0xbd, 0xfc, 0x86, 0xdd, 0xb1, 0xd6, 0x74, 0x41, 0x71, 0x24, 0xda, 0x1, 0x3f, 0x11, 0x17, 0x9e, 0x54, 0x66, 0xb6, 0xc4, 0x9a, 0xb8, 0x59, 0xb9, 0x13, 0x4e, 0xed, 0x8, 0xe5, 0x99, 0x27, 0xa0, 0x6b, 0x1, 0x6c, 0x8a, 0xbf, 0x20, 0x3d, 0x75, 0xd5, 0x7e, 0xea, 0xe0, 0xef, 0x7f, 0xfe, 0xa8, 0xaf, 0x76, 0xad, 0x30, 0x55, 0x65, 0x9d, 0xbe, 0x30, 0x32, 0x9f, 0x3b, 0xb7, 0xa1, 0x5c, 0x98, 0xef, 0xe1, 0xe4, 0x33, 0x1a, 0x56, 0x5a, 0x22, 0xd1, 0x38, 0x9b, 0xee, 0xfa, 0x11, 0x6f, 0xa7, 0xd7, 0x6, 0x17, 0xdc, 0xc6, 0x4d, 0xbd, 0x3f, 0x3c, 0xe6, 0xac, 0x54, 0x70, 0xda, 0x11, 0xdb, 0x87, 0xe2, 0xc2, 0x26, 0x7e, 0x48, 0x3b, 0xda, 0xf4, 0x98, 0x3c, 0x51}}, ++ ShareProofs: []*types.NMTProof{ ++ { ++ Start: 0, ++ End: 1, ++ Nodes: [][]uint8{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x27, 0x3a, 0x5f, 0x16, 0x36, 0xa3, 0xce, 0x1c, 0x17, 0x58, 0x7e, 0xb8, 0xaa, 0xc8, 0x5e, 0x58, 0x9e, 0xa9, 0x36, 0x3c, 0x3d, 0x5c, 0xb5, 0xc2, 
0xf0, 0x26, 0x1a, 0x9a, 0x13, 0xcd, 0x59, 0xb2}, {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x55, 0xe5, 0x43, 0x2e, 0xa2, 0x32, 0x84, 0x75, 0x8a, 0x88, 0x8d, 0x7c, 0x27, 0xdc, 0x2e, 0x13, 0x1e, 0x44, 0xc4, 0xe7, 0x51, 0x64, 0xe5, 0xe4, 0xf4, 0x7d, 0x4, 0xb8, 0x10, 0x3b, 0x72, 0xa5}, {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x4d, 0xeb, 0x2a, 0x3c, 0x56, 0x98, 0x49, 0xdb, 0x61, 0x54, 0x12, 0xee, 0xb, 0xeb, 0x29, 0xf8, 0xc9, 0x71, 0x9c, 0xf7, 0x28, 0xbb, 0x7a, 0x85, 0x70, 0xa1, 0x81, 0xc8, 0x5f, 0x6a, 0x63, 0x59}, {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xf0, 0xb5, 0x59, 0x71, 0xba, 0x6a, 0xf, 0xd1, 0xf, 0x2e, 0x79, 0xd4, 0xdc, 0xfb, 0x93, 0x94, 0x58, 0x3d, 0xd9, 0xef, 0xe2, 0x2b, 0xd4, 0xe3, 0x71, 0xbd, 0xd4, 0xd9, 0xc2, 0xc4, 0xef, 0xd1}, {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x5, 0x8f, 0xf0, 0x4e, 0x81, 0x8e, 0xc7, 0x2f, 
0x35, 0xec, 0x9, 0xdf, 0xf1, 0x41, 0xd5, 0x5a, 0x2f, 0xa3, 0xa0, 0xe5, 0x8d, 0x83, 0x70, 0xf2, 0x11, 0xea, 0xc2, 0xa3, 0x4a, 0x7a, 0xc5, 0x17}, {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x96, 0x6d, 0x3f, 0x7b, 0xf5, 0xef, 0x38, 0x4b, 0xa5, 0x38, 0x98, 0x7e, 0x3b, 0x4e, 0x12, 0x21, 0xcb, 0xd7, 0xff, 0xd6, 0xf3, 0x7d, 0xf, 0x8a, 0x57, 0xfe, 0x5, 0x5, 0xb6, 0x62, 0xa6, 0xae}}, ++ LeafHash: []uint8(nil), ++ }, ++ }, ++ NamespaceID: consts.TxNamespaceID, ++ RowProof: validRowProof(), ++ NamespaceVersion: uint32(0), ++ } ++} diff --git a/patches/types/test_util.go.patch b/patches/types/test_util.go.patch new file mode 100644 index 00000000000..379ebaa393f --- /dev/null +++ b/patches/types/test_util.go.patch @@ -0,0 +1,58 @@ +diff --git a/types/test_util.go b/types/test_util.go +index 51e321cdb..7eea6f265 100644 +--- a/types/test_util.go ++++ b/types/test_util.go +@@ -5,8 +5,6 @@ import ( + "time" + + cmtproto "github.com/tendermint/tendermint/proto/tendermint/types" +- cmtversion "github.com/tendermint/tendermint/proto/tendermint/version" +- "github.com/tendermint/tendermint/version" + ) + + func MakeCommit(blockID BlockID, height int64, round int32, +@@ -20,12 +18,13 @@ func MakeCommit(blockID BlockID, height int64, round int32, + } + vote := &Vote{ + ValidatorAddress: pubKey.Address(), +- ValidatorIndex: int32(i), +- Height: height, +- Round: round, +- Type: cmtproto.PrecommitType, +- BlockID: blockID, +- Timestamp: now, ++ //nolint:gosec ++ ValidatorIndex: int32(i), ++ Height: height, ++ Round: round, ++ Type: cmtproto.PrecommitType, ++ BlockID: blockID, ++ Timestamp: now, + } + + _, err = signAddVote(validators[i], vote, voteSet) +@@ -81,21 +80,8 @@ func MakeVote( 
+ return vote, nil + } + +-// MakeBlock returns a new block with an empty header, except what can be +-// computed from itself. +-// It populates the same set of fields validated by ValidateBasic. +-func MakeBlock(height int64, txs []Tx, lastCommit *Commit, evidence []Evidence) *Block { +- block := &Block{ +- Header: Header{ +- Version: cmtversion.Consensus{Block: version.BlockProtocol, App: 0}, +- Height: height, +- }, +- Data: Data{ +- Txs: txs, +- }, +- Evidence: EvidenceData{Evidence: evidence}, +- LastCommit: lastCommit, ++func makeData(txs []Tx) Data { ++ return Data{ ++ Txs: txs, + } +- block.fillHeader() +- return block + } diff --git a/patches/types/tx.go.patch b/patches/types/tx.go.patch new file mode 100644 index 00000000000..347d5722404 --- /dev/null +++ b/patches/types/tx.go.patch @@ -0,0 +1,167 @@ +diff --git a/types/tx.go b/types/tx.go +index ddb59dfe5..e6d750564 100644 +--- a/types/tx.go ++++ b/types/tx.go +@@ -6,9 +6,11 @@ import ( + "errors" + "fmt" + ++ "github.com/gogo/protobuf/proto" + "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/crypto/tmhash" + cmtbytes "github.com/tendermint/tendermint/libs/bytes" ++ "github.com/tendermint/tendermint/pkg/consts" + cmtproto "github.com/tendermint/tendermint/proto/tendermint/types" + ) + +@@ -25,12 +27,27 @@ type ( + TxKey [TxKeySize]byte + ) + +-// Hash computes the TMHASH hash of the wire encoded transaction. ++// Hash computes the TMHASH hash of the wire encoded transaction. It attempts to ++// unwrap the transaction if it is a IndexWrapper or a BlobTx. + func (tx Tx) Hash() []byte { ++ if indexWrapper, isIndexWrapper := UnmarshalIndexWrapper(tx); isIndexWrapper { ++ return tmhash.Sum(indexWrapper.Tx) ++ } ++ if blobTx, isBlobTx := UnmarshalBlobTx(tx); isBlobTx { ++ return tmhash.Sum(blobTx.Tx) ++ } + return tmhash.Sum(tx) + } + ++// Key returns the sha256 hash of the wire encoded transaction. 
It attempts to ++// unwrap the transaction if it is a BlobTx or a IndexWrapper. + func (tx Tx) Key() TxKey { ++ if blobTx, isBlobTx := UnmarshalBlobTx(tx); isBlobTx { ++ return sha256.Sum256(blobTx.Tx) ++ } ++ if indexWrapper, isIndexWrapper := UnmarshalIndexWrapper(tx); isIndexWrapper { ++ return sha256.Sum256(indexWrapper.Tx) ++ } + return sha256.Sum256(tx) + } + +@@ -39,6 +56,19 @@ func (tx Tx) String() string { + return fmt.Sprintf("Tx{%X}", []byte(tx)) + } + ++func (key TxKey) String() string { ++ return fmt.Sprintf("TxKey{%X}", key[:]) ++} ++ ++func TxKeyFromBytes(bytes []byte) (TxKey, error) { ++ if len(bytes) != TxKeySize { ++ return TxKey{}, fmt.Errorf("incorrect tx key size. Expected %d bytes, got %d", TxKeySize, len(bytes)) ++ } ++ var key TxKey ++ copy(key[:], bytes) ++ return key, nil ++} ++ + // Txs is a slice of Tx. + type Txs []Tx + +@@ -74,6 +104,27 @@ func (txs Txs) IndexByHash(hash []byte) int { + return -1 + } + ++// ToSliceOfBytes converts a Txs to slice of byte slices. ++// ++// NOTE: This method should become obsolete once Txs is switched to [][]byte. ++// ref: #2603 https://github.com/tendermint/tendermint/issues/2603 ++func (txs Txs) ToSliceOfBytes() [][]byte { ++ txBzs := make([][]byte, len(txs)) ++ for i := 0; i < len(txs); i++ { ++ txBzs[i] = txs[i] ++ } ++ return txBzs ++} ++ ++// ToTxs converts a raw slice of byte slices into a Txs type. ++func ToTxs(txs [][]byte) Txs { ++ txBzs := make(Txs, len(txs)) ++ for i := 0; i < len(txs); i++ { ++ txBzs[i] = txs[i] ++ } ++ return txBzs ++} ++ + // Proof returns a simple merkle proof for this node. + // Panics if i < 0 or i >= len(txs) + // TODO: optimize this! +@@ -158,3 +209,73 @@ func ComputeProtoSizeForTxs(txs []Tx) int64 { + pdData := data.ToProto() + return int64(pdData.Size()) + } ++ ++// UnmarshalIndexWrapper attempts to unmarshal the provided transaction into an ++// IndexWrapper transaction. It returns true if the provided transaction is an ++// IndexWrapper transaction. 
An IndexWrapper transaction is a transaction that contains ++// a MsgPayForBlob that has been wrapped with a share index. ++// ++// NOTE: protobuf sometimes does not throw an error if the transaction passed is ++// not a tmproto.IndexWrapper, since the protobuf definition for MsgPayForBlob is ++// kept in the app, we cannot perform further checks without creating an import ++// cycle. ++func UnmarshalIndexWrapper(tx Tx) (indexWrapper cmtproto.IndexWrapper, isIndexWrapper bool) { ++ // attempt to unmarshal into an IndexWrapper transaction ++ err := proto.Unmarshal(tx, &indexWrapper) ++ if err != nil { ++ return indexWrapper, false ++ } ++ if indexWrapper.TypeId != consts.ProtoIndexWrapperTypeID { ++ return indexWrapper, false ++ } ++ return indexWrapper, true ++} ++ ++// MarshalIndexWrapper creates a wrapped Tx that includes the original transaction ++// and the share index of the start of its blob. ++// ++// NOTE: must be unwrapped to be a viable sdk.Tx ++func MarshalIndexWrapper(tx Tx, shareIndexes ...uint32) (Tx, error) { ++ wTx := cmtproto.IndexWrapper{ ++ Tx: tx, ++ ShareIndexes: shareIndexes, ++ TypeId: consts.ProtoIndexWrapperTypeID, ++ } ++ return proto.Marshal(&wTx) ++} ++ ++// UnmarshalBlobTx attempts to unmarshal a transaction into blob transaction. If an ++// error is thrown, false is returned. ++func UnmarshalBlobTx(tx Tx) (bTx cmtproto.BlobTx, isBlob bool) { ++ err := bTx.Unmarshal(tx) ++ if err != nil { ++ return cmtproto.BlobTx{}, false ++ } ++ // perform some quick basic checks to prevent false positives ++ if bTx.TypeId != consts.ProtoBlobTxTypeID { ++ return bTx, false ++ } ++ if len(bTx.Blobs) == 0 { ++ return bTx, false ++ } ++ for _, b := range bTx.Blobs { ++ if len(b.NamespaceId) != consts.NamespaceIDSize { ++ return bTx, false ++ } ++ } ++ return bTx, true ++} ++ ++// MarshalBlobTx creates a BlobTx using a normal transaction and some number of ++// blobs. 
++// ++// NOTE: Any checks on the blobs or the transaction must be performed in the ++// application ++func MarshalBlobTx(tx []byte, blobs ...*cmtproto.Blob) (Tx, error) { ++ bTx := cmtproto.BlobTx{ ++ Tx: tx, ++ Blobs: blobs, ++ TypeId: consts.ProtoBlobTxTypeID, ++ } ++ return bTx.Marshal() ++} diff --git a/patches/types/tx_test.go.patch b/patches/types/tx_test.go.patch new file mode 100644 index 00000000000..a124501506e --- /dev/null +++ b/patches/types/tx_test.go.patch @@ -0,0 +1,190 @@ +diff --git a/types/tx_test.go b/types/tx_test.go +index 63624899d..758f00eb4 100644 +--- a/types/tx_test.go ++++ b/types/tx_test.go +@@ -8,7 +8,7 @@ import ( + "github.com/stretchr/testify/require" + + cmtrand "github.com/tendermint/tendermint/libs/rand" +- ctest "github.com/tendermint/tendermint/libs/test" ++ "github.com/tendermint/tendermint/pkg/consts" + cmtproto "github.com/tendermint/tendermint/proto/tendermint/types" + ) + +@@ -20,11 +20,6 @@ func makeTxs(cnt, size int) Txs { + return txs + } + +-func randInt(low, high int) int { +- off := cmtrand.Int() % (high - low) +- return low + off +-} +- + func TestTxIndex(t *testing.T) { + for i := 0; i < 20; i++ { + txs := makeTxs(15, 60) +@@ -51,101 +46,77 @@ func TestTxIndexByHash(t *testing.T) { + } + } + +-func TestValidTxProof(t *testing.T) { +- cases := []struct { +- txs Txs +- }{ +- {Txs{{1, 4, 34, 87, 163, 1}}}, +- {Txs{{5, 56, 165, 2}, {4, 77}}}, +- {Txs{Tx("foo"), Tx("bar"), Tx("baz")}}, +- {makeTxs(20, 5)}, +- {makeTxs(7, 81)}, +- {makeTxs(61, 15)}, +- } ++func TestUnmarshalIndexWrapper(t *testing.T) { ++ // perform a simple test for being unable to decode a non ++ // IndexWrapper transaction ++ tx := Tx{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0} ++ _, ok := UnmarshalIndexWrapper(tx) ++ require.False(t, ok) ++ ++ data := Data{Txs: []Tx{tx}} ++ ++ // create a proto message that used to be decoded when it shouldn't have ++ randomBlock := MakeBlock( ++ 1, ++ data, ++ &Commit{}, ++ []Evidence{}, ++ ) + +- for h, tc := range cases { 
+- txs := tc.txs +- root := txs.Hash() +- // make sure valid proof for every tx +- for i := range txs { +- tx := []byte(txs[i]) +- proof := txs.Proof(i) +- assert.EqualValues(t, i, proof.Proof.Index, "%d: %d", h, i) +- assert.EqualValues(t, len(txs), proof.Proof.Total, "%d: %d", h, i) +- assert.EqualValues(t, root, proof.RootHash, "%d: %d", h, i) +- assert.EqualValues(t, tx, proof.Data, "%d: %d", h, i) +- assert.EqualValues(t, txs[i].Hash(), proof.Leaf(), "%d: %d", h, i) +- assert.Nil(t, proof.Validate(root), "%d: %d", h, i) +- assert.NotNil(t, proof.Validate([]byte("foobar")), "%d: %d", h, i) +- +- // read-write must also work +- var ( +- p2 TxProof +- pb2 cmtproto.TxProof +- ) +- pbProof := proof.ToProto() +- bin, err := pbProof.Marshal() +- require.NoError(t, err) +- +- err = pb2.Unmarshal(bin) +- require.NoError(t, err) +- +- p2, err = TxProofFromProto(pb2) +- if assert.Nil(t, err, "%d: %d: %+v", h, i, err) { +- assert.Nil(t, p2.Validate(root), "%d: %d", h, i) +- } +- } +- } ++ protoB, err := randomBlock.ToProto() ++ require.NoError(t, err) ++ ++ rawBlock, err := protoB.Marshal() ++ require.NoError(t, err) ++ ++ // due to protobuf not actually requiring type compatibility ++ // we need to make sure that there is some check ++ _, ok = UnmarshalIndexWrapper(rawBlock) ++ require.False(t, ok) ++ ++ IndexWrapper, err := MarshalIndexWrapper(rawBlock, 0) ++ require.NoError(t, err) ++ ++ // finally, ensure that the unwrapped bytes are identical to the input ++ indexWrapper, ok := UnmarshalIndexWrapper(IndexWrapper) ++ require.True(t, ok) ++ require.Equal(t, rawBlock, indexWrapper.Tx) + } + +-func TestTxProofUnchangable(t *testing.T) { +- // run the other test a bunch... 
+- for i := 0; i < 40; i++ { +- testTxProofUnchangable(t) ++func TestUnmarshalBlobTx(t *testing.T) { ++ tx := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9} ++ namespaceOne := bytes.Repeat([]byte{1}, consts.NamespaceIDSize) ++ blob := cmtproto.Blob{ ++ NamespaceId: namespaceOne, ++ Data: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9}, ++ ShareVersion: 0, ++ NamespaceVersion: 0, + } +-} + +-func testTxProofUnchangable(t *testing.T) { +- // make some proof +- txs := makeTxs(randInt(2, 100), randInt(16, 128)) +- root := txs.Hash() +- i := randInt(0, len(txs)-1) +- proof := txs.Proof(i) +- +- // make sure it is valid to start with +- assert.Nil(t, proof.Validate(root)) +- pbProof := proof.ToProto() +- bin, err := pbProof.Marshal() ++ bTx, err := MarshalBlobTx(tx, &blob) + require.NoError(t, err) + +- // try mutating the data and make sure nothing breaks +- for j := 0; j < 500; j++ { +- bad := ctest.MutateByteSlice(bin) +- if !bytes.Equal(bad, bin) { +- assertBadProof(t, root, bad, proof) +- } +- } ++ resTx, isBlob := UnmarshalBlobTx(bTx) ++ require.True(t, isBlob) ++ ++ assert.Equal(t, tx, resTx.Tx) ++ require.Len(t, resTx.Blobs, 1) ++ assert.Equal(t, blob, *resTx.Blobs[0]) + } + +-// This makes sure that the proof doesn't deserialize into something valid. +-func assertBadProof(t *testing.T, root []byte, bad []byte, good TxProof) { ++// todo: add fuzzing ++func TestUnmarshalBlobTxFalsePositive(t *testing.T) { ++ tx := []byte("sender-193-0=D16B687628035716B1DA53BE1491A1B3D4CEA3AB=1025") ++ _, isBlob := UnmarshalBlobTx(tx) ++ require.False(t, isBlob) ++} + +- var ( +- proof TxProof +- pbProof cmtproto.TxProof +- ) +- err := pbProof.Unmarshal(bad) +- if err == nil { +- proof, err = TxProofFromProto(pbProof) +- if err == nil { +- err = proof.Validate(root) +- if err == nil { +- // XXX Fix simple merkle proofs so the following is *not* OK. +- // This can happen if we have a slightly different total (where the +- // path ends up the same). 
If it is something else, we have a real +- problem. +- assert.NotEqual(t, proof.Proof.Total, good.Proof.Total, "bad: %#v\ngood: %#v", proof, good) +- } +- } +- } ++func TestTxKeyFromBytes(t *testing.T) { ++ tx := Tx("hello") ++ key := tx.Key() ++ key2, err := TxKeyFromBytes(key[:]) ++ require.NoError(t, err) ++ require.Equal(t, key, key2) ++ _, err = TxKeyFromBytes([]byte("foo")) ++ require.Error(t, err) + } diff --git a/types/block.go b/types/block.go index 46c57bd48af..95bef96a046 100644 --- a/types/block.go +++ b/types/block.go @@ -1349,6 +1349,12 @@ type Data struct { // This means that block.AppHash does not include these txs. Txs Txs `json:"txs"` + // SquareSize is the size of the square after splitting all the block data + // into shares. The erasure data is discarded after generation, and keeping this + // value avoids unnecessarily regenerating all of the shares when returning + // proofs that some element was included in the block + SquareSize uint64 `json:"square_size"` + // Volatile hash cmtbytes.HexBytes } @@ -1396,6 +1402,10 @@ func (data *Data) ToProto() cmtproto.Data { tp.Txs = txBzs } + tp.SquareSize = data.SquareSize + + tp.Hash = data.hash + return *tp } @@ -1417,11 +1427,40 @@ func DataFromProto(dp *cmtproto.Data) (Data, error) { data.Txs = Txs{} } + data.SquareSize = dp.SquareSize + + data.hash = dp.Hash + return *data, nil } // ----------------------------------------------------------------------------- +type Blob struct { + // NamespaceVersion is the version of the namespace. Used in conjunction + // with NamespaceID to determine the namespace of this blob. + NamespaceVersion uint8 + + // NamespaceID defines the namespace ID of this blob. Used in conjunction + // with NamespaceVersion to determine the namespace of this blob. + NamespaceID []byte + + // Data is the actual data of the blob. + // (e.g. a block of a virtual sidechain). 
+ Data []byte + + // ShareVersion is the version of the share format that this blob should use + // when encoded into shares. + ShareVersion uint8 +} + +// Namespace returns the namespace of this blob encoded as a byte slice. +func (b Blob) Namespace() []byte { + return append([]byte{b.NamespaceVersion}, b.NamespaceID...) +} + +// ----------------------------------------------------------------------------- + // EvidenceData contains a list of evidence committed by a validator. type EvidenceData struct { Evidence EvidenceList `json:"evidence"` diff --git a/types/event_bus.go b/types/event_bus.go index bfce928d200..c892988befb 100644 --- a/types/event_bus.go +++ b/types/event_bus.go @@ -153,6 +153,10 @@ func (b *EventBus) PublishEventNewBlockEvents(data EventDataNewBlockEvents) erro return b.pubsub.PublishWithEvents(ctx, data, events) } +func (b *EventBus) PublishEventNewSignedBlock(data EventDataSignedBlock) error { + return b.Publish(EventSignedBlock, data) +} + func (b *EventBus) PublishEventNewBlockHeader(data EventDataNewBlockHeader) error { return b.Publish(EventNewBlockHeader, data) } @@ -259,6 +263,10 @@ func (NopEventBus) PublishEventNewBlockHeader(EventDataNewBlockHeader) error { return nil } +func (NopEventBus) PublishEventNewSignedBlock(data EventDataSignedBlock) error { + return nil +} + func (NopEventBus) PublishEventNewBlockEvents(EventDataNewBlockEvents) error { return nil } diff --git a/types/events.go b/types/events.go index edcfc837465..c09037c2fa3 100644 --- a/types/events.go +++ b/types/events.go @@ -19,6 +19,7 @@ const ( EventNewBlock = "NewBlock" EventNewBlockHeader = "NewBlockHeader" EventNewBlockEvents = "NewBlockEvents" + EventSignedBlock = "NewSignedBlock" EventNewEvidence = "NewEvidence" EventPendingTx = "PendingTx" EventTx = "Tx" @@ -49,6 +50,7 @@ type TMEventData interface { //nolint:revive // this empty interface angers the func init() { cmtjson.RegisterType(EventDataNewBlock{}, "tendermint/event/NewBlock") + 
cmtjson.RegisterType(EventDataSignedBlock{}, "tendermint/event/NewSignedBlock") cmtjson.RegisterType(EventDataNewBlockHeader{}, "tendermint/event/NewBlockHeader") cmtjson.RegisterType(EventDataNewBlockEvents{}, "tendermint/event/NewBlockEvents") cmtjson.RegisterType(EventDataNewEvidence{}, "tendermint/event/NewEvidence") @@ -70,6 +72,15 @@ type EventDataNewBlock struct { ResultFinalizeBlock abci.FinalizeBlockResponse `json:"result_finalize_block"` } +// EventDataSignedBlock contains all the information needed to verify +// the data committed in a block. +type EventDataSignedBlock struct { + Header Header `json:"header"` + Commit Commit `json:"commit"` + ValidatorSet ValidatorSet `json:"validator_set"` + Data Data `json:"data"` +} + type EventDataNewBlockHeader struct { Header Header `json:"header"` } @@ -160,6 +171,7 @@ var ( EventQueryNewEvidence = QueryForEvent(EventNewEvidence) EventQueryNewRound = QueryForEvent(EventNewRound) EventQueryNewRoundStep = QueryForEvent(EventNewRoundStep) + EventQueryNewSignedBlock = QueryForEvent(EventSignedBlock) EventQueryPolka = QueryForEvent(EventPolka) EventQueryRelock = QueryForEvent(EventRelock) EventQueryTimeoutPropose = QueryForEvent(EventTimeoutPropose) @@ -181,6 +193,7 @@ func QueryForEvent(eventType string) cmtpubsub.Query { // BlockEventPublisher publishes all block related events. 
type BlockEventPublisher interface { PublishEventNewBlock(block EventDataNewBlock) error + PublishEventNewSignedBlock(event EventDataSignedBlock) error PublishEventNewBlockHeader(header EventDataNewBlockHeader) error PublishEventNewBlockEvents(events EventDataNewBlockEvents) error PublishEventNewEvidence(evidence EventDataNewEvidence) error diff --git a/types/row_proof.go b/types/row_proof.go new file mode 100644 index 00000000000..fa5d53625c2 --- /dev/null +++ b/types/row_proof.go @@ -0,0 +1,86 @@ +package types + +import ( + "errors" + "fmt" + + tmproto "github.com/cometbft/cometbft/api/cometbft/types" // need to add proto changes + "github.com/cometbft/cometbft/crypto/merkle" + tmbytes "github.com/cometbft/cometbft/libs/bytes" +) + +// RowProof is a Merkle proof that a set of rows exist in a Merkle tree with a +// given data root. +type RowProof struct { + // RowRoots are the roots of the rows being proven. + RowRoots []tmbytes.HexBytes `json:"row_roots"` + // Proofs is a list of Merkle proofs where each proof proves that a row + // exists in a Merkle tree with a given data root. + Proofs []*merkle.Proof `json:"proofs"` + // StartRow the index of the start row. + // Note: currently, StartRow is not validated as part of the proof verification. + // If this field is used downstream, Validate(root) should be called along with + // extra validation depending on how it's used. + StartRow uint32 `json:"start_row"` + // EndRow the index of the end row. + // Note: currently, EndRow is not validated as part of the proof verification. + // If this field is used downstream, Validate(root) should be called along with + // extra validation depending on how it's used. + EndRow uint32 `json:"end_row"` +} + +// Validate performs checks on the fields of this RowProof. Returns an error if +// the proof fails validation. If the proof passes validation, this function +// attempts to verify the proof. It returns nil if the proof is valid. 
+func (rp RowProof) Validate(root []byte) error { + if rp.EndRow < rp.StartRow { + return fmt.Errorf("end row %d cannot be less than start row %d", rp.EndRow, rp.StartRow) + } + if int(rp.EndRow-rp.StartRow+1) != len(rp.RowRoots) { + return fmt.Errorf("the number of rows %d must equal the number of row roots %d", int(rp.EndRow-rp.StartRow+1), len(rp.RowRoots)) + } + if len(rp.Proofs) != len(rp.RowRoots) { + return fmt.Errorf("the number of proofs %d must equal the number of row roots %d", len(rp.Proofs), len(rp.RowRoots)) + } + if !rp.VerifyProof(root) { + return errors.New("row proof failed to verify") + } + + return nil +} + +// VerifyProof verifies that all the row roots in this RowProof exist in a +// Merkle tree with the given root. Returns true if all proofs are valid. +func (rp RowProof) VerifyProof(root []byte) bool { + for i, proof := range rp.Proofs { + err := proof.Verify(root, rp.RowRoots[i]) + if err != nil { + return false + } + } + return true +} + +func RowProofFromProto(p *tmproto.RowProof) RowProof { + if p == nil { + return RowProof{} + } + rowRoots := make([]tmbytes.HexBytes, len(p.RowRoots)) + rowProofs := make([]*merkle.Proof, len(p.Proofs)) + for i := range p.Proofs { + rowRoots[i] = p.RowRoots[i] + rowProofs[i] = &merkle.Proof{ + Total: p.Proofs[i].Total, + Index: p.Proofs[i].Index, + LeafHash: p.Proofs[i].LeafHash, + Aunts: p.Proofs[i].Aunts, + } + } + + return RowProof{ + RowRoots: rowRoots, + Proofs: rowProofs, + StartRow: p.StartRow, + EndRow: p.EndRow, + } +} diff --git a/types/row_proof_test.go b/types/row_proof_test.go new file mode 100644 index 00000000000..62d96a0be7d --- /dev/null +++ b/types/row_proof_test.go @@ -0,0 +1,115 @@ +package types + +import ( + "bytes" + "testing" + + "github.com/cometbft/cometbft/crypto/merkle" + tmbytes "github.com/cometbft/cometbft/libs/bytes" + "github.com/stretchr/testify/assert" +) + +func TestRowProofValidate(t *testing.T) { + type testCase struct { + name string + rp RowProof + root []byte + 
wantErr bool + } + testCases := []testCase{ + { + name: "empty row proof returns error", + rp: RowProof{}, + root: root, + wantErr: true, + }, + { + name: "row proof with mismatched number of rows and row roots returns error", + rp: mismatchedRowRoots(), + root: root, + wantErr: true, + }, + { + name: "row proof with mismatched number of proofs returns error", + rp: mismatchedProofs(), + root: root, + wantErr: true, + }, + { + name: "row proof with mismatched number of rows returns error", + rp: mismatchedRows(), + root: root, + wantErr: true, + }, + { + name: "valid row proof returns no error", + rp: validRowProof(), + root: root, + wantErr: false, + }, + { + name: "valid row proof with incorrect root returns error", + rp: validRowProof(), + root: incorrectRoot, + wantErr: true, + }, + { + name: "start row greater than end row", + rp: RowProof{StartRow: 10, EndRow: 5}, + root: root, + wantErr: true, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got := tc.rp.Validate(tc.root) + if tc.wantErr { + assert.Error(t, got) + return + } + assert.NoError(t, got) + }) + } +} + +// root is the root hash of the Merkle tree used in validRowProof +var root = []byte{0x82, 0x37, 0x91, 0xd2, 0x5d, 0x77, 0x7, 0x67, 0x35, 0x3, 0x90, 0x12, 0x10, 0xc4, 0x43, 0x8a, 0x8b, 0x78, 0x4b, 0xbf, 0x5b, 0x8f, 0xa6, 0x40, 0xa9, 0x51, 0xa7, 0xa9, 0xbd, 0x52, 0xd5, 0xf6} + +var incorrectRoot = bytes.Repeat([]byte{0}, 32) + +// validRowProof returns a row proof for one row. 
This test data copied from +// ceelestia-app's pkg/proof/proof_test.go TestNewShareInclusionProof: "1 +// transaction share" +func validRowProof() RowProof { + return RowProof{ + RowRoots: tmbytes.FromBytes([]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9d, 0xe6, 0x38, 0x91, 0xc1, 0x6, 0xaf, 0x81, 0x75, 0x5a, 0x36, 0xf5, 0xb2, 0x62, 0x1e, 0xfa, 0xb9, 0xb8, 0x73, 0x87, 0xef, 0xe3, 0x6b, 0x33, 0xd8, 0xbf, 0xc9, 0x87, 0x1b, 0x8d, 0xfa, 0x8a}), + Proofs: []*merkle.Proof{ + { + Total: 128, + Index: 0, + LeafHash: []uint8{0x0, 0xcc, 0xfb, 0xff, 0x62, 0x10, 0x71, 0x61, 0x2f, 0xb9, 0x5a, 0xb1, 0xc3, 0x83, 0xff, 0x1d, 0x30, 0x31, 0x86, 0x42, 0xe4, 0x8e, 0x59, 0xe8, 0x8b, 0x92, 0x83, 0x11, 0x67, 0xb, 0xfc, 0x9a}, + Aunts: [][]uint8{{0x5c, 0xc6, 0x3b, 0x1e, 0x91, 0xa4, 0xbf, 0x6a, 0xa7, 0xd2, 0x68, 0x1c, 0x44, 0xc1, 0xda, 0xa2, 0x22, 0xed, 0x33, 0xb8, 0xd0, 0x29, 0x48, 0xfc, 0xab, 0x8f, 0x71, 0x50, 0x9c, 0xbb, 0x15, 0xab}, {0xc6, 0x14, 0x2b, 0x33, 0x5d, 0xaa, 0xfa, 0x20, 0xdf, 0x8a, 0x9b, 0xe9, 0x29, 0x9b, 0x34, 0xcd, 0xeb, 0xe7, 0x35, 0x39, 0x5c, 0x58, 0xb1, 0x13, 0x1f, 0x4, 0xeb, 0xdc, 0x33, 0x99, 0xdf, 0x98}, {0xdb, 0x99, 0xe2, 0xdf, 0x86, 0x84, 0x24, 0x90, 0x44, 0x8e, 0x29, 0x26, 0xe1, 0xb2, 0xb0, 0x52, 0x42, 0xf9, 0x73, 0x7, 0x7f, 0xab, 0x1d, 0xa9, 0xad, 0x56, 0x10, 0xf0, 0x58, 0xdf, 0x8, 0xd7}, {0x48, 0xfd, 0xfc, 0x3b, 0x96, 0xa5, 0x19, 0xf5, 0x14, 0xf, 0x37, 0xfd, 0x95, 0xb3, 0x76, 0xfb, 0x7e, 0x5, 0x5b, 0x4d, 0x8b, 0x68, 0x16, 0x81, 0x51, 0x92, 0x44, 0x0, 0xe5, 0xf6, 0x49, 0x16}, {0xfb, 0x45, 0xdc, 0x2, 0x8b, 0xa9, 0x45, 0xfe, 0xa0, 0x7b, 0xeb, 0x62, 0x81, 0x84, 0x95, 0x19, 0x29, 0xf5, 0x78, 0x16, 0x15, 0xb8, 0xf2, 0xa3, 0x94, 0x96, 0xb1, 0x4c, 0x4c, 0xef, 0xf4, 0xd3}, 
{0x2c, 0x26, 0x82, 0xb1, 0x8c, 0x9f, 0xff, 0x50, 0xde, 0x67, 0x4e, 0x82, 0x3, 0x3, 0xd6, 0xdc, 0x7c, 0x7a, 0xea, 0x1a, 0xe3, 0x9, 0xf0, 0x1a, 0xc6, 0xcd, 0x19, 0x34, 0xc7, 0x54, 0x6, 0x14}, {0xe9, 0x41, 0x8b, 0x1, 0x9a, 0xd6, 0xd3, 0x13, 0x21, 0x14, 0x89, 0x98, 0xbb, 0x81, 0xda, 0xf7, 0xa, 0x36, 0x14, 0xcf, 0xc5, 0xac, 0xbf, 0xc3, 0x48, 0xb0, 0x88, 0x90, 0x45, 0x29, 0x80, 0x23}}, + }, + }, + StartRow: 0, + EndRow: 0, + } +} + +func mismatchedRowRoots() RowProof { + rp := validRowProof() + rp.RowRoots = []tmbytes.HexBytes{} + return rp +} + +func mismatchedProofs() RowProof { + rp := validRowProof() + rp.Proofs = []*merkle.Proof{} + return rp +} + +func mismatchedRows() RowProof { + rp := validRowProof() + rp.EndRow = 10 + return rp +} diff --git a/types/test_util.go b/types/test_util.go index 2a959ef3af9..b15f2eb1f72 100644 --- a/types/test_util.go +++ b/types/test_util.go @@ -108,15 +108,13 @@ func MakeVoteNoError( // MakeBlock returns a new block with an empty header, except what can be // computed from itself. // It populates the same set of fields validated by ValidateBasic. -func MakeBlock(height int64, txs []Tx, lastCommit *Commit, evidence []Evidence) *Block { +func MakeBlock(height int64, data Data, lastCommit *Commit, evidence []Evidence) *Block { block := &Block{ Header: Header{ Version: cmtversion.Consensus{Block: version.BlockProtocol, App: 0}, Height: height, }, - Data: Data{ - Txs: txs, - }, + Data: data, Evidence: EvidenceData{Evidence: evidence}, LastCommit: lastCommit, } diff --git a/types/tx.go b/types/tx.go index 17c0f1441cb..4c065958a84 100644 --- a/types/tx.go +++ b/types/tx.go @@ -9,7 +9,9 @@ import ( cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v2" "github.com/cometbft/cometbft/crypto/merkle" "github.com/cometbft/cometbft/crypto/tmhash" + "github.com/cometbft/cometbft/internal/consts" cmtbytes "github.com/cometbft/cometbft/libs/bytes" + "google.golang.org/protobuf/proto" ) // TxKeySize is the size of the transaction key index. 
@@ -27,10 +29,23 @@ type ( // Hash computes the TMHASH hash of the wire encoded transaction. func (tx Tx) Hash() cmtbytes.HexBytes { + // TODO: Optimize this by decoding only values we need (lazy decode) + if indexWrapper, isIndexWrapper := UnmarshalIndexWrapper(tx); isIndexWrapper { + return tmhash.Sum(indexWrapper.Tx) + } + if blobTx, isBlobTx := UnmarshalBlobTx(tx); isBlobTx { + return tmhash.Sum(blobTx.Tx) + } return tmhash.Sum(tx) } func (tx Tx) Key() TxKey { + if blobTx, isBlobTx := UnmarshalBlobTx(tx); isBlobTx { + return sha256.Sum256(blobTx.Tx) + } + if indexWrapper, isIndexWrapper := UnmarshalIndexWrapper(tx); isIndexWrapper { + return sha256.Sum256(indexWrapper.Tx) + } return sha256.Sum256(tx) } @@ -39,6 +54,15 @@ func (tx Tx) String() string { return fmt.Sprintf("Tx{%X}", []byte(tx)) } +func TxKeyFromBytes(bytes []byte) (TxKey, error) { + if len(bytes) != TxKeySize { + return TxKey{}, fmt.Errorf("incorrect tx key size. Expected %d bytes, got %d", TxKeySize, len(bytes)) + } + var key TxKey + copy(key[:], bytes) + return key, nil +} + func (txKey TxKey) Hash() []byte { return txKey[:] } @@ -73,6 +97,27 @@ func (txs Txs) IndexByHash(hash []byte) int { return -1 } +// ToSliceOfBytes converts a Txs to slice of byte slices. +// +// NOTE: This method should become obsolete once Txs is switched to [][]byte. +// ref: #2603 https://github.com/tendermint/tendermint/issues/2603 +func (txs Txs) ToSliceOfBytes() [][]byte { + txBzs := make([][]byte, len(txs)) + for i := 0; i < len(txs); i++ { + txBzs[i] = txs[i] + } + return txBzs +} + +// ToTxs converts a raw slice of byte slices into a Txs type. 
+func ToTxs(txs [][]byte) Txs { + txBzs := make(Txs, len(txs)) + for i := 0; i < len(txs); i++ { + txBzs[i] = txs[i] + } + return txBzs +} + func (txs Txs) Proof(i int) TxProof { hl := txs.hashList() root, proofs := merkle.ProofsFromByteSlices(hl) @@ -106,14 +151,6 @@ func (txs Txs) Less(i, j int) bool { return bytes.Compare(txs[i], txs[j]) == -1 } -func ToTxs(txl [][]byte) Txs { - txs := make([]Tx, 0, len(txl)) - for _, tx := range txl { - txs = append(txs, tx) - } - return txs -} - func (txs Txs) Validate(maxSizeBytes int64) error { var size int64 for _, tx := range txs { @@ -125,15 +162,6 @@ func (txs Txs) Validate(maxSizeBytes int64) error { return nil } -// ToSliceOfBytes converts a Txs to slice of byte slices. -func (txs Txs) ToSliceOfBytes() [][]byte { - txBzs := make([][]byte, len(txs)) - for i := 0; i < len(txs); i++ { - txBzs[i] = txs[i] - } - return txBzs -} - // TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. type TxProof struct { RootHash cmtbytes.HexBytes `json:"root_hash"` @@ -199,3 +227,73 @@ func ComputeProtoSizeForTxs(txs []Tx) int64 { pdData := data.ToProto() return int64(pdData.Size()) } + +// UnmarshalIndexWrapper attempts to unmarshal the provided transaction into an +// IndexWrapper transaction. It returns true if the provided transaction is an +// IndexWrapper transaction. An IndexWrapper transaction is a transaction that contains +// a MsgPayForBlob that has been wrapped with a share index. +// +// NOTE: protobuf sometimes does not throw an error if the transaction passed is +// not a tmproto.IndexWrapper, since the protobuf definition for MsgPayForBlob is +// kept in the app, we cannot perform further checks without creating an import +// cycle. 
+func UnmarshalIndexWrapper(tx Tx) (indexWrapper cmtproto.IndexWrapper, isIndexWrapper bool) {
+	// attempt to unmarshal into an IndexWrapper transaction
+	err := proto.Unmarshal(tx, &indexWrapper)
+	if err != nil {
+		return indexWrapper, false
+	}
+	if indexWrapper.TypeId != consts.ProtoIndexWrapperTypeID {
+		return indexWrapper, false
+	}
+	return indexWrapper, true
+}
+
+// MarshalIndexWrapper creates a wrapped Tx that includes the original transaction
+// and the share index of the start of its blob.
+//
+// NOTE: must be unwrapped to be a viable sdk.Tx
+func MarshalIndexWrapper(tx Tx, shareIndexes ...uint32) (Tx, error) {
+	wTx := cmtproto.IndexWrapper{
+		Tx:           tx,
+		ShareIndexes: shareIndexes,
+		TypeId:       consts.ProtoIndexWrapperTypeID,
+	}
+	return proto.Marshal(&wTx)
+}
+
+// UnmarshalBlobTx attempts to unmarshal a transaction into a blob transaction.
+// If unmarshalling returns an error, false is returned.
+func UnmarshalBlobTx(tx Tx) (bTx cmtproto.BlobTx, isBlob bool) {
+	err := bTx.Unmarshal(tx)
+	if err != nil {
+		return cmtproto.BlobTx{}, false
+	}
+	// perform some quick basic checks to prevent false positives
+	if bTx.TypeId != consts.ProtoBlobTxTypeID {
+		return bTx, false
+	}
+	if len(bTx.Blobs) == 0 {
+		return bTx, false
+	}
+	for _, b := range bTx.Blobs {
+		if len(b.NamespaceId) != consts.NamespaceIDSize {
+			return bTx, false
+		}
+	}
+	return bTx, true
+}
+
+// MarshalBlobTx creates a BlobTx using a normal transaction and some number of
+// blobs.
+//
+// NOTE: Any checks on the blobs or the transaction must be performed in the
+// application.
+func MarshalBlobTx(tx []byte, blobs ...*cmtproto.Blob) (Tx, error) {
+	bTx := cmtproto.BlobTx{
+		Tx:     tx,
+		Blobs:  blobs,
+		TypeId: consts.ProtoBlobTxTypeID,
+	}
+	return bTx.Marshal()
+}